content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
data <- 90*1:100 - (1:100)^2 + 1000
?which
length(data)
data[1]
data[7]
data[length(data)]
data[100]
summary(data)
#What isthe first, the seventeenth and the last entry of the vector data?
1089, 1581, 0
#What is the maximum of the vector data? At which index is the maximum attained?
3025, 45
summary(data)
which.max(data)
#Plot the vector data with plot(data)and visually confirm your last result.
plot(data)
#At which indices are the entries of data between 2000 and 2500?
between <- (data>=2000 & data <= 2500)
which(between)
#13 14 15 16 17 18 19 20 21 22 68 69 70 71 72 73 74 75 76 77
#Exercise 7: Define a new matrix m by
m <- matrix( 11:35, nrow=5, byrow=TRUE )
#What is the entry in the third row and forth column?
# = 24
#Briefly describe in words what m[2:4,3:5]returns.
m[2:4,3:5]
m[3:4,1:2]
m[2:3,2:3]
m[1:5,1:2]
# the statement above returns the elements of the matrix m for the sequence 2-4 of the row and the elements for the sequence 3-5 for the columns
Calculate the matrix product of m with itself
| /ASG2.R | no_license | wkepner/700A | R | false | false | 1,047 | r | data <- 90*1:100 - (1:100)^2 + 1000
?which
length(data)
data[1]
data[7]
data[length(data)]
data[100]
summary(data)
#What isthe first, the seventeenth and the last entry of the vector data?
1089, 1581, 0
#What is the maximum of the vector data? At which index is the maximum attained?
3025, 45
summary(data)
which.max(data)
#Plot the vector data with plot(data)and visually confirm your last result.
plot(data)
#At which indices are the entries of data between 2000 and 2500?
between <- (data>=2000 & data <= 2500)
which(between)
#13 14 15 16 17 18 19 20 21 22 68 69 70 71 72 73 74 75 76 77
#Exercise 7: Define a new matrix m by
m <- matrix( 11:35, nrow=5, byrow=TRUE )
#What is the entry in the third row and forth column?
# = 24
#Briefly describe in words what m[2:4,3:5]returns.
m[2:4,3:5]
m[3:4,1:2]
m[2:3,2:3]
m[1:5,1:2]
# the statement above returns the elements of the matrix m for the sequence 2-4 of the row and the elements for the sequence 3-5 for the columns
Calculate the matrix product of m with itself
|
library(ggpubr)
pp1<-ggarrange(pic_1, pic_2, nrow=2, ncol=1,
#labels=c("a", "b", "", ""),
common.legend=T,
align="hv", legend="bottom")
pp2<-ggarrange(pic_3, pic_4, nrow=2, ncol=1,
#labels=c("a", "b", "", ""),
common.legend=T,
align="hv", legend="bottom")
pp<-ggarrange(pp1, pp2, nrow=1, ncol=2,
labels=c("a", "b", "", ""),
common.legend=F,
align="hv", legend="top")
pp | /hgt/lfr_span/combine_pics.R | no_license | Chen-Lab123/MECOS | R | false | false | 499 | r | library(ggpubr)
pp1<-ggarrange(pic_1, pic_2, nrow=2, ncol=1,
#labels=c("a", "b", "", ""),
common.legend=T,
align="hv", legend="bottom")
pp2<-ggarrange(pic_3, pic_4, nrow=2, ncol=1,
#labels=c("a", "b", "", ""),
common.legend=T,
align="hv", legend="bottom")
pp<-ggarrange(pp1, pp2, nrow=1, ncol=2,
labels=c("a", "b", "", ""),
common.legend=F,
align="hv", legend="top")
pp |
## RNA-seq FPKM (for fantom_human)
library(GenomicAlignments)
library(GenomicFeatures)
library(rtracklayer)
library(plyr)
### get transcript database
txdb_can <- makeTxDbFromGFF("/Home/ii/katchyz/DATA/genomes/GTF/Homo_sapiens.GRCh38.79.chr.gtf", format = "gtf")
exons <- exonsBy(txdb_can, by="tx", use.names=TRUE)
exons <- exons[order(names(exons))]
txLengths <- transcriptLengths(txdb_can, with.cds_len=TRUE, with.utr5_len=TRUE, with.utr3_len=TRUE)
rownames(txLengths) <- txLengths$tx_name
txLengths <- txLengths[order(rownames(txLengths)),]
## BAM files
libs_path <- "/export/valenfs/projects/fantom6/fancode/data/BAM/RNA-seq"
libs <- list.files(path = libs_path)
libs <- libs[-c(45,46)]
FPKM <- data.table(tx = sort(names(exons)))
for (i in 1:length(libs)) {
## load file
RNAseq <- readGAlignments(libs[i])
read_number_mRNA <- length(RNAseq)
exons_RNA <- countOverlaps(exons, RNAseq)
exons_RNA <- exons_RNA[order(names(exons_RNA))]
exons_len <- txLengths[rownames(txLengths) %in% names(exons_RNA),]
exons_len <- exons_len[order(rownames(exons_len)),]$tx_len
RNA_FPKM <- (exons_RNA / exons_len) * (10^9 / read_number_mRNA)
n <- unlist(strsplit(libs[i], split = "[.]"))
lib <- paste0(n[1], "_", n[length(n)-1])
FPKM[[lib]] <- RNA_FPKM
}
save(FPKM, file = "~/RNA_FPKM.Rsave")
save(FPKM, file = "/export/valenfs/projects/fantom6/fancode/data/BAM/RNA_FPKM.Rsave")
| /scripts/RNA_FPKM.R | no_license | katchyz/micropeptides | R | false | false | 1,394 | r | ## RNA-seq FPKM (for fantom_human)
library(GenomicAlignments)
library(GenomicFeatures)
library(rtracklayer)
library(plyr)
### get transcript database
txdb_can <- makeTxDbFromGFF("/Home/ii/katchyz/DATA/genomes/GTF/Homo_sapiens.GRCh38.79.chr.gtf", format = "gtf")
exons <- exonsBy(txdb_can, by="tx", use.names=TRUE)
exons <- exons[order(names(exons))]
txLengths <- transcriptLengths(txdb_can, with.cds_len=TRUE, with.utr5_len=TRUE, with.utr3_len=TRUE)
rownames(txLengths) <- txLengths$tx_name
txLengths <- txLengths[order(rownames(txLengths)),]
## BAM files
libs_path <- "/export/valenfs/projects/fantom6/fancode/data/BAM/RNA-seq"
libs <- list.files(path = libs_path)
libs <- libs[-c(45,46)]
FPKM <- data.table(tx = sort(names(exons)))
for (i in 1:length(libs)) {
## load file
RNAseq <- readGAlignments(libs[i])
read_number_mRNA <- length(RNAseq)
exons_RNA <- countOverlaps(exons, RNAseq)
exons_RNA <- exons_RNA[order(names(exons_RNA))]
exons_len <- txLengths[rownames(txLengths) %in% names(exons_RNA),]
exons_len <- exons_len[order(rownames(exons_len)),]$tx_len
RNA_FPKM <- (exons_RNA / exons_len) * (10^9 / read_number_mRNA)
n <- unlist(strsplit(libs[i], split = "[.]"))
lib <- paste0(n[1], "_", n[length(n)-1])
FPKM[[lib]] <- RNA_FPKM
}
save(FPKM, file = "~/RNA_FPKM.Rsave")
save(FPKM, file = "/export/valenfs/projects/fantom6/fancode/data/BAM/RNA_FPKM.Rsave")
|
\alias{GtkVolumeButton}
\alias{gtkVolumeButton}
\name{GtkVolumeButton}
\title{GtkVolumeButton}
\description{A button which pops up a volume control}
\section{Methods and Functions}{
\code{\link{gtkVolumeButtonNew}(show = TRUE)}\cr
\code{gtkVolumeButton(show = TRUE)}
}
\section{Hierarchy}{\preformatted{GObject
+----GInitiallyUnowned
+----GtkObject
+----GtkWidget
+----GtkContainer
+----GtkBin
+----GtkButton
+----GtkScaleButton
+----GtkVolumeButton}}
\section{Interfaces}{GtkVolumeButton implements
AtkImplementorIface, \code{\link{GtkBuildable}}, \code{\link{GtkActivatable}} and \code{\link{GtkOrientable}}.}
\section{Detailed Description}{\code{\link{GtkVolumeButton}} is a subclass of \code{\link{GtkScaleButton}} that has
been tailored for use as a volume control widget with suitable
icons, tooltips and accessible labels.}
\section{Structures}{\describe{\item{\verb{GtkVolumeButton}}{
\emph{undocumented
}
}}}
\section{Convenient Construction}{\code{gtkVolumeButton} is the equivalent of \code{\link{gtkVolumeButtonNew}}.}
\references{\url{https://developer.gnome.org/gtk2/stable/GtkVolumeButton.html}}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
| /man/GtkVolumeButton.Rd | no_license | cran/RGtk2 | R | false | false | 1,379 | rd | \alias{GtkVolumeButton}
\alias{gtkVolumeButton}
\name{GtkVolumeButton}
\title{GtkVolumeButton}
\description{A button which pops up a volume control}
\section{Methods and Functions}{
\code{\link{gtkVolumeButtonNew}(show = TRUE)}\cr
\code{gtkVolumeButton(show = TRUE)}
}
\section{Hierarchy}{\preformatted{GObject
+----GInitiallyUnowned
+----GtkObject
+----GtkWidget
+----GtkContainer
+----GtkBin
+----GtkButton
+----GtkScaleButton
+----GtkVolumeButton}}
\section{Interfaces}{GtkVolumeButton implements
AtkImplementorIface, \code{\link{GtkBuildable}}, \code{\link{GtkActivatable}} and \code{\link{GtkOrientable}}.}
\section{Detailed Description}{\code{\link{GtkVolumeButton}} is a subclass of \code{\link{GtkScaleButton}} that has
been tailored for use as a volume control widget with suitable
icons, tooltips and accessible labels.}
\section{Structures}{\describe{\item{\verb{GtkVolumeButton}}{
\emph{undocumented
}
}}}
\section{Convenient Construction}{\code{gtkVolumeButton} is the equivalent of \code{\link{gtkVolumeButtonNew}}.}
\references{\url{https://developer.gnome.org/gtk2/stable/GtkVolumeButton.html}}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
#' Add number of events to a regression table
#'
#' Adds a column of the number of events to tables created with
#' [tbl_regression] or [tbl_uvregression]. Supported
#' model types are among GLMs with binomial distribution family (e.g.
#' [stats::glm], `lme4::glmer`, and
#' `geepack::geeglm`) and Cox
#' Proportion Hazards regression models ([survival::coxph]).
#'
#' @param x `tbl_regression` or `tbl_uvregression` object
#' @param ... Additional arguments passed to or from other methods.
#' @export
#' @author Daniel D. Sjoberg
#' @seealso [add_nevent.tbl_regression], [add_nevent.tbl_uvregression],
#' [add_nevent.tbl_survfit]
add_nevent <- function(x, ...) UseMethod("add_nevent")
#' Add event N to regression table
#'
#' @inheritParams add_n_regression
#' @name add_nevent_regression
#'
#' @examples
#' # Example 1 ----------------------------------
#' add_nevent.tbl_regression_ex1 <-
#' trial %>%
#' select(response, trt, grade) %>%
#' tbl_uvregression(
#' y = response,
#' method = glm,
#' method.args = list(family = binomial),
#' ) %>%
#' add_nevent()
#
#' # Example 2 ----------------------------------
#' add_nevent.tbl_regression_ex2 <-
#' glm(response ~ age + grade, trial, family = binomial) %>%
#' tbl_regression(exponentiate = TRUE) %>%
#' add_nevent(location = "level")
#' @section Example Output:
#' \if{html}{Example 1}
#'
#' \if{html}{\figure{add_nevent.tbl_regression_ex1.png}{options: width=64\%}}
#'
#' \if{html}{Example 2}
#'
#' \if{html}{\figure{add_nevent.tbl_regression_ex2.png}{options: width=64\%}}
NULL
#' @rdname add_nevent_regression
#' @export
add_nevent.tbl_regression <- function(x, location = NULL, ...) {
location <- match.arg(location, choices = c("label", "level"), several.ok = TRUE)
if ("level" %in% location && !"n_event" %in% x$table_styling$header$column)
abort("Reporting event N on level rows is not available for this model type.")
if ("label" %in% location && !"N_event" %in% x$table_styling$header$column)
abort("Reporting event N on label rows is not available for this model type.")
x$table_body$stat_nevent <- NA_integer_
if ("N_event" %in% names(x$table_body))
x$table_body$stat_nevent <- ifelse(x$table_body$row_type == "label",
x$table_body$N_event %>% as.integer(),
x$table_body$stat_nevent)
if ("n_event" %in% names(x$table_body))
x$table_body$stat_nevent <- ifelse(x$table_body$row_type == "level",
x$table_body$n_event %>% as.integer(),
x$table_body$stat_nevent)
x %>%
modify_table_body(
mutate,
stat_nevent =
case_when(
!"level" %in% .env$location & .data$row_type %in% "level" ~ NA_integer_,
!"label" %in% .env$location & .data$row_type %in% "label" &
.data$var_type %in% c("categorical", "dichotomous") ~ NA_integer_,
TRUE ~ .data$stat_nevent
)
) %>%
modify_table_body(
dplyr::relocate,
.data$stat_nevent,
.before = .data$estimate
) %>%
modify_header(stat_nevent ~ "**Event N**")
}
#' @export
#' @rdname add_nevent_regression
add_nevent.tbl_uvregression <- add_nevent.tbl_regression
#' Add column with number of observed events
#'
#' \lifecycle{experimental}
#' For each `survfit()` object summarized with `tbl_survfit()` this function
#' will add the total number of events observed in a new column.
#'
#' @param x object of class 'tbl_survfit'
#' @param ... Not used
#' @export
#' @family tbl_survfit tools
#' @examples
#' library(survival)
#' fit1 <- survfit(Surv(ttdeath, death) ~ 1, trial)
#' fit2 <- survfit(Surv(ttdeath, death) ~ trt, trial)
#'
#' # Example 1 ----------------------------------
#' add_nevent.tbl_survfit_ex1 <-
#' list(fit1, fit2) %>%
#' tbl_survfit(times = c(12, 24)) %>%
#' add_n() %>%
#' add_nevent()
#' @section Example Output:
#' \if{html}{Example 1}
#'
#' \if{html}{\figure{add_nevent.tbl_survfit_ex1.png}{options: width=64\%}}
add_nevent.tbl_survfit <- function(x, ...) {
# checking survfit is a standard (not multi-state)
if (!purrr::every(x$meta_data$survfit, ~identical(class(.x), "survfit"))) {
paste("Each of the `survfit()` objects must have class 'survfit' only.",
"Multi-state models are not supported by this function.") %>%
stringr::str_wrap() %>%
stop(call. = FALSE)
}
# calculating event N --------------------------------------------------------
x$table_body <-
purrr::map2_dfr(
x$meta_data$survfit, x$meta_data$variable,
~ tibble(
nevent = broom::tidy(.x) %>% pull(.data$n.event) %>% sum(),
variable = .y,
row_type = "label"
)
) %>%
{left_join(
x$table_body, .,
by = c("variable", "row_type")
)} %>%
select(any_of(c("variable", "row_type", "label", "N", "nevent")), everything())
# adding N to table_styling and assigning header label -----------------------
x <-
modify_table_styling(
x,
columns = "nevent",
label = "**Event N**",
fmt_fun = style_number,
hide = FALSE
)
# adding indicator to output that add_n was run on this data
x$call_list <- c(x$call_list, list(add_nevent = match.call()))
x
}
| /R/add_nevent.R | permissive | mtysar/gtsummary | R | false | false | 5,312 | r | #' Add number of events to a regression table
#'
#' Adds a column of the number of events to tables created with
#' [tbl_regression] or [tbl_uvregression]. Supported
#' model types are among GLMs with binomial distribution family (e.g.
#' [stats::glm], `lme4::glmer`, and
#' `geepack::geeglm`) and Cox
#' Proportion Hazards regression models ([survival::coxph]).
#'
#' @param x `tbl_regression` or `tbl_uvregression` object
#' @param ... Additional arguments passed to or from other methods.
#' @export
#' @author Daniel D. Sjoberg
#' @seealso [add_nevent.tbl_regression], [add_nevent.tbl_uvregression],
#' [add_nevent.tbl_survfit]
add_nevent <- function(x, ...) UseMethod("add_nevent")
#' Add event N to regression table
#'
#' @inheritParams add_n_regression
#' @name add_nevent_regression
#'
#' @examples
#' # Example 1 ----------------------------------
#' add_nevent.tbl_regression_ex1 <-
#' trial %>%
#' select(response, trt, grade) %>%
#' tbl_uvregression(
#' y = response,
#' method = glm,
#' method.args = list(family = binomial),
#' ) %>%
#' add_nevent()
#
#' # Example 2 ----------------------------------
#' add_nevent.tbl_regression_ex2 <-
#' glm(response ~ age + grade, trial, family = binomial) %>%
#' tbl_regression(exponentiate = TRUE) %>%
#' add_nevent(location = "level")
#' @section Example Output:
#' \if{html}{Example 1}
#'
#' \if{html}{\figure{add_nevent.tbl_regression_ex1.png}{options: width=64\%}}
#'
#' \if{html}{Example 2}
#'
#' \if{html}{\figure{add_nevent.tbl_regression_ex2.png}{options: width=64\%}}
NULL
#' @rdname add_nevent_regression
#' @export
add_nevent.tbl_regression <- function(x, location = NULL, ...) {
location <- match.arg(location, choices = c("label", "level"), several.ok = TRUE)
if ("level" %in% location && !"n_event" %in% x$table_styling$header$column)
abort("Reporting event N on level rows is not available for this model type.")
if ("label" %in% location && !"N_event" %in% x$table_styling$header$column)
abort("Reporting event N on label rows is not available for this model type.")
x$table_body$stat_nevent <- NA_integer_
if ("N_event" %in% names(x$table_body))
x$table_body$stat_nevent <- ifelse(x$table_body$row_type == "label",
x$table_body$N_event %>% as.integer(),
x$table_body$stat_nevent)
if ("n_event" %in% names(x$table_body))
x$table_body$stat_nevent <- ifelse(x$table_body$row_type == "level",
x$table_body$n_event %>% as.integer(),
x$table_body$stat_nevent)
x %>%
modify_table_body(
mutate,
stat_nevent =
case_when(
!"level" %in% .env$location & .data$row_type %in% "level" ~ NA_integer_,
!"label" %in% .env$location & .data$row_type %in% "label" &
.data$var_type %in% c("categorical", "dichotomous") ~ NA_integer_,
TRUE ~ .data$stat_nevent
)
) %>%
modify_table_body(
dplyr::relocate,
.data$stat_nevent,
.before = .data$estimate
) %>%
modify_header(stat_nevent ~ "**Event N**")
}
#' @export
#' @rdname add_nevent_regression
add_nevent.tbl_uvregression <- add_nevent.tbl_regression
#' Add column with number of observed events
#'
#' \lifecycle{experimental}
#' For each `survfit()` object summarized with `tbl_survfit()` this function
#' will add the total number of events observed in a new column.
#'
#' @param x object of class 'tbl_survfit'
#' @param ... Not used
#' @export
#' @family tbl_survfit tools
#' @examples
#' library(survival)
#' fit1 <- survfit(Surv(ttdeath, death) ~ 1, trial)
#' fit2 <- survfit(Surv(ttdeath, death) ~ trt, trial)
#'
#' # Example 1 ----------------------------------
#' add_nevent.tbl_survfit_ex1 <-
#' list(fit1, fit2) %>%
#' tbl_survfit(times = c(12, 24)) %>%
#' add_n() %>%
#' add_nevent()
#' @section Example Output:
#' \if{html}{Example 1}
#'
#' \if{html}{\figure{add_nevent.tbl_survfit_ex1.png}{options: width=64\%}}
add_nevent.tbl_survfit <- function(x, ...) {
# checking survfit is a standard (not multi-state)
if (!purrr::every(x$meta_data$survfit, ~identical(class(.x), "survfit"))) {
paste("Each of the `survfit()` objects must have class 'survfit' only.",
"Multi-state models are not supported by this function.") %>%
stringr::str_wrap() %>%
stop(call. = FALSE)
}
# calculating event N --------------------------------------------------------
x$table_body <-
purrr::map2_dfr(
x$meta_data$survfit, x$meta_data$variable,
~ tibble(
nevent = broom::tidy(.x) %>% pull(.data$n.event) %>% sum(),
variable = .y,
row_type = "label"
)
) %>%
{left_join(
x$table_body, .,
by = c("variable", "row_type")
)} %>%
select(any_of(c("variable", "row_type", "label", "N", "nevent")), everything())
# adding N to table_styling and assigning header label -----------------------
x <-
modify_table_styling(
x,
columns = "nevent",
label = "**Event N**",
fmt_fun = style_number,
hide = FALSE
)
# adding indicator to output that add_n was run on this data
x$call_list <- c(x$call_list, list(add_nevent = match.call()))
x
}
|
#### Sample RQTL2 Analysis: Arabidopsis recombinant inbred lines (RIL) ####
################################################################################
# qtl2 mapping example.
# Moore et al. (2013) Genetics 195:1077-1086
# Anji Trujillo
# etrujillo2@wisc.edu
# July 12, 2017
################################################################################
##############################
# Load and install packages. #
##############################
install.packages("qtl2", repos="http://rqtl.org/qtl2cran/bin/windows/contrib/3.4/") #install R/qtl2 via mini-CRAN at rqtl.org
install_github("rqtl/qtl2")
install.packages("qtl2", repos="http://rqtl.org/qtl2cran")
options(stringsAsFactors = F)
library(devtools)
library(qtl2) # Loads qtl2geno, qtl2scan & qtl2plot.
library(qtl2convert)
library(RSQLite)
library(dplyr)
library(qtl)
#####################
# Load in the data. #
#####################
# Data are in qtl2geno/extdata/grav2.zip
grav2 <- read_cross2( system.file("extdata", "grav2.zip", package="qtl2geno") )
file <- paste0("https://raw.githubusercontent.com/rqtl/",
"qtl2data/master/DOex/DOex.zip")
DOex <- read_cross2(file)
####################################
# Calculate genotype probabilities #
####################################
# First task in QTL analysis is to calculate conditional genotype probabilities
# given observed marker data, at each putative QTL position.
# Use calc_genoprob() in glt2geno package.
# Result is returned as a list of 3-D arrays (one per chromosome)
iron <- read_cross2( system.file("extdata", "iron.zip", package="qtl2geno") )
str(iron) #with chromosome and marker
map <- insert_pseudomarkers(iron$gmap, step=1) # insert psuedomarkers between markers
pr <- calc_genoprob(iron, map, err=0.002) #calculate QTL genotype probabilites at each marker and psuedomarker
pr <- calc_genoprob(DOex, error_prob=0.002) # calculate genotype probabilities for DO
apr <- genoprob_to_alleleprob(pr) # convert to allele probabilities
############################
# Calculate kinship matrix #
############################
# By default genotype probabilites are converted to allel probabilities
# kinship matrix calculates the portion of shared allels
# To eliminate the effect of varying marker density accross the genome only use probabilites
# along the grid of psudedomarker (defined by the step argument in insert_psuedomarkers())
kinship <- calc_kinship(pr, use_allele_probs = FALSE, omit_x = TRUE)
grid <- calc_grid(iron$gmap, step=1) # determine the grid of pseudomarkers
pr_grid <- probs_to_grid(pr, grid) # determine probabilities for positions that are not on the grid
kinship_grid <- calc_kinship(pr_grid)
kinship_loco <- calc_kinship(pr, "loco") # for linearl mixed model genome scan
kinship_loco[[1]]
k <- calc_kinship(apr, "loco") # calculate kinship for for DO
##################################
# Covariates for the X chromosome#
##################################
Xcovar <- get_x_covar(iron)
sex <- (DOex$covar$Sex == "male")*1
names(sex) <- rownames(DOex$covar) # include individual IDs as names
#########
# Scan1 #
#########
out <- scan1(pr, iron$pheno, Xcovar=Xcovar)
out <- scan1(apr, DOex$pheno, k, sex)
#################
# Plot the data #
#################
par(mar=c(4.1, 4.1, 0.6, 0.6))
plot(out, DOex$gmap)
DOex$gmap
plot(sug) #
sug <- calc.genoprob(sug, step = 1) # insert the QTL genotype probabilites along with the step (density of mapping units)
out.em <- scanone(sug) # performs a single-QTL genome scan
summary(out.em, threshold = 3) # return chromosomes with LOD scores greater than 3
plot(out.em) # plots LOD curves
out.hk <- scanone(sug, method = "hk") #genome scan via Haley Knott regression
plot(out.em, out.hk, col = c("blue", "red")) # plot out.hk
plot(out.hk - out.em, ylim = c(-0.3, 0.3), ylab = "LOD(HK) - LOD(EM)") # plot difference between two genome scans (hk - single)
sug <- sim.geno(sug, step = 1, n.draws = 64) # perform a genome scan by multiple imputations using sim.geno function, ex. 64 imputations
out.imp <- scanone(sug, method = "imp") #
plot(out.em, out.hk, out.imp, col = c("blue", "red", "green")) # plot the three curves
plot(out.em, out.hk, out.imp, col = c("blue", "red", "green"), chr = c(7,15)) # plot the three curves for chromosomes 7 and 15
plot(out.imp - out.em , out.hk - out.em, col = c("blue", "red", "green"), ylim = c(-1,1)) # plot difference between genome scans
operm <- scanone(sug, method = "hk", n.perm = 1000) #
plot(operm) #1000 genome wide scans with a maximum LOD Scores
summary(operm, perms = operm, alpha = 0.2)
summary(operm)
| /Sample_RQTL2_Analysis_ArabidopsisRIL.R | no_license | anjitrue/DiversityOutcross | R | false | false | 4,720 | r | #### Sample RQTL2 Analysis: Arabidopsis recombinant inbred lines (RIL) ####
################################################################################
# qtl2 mapping example.
# Moore et al. (2013) Genetics 195:1077-1086
# Anji Trujillo
# etrujillo2@wisc.edu
# July 12, 2017
################################################################################
##############################
# Load and install packages. #
##############################
install.packages("qtl2", repos="http://rqtl.org/qtl2cran/bin/windows/contrib/3.4/") #install R/qtl2 via mini-CRAN at rqtl.org
install_github("rqtl/qtl2")
install.packages("qtl2", repos="http://rqtl.org/qtl2cran")
options(stringsAsFactors = F)
library(devtools)
library(qtl2) # Loads qtl2geno, qtl2scan & qtl2plot.
library(qtl2convert)
library(RSQLite)
library(dplyr)
library(qtl)
#####################
# Load in the data. #
#####################
# Data are in qtl2geno/extdata/grav2.zip
grav2 <- read_cross2( system.file("extdata", "grav2.zip", package="qtl2geno") )
file <- paste0("https://raw.githubusercontent.com/rqtl/",
"qtl2data/master/DOex/DOex.zip")
DOex <- read_cross2(file)
####################################
# Calculate genotype probabilities #
####################################
# First task in QTL analysis is to calculate conditional genotype probabilities
# given observed marker data, at each putative QTL position.
# Use calc_genoprob() in glt2geno package.
# Result is returned as a list of 3-D arrays (one per chromosome)
iron <- read_cross2( system.file("extdata", "iron.zip", package="qtl2geno") )
str(iron) #with chromosome and marker
map <- insert_pseudomarkers(iron$gmap, step=1) # insert psuedomarkers between markers
pr <- calc_genoprob(iron, map, err=0.002) #calculate QTL genotype probabilites at each marker and psuedomarker
pr <- calc_genoprob(DOex, error_prob=0.002) # calculate genotype probabilities for DO
apr <- genoprob_to_alleleprob(pr) # convert to allele probabilities
############################
# Calculate kinship matrix #
############################
# By default genotype probabilites are converted to allel probabilities
# kinship matrix calculates the portion of shared allels
# To eliminate the effect of varying marker density accross the genome only use probabilites
# along the grid of psudedomarker (defined by the step argument in insert_psuedomarkers())
kinship <- calc_kinship(pr, use_allele_probs = FALSE, omit_x = TRUE)
grid <- calc_grid(iron$gmap, step=1) # determine the grid of pseudomarkers
pr_grid <- probs_to_grid(pr, grid) # determine probabilities for positions that are not on the grid
kinship_grid <- calc_kinship(pr_grid)
kinship_loco <- calc_kinship(pr, "loco") # for linearl mixed model genome scan
kinship_loco[[1]]
k <- calc_kinship(apr, "loco") # calculate kinship for for DO
##################################
# Covariates for the X chromosome#
##################################
Xcovar <- get_x_covar(iron)
sex <- (DOex$covar$Sex == "male")*1
names(sex) <- rownames(DOex$covar) # include individual IDs as names
#########
# Scan1 #
#########
out <- scan1(pr, iron$pheno, Xcovar=Xcovar)
out <- scan1(apr, DOex$pheno, k, sex)
#################
# Plot the data #
#################
par(mar=c(4.1, 4.1, 0.6, 0.6))
plot(out, DOex$gmap)
DOex$gmap
plot(sug) #
sug <- calc.genoprob(sug, step = 1) # insert the QTL genotype probabilites along with the step (density of mapping units)
out.em <- scanone(sug) # performs a single-QTL genome scan
summary(out.em, threshold = 3) # return chromosomes with LOD scores greater than 3
plot(out.em) # plots LOD curves
out.hk <- scanone(sug, method = "hk") #genome scan via Haley Knott regression
plot(out.em, out.hk, col = c("blue", "red")) # plot out.hk
plot(out.hk - out.em, ylim = c(-0.3, 0.3), ylab = "LOD(HK) - LOD(EM)") # plot difference between two genome scans (hk - single)
sug <- sim.geno(sug, step = 1, n.draws = 64) # perform a genome scan by multiple imputations using sim.geno function, ex. 64 imputations
out.imp <- scanone(sug, method = "imp") #
plot(out.em, out.hk, out.imp, col = c("blue", "red", "green")) # plot the three curves
plot(out.em, out.hk, out.imp, col = c("blue", "red", "green"), chr = c(7,15)) # plot the three curves for chromosomes 7 and 15
plot(out.imp - out.em , out.hk - out.em, col = c("blue", "red", "green"), ylim = c(-1,1)) # plot difference between genome scans
operm <- scanone(sug, method = "hk", n.perm = 1000) #
plot(operm) #1000 genome wide scans with a maximum LOD Scores
summary(operm, perms = operm, alpha = 0.2)
summary(operm)
|
\name{sim.nmat}
\alias{sim.nmat}
\docType{data}
\title{Matrix of neighbours}
\description{
Matrix containing neighbors of each region and number of neighbours of each region.
}
\usage{data(sim.nmat)}
\format{
A data frame with 100 observations. Matrix of neighbours, number of neighbours in last column, number of region in first column.
%\describe{
% \item{\code{V1}}{a numeric vector}
% \item{\code{V2}}{a numeric vector}
% \item{\code{V3}}{a numeric vector}
% \item{\code{V4}}{a numeric vector}
% \item{\code{V5}}{a numeric vector}
% \item{\code{V6}}{a numeric vector}
%}
}
%\details{
% ~~ If necessary, more details than the __description__ above ~~
%}
%\source{
% ~~ reference to a publication or URL from which the data were obtained ~~
%}
%\references{
% ~~ possibly secondary sources and usages ~~
%}
\examples{
data(sim.nmat)
## maybe str(sim.nmat) ; plot(sim.nmat) ...
}
\keyword{datasets}
| /man/sim.nmat.Rd | no_license | cran/spatcounts | R | false | false | 927 | rd | \name{sim.nmat}
\alias{sim.nmat}
\docType{data}
\title{Matrix of neighbours}
\description{
Matrix containing neighbors of each region and number of neighbours of each region.
}
\usage{data(sim.nmat)}
\format{
A data frame with 100 observations. Matrix of neighbours, number of neighbours in last column, number of region in first column.
%\describe{
% \item{\code{V1}}{a numeric vector}
% \item{\code{V2}}{a numeric vector}
% \item{\code{V3}}{a numeric vector}
% \item{\code{V4}}{a numeric vector}
% \item{\code{V5}}{a numeric vector}
% \item{\code{V6}}{a numeric vector}
%}
}
%\details{
% ~~ If necessary, more details than the __description__ above ~~
%}
%\source{
% ~~ reference to a publication or URL from which the data were obtained ~~
%}
%\references{
% ~~ possibly secondary sources and usages ~~
%}
\examples{
data(sim.nmat)
## maybe str(sim.nmat) ; plot(sim.nmat) ...
}
\keyword{datasets}
|
library(vegtable)
### Name: layers2samples
### Title: Add information from slot 'layers' into slot 'samples'.
### Aliases: layers2samples
### layers2samples,vegtable,character,character-method
### layers2samples,vegtable,character,missing-method
### ** Examples
## No example available for this function.
| /data/genthat_extracted_code/vegtable/examples/layers2samples.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 316 | r | library(vegtable)
### Name: layers2samples
### Title: Add information from slot 'layers' into slot 'samples'.
### Aliases: layers2samples
### layers2samples,vegtable,character,character-method
### layers2samples,vegtable,character,missing-method
### ** Examples
## No example available for this function.
|
# The function "makeCacheMatrix" creates a list contaning the following elements:
# 1) Set the matrix
# 2) Get the matrix
# 3) Set the inverse of the matrix
# 4) Get the inverse of the matrix
makeCacheMatrix <- function(x = matrix()) {
B <- NULL
set <- function(y) {
x <<- y
B <<- NULL
}
get <- function() x
setinverse <- function(inverse) B <<- inverse
getinverse <- function() B
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
# The function "cacheSolve" gets the inverse of the matrix from the cache
# and skips the calculation if it has been already done. If not, the inverse
# is calculated via the function "solve" and the result is store in the cache.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
B <- x$getinverse()
if(!is.null(B)) {
message("getting cached data")
return(B)
}
data <- x$get()
B <- solve(data, ...)
x$setinverse(B)
B
}
| /cachematrix.R | no_license | gutidaniel/ProgrammingAssignment2 | R | false | false | 980 | r |
# The function "makeCacheMatrix" creates a list contaning the following elements:
# 1) Set the matrix
# 2) Get the matrix
# 3) Set the inverse of the matrix
# 4) Get the inverse of the matrix
makeCacheMatrix <- function(x = matrix()) {
B <- NULL
set <- function(y) {
x <<- y
B <<- NULL
}
get <- function() x
setinverse <- function(inverse) B <<- inverse
getinverse <- function() B
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
# The function "cacheSolve" gets the inverse of the matrix from the cache
# and skips the calculation if it has been already done. If not, the inverse
# is calculated via the function "solve" and the result is store in the cache.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  # `x` is a cache object as produced by makeCacheMatrix(). When an inverse is
  # already stored, reuse it (announcing the cache hit); otherwise compute it
  # with solve() and store it for later calls. Extra args go to solve().
  cached <- x$getinverse()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Package-private environment holding SparkR runtime state: the backend
# connection (.sparkRCon), monitor connection (.monitorConn), backend port,
# SparkContext (.sparkRjsc), SparkSession (.sparkRsession) and bookkeeping
# entries such as .scStartTime and .libPath.
.sparkREnv <- new.env()
# Utility function that returns TRUE if we have an active connection to the
# backend and FALSE otherwise
connExists <- function(env) {
  # TRUE only when `env` holds a .sparkRCon entry that is still an open
  # connection; any error raised while checking (e.g. a destroyed connection
  # object) is treated as "no active connection" and yields FALSE.
  tryCatch(
    exists(".sparkRCon", envir = env) && isOpen(env[[".sparkRCon"]]),
    error = function(err) FALSE
  )
}
#' Stop the Spark Session and Spark Context
#'
#' Stop the Spark Session and Spark Context.
#'
#' Also terminates the backend this R session is connected to.
#' @rdname sparkR.session.stop
#' @name sparkR.session.stop
#' @export
#' @note sparkR.session.stop since 2.0.0
sparkR.session.stop <- function() {
  # Tear down, in order: JVM SparkContext, cached session handle, injected
  # lib path, the backend JVM (only if we launched it), then our sockets.
  env <- .sparkREnv
  if (exists(".sparkRCon", envir = env)) {
    if (exists(".sparkRjsc", envir = env)) {
      # Stop the JVM-side SparkContext before closing the backend connection.
      sc <- get(".sparkRjsc", envir = env)
      callJMethod(sc, "stop")
      rm(".sparkRjsc", envir = env)
      if (exists(".sparkRsession", envir = env)) {
        rm(".sparkRsession", envir = env)
      }
    }
    # Remove the R package lib path from .libPaths()
    if (exists(".libPath", envir = env)) {
      libPath <- get(".libPath", envir = env)
      .libPaths(.libPaths()[.libPaths() != libPath])
    }
    # Only ask the backend JVM to shut down when this R process launched it
    # (sparkR.sparkContext sets .backendLaunched in that case).
    if (exists(".backendLaunched", envir = env)) {
      callJStatic("SparkRHandler", "stopBackend")
    }
    # Also close the connection and remove it from our env
    conn <- get(".sparkRCon", envir = env)
    close(conn)
    rm(".sparkRCon", envir = env)
    rm(".scStartTime", envir = env)
  }
  if (exists(".monitorConn", envir = env)) {
    conn <- get(".monitorConn", envir = env)
    close(conn)
    rm(".monitorConn", envir = env)
  }
  # Clear all broadcast variables we have
  # as the jobj will not be valid if we restart the JVM
  clearBroadcastVariables()
  # Clear jobj maps
  clearJobjs()
}
#' @rdname sparkR.session.stop
#' @name sparkR.stop
#' @export
#' @note sparkR.stop since 1.4.0
sparkR.stop <- function() {
  # Backward-compatible alias: delegates to sparkR.session.stop(), which
  # stops the session/context and tears down the JVM backend connection.
  sparkR.session.stop()
}
#' (Deprecated) Initialize a new Spark Context
#'
#' This function initializes a new SparkContext.
#'
#' @param master The Spark master URL
#' @param appName Application name to register with cluster manager
#' @param sparkHome Spark Home directory
#' @param sparkEnvir Named list of environment variables to set on worker nodes
#' @param sparkExecutorEnv Named list of environment variables to be used when launching executors
#' @param sparkJars Character vector of jar files to pass to the worker nodes
#' @param sparkPackages Character vector of package coordinates
#' @seealso \link{sparkR.session}
#' @rdname sparkR.init-deprecated
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init("local[2]", "SparkR", "/home/spark")
#' sc <- sparkR.init("local[2]", "SparkR", "/home/spark",
#' list(spark.executor.memory="1g"))
#' sc <- sparkR.init("yarn-client", "SparkR", "/home/spark",
#' list(spark.executor.memory="4g"),
#' list(LD_LIBRARY_PATH="/directory of JVM libraries (libjvm.so) on workers/"),
#' c("one.jar", "two.jar", "three.jar"),
#' c("com.databricks:spark-avro_2.10:2.0.1"))
#'}
#' @note sparkR.init since 1.4.0
sparkR.init <- function(
  master = "",
  appName = "SparkR",
  sparkHome = Sys.getenv("SPARK_HOME"),
  sparkEnvir = list(),
  sparkExecutorEnv = list(),
  sparkJars = "",
  sparkPackages = "") {
  .Deprecated("sparkR.session")
  # Named lists are converted to environments, the representation that
  # sparkR.sparkContext expects for config/executor-env maps.
  sparkR.sparkContext(master,
     appName,
     sparkHome,
     convertNamedListToEnv(sparkEnvir),
     convertNamedListToEnv(sparkExecutorEnv),
     sparkJars,
     sparkPackages)
}
# Internal function to handle creating the SparkContext.
#
# Args:
#   master: the Spark master URL.
#   appName: application name to register with the cluster manager.
#   sparkHome: Spark home directory.
#   sparkEnvirMap: environment of Spark properties.
#   sparkExecutorEnvMap: environment of env vars used when launching executors.
#   sparkJars: jar files for the workers (vector or comma-separated string).
#   sparkPackages: package coordinates (vector or comma-separated string).
# Returns the JavaSparkContext jobj, cached in .sparkREnv$.sparkRjsc; an
# existing context is re-used rather than re-created.
sparkR.sparkContext <- function(
  master = "",
  appName = "SparkR",
  sparkHome = Sys.getenv("SPARK_HOME"),
  sparkEnvirMap = new.env(),
  sparkExecutorEnvMap = new.env(),
  sparkJars = "",
  sparkPackages = "") {
  if (exists(".sparkRjsc", envir = .sparkREnv)) {
    cat(paste("Re-using existing Spark Context.",
              "Call sparkR.session.stop() or restart R to create a new Spark Context\n"))
    return(get(".sparkRjsc", envir = .sparkREnv))
  }
  jars <- processSparkJars(sparkJars)
  packages <- processSparkPackages(sparkPackages)
  # EXISTING_SPARKR_BACKEND_PORT is set when spark-submit / the sparkR shell
  # already launched a backend for us; otherwise we launch one ourselves.
  existingPort <- Sys.getenv("EXISTING_SPARKR_BACKEND_PORT", "")
  connectionTimeout <- as.numeric(Sys.getenv("SPARKR_BACKEND_CONNECTION_TIMEOUT", "6000"))
  if (existingPort != "") {
    if (length(packages) != 0) {
      warning(paste("sparkPackages has no effect when using spark-submit or sparkR shell",
                    " please use the --packages commandline instead", sep = ","))
    }
    backendPort <- existingPort
  } else {
    # The launched JVM writes its ports/paths into this temp file.
    path <- tempfile(pattern = "backend_port")
    submitOps <- getClientModeSparkSubmitOpts(
        Sys.getenv("SPARKR_SUBMIT_ARGS", "sparkr-shell"),
        sparkEnvirMap)
    launchBackend(
        args = path,
        sparkHome = sparkHome,
        jars = jars,
        sparkSubmitOpts = submitOps,
        packages = packages)
    # Wait at most ~100 seconds for the JVM to launch: 25 sleeps with
    # exponential backoff starting at 0.1s.
    wait <- 0.1
    for (i in 1:25) {
      Sys.sleep(wait)
      if (file.exists(path)) {
        break
      }
      wait <- wait * 1.25
    }
    if (!file.exists(path)) {
      # Message matches the ~100s worst-case wait of the loop above (the old
      # text incorrectly said 10 seconds).
      stop("JVM is not ready after 100 seconds")
    }
    f <- file(path, open = "rb")
    backendPort <- readInt(f)
    monitorPort <- readInt(f)
    rLibPath <- readString(f)
    connectionTimeout <- readInt(f)
    close(f)
    file.remove(path)
    if (length(backendPort) == 0 || backendPort == 0 ||
        length(monitorPort) == 0 || monitorPort == 0 ||
        length(rLibPath) != 1) {
      stop("JVM failed to launch")
    }
    assign(".monitorConn",
           socketConnection(port = monitorPort, timeout = connectionTimeout),
           envir = .sparkREnv)
    # Record that we launched the backend so sparkR.session.stop() knows to
    # shut it down.
    assign(".backendLaunched", 1, envir = .sparkREnv)
    if (rLibPath != "") {
      assign(".libPath", rLibPath, envir = .sparkREnv)
      .libPaths(c(rLibPath, .libPaths()))
    }
  }
  .sparkREnv$backendPort <- backendPort
  tryCatch({
    connectBackend("localhost", backendPort, timeout = connectionTimeout)
  },
  error = function(err) {
    stop("Failed to connect JVM\n")
  })
  if (nchar(sparkHome) != 0) {
    sparkHome <- suppressWarnings(normalizePath(sparkHome))
  }
  if (is.null(sparkExecutorEnvMap$LD_LIBRARY_PATH)) {
    sparkExecutorEnvMap[["LD_LIBRARY_PATH"]] <-
      paste0("$LD_LIBRARY_PATH:", Sys.getenv("LD_LIBRARY_PATH"))
  }
  # Classpath separator is ";" on Windows
  # URI needs four /// as from http://stackoverflow.com/a/18522792
  if (.Platform$OS.type == "unix") {
    uriSep <- "//"
  } else {
    uriSep <- "////"
  }
  localJarPaths <- lapply(jars,
                          function(j) { utils::URLencode(paste("file:", uriSep, j, sep = "")) })
  # Set the start time to identify jobjs
  # Seconds resolution is good enough for this purpose, so use ints
  assign(".scStartTime", as.integer(Sys.time()), envir = .sparkREnv)
  assign(
    ".sparkRjsc",
    callJStatic(
      "org.apache.spark.api.r.RRDD",
      "createSparkContext",
      master,
      appName,
      as.character(sparkHome),
      localJarPaths,
      sparkEnvirMap,
      sparkExecutorEnvMap),
    envir = .sparkREnv
  )
  sc <- get(".sparkRjsc", envir = .sparkREnv)
  # Register a finalizer to sleep 1 seconds on R exit to make RStudio happy
  reg.finalizer(.sparkREnv, function(x) { Sys.sleep(1) }, onexit = TRUE)
  sc
}
#' (Deprecated) Initialize a new SQLContext
#'
#' This function creates a SparkContext from an existing JavaSparkContext and
#' then uses it to initialize a new SQLContext
#'
#' Starting SparkR 2.0, a SparkSession is initialized and returned instead.
#' This API is deprecated and kept for backward compatibility only.
#'
#' @param jsc The existing JavaSparkContext created with SparkR.init()
#' @seealso \link{sparkR.session}
#' @rdname sparkRSQL.init-deprecated
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' sqlContext <- sparkRSQL.init(sc)
#'}
#' @note sparkRSQL.init since 1.4.0
sparkRSQL.init <- function(jsc = NULL) {
  .Deprecated("sparkR.session")
  # `jsc` is accepted only for backward compatibility and is ignored; the
  # cached SparkSession is returned when one already exists.
  if (exists(".sparkRsession", envir = .sparkREnv)) {
    return(get(".sparkRsession", envir = .sparkREnv))
  }
  # Default to without Hive support for backward compatibility.
  sparkR.session(enableHiveSupport = FALSE)
}
#' (Deprecated) Initialize a new HiveContext
#'
#' This function creates a HiveContext from an existing JavaSparkContext
#'
#' Starting SparkR 2.0, a SparkSession is initialized and returned instead.
#' This API is deprecated and kept for backward compatibility only.
#'
#' @param jsc The existing JavaSparkContext created with SparkR.init()
#' @seealso \link{sparkR.session}
#' @rdname sparkRHive.init-deprecated
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' sqlContext <- sparkRHive.init(sc)
#'}
#' @note sparkRHive.init since 1.4.0
sparkRHive.init <- function(jsc = NULL) {
  .Deprecated("sparkR.session")
  # `jsc` is accepted only for backward compatibility and is ignored; the
  # cached SparkSession is returned when one already exists.
  if (exists(".sparkRsession", envir = .sparkREnv)) {
    return(get(".sparkRsession", envir = .sparkREnv))
  }
  # Enable Hive support to match the old HiveContext behavior. (The previous
  # comment here said "without Hive support" — a stale copy-paste from
  # sparkRSQL.init; the code enables it.)
  sparkR.session(enableHiveSupport = TRUE)
}
#' Get the existing SparkSession or initialize a new SparkSession.
#'
#' SparkSession is the entry point into SparkR. \code{sparkR.session} gets the existing
#' SparkSession or initializes a new SparkSession.
#' Additional Spark properties can be set in \code{...}, and these named parameters take priority
#' over values in \code{master}, \code{appName}, named lists of \code{sparkConfig}.
#' When called in an interactive session, this checks for the Spark installation, and, if not
#' found, it will be downloaded and cached automatically. Alternatively, \code{install.spark} can
#' be called manually.
#'
#' For details on how to initialize and use SparkR, refer to SparkR programming guide at
#' \url{http://spark.apache.org/docs/latest/sparkr.html#starting-up-sparksession}.
#'
#' @param master the Spark master URL.
#' @param appName application name to register with cluster manager.
#' @param sparkHome Spark Home directory.
#' @param sparkConfig named list of Spark configuration to set on worker nodes.
#' @param sparkJars character vector of jar files to pass to the worker nodes.
#' @param sparkPackages character vector of package coordinates
#' @param enableHiveSupport enable support for Hive, fallback if not built with Hive support; once
#' set, this cannot be turned off on an existing session
#' @param ... named Spark properties passed to the method.
#' @export
#' @examples
#'\dontrun{
#' sparkR.session()
#' df <- read.json(path)
#'
#' sparkR.session("local[2]", "SparkR", "/home/spark")
#' sparkR.session("yarn-client", "SparkR", "/home/spark",
#' list(spark.executor.memory="4g"),
#' c("one.jar", "two.jar", "three.jar"),
#' c("com.databricks:spark-avro_2.10:2.0.1"))
#' sparkR.session(spark.master = "yarn-client", spark.executor.memory = "4g")
#'}
#' @note sparkR.session since 2.0.0
sparkR.session <- function(
  master = "",
  appName = "SparkR",
  sparkHome = Sys.getenv("SPARK_HOME"),
  sparkConfig = list(),
  sparkJars = "",
  sparkPackages = "",
  enableHiveSupport = TRUE,
  ...) {
  sparkConfigMap <- convertNamedListToEnv(sparkConfig)
  namedParams <- list(...)
  if (length(namedParams) > 0) {
    paramMap <- convertNamedListToEnv(namedParams)
    # Override for certain named parameters: properties passed via `...`
    # take priority over the positional master/appName arguments.
    if (exists("spark.master", envir = paramMap)) {
      master <- paramMap[["spark.master"]]
    }
    if (exists("spark.app.name", envir = paramMap)) {
      appName <- paramMap[["spark.app.name"]]
    }
    overrideEnvs(sparkConfigMap, paramMap)
  }
  deployMode <- ""
  if (exists("spark.submit.deployMode", envir = sparkConfigMap)) {
    deployMode <- sparkConfigMap[["spark.submit.deployMode"]]
  }
  if (!exists(".sparkRjsc", envir = .sparkREnv)) {
    # No SparkContext yet: verify (or auto-install) a Spark distribution,
    # then launch the backend and create the context.
    retHome <- sparkCheckInstall(sparkHome, master, deployMode)
    if (!is.null(retHome)) sparkHome <- retHome
    sparkExecutorEnvMap <- new.env()
    sparkR.sparkContext(master, appName, sparkHome, sparkConfigMap, sparkExecutorEnvMap,
       sparkJars, sparkPackages)
    stopifnot(exists(".sparkRjsc", envir = .sparkREnv))
  }
  if (exists(".sparkRsession", envir = .sparkREnv)) {
    sparkSession <- get(".sparkRsession", envir = .sparkREnv)
    # Apply config to Spark Context and Spark Session if already there
    # Cannot change enableHiveSupport
    callJStatic("org.apache.spark.sql.api.r.SQLUtils",
                "setSparkContextSessionConf",
                sparkSession,
                sparkConfigMap)
  } else {
    jsc <- get(".sparkRjsc", envir = .sparkREnv)
    sparkSession <- callJStatic("org.apache.spark.sql.api.r.SQLUtils",
                                "getOrCreateSparkSession",
                                jsc,
                                sparkConfigMap,
                                enableHiveSupport)
    assign(".sparkRsession", sparkSession, envir = .sparkREnv)
  }
  sparkSession
}
#' Assigns a group ID to all the jobs started by this thread until the group ID is set to a
#' different value or cleared.
#'
#' @param groupId the ID to be assigned to job groups.
#' @param description description for the job group ID.
#' @param interruptOnCancel flag to indicate if the job is interrupted on job cancellation.
#' @rdname setJobGroup
#' @name setJobGroup
#' @examples
#'\dontrun{
#' sparkR.session()
#' setJobGroup("myJobGroup", "My job group description", TRUE)
#'}
#' @note setJobGroup since 1.5.0
#' @method setJobGroup default
setJobGroup.default <- function(groupId, description, interruptOnCancel) {
  # Forward the job-group settings to the JVM SparkContext; invisible() so
  # the jobj result does not auto-print.
  sc <- getSparkContext()
  invisible(callJMethod(sc, "setJobGroup", groupId, description, interruptOnCancel))
}
setJobGroup <- function(sc, groupId, description, interruptOnCancel) {
  # Backward-compatible dispatcher: the old API passed the SparkContext as the
  # first argument. inherits() replaces `class(sc) == "jobj"` because class()
  # can return a vector, which breaks a scalar `&&` condition (an error on
  # R >= 4.3) — inherits() is the idiomatic class test.
  if (inherits(sc, "jobj") && any(grepl("JavaSparkContext", getClassName.jobj(sc)))) {
    .Deprecated("setJobGroup(groupId, description, interruptOnCancel)",
                old = "setJobGroup(sc, groupId, description, interruptOnCancel)")
    setJobGroup.default(groupId, description, interruptOnCancel)
  } else {
    # Parameter order is shifted: `sc` actually holds groupId, and so on.
    groupIdToUse <- sc
    descriptionToUse <- groupId
    interruptOnCancelToUse <- description
    setJobGroup.default(groupIdToUse, descriptionToUse, interruptOnCancelToUse)
  }
}
#' Clear current job group ID and its description
#'
#' @rdname clearJobGroup
#' @name clearJobGroup
#' @examples
#'\dontrun{
#' sparkR.session()
#' clearJobGroup()
#'}
#' @note clearJobGroup since 1.5.0
#' @method clearJobGroup default
clearJobGroup.default <- function() {
  # Clear the job group on the JVM SparkContext; invisible() so nothing prints.
  sc <- getSparkContext()
  invisible(callJMethod(sc, "clearJobGroup"))
}
clearJobGroup <- function(sc) {
  # The old API took a SparkContext argument; warn when callers still pass
  # one. inherits() replaces `class(sc) == "jobj"` because class() can return
  # a vector, which breaks scalar `&&` (an error on R >= 4.3).
  if (!missing(sc) &&
      inherits(sc, "jobj") &&
      any(grepl("JavaSparkContext", getClassName.jobj(sc)))) {
    .Deprecated("clearJobGroup()", old = "clearJobGroup(sc)")
  }
  clearJobGroup.default()
}
#' Cancel active jobs for the specified group
#'
#' @param groupId the ID of job group to be cancelled
#' @rdname cancelJobGroup
#' @name cancelJobGroup
#' @examples
#'\dontrun{
#' sparkR.session()
#' cancelJobGroup("myJobGroup")
#'}
#' @note cancelJobGroup since 1.5.0
#' @method cancelJobGroup default
cancelJobGroup.default <- function(groupId) {
  # Cancel all active jobs in the group on the JVM SparkContext.
  sc <- getSparkContext()
  invisible(callJMethod(sc, "cancelJobGroup", groupId))
}
cancelJobGroup <- function(sc, groupId) {
  # Backward-compatible dispatcher: the old API passed the SparkContext first.
  # inherits() replaces `class(sc) == "jobj"` because class() can return a
  # vector, which breaks scalar `&&` (an error on R >= 4.3).
  if (inherits(sc, "jobj") && any(grepl("JavaSparkContext", getClassName.jobj(sc)))) {
    .Deprecated("cancelJobGroup(groupId)", old = "cancelJobGroup(sc, groupId)")
    cancelJobGroup.default(groupId)
  } else {
    # Parameter order is shifted: `sc` actually holds the groupId.
    groupIdToUse <- sc
    cancelJobGroup.default(groupIdToUse)
  }
}
# Maps Spark properties that cannot take effect after the driver JVM has
# started to their spark-submit command-line flags; consumed by
# getClientModeSparkSubmitOpts().
sparkConfToSubmitOps <- new.env()
sparkConfToSubmitOps[["spark.driver.memory"]] <- "--driver-memory"
sparkConfToSubmitOps[["spark.driver.extraClassPath"]] <- "--driver-class-path"
sparkConfToSubmitOps[["spark.driver.extraJavaOptions"]] <- "--driver-java-options"
sparkConfToSubmitOps[["spark.driver.extraLibraryPath"]] <- "--driver-library-path"
sparkConfToSubmitOps[["spark.master"]] <- "--master"
sparkConfToSubmitOps[["spark.yarn.keytab"]] <- "--keytab"
sparkConfToSubmitOps[["spark.yarn.principal"]] <- "--principal"
# Utility function that returns Spark Submit arguments as a string
#
# A few Spark Application and Runtime environment properties cannot take effect after driver
# JVM has started, as documented in:
# http://spark.apache.org/docs/latest/configuration.html#application-properties
# When starting SparkR without using spark-submit, for example, from Rstudio, add them to
# spark-submit commandline if not already set in SPARKR_SUBMIT_ARGS so that they can be effective.
getClientModeSparkSubmitOpts <- function(submitOps, sparkEnvirMap) {
  # For each property in sparkConfToSubmitOps that is set in sparkEnvirMap and
  # not already present in submitOps, build its `--flag "value"` prefix.
  confNames <- ls(sparkConfToSubmitOps)
  prefixOps <- vapply(confNames, function(conf) {
    value <- sparkEnvirMap[[conf]]
    flag <- sparkConfToSubmitOps[[conf]]
    # process only if --option is not already specified
    if (!is.null(value) && nchar(value) > 1 && !grepl(flag, submitOps)) {
      # put "" around value in case it has spaces
      paste0(flag, " \"", value, "\" ")
    } else {
      ""
    }
  }, character(1))
  # --option must be before the application class "sparkr-shell" in submitOps
  paste0(paste0(prefixOps, collapse = ""), submitOps)
}
# Utility function that handles sparkJars argument, and normalize paths
processSparkJars <- function(jars) {
  # Accepts either a character vector or (deprecated) a single comma-separated
  # string; splitString() expands the latter, detectable because the split
  # result then has more elements than the input. Paths are normalized.
  parts <- splitString(jars)
  if (length(parts) > length(jars)) {
    warning("sparkJars as a comma-separated string is deprecated, use character vector instead")
  }
  suppressWarnings(normalizePath(parts))
}
# Utility function that handles sparkPackages argument
processSparkPackages <- function(packages) {
  # Same deprecation handling as processSparkJars; package coordinates are
  # not filesystem paths, so no normalizePath() here.
  parts <- splitString(packages)
  if (length(parts) > length(packages)) {
    warning("sparkPackages as a comma-separated string is deprecated, use character vector instead")
  }
  parts
}
# Utility function that checks and install Spark to local folder if not found
#
# Installation will not be triggered if it's called from sparkR shell
# or if the master url is not local
#
# @param sparkHome directory to find Spark package.
# @param master the Spark master URL, used to check local or remote mode.
# @param deployMode whether to deploy your driver on the worker nodes (cluster)
# or locally as an external client (client).
# @return NULL if no need to update sparkHome, and new sparkHome otherwise.
sparkCheckInstall <- function(sparkHome, master, deployMode) {
  if (!isSparkRShell()) {
    # file.info()$isdir is NA when the path does not exist, so !is.na(...)
    # means "sparkHome points at an existing path".
    if (!is.na(file.info(sparkHome)$isdir)) {
      msg <- paste0("Spark package found in SPARK_HOME: ", sparkHome)
      message(msg)
      NULL
    } else {
      if (interactive() || isMasterLocal(master)) {
        # Interactive or local-master use: download and cache Spark.
        msg <- paste0("Spark not found in SPARK_HOME: ", sparkHome)
        message(msg)
        packageLocalDir <- install.spark()
        packageLocalDir
      } else if (isClientMode(master) || deployMode == "client") {
        # Remote client mode: auto-install is not possible; instruct the user.
        msg <- paste0("Spark not found in SPARK_HOME: ",
                      sparkHome, "\n", installInstruction("remote"))
        stop(msg)
      } else {
        NULL
      }
    }
  } else {
    # Launched from the sparkR shell: spark-submit already set everything up.
    NULL
  }
}
| /R/pkg/R/sparkR.R | permissive | bloomberg/apache-spark-on-k8s | R | false | false | 20,055 | r | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Package-private environment holding SparkR runtime state: the backend
# connection (.sparkRCon), monitor connection (.monitorConn), backend port,
# SparkContext (.sparkRjsc), SparkSession (.sparkRsession) and bookkeeping
# entries such as .scStartTime and .libPath.
.sparkREnv <- new.env()
# Utility function that returns TRUE if we have an active connection to the
# backend and FALSE otherwise
connExists <- function(env) {
  # TRUE only when `env` holds a .sparkRCon entry that is still an open
  # connection; any error raised while checking (e.g. a destroyed connection
  # object) is treated as "no active connection" and yields FALSE.
  tryCatch(
    exists(".sparkRCon", envir = env) && isOpen(env[[".sparkRCon"]]),
    error = function(err) FALSE
  )
}
#' Stop the Spark Session and Spark Context
#'
#' Stop the Spark Session and Spark Context.
#'
#' Also terminates the backend this R session is connected to.
#' @rdname sparkR.session.stop
#' @name sparkR.session.stop
#' @export
#' @note sparkR.session.stop since 2.0.0
sparkR.session.stop <- function() {
  # Tear down, in order: JVM SparkContext, cached session handle, injected
  # lib path, the backend JVM (only if we launched it), then our sockets.
  env <- .sparkREnv
  if (exists(".sparkRCon", envir = env)) {
    if (exists(".sparkRjsc", envir = env)) {
      # Stop the JVM-side SparkContext before closing the backend connection.
      sc <- get(".sparkRjsc", envir = env)
      callJMethod(sc, "stop")
      rm(".sparkRjsc", envir = env)
      if (exists(".sparkRsession", envir = env)) {
        rm(".sparkRsession", envir = env)
      }
    }
    # Remove the R package lib path from .libPaths()
    if (exists(".libPath", envir = env)) {
      libPath <- get(".libPath", envir = env)
      .libPaths(.libPaths()[.libPaths() != libPath])
    }
    # Only ask the backend JVM to shut down when this R process launched it
    # (sparkR.sparkContext sets .backendLaunched in that case).
    if (exists(".backendLaunched", envir = env)) {
      callJStatic("SparkRHandler", "stopBackend")
    }
    # Also close the connection and remove it from our env
    conn <- get(".sparkRCon", envir = env)
    close(conn)
    rm(".sparkRCon", envir = env)
    rm(".scStartTime", envir = env)
  }
  if (exists(".monitorConn", envir = env)) {
    conn <- get(".monitorConn", envir = env)
    close(conn)
    rm(".monitorConn", envir = env)
  }
  # Clear all broadcast variables we have
  # as the jobj will not be valid if we restart the JVM
  clearBroadcastVariables()
  # Clear jobj maps
  clearJobjs()
}
#' @rdname sparkR.session.stop
#' @name sparkR.stop
#' @export
#' @note sparkR.stop since 1.4.0
sparkR.stop <- function() {
  # Backward-compatible alias: delegates to sparkR.session.stop(), which
  # stops the session/context and tears down the JVM backend connection.
  sparkR.session.stop()
}
#' (Deprecated) Initialize a new Spark Context
#'
#' This function initializes a new SparkContext.
#'
#' @param master The Spark master URL
#' @param appName Application name to register with cluster manager
#' @param sparkHome Spark Home directory
#' @param sparkEnvir Named list of environment variables to set on worker nodes
#' @param sparkExecutorEnv Named list of environment variables to be used when launching executors
#' @param sparkJars Character vector of jar files to pass to the worker nodes
#' @param sparkPackages Character vector of package coordinates
#' @seealso \link{sparkR.session}
#' @rdname sparkR.init-deprecated
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init("local[2]", "SparkR", "/home/spark")
#' sc <- sparkR.init("local[2]", "SparkR", "/home/spark",
#' list(spark.executor.memory="1g"))
#' sc <- sparkR.init("yarn-client", "SparkR", "/home/spark",
#' list(spark.executor.memory="4g"),
#' list(LD_LIBRARY_PATH="/directory of JVM libraries (libjvm.so) on workers/"),
#' c("one.jar", "two.jar", "three.jar"),
#' c("com.databricks:spark-avro_2.10:2.0.1"))
#'}
#' @note sparkR.init since 1.4.0
sparkR.init <- function(
  master = "",
  appName = "SparkR",
  sparkHome = Sys.getenv("SPARK_HOME"),
  sparkEnvir = list(),
  sparkExecutorEnv = list(),
  sparkJars = "",
  sparkPackages = "") {
  .Deprecated("sparkR.session")
  # Named lists are converted to environments, the representation that
  # sparkR.sparkContext expects for config/executor-env maps.
  sparkR.sparkContext(master,
     appName,
     sparkHome,
     convertNamedListToEnv(sparkEnvir),
     convertNamedListToEnv(sparkExecutorEnv),
     sparkJars,
     sparkPackages)
}
# Internal function to handle creating the SparkContext.
#
# Args:
#   master: the Spark master URL.
#   appName: application name to register with the cluster manager.
#   sparkHome: Spark home directory.
#   sparkEnvirMap: environment of Spark properties.
#   sparkExecutorEnvMap: environment of env vars used when launching executors.
#   sparkJars: jar files for the workers (vector or comma-separated string).
#   sparkPackages: package coordinates (vector or comma-separated string).
# Returns the JavaSparkContext jobj, cached in .sparkREnv$.sparkRjsc; an
# existing context is re-used rather than re-created.
sparkR.sparkContext <- function(
  master = "",
  appName = "SparkR",
  sparkHome = Sys.getenv("SPARK_HOME"),
  sparkEnvirMap = new.env(),
  sparkExecutorEnvMap = new.env(),
  sparkJars = "",
  sparkPackages = "") {
  if (exists(".sparkRjsc", envir = .sparkREnv)) {
    cat(paste("Re-using existing Spark Context.",
              "Call sparkR.session.stop() or restart R to create a new Spark Context\n"))
    return(get(".sparkRjsc", envir = .sparkREnv))
  }
  jars <- processSparkJars(sparkJars)
  packages <- processSparkPackages(sparkPackages)
  # EXISTING_SPARKR_BACKEND_PORT is set when spark-submit / the sparkR shell
  # already launched a backend for us; otherwise we launch one ourselves.
  existingPort <- Sys.getenv("EXISTING_SPARKR_BACKEND_PORT", "")
  connectionTimeout <- as.numeric(Sys.getenv("SPARKR_BACKEND_CONNECTION_TIMEOUT", "6000"))
  if (existingPort != "") {
    if (length(packages) != 0) {
      warning(paste("sparkPackages has no effect when using spark-submit or sparkR shell",
                    " please use the --packages commandline instead", sep = ","))
    }
    backendPort <- existingPort
  } else {
    # The launched JVM writes its ports/paths into this temp file.
    path <- tempfile(pattern = "backend_port")
    submitOps <- getClientModeSparkSubmitOpts(
        Sys.getenv("SPARKR_SUBMIT_ARGS", "sparkr-shell"),
        sparkEnvirMap)
    launchBackend(
        args = path,
        sparkHome = sparkHome,
        jars = jars,
        sparkSubmitOpts = submitOps,
        packages = packages)
    # Wait at most ~100 seconds for the JVM to launch: 25 sleeps with
    # exponential backoff starting at 0.1s.
    wait <- 0.1
    for (i in 1:25) {
      Sys.sleep(wait)
      if (file.exists(path)) {
        break
      }
      wait <- wait * 1.25
    }
    if (!file.exists(path)) {
      # Message matches the ~100s worst-case wait of the loop above (the old
      # text incorrectly said 10 seconds).
      stop("JVM is not ready after 100 seconds")
    }
    f <- file(path, open = "rb")
    backendPort <- readInt(f)
    monitorPort <- readInt(f)
    rLibPath <- readString(f)
    connectionTimeout <- readInt(f)
    close(f)
    file.remove(path)
    if (length(backendPort) == 0 || backendPort == 0 ||
        length(monitorPort) == 0 || monitorPort == 0 ||
        length(rLibPath) != 1) {
      stop("JVM failed to launch")
    }
    assign(".monitorConn",
           socketConnection(port = monitorPort, timeout = connectionTimeout),
           envir = .sparkREnv)
    # Record that we launched the backend so sparkR.session.stop() knows to
    # shut it down.
    assign(".backendLaunched", 1, envir = .sparkREnv)
    if (rLibPath != "") {
      assign(".libPath", rLibPath, envir = .sparkREnv)
      .libPaths(c(rLibPath, .libPaths()))
    }
  }
  .sparkREnv$backendPort <- backendPort
  tryCatch({
    connectBackend("localhost", backendPort, timeout = connectionTimeout)
  },
  error = function(err) {
    stop("Failed to connect JVM\n")
  })
  if (nchar(sparkHome) != 0) {
    sparkHome <- suppressWarnings(normalizePath(sparkHome))
  }
  if (is.null(sparkExecutorEnvMap$LD_LIBRARY_PATH)) {
    sparkExecutorEnvMap[["LD_LIBRARY_PATH"]] <-
      paste0("$LD_LIBRARY_PATH:", Sys.getenv("LD_LIBRARY_PATH"))
  }
  # Classpath separator is ";" on Windows
  # URI needs four /// as from http://stackoverflow.com/a/18522792
  if (.Platform$OS.type == "unix") {
    uriSep <- "//"
  } else {
    uriSep <- "////"
  }
  localJarPaths <- lapply(jars,
                          function(j) { utils::URLencode(paste("file:", uriSep, j, sep = "")) })
  # Set the start time to identify jobjs
  # Seconds resolution is good enough for this purpose, so use ints
  assign(".scStartTime", as.integer(Sys.time()), envir = .sparkREnv)
  assign(
    ".sparkRjsc",
    callJStatic(
      "org.apache.spark.api.r.RRDD",
      "createSparkContext",
      master,
      appName,
      as.character(sparkHome),
      localJarPaths,
      sparkEnvirMap,
      sparkExecutorEnvMap),
    envir = .sparkREnv
  )
  sc <- get(".sparkRjsc", envir = .sparkREnv)
  # Register a finalizer to sleep 1 seconds on R exit to make RStudio happy
  reg.finalizer(.sparkREnv, function(x) { Sys.sleep(1) }, onexit = TRUE)
  sc
}
#' (Deprecated) Initialize a new SQLContext
#'
#' This function creates a SparkContext from an existing JavaSparkContext and
#' then uses it to initialize a new SQLContext
#'
#' Starting SparkR 2.0, a SparkSession is initialized and returned instead.
#' This API is deprecated and kept for backward compatibility only.
#'
#' @param jsc The existing JavaSparkContext created with SparkR.init()
#' @seealso \link{sparkR.session}
#' @rdname sparkRSQL.init-deprecated
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' sqlContext <- sparkRSQL.init(sc)
#'}
#' @note sparkRSQL.init since 1.4.0
sparkRSQL.init <- function(jsc = NULL) {
  .Deprecated("sparkR.session")
  # `jsc` is accepted only for backward compatibility and is ignored; the
  # cached SparkSession is returned when one already exists.
  if (exists(".sparkRsession", envir = .sparkREnv)) {
    return(get(".sparkRsession", envir = .sparkREnv))
  }
  # Default to without Hive support for backward compatibility.
  sparkR.session(enableHiveSupport = FALSE)
}
#' (Deprecated) Initialize a new HiveContext
#'
#' This function creates a HiveContext from an existing JavaSparkContext
#'
#' Starting SparkR 2.0, a SparkSession is initialized and returned instead.
#' This API is deprecated and kept for backward compatibility only.
#'
#' @param jsc The existing JavaSparkContext created with SparkR.init()
#' @seealso \link{sparkR.session}
#' @rdname sparkRHive.init-deprecated
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' sqlContext <- sparkRHive.init(sc)
#'}
#' @note sparkRHive.init since 1.4.0
sparkRHive.init <- function(jsc = NULL) {
  .Deprecated("sparkR.session")
  # `jsc` is accepted only for backward compatibility and is ignored; the
  # cached SparkSession is returned when one already exists.
  if (exists(".sparkRsession", envir = .sparkREnv)) {
    return(get(".sparkRsession", envir = .sparkREnv))
  }
  # Enable Hive support to match the old HiveContext behavior. (The previous
  # comment here said "without Hive support" — a stale copy-paste from
  # sparkRSQL.init; the code enables it.)
  sparkR.session(enableHiveSupport = TRUE)
}
#' Get the existing SparkSession or initialize a new SparkSession.
#'
#' SparkSession is the entry point into SparkR. \code{sparkR.session} gets the existing
#' SparkSession or initializes a new SparkSession.
#' Additional Spark properties can be set in \code{...}, and these named parameters take priority
#' over values in \code{master}, \code{appName}, named lists of \code{sparkConfig}.
#' When called in an interactive session, this checks for the Spark installation, and, if not
#' found, it will be downloaded and cached automatically. Alternatively, \code{install.spark} can
#' be called manually.
#'
#' For details on how to initialize and use SparkR, refer to SparkR programming guide at
#' \url{http://spark.apache.org/docs/latest/sparkr.html#starting-up-sparksession}.
#'
#' @param master the Spark master URL.
#' @param appName application name to register with cluster manager.
#' @param sparkHome Spark Home directory.
#' @param sparkConfig named list of Spark configuration to set on worker nodes.
#' @param sparkJars character vector of jar files to pass to the worker nodes.
#' @param sparkPackages character vector of package coordinates
#' @param enableHiveSupport enable support for Hive, fallback if not built with Hive support; once
#' set, this cannot be turned off on an existing session
#' @param ... named Spark properties passed to the method.
#' @export
#' @examples
#'\dontrun{
#' sparkR.session()
#' df <- read.json(path)
#'
#' sparkR.session("local[2]", "SparkR", "/home/spark")
#' sparkR.session("yarn-client", "SparkR", "/home/spark",
#' list(spark.executor.memory="4g"),
#' c("one.jar", "two.jar", "three.jar"),
#' c("com.databricks:spark-avro_2.10:2.0.1"))
#' sparkR.session(spark.master = "yarn-client", spark.executor.memory = "4g")
#'}
#' @note sparkR.session since 2.0.0
sparkR.session <- function(
  master = "",
  appName = "SparkR",
  sparkHome = Sys.getenv("SPARK_HOME"),
  sparkConfig = list(),
  sparkJars = "",
  sparkPackages = "",
  enableHiveSupport = TRUE,
  ...) {
  sparkConfigMap <- convertNamedListToEnv(sparkConfig)
  namedParams <- list(...)
  if (length(namedParams) > 0) {
    paramMap <- convertNamedListToEnv(namedParams)
    # Override for certain named parameters: properties passed via `...`
    # take priority over the positional master/appName arguments.
    if (exists("spark.master", envir = paramMap)) {
      master <- paramMap[["spark.master"]]
    }
    if (exists("spark.app.name", envir = paramMap)) {
      appName <- paramMap[["spark.app.name"]]
    }
    overrideEnvs(sparkConfigMap, paramMap)
  }
  deployMode <- ""
  if (exists("spark.submit.deployMode", envir = sparkConfigMap)) {
    deployMode <- sparkConfigMap[["spark.submit.deployMode"]]
  }
  if (!exists(".sparkRjsc", envir = .sparkREnv)) {
    # No SparkContext yet: verify (or auto-install) a Spark distribution,
    # then launch the backend and create the context.
    retHome <- sparkCheckInstall(sparkHome, master, deployMode)
    if (!is.null(retHome)) sparkHome <- retHome
    sparkExecutorEnvMap <- new.env()
    sparkR.sparkContext(master, appName, sparkHome, sparkConfigMap, sparkExecutorEnvMap,
       sparkJars, sparkPackages)
    stopifnot(exists(".sparkRjsc", envir = .sparkREnv))
  }
  if (exists(".sparkRsession", envir = .sparkREnv)) {
    sparkSession <- get(".sparkRsession", envir = .sparkREnv)
    # Apply config to Spark Context and Spark Session if already there
    # Cannot change enableHiveSupport
    callJStatic("org.apache.spark.sql.api.r.SQLUtils",
                "setSparkContextSessionConf",
                sparkSession,
                sparkConfigMap)
  } else {
    jsc <- get(".sparkRjsc", envir = .sparkREnv)
    sparkSession <- callJStatic("org.apache.spark.sql.api.r.SQLUtils",
                                "getOrCreateSparkSession",
                                jsc,
                                sparkConfigMap,
                                enableHiveSupport)
    assign(".sparkRsession", sparkSession, envir = .sparkREnv)
  }
  sparkSession
}
#' Assigns a group ID to all the jobs started by this thread until the group ID is set to a
#' different value or cleared.
#'
#' @param groupId the ID to be assigned to job groups.
#' @param description description for the job group ID.
#' @param interruptOnCancel flag to indicate if the job is interrupted on job cancellation.
#' @rdname setJobGroup
#' @name setJobGroup
#' @examples
#'\dontrun{
#' sparkR.session()
#' setJobGroup("myJobGroup", "My job group description", TRUE)
#'}
#' @note setJobGroup since 1.5.0
#' @method setJobGroup default
setJobGroup.default <- function(groupId, description, interruptOnCancel) {
  # Forward the job-group settings to the JVM SparkContext; invisible() so
  # the jobj result does not auto-print.
  sc <- getSparkContext()
  invisible(callJMethod(sc, "setJobGroup", groupId, description, interruptOnCancel))
}
setJobGroup <- function(sc, groupId, description, interruptOnCancel) {
  # Backward-compatible dispatcher: the old API passed the SparkContext as the
  # first argument. inherits() replaces `class(sc) == "jobj"` because class()
  # can return a vector, which breaks a scalar `&&` condition (an error on
  # R >= 4.3) — inherits() is the idiomatic class test.
  if (inherits(sc, "jobj") && any(grepl("JavaSparkContext", getClassName.jobj(sc)))) {
    .Deprecated("setJobGroup(groupId, description, interruptOnCancel)",
                old = "setJobGroup(sc, groupId, description, interruptOnCancel)")
    setJobGroup.default(groupId, description, interruptOnCancel)
  } else {
    # Parameter order is shifted: `sc` actually holds groupId, and so on.
    groupIdToUse <- sc
    descriptionToUse <- groupId
    interruptOnCancelToUse <- description
    setJobGroup.default(groupIdToUse, descriptionToUse, interruptOnCancelToUse)
  }
}
#' Clear current job group ID and its description
#'
#' @rdname clearJobGroup
#' @name clearJobGroup
#' @examples
#'\dontrun{
#' sparkR.session()
#' clearJobGroup()
#'}
#' @note clearJobGroup since 1.5.0
#' @method clearJobGroup default
clearJobGroup.default <- function() {
  # Forward to the JVM SparkContext; invisible so nothing auto-prints.
  invisible(callJMethod(getSparkContext(), "clearJobGroup"))
}
clearJobGroup <- function(sc) {
  # The sc argument is deprecated; only warn when a JavaSparkContext jobj is
  # actually passed. inherits() avoids the invalid multi-element condition
  # that `class(sc) == "jobj"` can produce when class() returns a vector
  # (an error in R >= 4.2).
  if (!missing(sc) &&
      inherits(sc, "jobj") &&
      any(grepl("JavaSparkContext", getClassName.jobj(sc)))) {
    .Deprecated("clearJobGroup()", old = "clearJobGroup(sc)")
  }
  clearJobGroup.default()
}
#' Cancel active jobs for the specified group
#'
#' @param groupId the ID of job group to be cancelled
#' @rdname cancelJobGroup
#' @name cancelJobGroup
#' @examples
#'\dontrun{
#' sparkR.session()
#' cancelJobGroup("myJobGroup")
#'}
#' @note cancelJobGroup since 1.5.0
#' @method cancelJobGroup default
cancelJobGroup.default <- function(groupId) {
  # Forward to the JVM SparkContext; invisible so nothing auto-prints.
  invisible(callJMethod(getSparkContext(), "cancelJobGroup", groupId))
}
cancelJobGroup <- function(sc, groupId) {
  # Dispatcher supporting the deprecated signature cancelJobGroup(sc, groupId)
  # next to the current cancelJobGroup(groupId). inherits() replaces the
  # fragile `class(sc) == "jobj"` comparison: class() can return a vector of
  # length > 1, and a multi-element if() condition is an error in R >= 4.2.
  if (inherits(sc, "jobj") && any(grepl("JavaSparkContext", getClassName.jobj(sc)))) {
    .Deprecated("cancelJobGroup(groupId)", old = "cancelJobGroup(sc, groupId)")
    cancelJobGroup.default(groupId)
  } else {
    # Parameter order is shifted: sc actually holds the group ID.
    groupIdToUse <- sc
    cancelJobGroup.default(groupIdToUse)
  }
}
# Lookup environment mapping spark.* configuration keys to the spark-submit
# command line flag that sets them before the driver JVM starts.
sparkConfToSubmitOps <- new.env()
local({
  confToFlag <- c(
    "spark.driver.memory" = "--driver-memory",
    "spark.driver.extraClassPath" = "--driver-class-path",
    "spark.driver.extraJavaOptions" = "--driver-java-options",
    "spark.driver.extraLibraryPath" = "--driver-library-path",
    "spark.master" = "--master",
    "spark.yarn.keytab" = "--keytab",
    "spark.yarn.principal" = "--principal"
  )
  for (conf in names(confToFlag)) {
    assign(conf, confToFlag[[conf]], envir = sparkConfToSubmitOps)
  }
})
# Utility function that returns Spark Submit arguments as a string
#
# A few Spark Application and Runtime environment properties cannot take effect after driver
# JVM has started, as documented in:
# http://spark.apache.org/docs/latest/configuration.html#application-properties
# When starting SparkR without using spark-submit, for example, from Rstudio, add them to
# spark-submit commandline if not already set in SPARKR_SUBMIT_ARGS so that they can be effective.
getClientModeSparkSubmitOpts <- function(submitOps, sparkEnvirMap) {
  # For every spark.* key with a spark-submit equivalent, build the extra
  # command line fragment from the user-supplied config, unless that flag is
  # already present in submitOps.
  confNames <- ls(sparkConfToSubmitOps)
  opsFragments <- vapply(confNames, function(confName) {
    flag <- sparkConfToSubmitOps[[confName]]
    value <- sparkEnvirMap[[confName]]
    # process only if --option is not already specified
    if (!is.null(value) && nchar(value) > 1 && !grepl(flag, submitOps)) {
      # put "" around value in case it has spaces
      paste0(flag, " \"", value, "\" ")
    } else {
      ""
    }
  }, character(1))
  # --option must be before the application class "sparkr-shell" in submitOps
  paste0(paste0(opsFragments, collapse = ""), submitOps)
}
# Utility function that handles the sparkJars argument: splits deprecated
# comma-separated strings and normalizes each path.
processSparkJars <- function(jars) {
  jarList <- splitString(jars)
  # A single comma-separated string splits into more elements than inputs.
  if (length(jarList) > length(jars)) {
    warning("sparkJars as a comma-separated string is deprecated, use character vector instead")
  }
  # normalizePath warns for paths that do not (yet) exist; silence that.
  suppressWarnings(normalizePath(jarList))
}
# Utility function that handles the sparkPackages argument: splits deprecated
# comma-separated strings into a character vector of package coordinates.
processSparkPackages <- function(packages) {
  pkgList <- splitString(packages)
  # A single comma-separated string splits into more elements than inputs.
  if (length(pkgList) > length(packages)) {
    warning("sparkPackages as a comma-separated string is deprecated, use character vector instead")
  }
  pkgList
}
# Utility function that checks and install Spark to local folder if not found
#
# Installation will not be triggered if it's called from sparkR shell
# or if the master url is not local
#
# @param sparkHome directory to find Spark package.
# @param master the Spark master URL, used to check local or remote mode.
# @param deployMode whether to deploy your driver on the worker nodes (cluster)
# or locally as an external client (client).
# @return NULL if no need to update sparkHome, and new sparkHome otherwise.
sparkCheckInstall <- function(sparkHome, master, deployMode) {
  # Never auto-install from the SparkR shell: spark-submit already provided
  # a working SPARK_HOME there.
  if (isSparkRShell()) {
    return(NULL)
  }
  # A directory at sparkHome means a Spark package is already present.
  if (!is.na(file.info(sparkHome)$isdir)) {
    message(paste0("Spark package found in SPARK_HOME: ", sparkHome))
    return(NULL)
  }
  # Interactive sessions and local masters may download Spark on demand.
  if (interactive() || isMasterLocal(master)) {
    message(paste0("Spark not found in SPARK_HOME: ", sparkHome))
    return(install.spark())
  }
  # Client mode against a remote master cannot auto-install: fail with
  # installation instructions.
  if (isClientMode(master) || deployMode == "client") {
    stop(paste0("Spark not found in SPARK_HOME: ",
                sparkHome, "\n", installInstruction("remote")))
  }
  NULL
}
|
# Fuzzer-generated regression input (AFL/valgrind harness): extreme, infinite
# and NaN values for the day-of-year, latitude and temperature vectors.
testlist <- list(doy = c(-Inf, 0), latitude = c(-6.93116025206376e-107, 1.86807199752012e+112, -Inf, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, NaN, -1.51345790188863e+21, 1.44942408802595e-285, -1.72131968218895e+83, 0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158))
# Invoke the internal (non-exported) wrapper with the fuzzed argument list.
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result) | /meteor/inst/testfiles/ET0_ThornthwaiteWilmott/AFL_ET0_ThornthwaiteWilmott/ET0_ThornthwaiteWilmott_valgrind_files/1615837124-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 421 | r | testlist <- list(doy = c(-Inf, 0), latitude = c(-6.93116025206376e-107, 1.86807199752012e+112, -Inf, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, NaN, -1.51345790188863e+21, 1.44942408802595e-285, -1.72131968218895e+83, 0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158))
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result) |
# Happy path: adding a resource from a data frame, a data frame + schema, or
# a CSV path (with optional extra properties) keeps the package valid.
test_that("add_resource() returns a valid Data Package", {
  p <- example_package
  df <- data.frame("col_1" = c(1, 2), "col_2" = c("a", "b"))
  df_csv <- test_path("data/df.csv")
  schema <- create_schema(df)
  expect_true(check_package(add_resource(p, "new", df)))
  expect_true(check_package(add_resource(p, "new", df, schema)))
  expect_true(check_package(add_resource(p, "new", df_csv)))
  expect_true(check_package(
    add_resource(p, "new", df, title = "New", foo = "bar")
  ))
})
# A bare list is not a Data Package: the error must direct users to
# read_package()/create_package().
test_that("add_resource() returns error on incorrect Data Package", {
  df <- data.frame("col_1" = c(1, 2), "col_2" = c("a", "b"))
  expect_error(
    add_resource(list(), "new", df),
    paste(
      "`package` must be a list describing a Data Package,",
      "created with `read_package()` or `create_package()`."
    ),
    fixed = TRUE
  )
})
# Resource names follow the Data Package spec: lowercase alphanumerics plus
# ".", "-" and "_" only; anything else is rejected.
test_that("add_resource() returns error when resource name contains invalid
  characters", {
  p <- example_package
  df <- data.frame("col_1" = c(1, 2), "col_2" = c("a", "b"))
  expect_error(
    add_resource(p, "New", df),
    paste(
      "`New` must only contain lowercase alphanumeric characters plus",
      "`.`, `-` and `_`."
    ),
    fixed = TRUE
  )
  expect_error(add_resource(p, "nëw", df), "only contain lowercase")
  expect_error(add_resource(p, " new", df), "only contain lowercase")
  expect_error(add_resource(p, "new ", df), "only contain lowercase")
  expect_error(add_resource(p, "n ew", df), "only contain lowercase")
  expect_error(add_resource(p, "n/ew", df), "only contain lowercase")
  expect_true(check_package(add_resource(p, "n.ew", df)))
  expect_true(check_package(add_resource(p, "n-ew", df)))
  expect_true(check_package(add_resource(p, "n_ew", df)))
  expect_true(check_package(add_resource(p, "n3w", df)))
  expect_true(check_package(add_resource(p, "n.3-w_10", df)))
})
# Resource names must be unique within a package.
test_that("add_resource() returns error when resource of that name already
  exists", {
  p <- example_package
  df <- data.frame("col_1" = c(1, 2), "col_2" = c("a", "b"))
  expect_error(
    add_resource(p, "deployments", df),
    "`package` already contains a resource named `deployments`.",
    fixed = TRUE
  )
})
# Only data frames and CSV path(s) are accepted as data.
test_that("add_resource() returns error when data is not data frame or
  character", {
  p <- example_package
  expect_error(
    add_resource(p, "new", list()),
    "`data` must be a data frame or path(s) to CSV file(s).",
    fixed = TRUE
  )
})
# Zero-row data frames are rejected, with or without an explicit schema.
test_that("add_resource() returns error on invalid or empty data frame", {
  p <- example_package
  df <- data.frame("col_1" = c(1, 2), "col_2" = c("a", "b"))
  schema <- create_schema(df)
  expect_error(
    add_resource(p, "new", data.frame("col_1" = character(0))),
    "`data` must be a data frame containing data.",
    fixed = TRUE
  )
  expect_error(
    add_resource(p, "new", data.frame("col_1" = character(0)), schema),
    "`data` must be a data frame containing data.",
    fixed = TRUE
  )
  # For more tests see test-check_schema.R
})
# Missing files must fail fast, reporting the first unreachable path (local
# or remote, alone or mixed with existing files).
test_that("add_resource() returns error if CSV file cannot be found", {
  skip_if_offline()
  p <- example_package
  df_csv <- test_path("data/df.csv")
  schema <- create_schema(data.frame("col_1" = c(1, 2), "col_2" = c("a", "b")))
  expect_error(
    add_resource(p, "new", "no_such_file.csv"),
    "Can't find file at `no_such_file.csv`.",
    fixed = TRUE
  )
  expect_error(
    add_resource(p, "new", "no_such_file.csv", schema),
    "Can't find file at `no_such_file.csv`.",
    fixed = TRUE
  )
  expect_error(
    add_resource(p, "new", c(df_csv, "no_such_file.csv")),
    "Can't find file at `no_such_file.csv`.",
    fixed = TRUE
  )
  expect_error(
    add_resource(p, "new", c("no_such_file.csv", df_csv)),
    "Can't find file at `no_such_file.csv`.",
    fixed = TRUE
  )
  expect_error(
    add_resource(p, "new", c("no_such_file_1.csv", "no_such_file_2.csv")),
    "Can't find file at `no_such_file_1.csv`.",
    fixed = TRUE
  )
  expect_error(
    add_resource(p, "new", "http://example.com/no_such_file.csv"),
    "Can't find file at `http://example.com/no_such_file.csv`.",
    fixed = TRUE
  )
})
# Schema field names must match the data's column names, for both data frame
# and CSV inputs.
test_that("add_resource() returns error on mismatching schema and data", {
  p <- example_package
  df <- data.frame("col_1" = c(1, 2), "col_2" = c("a", "b"))
  df_csv <- test_path("data/df.csv")
  schema_invalid <- create_schema(df) # Not yet invalid
  schema_invalid$fields[[1]]$name <- "no_such_col"
  # df
  expect_error(
    add_resource(p, "new", df, schema_invalid),
    paste(
      "Field names in `schema` must match column names in data:",
      "ℹ Field names: `no_such_col`, `col_2`",
      "ℹ Column names: `col_1`, `col_2`",
      sep = "\n"
    ),
    fixed = TRUE
  )
  # csv
  expect_error(
    add_resource(p, "new", df_csv, schema_invalid),
    paste(
      "Field names in `schema` must match column names in data:",
      "ℹ Field names: `no_such_col`, `col_2`",
      "ℹ Column names: `col_1`, `col_2`",
      sep = "\n"
    ),
    fixed = TRUE
  )
  # For more tests see test-check_schema.R
})
# Extra resource properties passed via ... must all be named.
test_that("add_resource() returns error if ... arguments are unnamed", {
  p <- create_package()
  df <- data.frame("col_1" = c(1, 2), "col_2" = c("a", "b"))
  schema <- create_schema(df)
  expect_error(
    add_resource(p, "new", df, schema, delim = ",", "unnamed_value"),
    "All arguments in `...` must be named.",
    fixed = TRUE
  )
})
# Properties the function sets itself (name, path, ...) may not be supplied
# by the caller; the first conflicting argument is reported.
test_that("add_resource() returns error if ... arguments are reserved", {
  p <- create_package()
  df <- data.frame("col_1" = c(1, 2), "col_2" = c("a", "b"))
  expect_error(
    add_resource(p, "new", df, name = "custom_name"),
    paste(
      "`name` must be removed as an argument.",
      "It is automatically added as a resource property by the function."
    ),
    fixed = TRUE
  )
  expect_error(
    add_resource(p, "new", df, path = "custom_path", encoding = "utf8"),
    paste(
      "`path` must be removed as an argument.", # First conflicting argument
      "It is automatically added as a resource property by the function."
    ),
    fixed = TRUE
  )
})
# Adding resources appends them to $resources, sets the tabular profile, and
# keeps data inline for data frames but on disk (data = NULL) for CSV input.
test_that("add_resource() adds resource", {
  p <- example_package
  df <- data.frame("col_1" = c(1, 2), "col_2" = c("a", "b"))
  df_csv <- test_path("data/df.csv")
  # df
  p <- add_resource(p, "new_df", df)
  expect_length(p$resources, 4) # Remains a list, now of length 4
  expect_identical(p$resources[[4]][["name"]], "new_df")
  expect_identical(p$resources[[4]][["profile"]], "tabular-data-resource")
  expect_identical(p$resources[[4]][["data"]], df)
  expect_identical(
    resources(p),
    c("deployments", "observations", "media", "new_df")
  )
  # csv
  p <- add_resource(p, "new_csv", df_csv)
  expect_length(p$resources, 5) # Remains a list, now of length 5
  expect_identical(p$resources[[5]][["name"]], "new_csv")
  expect_identical(p$resources[[5]][["profile"]], "tabular-data-resource")
  expect_identical(p$resources[[5]][["data"]], NULL)
  expect_identical(
    resources(p),
    c("deployments", "observations", "media", "new_df", "new_csv")
  )
})
# Schema handling: default schema is derived from the data; a schema provided
# as a list or as a JSON file path is used verbatim.
test_that("add_resource() uses provided schema (list or path) or creates one", {
  p <- create_package()
  df <- data.frame("col_1" = c(1, 2), "col_2" = c("a", "b"))
  df_csv <- test_path("data/df.csv")
  schema <- create_schema(df)
  schema_custom <- list(fields = list(
    list(name = "col_1", type = "number", title = "Column 1"),
    list(name = "col_2", type = "string", title = "Column 2")
  ))
  schema_file <- test_path("data/schema_custom.json")
  # df
  p <- add_resource(p, "new_df", df)
  p <- add_resource(p, "new_df_with_list_schema", df, schema_custom)
  p <- add_resource(p, "new_df_with_file_schema", df, schema_file)
  expect_identical(p$resources[[1]]$schema, schema)
  expect_identical(p$resources[[2]]$schema, schema_custom)
  expect_identical(p$resources[[3]]$schema, schema_custom)
  expect_identical(get_schema(p, "new_df"), schema)
  expect_identical(get_schema(p, "new_df_with_list_schema"), schema_custom)
  expect_identical(get_schema(p, "new_df_with_file_schema"), schema_custom)
  # csv
  p <- add_resource(p, "new_csv", df)
  p <- add_resource(p, "new_csv_with_list_schema", df, schema_custom)
  p <- add_resource(p, "new_csv_with_file_schema", df, schema_file)
  expect_identical(p$resources[[4]]$schema, schema)
  expect_identical(p$resources[[5]]$schema, schema_custom)
  expect_identical(p$resources[[6]]$schema, schema_custom)
  expect_identical(get_schema(p, "new_csv"), schema)
  expect_identical(get_schema(p, "new_csv_with_list_schema"), schema_custom)
  expect_identical(get_schema(p, "new_csv_with_file_schema"), schema_custom)
})
# Round trip: a resource added from a data frame can be read back unchanged.
test_that("add_resource() can add resource from data frame, readable by
  read_resource()", {
  p <- example_package
  df <- data.frame("col_1" = c(1, 2), "col_2" = c("a", "b"))
  p <- add_resource(p, "new", df)
  expect_identical(read_resource(p, "new"), dplyr::as_tibble(df))
})
# All supported path flavours (local, relative, absolute, remote URL,
# gzip-compressed) are stored verbatim and remain readable.
test_that("add_resource() can add resource from local, relative, absolute,
  remote or compressed CSV file, readable by read_resource()", {
  skip_if_offline()
  p <- example_package
  schema <- get_schema(p, "deployments")
  # Local
  local_path <- "data/df.csv"
  p <- add_resource(p, "local", local_path)
  expect_identical(p$resources[[4]]$path, local_path)
  expect_s3_class(read_resource(p, "local"), "tbl")
  # Relative (doesn't throw unsafe error)
  relative_path <- "../testthat/data/df.csv"
  p <- add_resource(p, "relative", relative_path)
  expect_identical(p$resources[[5]]$path, relative_path)
  expect_s3_class(read_resource(p, "relative"), "tbl")
  # Absolute (doesn't throw unsafe error)
  absolute_path <- system.file(
    "extdata", "deployments.csv", package = "frictionless" # Will start with /
  )
  p <- add_resource(p, "absolute", absolute_path, schema)
  expect_identical(p$resources[[6]]$path, absolute_path)
  expect_s3_class(read_resource(p, "absolute"), "tbl")
  # Remote
  remote_path <- file.path(
    "https://github.com/frictionlessdata/frictionless-r",
    "raw/main/inst/extdata/deployments.csv"
  )
  p <- add_resource(p, "remote", remote_path, schema)
  expect_identical(p$resources[[7]]$path, remote_path)
  expect_s3_class(read_resource(p, "remote"), "tbl")
  # Compressed
  compressed_file <- test_path("data/deployments.csv.gz")
  p <- add_resource(p, "compressed", compressed_file, schema)
  expect_identical(p$resources[[8]]$path, compressed_file)
  expect_s3_class(read_resource(p, "compressed"), "tbl")
})
# Non-default delimiters (";" and tab) are recorded in the CSV dialect and
# produce identical data on read back.
test_that("add_resource() can add resource from CSV file with other delimiter,
  readable by read_resource()", {
  p <- create_package()
  p <- add_resource(p, "df", test_path("data/df.csv"))
  expect_identical(p$resources[[1]]$dialect$delimiter, NULL)
  p <- add_resource(p, "df_delim_1", test_path("data/df_delim_1.txt"),
                    delim = ";")
  expect_identical(p$resources[[2]]$dialect$delimiter, ";")
  expect_identical(read_resource(p, "df_delim_1"), read_resource(p, "df"))
  p <- add_resource(p, "df_delim_2", test_path("data/df_delim_2.tsv"),
                    delim = "\t")
  expect_identical(p$resources[[3]]$dialect$delimiter, "\t")
  expect_identical(read_resource(p, "df_delim_2"), read_resource(p, "df"))
})
# format/mediatype/encoding are detected from the file: encoding is guessed
# (ASCII collapses to UTF-8), compression extensions are ignored, and tsv
# input gets its own format + mediatype.
test_that("add_resource() sets correct properties for CSV resources", {
  p <- create_package()
  path <- system.file("extdata", "deployments.csv", package = "frictionless")
  # Encoding UTF-8 (0.8), ISO-8859-1 (0.59), ISO-8859-2 (0.26)
  p <- add_resource(p, "deployments", path)
  expect_identical(p$resources[[1]]$format, "csv")
  expect_identical(p$resources[[1]]$mediatype, "text/csv")
  expect_identical(p$resources[[1]]$encoding, "UTF-8")
  # Encoding ISO-8859-1 (0.6), ISO-8859-1 (0.26)
  p <- add_resource(p, "deployments_encoding",
                    test_path("data/deployments_encoding.csv"))
  expect_identical(p$resources[[2]]$format, "csv")
  expect_identical(p$resources[[2]]$mediatype, "text/csv")
  expect_identical(p$resources[[2]]$encoding, "ISO-8859-1")
  expect_identical(
    read_resource(p, "deployments_encoding"), # read_resource understands encod.
    read_resource(p, "deployments")
  )
  # Encoding UTF-8 (0.8), ISO-8859-1 (0.59), ISO-8859-2 (0.26), zip compressed
  p <- add_resource(p, "deployments_zip", test_path("data/deployments.csv.zip"))
  expect_identical(p$resources[[3]]$format, "csv") # .zip extension ignored
  expect_identical(p$resources[[3]]$mediatype, "text/csv")
  expect_identical(p$resources[[3]]$encoding, "UTF-8")
  expect_identical(
    read_resource(p, "deployments_zip"),
    read_resource(p, "deployments")
  )
  # Encoding ASCII, delimiter ","
  p <- add_resource(p, "df", test_path("data/df.csv"))
  expect_identical(p$resources[[4]]$format, "csv")
  expect_identical(p$resources[[4]]$mediatype, "text/csv")
  expect_identical(p$resources[[4]]$encoding, "UTF-8") # ASCII is set to UTF-8
  # Encoding ASCII, delimiter ";", extension "txt"
  p <- add_resource(p, "df_delim_1", test_path("data/df_delim_1.txt"),
                    delim = ";")
  expect_identical(p$resources[[5]]$format, "csv")
  expect_identical(p$resources[[5]]$mediatype, "text/csv")
  expect_identical(p$resources[[5]]$encoding, "UTF-8")
  expect_identical(read_resource(p, "df_delim_1"), read_resource(p, "df"))
  # Encoding ASCII, delimiter "\t", extension "tsv"
  p <- add_resource(p, "df_delim_2", test_path("data/df_delim_2.tsv"),
                    delim = "\t")
  expect_identical(p$resources[[6]]$format, "tsv")
  expect_identical(p$resources[[6]]$mediatype, "text/tab-separated-values")
  expect_identical(p$resources[[6]]$encoding, "UTF-8")
  expect_identical(read_resource(p, "df_delim_2"), read_resource(p, "df"))
})
# Named ... arguments become extra resource properties, for both input kinds.
test_that("add_resource() sets ... arguments as extra properties", {
  p <- create_package()
  df <- data.frame("col_1" = c(1, 2), "col_2" = c("a", "b"))
  df_csv <- test_path("data/df.csv")
  # df
  p <- add_resource(p, "new_df", df, title = "custom_title", foo = "bar")
  expect_identical(p$resources[[1]]$title, "custom_title")
  expect_identical(p$resources[[1]]$foo, "bar")
  # csv
  p <- add_resource(p, "new_csv", df_csv, title = "custom_title", foo = "bar")
  expect_identical(p$resources[[2]]$title, "custom_title")
  expect_identical(p$resources[[2]]$foo, "bar")
})
| /tests/testthat/test-add_resource.R | permissive | frictionlessdata/frictionless-r | R | false | false | 14,257 | r | test_that("add_resource() returns a valid Data Package", {
p <- example_package
df <- data.frame("col_1" = c(1, 2), "col_2" = c("a", "b"))
df_csv <- test_path("data/df.csv")
schema <- create_schema(df)
expect_true(check_package(add_resource(p, "new", df)))
expect_true(check_package(add_resource(p, "new", df, schema)))
expect_true(check_package(add_resource(p, "new", df_csv)))
expect_true(check_package(
add_resource(p, "new", df, title = "New", foo = "bar")
))
})
test_that("add_resource() returns error on incorrect Data Package", {
df <- data.frame("col_1" = c(1, 2), "col_2" = c("a", "b"))
expect_error(
add_resource(list(), "new", df),
paste(
"`package` must be a list describing a Data Package,",
"created with `read_package()` or `create_package()`."
),
fixed = TRUE
)
})
test_that("add_resource() returns error when resource name contains invalid
characters", {
p <- example_package
df <- data.frame("col_1" = c(1, 2), "col_2" = c("a", "b"))
expect_error(
add_resource(p, "New", df),
paste(
"`New` must only contain lowercase alphanumeric characters plus",
"`.`, `-` and `_`."
),
fixed = TRUE
)
expect_error(add_resource(p, "nëw", df), "only contain lowercase")
expect_error(add_resource(p, " new", df), "only contain lowercase")
expect_error(add_resource(p, "new ", df), "only contain lowercase")
expect_error(add_resource(p, "n ew", df), "only contain lowercase")
expect_error(add_resource(p, "n/ew", df), "only contain lowercase")
expect_true(check_package(add_resource(p, "n.ew", df)))
expect_true(check_package(add_resource(p, "n-ew", df)))
expect_true(check_package(add_resource(p, "n_ew", df)))
expect_true(check_package(add_resource(p, "n3w", df)))
expect_true(check_package(add_resource(p, "n.3-w_10", df)))
})
test_that("add_resource() returns error when resource of that name already
exists", {
p <- example_package
df <- data.frame("col_1" = c(1, 2), "col_2" = c("a", "b"))
expect_error(
add_resource(p, "deployments", df),
"`package` already contains a resource named `deployments`.",
fixed = TRUE
)
})
test_that("add_resource() returns error when data is not data frame or
character", {
p <- example_package
expect_error(
add_resource(p, "new", list()),
"`data` must be a data frame or path(s) to CSV file(s).",
fixed = TRUE
)
})
test_that("add_resource() returns error on invalid or empty data frame", {
p <- example_package
df <- data.frame("col_1" = c(1, 2), "col_2" = c("a", "b"))
schema <- create_schema(df)
expect_error(
add_resource(p, "new", data.frame("col_1" = character(0))),
"`data` must be a data frame containing data.",
fixed = TRUE
)
expect_error(
add_resource(p, "new", data.frame("col_1" = character(0)), schema),
"`data` must be a data frame containing data.",
fixed = TRUE
)
# For more tests see test-check_schema.R
})
test_that("add_resource() returns error if CSV file cannot be found", {
skip_if_offline()
p <- example_package
df_csv <- test_path("data/df.csv")
schema <- create_schema(data.frame("col_1" = c(1, 2), "col_2" = c("a", "b")))
expect_error(
add_resource(p, "new", "no_such_file.csv"),
"Can't find file at `no_such_file.csv`.",
fixed = TRUE
)
expect_error(
add_resource(p, "new", "no_such_file.csv", schema),
"Can't find file at `no_such_file.csv`.",
fixed = TRUE
)
expect_error(
add_resource(p, "new", c(df_csv, "no_such_file.csv")),
"Can't find file at `no_such_file.csv`.",
fixed = TRUE
)
expect_error(
add_resource(p, "new", c("no_such_file.csv", df_csv)),
"Can't find file at `no_such_file.csv`.",
fixed = TRUE
)
expect_error(
add_resource(p, "new", c("no_such_file_1.csv", "no_such_file_2.csv")),
"Can't find file at `no_such_file_1.csv`.",
fixed = TRUE
)
expect_error(
add_resource(p, "new", "http://example.com/no_such_file.csv"),
"Can't find file at `http://example.com/no_such_file.csv`.",
fixed = TRUE
)
})
test_that("add_resource() returns error on mismatching schema and data", {
p <- example_package
df <- data.frame("col_1" = c(1, 2), "col_2" = c("a", "b"))
df_csv <- test_path("data/df.csv")
schema_invalid <- create_schema(df) # Not yet invalid
schema_invalid$fields[[1]]$name <- "no_such_col"
# df
expect_error(
add_resource(p, "new", df, schema_invalid),
paste(
"Field names in `schema` must match column names in data:",
"ℹ Field names: `no_such_col`, `col_2`",
"ℹ Column names: `col_1`, `col_2`",
sep = "\n"
),
fixed = TRUE
)
# csv
expect_error(
add_resource(p, "new", df_csv, schema_invalid),
paste(
"Field names in `schema` must match column names in data:",
"ℹ Field names: `no_such_col`, `col_2`",
"ℹ Column names: `col_1`, `col_2`",
sep = "\n"
),
fixed = TRUE
)
# For more tests see test-check_schema.R
})
test_that("add_resource() returns error if ... arguments are unnamed", {
p <- create_package()
df <- data.frame("col_1" = c(1, 2), "col_2" = c("a", "b"))
schema <- create_schema(df)
expect_error(
add_resource(p, "new", df, schema, delim = ",", "unnamed_value"),
"All arguments in `...` must be named.",
fixed = TRUE
)
})
test_that("add_resource() returns error if ... arguments are reserved", {
p <- create_package()
df <- data.frame("col_1" = c(1, 2), "col_2" = c("a", "b"))
expect_error(
add_resource(p, "new", df, name = "custom_name"),
paste(
"`name` must be removed as an argument.",
"It is automatically added as a resource property by the function."
),
fixed = TRUE
)
expect_error(
add_resource(p, "new", df, path = "custom_path", encoding = "utf8"),
paste(
"`path` must be removed as an argument.", # First conflicting argument
"It is automatically added as a resource property by the function."
),
fixed = TRUE
)
})
test_that("add_resource() adds resource", {
p <- example_package
df <- data.frame("col_1" = c(1, 2), "col_2" = c("a", "b"))
df_csv <- test_path("data/df.csv")
# df
p <- add_resource(p, "new_df", df)
expect_length(p$resources, 4) # Remains a list, now of length 4
expect_identical(p$resources[[4]][["name"]], "new_df")
expect_identical(p$resources[[4]][["profile"]], "tabular-data-resource")
expect_identical(p$resources[[4]][["data"]], df)
expect_identical(
resources(p),
c("deployments", "observations", "media", "new_df")
)
# csv
p <- add_resource(p, "new_csv", df_csv)
expect_length(p$resources, 5) # Remains a list, now of length 5
expect_identical(p$resources[[5]][["name"]], "new_csv")
expect_identical(p$resources[[5]][["profile"]], "tabular-data-resource")
expect_identical(p$resources[[5]][["data"]], NULL)
expect_identical(
resources(p),
c("deployments", "observations", "media", "new_df", "new_csv")
)
})
test_that("add_resource() uses provided schema (list or path) or creates one", {
p <- create_package()
df <- data.frame("col_1" = c(1, 2), "col_2" = c("a", "b"))
df_csv <- test_path("data/df.csv")
schema <- create_schema(df)
schema_custom <- list(fields = list(
list(name = "col_1", type = "number", title = "Column 1"),
list(name = "col_2", type = "string", title = "Column 2")
))
schema_file <- test_path("data/schema_custom.json")
# df
p <- add_resource(p, "new_df", df)
p <- add_resource(p, "new_df_with_list_schema", df, schema_custom)
p <- add_resource(p, "new_df_with_file_schema", df, schema_file)
expect_identical(p$resources[[1]]$schema, schema)
expect_identical(p$resources[[2]]$schema, schema_custom)
expect_identical(p$resources[[3]]$schema, schema_custom)
expect_identical(get_schema(p, "new_df"), schema)
expect_identical(get_schema(p, "new_df_with_list_schema"), schema_custom)
expect_identical(get_schema(p, "new_df_with_file_schema"), schema_custom)
# csv
p <- add_resource(p, "new_csv", df)
p <- add_resource(p, "new_csv_with_list_schema", df, schema_custom)
p <- add_resource(p, "new_csv_with_file_schema", df, schema_file)
expect_identical(p$resources[[4]]$schema, schema)
expect_identical(p$resources[[5]]$schema, schema_custom)
expect_identical(p$resources[[6]]$schema, schema_custom)
expect_identical(get_schema(p, "new_csv"), schema)
expect_identical(get_schema(p, "new_csv_with_list_schema"), schema_custom)
expect_identical(get_schema(p, "new_csv_with_file_schema"), schema_custom)
})
test_that("add_resource() can add resource from data frame, readable by
read_resource()", {
p <- example_package
df <- data.frame("col_1" = c(1, 2), "col_2" = c("a", "b"))
p <- add_resource(p, "new", df)
expect_identical(read_resource(p, "new"), dplyr::as_tibble(df))
})
test_that("add_resource() can add resource from local, relative, absolute,
remote or compressed CSV file, readable by read_resource()", {
skip_if_offline()
p <- example_package
schema <- get_schema(p, "deployments")
# Local
local_path <- "data/df.csv"
p <- add_resource(p, "local", local_path)
expect_identical(p$resources[[4]]$path, local_path)
expect_s3_class(read_resource(p, "local"), "tbl")
# Relative (doesn't throw unsafe error)
relative_path <- "../testthat/data/df.csv"
p <- add_resource(p, "relative", relative_path)
expect_identical(p$resources[[5]]$path, relative_path)
expect_s3_class(read_resource(p, "relative"), "tbl")
# Absolute (doesn't throw unsafe error)
absolute_path <- system.file(
"extdata", "deployments.csv", package = "frictionless" # Will start with /
)
p <- add_resource(p, "absolute", absolute_path, schema)
expect_identical(p$resources[[6]]$path, absolute_path)
expect_s3_class(read_resource(p, "absolute"), "tbl")
# Remote
remote_path <- file.path(
"https://github.com/frictionlessdata/frictionless-r",
"raw/main/inst/extdata/deployments.csv"
)
p <- add_resource(p, "remote", remote_path, schema)
expect_identical(p$resources[[7]]$path, remote_path)
expect_s3_class(read_resource(p, "remote"), "tbl")
# Compressed
compressed_file <- test_path("data/deployments.csv.gz")
p <- add_resource(p, "compressed", compressed_file, schema)
expect_identical(p$resources[[8]]$path, compressed_file)
expect_s3_class(read_resource(p, "compressed"), "tbl")
})
test_that("add_resource() can add resource from CSV file with other delimiter,
readable by read_resource()", {
p <- create_package()
p <- add_resource(p, "df", test_path("data/df.csv"))
expect_identical(p$resources[[1]]$dialect$delimiter, NULL)
p <- add_resource(p, "df_delim_1", test_path("data/df_delim_1.txt"),
delim = ";")
expect_identical(p$resources[[2]]$dialect$delimiter, ";")
expect_identical(read_resource(p, "df_delim_1"), read_resource(p, "df"))
p <- add_resource(p, "df_delim_2", test_path("data/df_delim_2.tsv"),
delim = "\t")
expect_identical(p$resources[[3]]$dialect$delimiter, "\t")
expect_identical(read_resource(p, "df_delim_2"), read_resource(p, "df"))
})
test_that("add_resource() sets correct properties for CSV resources", {
p <- create_package()
path <- system.file("extdata", "deployments.csv", package = "frictionless")
# Encoding UTF-8 (0.8), ISO-8859-1 (0.59), ISO-8859-2 (0.26)
p <- add_resource(p, "deployments", path)
expect_identical(p$resources[[1]]$format, "csv")
expect_identical(p$resources[[1]]$mediatype, "text/csv")
expect_identical(p$resources[[1]]$encoding, "UTF-8")
# Encoding ISO-8859-1 (0.6), ISO-8859-1 (0.26)
p <- add_resource(p, "deployments_encoding",
test_path("data/deployments_encoding.csv"))
expect_identical(p$resources[[2]]$format, "csv")
expect_identical(p$resources[[2]]$mediatype, "text/csv")
expect_identical(p$resources[[2]]$encoding, "ISO-8859-1")
expect_identical(
read_resource(p, "deployments_encoding"), # read_resource understands encod.
read_resource(p, "deployments")
)
# Encoding UTF-8 (0.8), ISO-8859-1 (0.59), ISO-8859-2 (0.26), zip compressed
p <- add_resource(p, "deployments_zip", test_path("data/deployments.csv.zip"))
expect_identical(p$resources[[3]]$format, "csv") # .zip extension ignored
expect_identical(p$resources[[3]]$mediatype, "text/csv")
expect_identical(p$resources[[3]]$encoding, "UTF-8")
expect_identical(
read_resource(p, "deployments_zip"),
read_resource(p, "deployments")
)
# Encoding ASCII, delimiter ","
p <- add_resource(p, "df", test_path("data/df.csv"))
expect_identical(p$resources[[4]]$format, "csv")
expect_identical(p$resources[[4]]$mediatype, "text/csv")
expect_identical(p$resources[[4]]$encoding, "UTF-8") # ASCII is set to UTF-8
# Encoding ASCII, delimiter ";", extension "txt"
p <- add_resource(p, "df_delim_1", test_path("data/df_delim_1.txt"),
delim = ";")
expect_identical(p$resources[[5]]$format, "csv")
expect_identical(p$resources[[5]]$mediatype, "text/csv")
expect_identical(p$resources[[5]]$encoding, "UTF-8")
expect_identical(read_resource(p, "df_delim_1"), read_resource(p, "df"))
# Encoding ASCII, delimiter "\t", extension "tsv"
p <- add_resource(p, "df_delim_2", test_path("data/df_delim_2.tsv"),
delim = "\t")
expect_identical(p$resources[[6]]$format, "tsv")
expect_identical(p$resources[[6]]$mediatype, "text/tab-separated-values")
expect_identical(p$resources[[6]]$encoding, "UTF-8")
expect_identical(read_resource(p, "df_delim_2"), read_resource(p, "df"))
})
test_that("add_resource() sets ... arguments as extra properties", {
p <- create_package()
df <- data.frame("col_1" = c(1, 2), "col_2" = c("a", "b"))
df_csv <- test_path("data/df.csv")
# df
p <- add_resource(p, "new_df", df, title = "custom_title", foo = "bar")
expect_identical(p$resources[[1]]$title, "custom_title")
expect_identical(p$resources[[1]]$foo, "bar")
# csv
p <- add_resource(p, "new_csv", df_csv, title = "custom_title", foo = "bar")
expect_identical(p$resources[[2]]$title, "custom_title")
expect_identical(p$resources[[2]]$foo, "bar")
})
|
## soil color mosaics
# Mosaic per-tile soil colour prediction rasters into one national GeoTIFF,
# logging start/end timestamps for the SLURM job.
library(raster); library(rgdal); library(sp)

# folder locations
root <- "/datasets/work/af-tern-mal-deb/work/projects/ternlandscapes_2019/soilColour/spatialPredictions/tiles/"
root.short <- "/datasets/work/af-tern-mal-deb/work/projects/ternlandscapes_2019/soilColour/spatialPredictions/"
slurm.out <- "/datasets/work/af-tern-mal-deb/work/projects/ternlandscapes_2019/soilColour/rcode/slurm/spatialprediction/mosaics/"

# numeric tile folder names, processed in ascending order
fols <- sort(as.numeric(list.files(root)))
length(fols)

### distance
# target prediction layer within each tile folder
f.name <- "type1_subsoil_G.tif"

# Collect one raster per tile folder that contains the target layer.
# seq_along() is safe when no folders are found (1:length(fols) would yield
# c(1, 0)); results are gathered with lapply and NULL gaps dropped, instead
# of growing a list with append() inside the loop.
raster_list <- lapply(seq_along(fols), function(i) {
  print(i)  # progress indicator in the SLURM log
  fpath1 <- paste0(root, fols[i])
  tile.file <- list.files(fpath1, pattern = f.name, full.names = TRUE)
  if (length(tile.file) == 0) NULL else raster(tile.file)
})
raster_list <- Filter(Negate(is.null), raster_list)

# SLURM output: record mosaic start time
sl1 <- substr(f.name, start = 1, stop = nchar(f.name) - 4)
itOuts <- c(as.character(Sys.time()))
write.table(itOuts,
            file = paste0(slurm.out, sl1, "_tilemos_begin.txt"),
            row.names = FALSE, col.names = FALSE, sep = ",")

# merge() options are passed through the argument list (raster::merge
# accepts filename/datatype/format/overwrite/na.rm as named arguments)
raster_list$filename <- paste0(root.short, sl1, ".tif")
raster_list$datatype <- "INT1U"
raster_list$format <- "GTiff"
raster_list$overwrite <- TRUE
raster_list$na.rm <- TRUE

# do the mosaic
mos <- do.call(merge, raster_list)

# SLURM output: record mosaic end time
itOuts <- c(as.character(Sys.time()))
write.table(itOuts,
            file = paste0(slurm.out, sl1, "_tilemos_end.txt"),
            row.names = FALSE, col.names = FALSE, sep = ",")
| /Production/DSM/SoilColour/digitalsoilmapping/mosaics/mos_type1_subsoil_G.R | permissive | AusSoilsDSM/SLGA | R | false | false | 1,650 | r | ## soil color mosaics
# mosaicing: merge per-tile GeoTIFF predictions into one national raster
library(raster); library(rgdal); library(sp)

# folder locations
root <- "/datasets/work/af-tern-mal-deb/work/projects/ternlandscapes_2019/soilColour/spatialPredictions/tiles/"
root.short <- "/datasets/work/af-tern-mal-deb/work/projects/ternlandscapes_2019/soilColour/spatialPredictions/"
slurm.out <- "/datasets/work/af-tern-mal-deb/work/projects/ternlandscapes_2019/soilColour/rcode/slurm/spatialprediction/mosaics/"

# tile folder names are numeric ids; sort them numerically
fols <- sort(as.numeric(list.files(root)))
fols
length(fols)

### collect the per-tile rasters for this layer
raster_list <- list() # initialise the list of rasters
f.name <- "type1_subsoil_G.tif"
for (i in seq_along(fols)) { # seq_along() is safe when no tiles exist (1:length() is not)
  print(i)
  fpath1 <- paste0(root, fols[i])
  # list the tile directory once and reuse the result (was listed twice per iteration)
  tile.file <- list.files(fpath1, pattern = f.name, full.names = TRUE)
  if (length(tile.file) == 0) next # this tile has no prediction for the layer
  r1 <- raster(tile.file)
  raster_list <- append(raster_list, r1)
}

# layer name without the ".tif" extension; used for output and marker files
sl1 <- substr(f.name, start = 1, stop = nchar(f.name) - 4)

# SLURM progress marker: write a timestamped file so job progress can be tracked
write.marker <- function(suffix) {
  write.table(as.character(Sys.time()),
              file = paste0(slurm.out, sl1, suffix),
              row.names = FALSE, col.names = FALSE, sep = ",")
}
write.marker("_tilemos_begin.txt")

# extra named elements of the list are passed through as arguments to raster::merge()
raster_list$filename <- paste0(root.short, sl1, ".tif")
raster_list$datatype <- "INT1U"
raster_list$format <- "GTiff"
raster_list$overwrite <- TRUE
raster_list$na.rm <- TRUE

# do the mosaic
mos <- do.call(merge, raster_list)

write.marker("_tilemos_end.txt")
|
# R Intro ADF&G
# Justin Priest
# justin.priest@alaska.gov

##### MOTIVATING EXAMPLE 3 #####
##### Groundfish #####
# Difficulty: Moderate

library(tidyverse)
library(lubridate)
library(RColorBrewer) # We'll use this library later for some nice colored charts

# Read in data, then rename the columns
groundfish <- read_csv("data/OceanAK_GroundfishSpecimens_2000-2020.csv") %>%
  rename("Lat_end" = "End Latitude Decimal Degrees",
         "Long_end" = "End Longitude Decimal Degrees",
         "G_Stat_Area" = "G Stat Area",
         "Target_Sp_Code" = "Target Species Code",
         "AvgDepth_Fthm" = "Avg Depth Fathoms",
         "Substrate" = "Substrate Type",
         "Length_mm" = "Length Millimeters",
         "Weight_kg" = "Weight Kilograms",
         "Count" = "Number of Specimens")

# Length-weight relationship of aged sablefish, coloured by age, faceted by year
groundfish %>%
  filter(Species == "Sablefish",
         !is.na(Age), # remove unaged fish; the old `Age != is.na(Age)` also dropped age-0 fish
         Year >= 2011) %>%
  ggplot(aes(x = Length_mm, y = Weight_kg, color = Age)) +
  geom_point() +
  scale_colour_gradientn(colors = rev(brewer.pal(11, "Spectral")), limits = c(1, 50)) +
  facet_wrap(~Year)

# Length distribution by sex
groundfish %>%
  filter(Species == "Sablefish",
         Sex == "Male" | Sex == "Female") %>%
  ggplot(aes(x = Sex, y = Length_mm, fill = Sex)) +
  geom_boxplot()

# In progress: logistic regression of sex on length (0 = male, 1 = female)
sablefish <- groundfish %>%
  filter(Species == "Sablefish",
         Sex == "Male" | Sex == "Female") %>%
  mutate(Sex_01 = ifelse(Sex == "Male", 0, 1))

sablefish_model <- glm(Sex_01 ~ Length_mm, family = "binomial", data = sablefish)
summary(sablefish_model)

pred <- crossing(Sex_01 = c("0", "1"), Length_mm = seq_len(1000))
pred <- pred %>%
  # type = "response" returns the predicted probability of being female;
  # exp() of the default link-scale prediction gives the odds, not a probability
  mutate(predictedsex = predict(sablefish_model, newdata = pred, type = "response"))
pred
| /code/motivatingexample3_groundfish.R | no_license | justinpriest/R_Intro_ADFG | R | false | false | 1,720 | r | # R Intro ADF&G
# Justin Priest
# justin.priest@alaska.gov

##### MOTIVATING EXAMPLE 3 #####
##### Groundfish #####
# Difficulty: Moderate

library(tidyverse)
library(lubridate)
library(RColorBrewer) # We'll use this library later for some nice colored charts

# Read in data, then rename the columns
groundfish <- read_csv("data/OceanAK_GroundfishSpecimens_2000-2020.csv") %>%
  rename("Lat_end" = "End Latitude Decimal Degrees",
         "Long_end" = "End Longitude Decimal Degrees",
         "G_Stat_Area" = "G Stat Area",
         "Target_Sp_Code" = "Target Species Code",
         "AvgDepth_Fthm" = "Avg Depth Fathoms",
         "Substrate" = "Substrate Type",
         "Length_mm" = "Length Millimeters",
         "Weight_kg" = "Weight Kilograms",
         "Count" = "Number of Specimens")

# Length-weight relationship of aged sablefish, coloured by age, faceted by year
groundfish %>%
  filter(Species == "Sablefish",
         !is.na(Age), # remove unaged fish; the old `Age != is.na(Age)` also dropped age-0 fish
         Year >= 2011) %>%
  ggplot(aes(x = Length_mm, y = Weight_kg, color = Age)) +
  geom_point() +
  scale_colour_gradientn(colors = rev(brewer.pal(11, "Spectral")), limits = c(1, 50)) +
  facet_wrap(~Year)

# Length distribution by sex
groundfish %>%
  filter(Species == "Sablefish",
         Sex == "Male" | Sex == "Female") %>%
  ggplot(aes(x = Sex, y = Length_mm, fill = Sex)) +
  geom_boxplot()

# In progress: logistic regression of sex on length (0 = male, 1 = female)
sablefish <- groundfish %>%
  filter(Species == "Sablefish",
         Sex == "Male" | Sex == "Female") %>%
  mutate(Sex_01 = ifelse(Sex == "Male", 0, 1))

sablefish_model <- glm(Sex_01 ~ Length_mm, family = "binomial", data = sablefish)
summary(sablefish_model)

pred <- crossing(Sex_01 = c("0", "1"), Length_mm = seq_len(1000))
pred <- pred %>%
  # type = "response" returns the predicted probability of being female;
  # exp() of the default link-scale prediction gives the odds, not a probability
  mutate(predictedsex = predict(sablefish_model, newdata = pred, type = "response"))
pred
|
# Tune wrappers ------------------------------------------------------------
# One makeTuneWrapper() per (learner x preprocessing) combination.
# The original file spelled out all 55 combinations by hand; the loops below
# build exactly the same objects with exactly the same arguments, without the
# duplication.  Objects are created via assign() under the original names
# (e.g. `svm_borda`, `rf_no_filter`, `lasso_pca`), so downstream code that
# refers to them keeps working unchanged.
# Inputs expected in the calling environment (defined elsewhere in the
# pipeline): `inner`, `rmse`, and per learner <lrn>:
#   filter_wrapper_<lrn>_<filter>, ps_<lrn>_filter, tune.ctrl_<lrn>,
#   lrn_<lrn>, ps_<lrn>, pca_wrapper_<lrn>, ps_<lrn>_pca

learner.names <- c("svm", "xgboost", "rf", "ridge", "lasso")
filter.names <- c("borda", "info.gain", "gain.ratio", "variance", "rank.cor",
                  "linear.cor", "mrmr", "cmim", "carscore")

for (lrn.name in learner.names) {
  ctrl <- get(paste0("tune.ctrl_", lrn.name))

  # filter-based wrappers: <learner>_<filter>
  ps.filter <- get(paste0("ps_", lrn.name, "_filter"))
  for (flt in filter.names) {
    assign(paste0(lrn.name, "_", flt),
           makeTuneWrapper(get(paste0("filter_wrapper_", lrn.name, "_", flt)),
                           resampling = inner, par.set = ps.filter,
                           control = ctrl, show.info = TRUE,
                           measures = list(rmse)))
  }

  # no feature selection: <learner>_no_filter (tunes the bare learner)
  assign(paste0(lrn.name, "_no_filter"),
         makeTuneWrapper(get(paste0("lrn_", lrn.name)),
                         resampling = inner,
                         par.set = get(paste0("ps_", lrn.name)),
                         control = ctrl, show.info = TRUE,
                         measures = list(rmse)))

  # PCA preprocessing: <learner>_pca
  assign(paste0(lrn.name, "_pca"),
         makeTuneWrapper(get(paste0("pca_wrapper_", lrn.name)),
                         resampling = inner,
                         par.set = get(paste0("ps_", lrn.name, "_pca")),
                         control = ctrl, show.info = TRUE,
                         measures = list(rmse)))
}
| /code/05-modeling/paper/tune-wrapper.R | permissive | johnmorehouse/2019-feature-selection | R | false | false | 15,063 | r | # SVM ---------------------------------------------------------------------
# Tune wrappers ------------------------------------------------------------
# One makeTuneWrapper() per (learner x preprocessing) combination.
# The original file spelled out all 55 combinations by hand; the loops below
# build exactly the same objects with exactly the same arguments, without the
# duplication.  Objects are created via assign() under the original names
# (e.g. `svm_borda`, `rf_no_filter`, `lasso_pca`), so downstream code that
# refers to them keeps working unchanged.
# Inputs expected in the calling environment (defined elsewhere in the
# pipeline): `inner`, `rmse`, and per learner <lrn>:
#   filter_wrapper_<lrn>_<filter>, ps_<lrn>_filter, tune.ctrl_<lrn>,
#   lrn_<lrn>, ps_<lrn>, pca_wrapper_<lrn>, ps_<lrn>_pca

learner.names <- c("svm", "xgboost", "rf", "ridge", "lasso")
filter.names <- c("borda", "info.gain", "gain.ratio", "variance", "rank.cor",
                  "linear.cor", "mrmr", "cmim", "carscore")

for (lrn.name in learner.names) {
  ctrl <- get(paste0("tune.ctrl_", lrn.name))

  # filter-based wrappers: <learner>_<filter>
  ps.filter <- get(paste0("ps_", lrn.name, "_filter"))
  for (flt in filter.names) {
    assign(paste0(lrn.name, "_", flt),
           makeTuneWrapper(get(paste0("filter_wrapper_", lrn.name, "_", flt)),
                           resampling = inner, par.set = ps.filter,
                           control = ctrl, show.info = TRUE,
                           measures = list(rmse)))
  }

  # no feature selection: <learner>_no_filter (tunes the bare learner)
  assign(paste0(lrn.name, "_no_filter"),
         makeTuneWrapper(get(paste0("lrn_", lrn.name)),
                         resampling = inner,
                         par.set = get(paste0("ps_", lrn.name)),
                         control = ctrl, show.info = TRUE,
                         measures = list(rmse)))

  # PCA preprocessing: <learner>_pca
  assign(paste0(lrn.name, "_pca"),
         makeTuneWrapper(get(paste0("pca_wrapper_", lrn.name)),
                         resampling = inner,
                         par.set = get(paste0("ps_", lrn.name, "_pca")),
                         control = ctrl, show.info = TRUE,
                         measures = list(rmse)))
}
|
# Fit y = A * exp(k * x) + B via a log-transformed linear regression.
# The original version ("something is done incorrectly") used the regression
# intercept directly as the amplitude, but the intercept of
# log|y - B| ~ x is log(|A|), so it must be exponentiated and A's sign
# restored before plotting the fitted curve.
x.data <- runif(20)
# generate some explanatory data points
A <- -5
B <- 100
k <- -4
# generate response data points with an exponential, plus noise
y.data <- A * exp(k * x.data) + B + 0.1 * runif(20)
# fit the log-transformed data to a linear relationship:
# log|y - B| = log|A| + k * x
myfit <- lm(log(abs(y.data - B)) ~ x.data)
plot(x.data, y.data)
summary(myfit)
# back-transform the intercept to get the amplitude, restoring the sign of A
# (inferred from the data: y - B is negative here because A < 0)
Af <- sign(mean(y.data - B)) * exp(myfit$coefficients[1])
kf <- myfit$coefficients[2]
curve(B + Af * exp(kf * x), add = TRUE)
# Fit y = A * exp(k * x) + B via a log-transformed linear regression.
# The intercept of log|y - B| ~ x is log(|A|), so it must be exponentiated
# and A's sign restored before plotting the fitted curve (the original used
# the intercept directly as the amplitude).
x.data <- runif(20)
# generate some explanatory data points
A <- -5
B <- 100
k <- -4
# generate response data points with an exponential, plus noise
y.data <- A * exp(k * x.data) + B + 0.1 * runif(20)
# fit the log-transformed data to a linear relationship:
# log|y - B| = log|A| + k * x
myfit <- lm(log(abs(y.data - B)) ~ x.data)
plot(x.data, y.data)
summary(myfit)
# back-transform the intercept to get the amplitude, restoring the sign of A
# (inferred from the data: y - B is negative here because A < 0)
Af <- sign(mean(y.data - B)) * exp(myfit$coefficients[1])
kf <- myfit$coefficients[2]
curve(B + Af * exp(kf * x), add = TRUE)
source("simFragileY.R")

# XfA, Xfa, XfAi, Xfai, XmA, Xma, XmAi, Xmai, YA, Ya, YAi, Yai
genotypes <- c(.5, .5, 0, 0, .25, .25, 0, 0, .25, .25, 0, 0)

# sz sets the size of the vectors of parameters that we will test
# so 100 means we will test 100 values from x to y of each variable
sz <- 100
# this will be our vector of aneuploidy rates associated with chromosome inversions
u.vec <- seq(from = 0, to = .08, length.out = sz)
# this will be the recombination distance between the SDR and the SA locus
r <- .1
# these are the indices for the chromosomes that we will want to track below
# XmAi, Xmai, YAi, Yai
inv.ind <- c(7, 8, 11, 12)

# Run one full parameter sweep for a given dominance factor h
# (recessive = 0, additive = .5, dominant = 1).  For every combination of
# aneuploidy rate (u.vec) and selection coefficient (s.vec), the population
# is first run to equilibrium, a rare inverted chromosome is introduced,
# and the system is iterated again to record the fate of the inversion.
# Returns a named list of four sz x sz data frames (XAi, Xai, YAi, Yai).
# The original script repeated this whole double loop three times (once per
# h value) with only the progress messages differing; it is factored out here.
run.sweep <- function(h, label) {
  results <- list()
  for (j in 1:4) {
    cat("\n", label, " ", j, sep = "")
    s.vec <- seq(from = 0, to = .25, length.out = sz)
    results[[j]] <- as.data.frame(matrix(, sz, sz))
    colnames(results[[j]]) <- u.vec
    row.names(results[[j]]) <- s.vec
    # inversions 2 (Xai) and 3 (YAi) are swept with negative selection
    # coefficients, matching the original script
    if (j == 2 | j == 3) s.vec <- -1 * s.vec
    for (ix in 1:sz) { # across aneuploidy rates
      # progress report every 5th column (the original cat(", ix") printed
      # the literal string "ix" rather than the index)
      if (ix %% 5 == 0) cat(", ", ix, sep = "")
      for (iy in 1:sz) { # across selection coefficients
        # let system equilibrate
        equi <- simFragileY(genotypes = genotypes, h = h, u = u.vec[ix],
                            s = s.vec[iy], r = r, report = "FATE",
                            criterion = "STABLE", reporting = 1)
        # introduce rare mutation type equals 3,4,7,8,11,12
        # XfA, Xfa, XfAi, Xfai, XmA, Xma, XmAi, Xmai, YA, Ya, YAi, Yai
        equi[inv.ind[j]] <- .005
        results[[j]][iy, ix] <- simFragileY(genotypes = equi, h = h,
                                            u = u.vec[ix], s = s.vec[iy],
                                            r = r, report = "FATE",
                                            criterion = "STABLE",
                                            reporting = 1)[inv.ind[j]]
      }
    }
  }
  names(results) <- c("XAi", "Xai", "YAi", "Yai")
  results
}

# recessive = 0, additive = .5, dominant = 1
results.add <- run.sweep(h = .5, label = "additive")
results.dom <- run.sweep(h = 1, label = "dominance")
results.rec <- run.sweep(h = 0, label = "recessive")
# results.rec, results.add, and results.dom are plotted in figure 2 of the manuscript | /selection.vs.aneuploidy.R | no_license | coleoguy/inversion2016 | R | false | false | 4,212 | r | source("simFragileY.R")
# XfA, Xfa, XfAi, Xfai, XmA, Xma, XmAi, Xmai, YA, Ya, YAi, Yai
# starting genotype frequencies: no inverted chromosomes present
genotypes <- c(.5, .5, 0, 0, .25, .25, 0, 0, .25, .25, 0, 0)
# sz sets the size of the parameter vectors, i.e. we test sz values of each
# variable across its range
sz <- 100
# vector of aneuploidy rates associated with chromosome inversions
u.vec <- seq(from = 0, to = .08, length.out = sz)
# recombination distance between the SDR and the SA locus
r <- .1
# indices of the inverted chromosomes tracked below: XmAi, Xmai, YAi, Yai
inv.ind <- c(7, 8, 11, 12)

# Run the full aneuploidy-rate x selection-coefficient grid for one dominance
# factor h (recessive = 0, additive = .5, dominant = 1).
#
# Returns a named list of four sz x sz data frames (XAi, Xai, YAi, Yai), each
# holding the equilibrium frequency reached by that inversion after it is
# introduced at low frequency into an already-equilibrated population.
# Rows are selection coefficients, columns are aneuploidy rates.
# (The original script repeated this ~30-line loop verbatim three times and
# left a debug cat("\nitterating") active inside the inner loop of the first
# copy only; both issues are fixed by this single helper.)
run.inversion.grid <- function(h, label) {
  results <- list()
  for (j in 1:4) {
    # progress report: which dominance scenario and which chromosome
    cat("\n", label, " ", j, sep = "")
    s.vec <- seq(from = 0, to = .25, length.out = sz)
    results[[j]] <- as.data.frame(matrix(NA, sz, sz))
    colnames(results[[j]]) <- u.vec
    row.names(results[[j]]) <- s.vec
    # the second (Xai) and third (YAi) chromosomes are tested under
    # negative selection coefficients
    if (j == 2 | j == 3) s.vec <- -1 * s.vec
    for (ix in seq_len(sz)) {   # across aneuploidy rates
      for (iy in seq_len(sz)) { # across selection coefficients
        # let the system equilibrate without the inversion
        equi <- simFragileY(genotypes = genotypes, h = h, u = u.vec[ix], s = s.vec[iy],
                            r = r, report = "FATE", criterion = "STABLE", reporting = 1)
        # introduce the rare inverted chromosome
        # XfA, Xfa, XfAi, Xfai, XmA, Xma, XmAi, Xmai, YA, Ya, YAi, Yai
        equi[inv.ind[j]] <- .005
        # iterate to the new stable state and record the inversion frequency
        results[[j]][iy, ix] <- simFragileY(genotypes = equi, h = h, u = u.vec[ix], s = s.vec[iy],
                                            r = r, report = "FATE", criterion = "STABLE", reporting = 1)[inv.ind[j]]
      }
    }
  }
  names(results) <- c("XAi", "Xai", "YAi", "Yai")
  results
}

# evaluate the same grid under the three dominance scenarios
results.add <- run.inversion.grid(h = .5, label = "additive")
results.dom <- run.inversion.grid(h = 1, label = "dominance")
results.rec <- run.inversion.grid(h = 0, label = "recessive")
# results.rec, results.add, and results.dom are plotted in figure 2 of the manuscript |
## read in the .rds files as data frames
NEI <- readRDS("summarySCC_PM25.rds")
SCCdata <- readRDS("Source_Classification_Code.rds")
# bind the two dfs on the variable name SCC which is a factor in both dfs
library(plyr)
df <- join(NEI, SCCdata, by = "SCC")
# pull out the columns of interest
library(dplyr)
df1 <- select(df, year, EI.Sector, Emissions)
# Keep coal combustion-related sources from the Emissions Inventory sector.
# BUG FIX: the original used `EI.Sector == c(...)`, which recycles the
# 3-element vector against the column and silently drops roughly two thirds
# of the matching rows; %in% performs the intended set-membership test.
df2 <- filter(df1, EI.Sector %in%
                c("Fuel Comb - Comm/Institutional - Coal",
                  "Fuel Comb - Electric Generation - Coal",
                  "Fuel Comb - Industrial Boilers, ICEs - Coal"))
# plot the sources without certain outliers showing
png(file = "plot4.png")
library(ggplot2)
qplot(year, Emissions, data = df2, color = EI.Sector,
      ylim = c(0, 7500),
      xlab = "Year", ylab = "PM2.5 Emitted (tons)",
      main = "US Ambient Air Pollution from Coal Combustion")
dev.off()
| /plot4.R | no_license | Mewzician/ExploratoryDataAnalysis_Project-2 | R | false | false | 973 | r | ##read in the .rds file as a dataframe
NEI <- readRDS("summarySCC_PM25.rds")
SCCdata <- readRDS("Source_Classification_Code.rds")
# bind the two dfs on the variable name SCC which is a factor in both dfs
library(plyr)
df <- join(NEI, SCCdata, by = "SCC")
# pull out the columns of interest
library(dplyr)
df1 <- select(df, year, EI.Sector, Emissions)
# Keep coal combustion-related sources from the Emissions Inventory sector.
# BUG FIX: the original used `EI.Sector == c(...)`, which recycles the
# 3-element vector against the column and silently drops roughly two thirds
# of the matching rows; %in% performs the intended set-membership test.
df2 <- filter(df1, EI.Sector %in%
                c("Fuel Comb - Comm/Institutional - Coal",
                  "Fuel Comb - Electric Generation - Coal",
                  "Fuel Comb - Industrial Boilers, ICEs - Coal"))
# plot the sources without certain outliers showing
png(file = "plot4.png")
library(ggplot2)
qplot(year, Emissions, data = df2, color = EI.Sector,
      ylim = c(0, 7500),
      xlab = "Year", ylab = "PM2.5 Emitted (tons)",
      main = "US Ambient Air Pollution from Coal Combustion")
dev.off()
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{getRearData}
\alias{getRearData}
\title{getRearData}
\usage{
getRearData(subject, session, file_path = getwd(), file_ext = "txt")
}
\arguments{
\item{subject}{subject ID corresponding to file with activity time series.}
\item{session}{session number corresponding to folder where file is located.}
\item{file_path}{optional specification of file path where session folders are located.
Defaults to current working directory.}
\item{file_ext}{optional specification of file extension if something other than "txt".}
}
\value{
data frame containing subject, session, zone, rearing count, start time,
duration, minimum rearing duration, maximum rearing duration, mean rearing duration, and
rearing variability score
}
\description{
Computes rearing counts, duration, and summary stats for a single subject.
}
\details{
WARNING: this function should only be run on files that are analysis ready, i.e.,
you've already run "fixData" and "labelZones" on all files.
}
\author{
Jason Shumake
}
| /rodact/man/getRearData.Rd | no_license | jashu/rodent-activity | R | false | false | 1,052 | rd | % Generated by roxygen2 (4.0.1): do not edit by hand
\name{getRearData}
\alias{getRearData}
\title{getRearData}
\usage{
getRearData(subject, session, file_path = getwd(), file_ext = "txt")
}
\arguments{
\item{subject}{subject ID corresponding to file with activity time series.}
\item{session}{session number corresponding to folder where file is located.}
\item{file_path}{optional specification of file path where session folders are located.
Defaults to current working directory.}
\item{file_ext}{optional specification of file extension if something other than "txt".}
}
\value{
data frame containing subject, session, zone, rearing count, start time,
duration, minimum rearing duration, maximum rearing duration, mean rearing duration, and
rearing variability score
}
\description{
Computes rearing counts, duration, and summary stats for a single subject.
}
\details{
WARNING: this function should only be run on files that are analysis ready, i.e.,
you've already run "fixData" and "labelZones" on all files.
}
\author{
Jason Shumake
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gene_set_enrichment.R
\name{gene_set_enrichment}
\alias{gene_set_enrichment}
\title{Evaluate the enrichment for a list of gene sets}
\usage{
gene_set_enrichment(
gene_list,
fdr_cut = 0.1,
modeling_results = fetch_data(type = "modeling_results"),
model_type = names(modeling_results)[1],
reverse = FALSE
)
}
\arguments{
\item{gene_list}{A named \code{list} object (could be a \code{data.frame}) where each
element of the list is a character vector of Ensembl gene IDs.}
\item{fdr_cut}{A \code{numeric(1)} specifying the FDR cutoff to use for
determining significance among the modeling results genes.}
\item{modeling_results}{Defaults to the output of
\code{fetch_data(type = 'modeling_results')}. This is a list of tables with the
columns \verb{f_stat_*} or \verb{t_stat_*} as well as \verb{p_value_*} and \verb{fdr_*} plus
\code{ensembl}. The column name is used to extract the statistic results, the
p-values, and the FDR adjusted p-values. Then the \code{ensembl} column is used
for matching in some cases. See \code{\link[=fetch_data]{fetch_data()}} for more details.}
\item{model_type}{A named element of the \code{modeling_results} list. By default
that is either \code{enrichment} for the model that tests one human brain layer
against the rest (one group vs the rest), \code{pairwise} which compares two
layers (groups) denoted by \code{layerA-layerB} such that \code{layerA} is greater
than \code{layerB}, and \code{anova} which determines if any layer (group) is different
from the rest adjusting for the mean expression level. The statistics for
\code{enrichment} and \code{pairwise} are t-statistics while the \code{anova} model ones
are F-statistics.}
\item{reverse}{A \code{logical(1)} indicating whether to multiply by \code{-1} the
input statistics and reverse the \code{layerA-layerB} column names (using the \code{-})
into \code{layerB-layerA}.}
}
\value{
A table in long format with the enrichment results using
\code{\link[stats:fisher.test]{stats::fisher.test()}}.
}
\description{
Using the layer-level (group-level) data, this function evaluates whether
a list of gene sets (Ensembl gene IDs) is enriched among the significant
genes (FDR < 0.1 by default) for a given model type result. Test the
alternative hypothesis that OR > 1, i.e. that gene set is over-represented in the
set of enriched genes. If you want to check depleted genes, change \code{reverse}
to \code{TRUE}.
}
\details{
Check
https://github.com/LieberInstitute/HumanPilot/blob/master/Analysis/Layer_Guesses/check_clinical_gene_sets.R
to see a full script from where this family of functions is derived from.
}
\examples{
## Read in the SFARI gene sets included in the package
asd_sfari <- utils::read.csv(
system.file(
"extdata",
"SFARI-Gene_genes_01-03-2020release_02-04-2020export.csv",
package = "spatialLIBD"
),
as.is = TRUE
)
## Format them appropriately
asd_sfari_geneList <- list(
Gene_SFARI_all = asd_sfari$ensembl.id,
Gene_SFARI_high = asd_sfari$ensembl.id[asd_sfari$gene.score < 3],
Gene_SFARI_syndromic = asd_sfari$ensembl.id[asd_sfari$syndromic == 1]
)
## Obtain the necessary data
if (!exists("modeling_results")) {
modeling_results <- fetch_data(type = "modeling_results")
}
## Compute the gene set enrichment results
asd_sfari_enrichment <- gene_set_enrichment(
gene_list = asd_sfari_geneList,
modeling_results = modeling_results,
model_type = "enrichment"
)
## Explore the results
asd_sfari_enrichment
}
\seealso{
Other Gene set enrichment functions:
\code{\link{gene_set_enrichment_plot}()}
}
\author{
Andrew E Jaffe, Leonardo Collado-Torres
}
\concept{Gene set enrichment functions}
| /man/gene_set_enrichment.Rd | no_license | LieberInstitute/spatialLIBD | R | false | true | 3,758 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gene_set_enrichment.R
\name{gene_set_enrichment}
\alias{gene_set_enrichment}
\title{Evaluate the enrichment for a list of gene sets}
\usage{
gene_set_enrichment(
gene_list,
fdr_cut = 0.1,
modeling_results = fetch_data(type = "modeling_results"),
model_type = names(modeling_results)[1],
reverse = FALSE
)
}
\arguments{
\item{gene_list}{A named \code{list} object (could be a \code{data.frame}) where each
element of the list is a character vector of Ensembl gene IDs.}
\item{fdr_cut}{A \code{numeric(1)} specifying the FDR cutoff to use for
determining significance among the modeling results genes.}
\item{modeling_results}{Defaults to the output of
\code{fetch_data(type = 'modeling_results')}. This is a list of tables with the
columns \verb{f_stat_*} or \verb{t_stat_*} as well as \verb{p_value_*} and \verb{fdr_*} plus
\code{ensembl}. The column name is used to extract the statistic results, the
p-values, and the FDR adjusted p-values. Then the \code{ensembl} column is used
for matching in some cases. See \code{\link[=fetch_data]{fetch_data()}} for more details.}
\item{model_type}{A named element of the \code{modeling_results} list. By default
that is either \code{enrichment} for the model that tests one human brain layer
against the rest (one group vs the rest), \code{pairwise} which compares two
layers (groups) denoted by \code{layerA-layerB} such that \code{layerA} is greater
than \code{layerB}, and \code{anova} which determines if any layer (group) is different
from the rest adjusting for the mean expression level. The statistics for
\code{enrichment} and \code{pairwise} are t-statistics while the \code{anova} model ones
are F-statistics.}
\item{reverse}{A \code{logical(1)} indicating whether to multiply by \code{-1} the
input statistics and reverse the \code{layerA-layerB} column names (using the \code{-})
into \code{layerB-layerA}.}
}
\value{
A table in long format with the enrichment results using
\code{\link[stats:fisher.test]{stats::fisher.test()}}.
}
\description{
Using the layer-level (group-level) data, this function evaluates whether
a list of gene sets (Ensembl gene IDs) is enriched among the significant
genes (FDR < 0.1 by default) for a given model type result. Test the
alternative hypothesis that OR > 1, i.e. that gene set is over-represented in the
set of enriched genes. If you want to check depleted genes, change \code{reverse}
to \code{TRUE}.
}
\details{
Check
https://github.com/LieberInstitute/HumanPilot/blob/master/Analysis/Layer_Guesses/check_clinical_gene_sets.R
to see a full script from where this family of functions is derived from.
}
\examples{
## Read in the SFARI gene sets included in the package
asd_sfari <- utils::read.csv(
system.file(
"extdata",
"SFARI-Gene_genes_01-03-2020release_02-04-2020export.csv",
package = "spatialLIBD"
),
as.is = TRUE
)
## Format them appropriately
asd_sfari_geneList <- list(
Gene_SFARI_all = asd_sfari$ensembl.id,
Gene_SFARI_high = asd_sfari$ensembl.id[asd_sfari$gene.score < 3],
Gene_SFARI_syndromic = asd_sfari$ensembl.id[asd_sfari$syndromic == 1]
)
## Obtain the necessary data
if (!exists("modeling_results")) {
modeling_results <- fetch_data(type = "modeling_results")
}
## Compute the gene set enrichment results
asd_sfari_enrichment <- gene_set_enrichment(
gene_list = asd_sfari_geneList,
modeling_results = modeling_results,
model_type = "enrichment"
)
## Explore the results
asd_sfari_enrichment
}
\seealso{
Other Gene set enrichment functions:
\code{\link{gene_set_enrichment_plot}()}
}
\author{
Andrew E Jaffe, Leonardo Collado-Torres
}
\concept{Gene set enrichment functions}
|
# Suppress R's own error echo: print the message to stderr and exit with
# status 1 so the calling framework (Galaxy) reports the failure cleanly.
# (T/F abbreviations replaced with TRUE/FALSE: T and F are reassignable.)
options(show.error.messages = FALSE,
        error = function() { cat(geterrmessage(), file = stderr()); q("no", 1, FALSE) })
# we need that to not crash galaxy with an UTF8 error on German LC settings.
loc <- Sys.setlocale("LC_MESSAGES", "en_US.UTF-8")
suppressPackageStartupMessages({
    library(fgsea)
    library(ggplot2)
    library(optparse)
})
# Command-line interface for the Galaxy tool wrapper; each option is declared
# with both a "short" and a long flag spelling of the same name.
option_list <- list(
    make_option(c("-rnk_file", "--rnk_file"), type="character", help="Path to ranked genes file"),
    make_option(c("-header", "--header"), type="logical", help = "Does ranked genes file have a header"),
    make_option(c("-sets_file", "--sets_file"), type="character", help = "Path to gene sets file"),
    make_option(c("-gmt", "--gmt"), type="logical", help = "Is the sets file in GMT format"),
    make_option(c("-out_tab","--out_tab"), type="character", help="Path to output file"),
    make_option(c("-min_size", "--min_size"), type="integer", help="Minimal size of a gene set to test. All pathways below the threshold are excluded."),
    make_option(c("-max_size", "--max_size"), type="integer", help="Maximal size of a gene set to test. All pathways above the threshold are excluded."),
    make_option(c("-n_perm", "--n_perm"), type="integer", help="Number of permutations to do. Minimial possible nominal p-value is about 1/nperm"),
    make_option(c("-rda_opt", "--rda_opt"), type="logical", help="Output RData file"),
    make_option(c("-plot_opt", "--plot_opt"), type="logical", help="Output plot"),
    make_option(c("-top_num", "--top_num"), type="integer", help="Top number of pathways to plot")
)
# build the parser and read the actual command-line arguments into a named list
parser <- OptionParser(usage = "%prog [options] file", option_list=option_list)
args = parse_args(parser)
# Unpack the parsed options into plain variables.
rnk_file <- args$rnk_file
# args$header is already logical, so the original 5-line if/else was
# redundant; isTRUE() additionally maps a missing option (NULL) to FALSE
# instead of crashing the condition with a zero-length argument.
header <- isTRUE(args$header)
sets_file <- args$sets_file
gmt <- args$gmt
out_tab <- args$out_tab
min_size <- args$min_size
max_size <- args$max_size
n_perm <- args$n_perm
rda_opt <- args$rda_opt
plot_opt <- args$plot_opt
top_num <- args$top_num
## Basically using the steps from the fgsea vignette
# ranked gene list: first column gene IDs, second column the ranking statistic
rankTab <- read.table(rnk_file, header=header, colClasses = c("character", "numeric"))
# named numeric vector of statistics, names = gene IDs, as fgsea expects
ranks <-rankTab[,2]
names(ranks) <- rankTab[,1]
if (gmt) {
    pathways <- gmtPathways(sets_file)
} else {
    # an RData file stores the gene-set list under some name: load() returns
    # that name and get() then fetches the object itself
    pathways <- load(sets_file)
    pathways <- get(pathways)
}
fgseaRes <- fgsea(pathways, ranks, minSize=min_size, maxSize=max_size, nperm=n_perm)
# sort by p-value; data.table non-standard evaluation (pval is a column)
fgseaRes <- fgseaRes[order(pval), ]
# Convert leadingEdge column from list to character to output
fgseaRes$leadingEdge <- sapply(fgseaRes$leadingEdge, toString)
write.table(fgseaRes, out_tab, sep="\t", row.names=FALSE, quote=FALSE)
## Optionally render a PDF with a summary table and per-pathway enrichment plots.
if (plot_opt) {
    pdf("fgsea_plots.pdf", width = 8)
    # fgseaRes is already sorted by p-value, so the first top_num rows are the
    # top pathways; extract just their names in one step.
    topPathways <- head(fgseaRes, n = top_num)$pathway
    ## Make summary table plot for top pathways
    plotGseaTable(pathways[topPathways], ranks, fgseaRes, gseaParam = 0.5,
        colwidths = c(5.3, 3, 0.7, 0.9, 0.9))
    # Make one enrichment plot per top pathway
    for (i in topPathways) {
        p <- plotEnrichment(pathways[[i]], ranks) + labs(title = i)
        print(p)
    }
    dev.off()
}
## Output RData file
if (rda_opt) {
save.image(file = "fgsea_analysis.RData")
} | /tools/fgsea/fgsea.R | permissive | bimbam23/tools-iuc | R | false | false | 3,128 | r | options( show.error.messages=F, error = function () { cat( geterrmessage(), file=stderr() ); q( "no", 1, F ) } )
# we need that to not crash galaxy with an UTF8 error on German LC settings.
loc <- Sys.setlocale("LC_MESSAGES", "en_US.UTF-8")
suppressPackageStartupMessages({
    library(fgsea)
    library(ggplot2)
    library(optparse)
})
# Command-line interface for the Galaxy tool wrapper; each option is declared
# with both a "short" and a long flag spelling of the same name.
option_list <- list(
    make_option(c("-rnk_file", "--rnk_file"), type = "character", help = "Path to ranked genes file"),
    make_option(c("-header", "--header"), type = "logical", help = "Does ranked genes file have a header"),
    make_option(c("-sets_file", "--sets_file"), type = "character", help = "Path to gene sets file"),
    make_option(c("-gmt", "--gmt"), type = "logical", help = "Is the sets file in GMT format"),
    make_option(c("-out_tab", "--out_tab"), type = "character", help = "Path to output file"),
    make_option(c("-min_size", "--min_size"), type = "integer", help = "Minimal size of a gene set to test. All pathways below the threshold are excluded."),
    make_option(c("-max_size", "--max_size"), type = "integer", help = "Maximal size of a gene set to test. All pathways above the threshold are excluded."),
    make_option(c("-n_perm", "--n_perm"), type = "integer", help = "Number of permutations to do. Minimial possible nominal p-value is about 1/nperm"),
    make_option(c("-rda_opt", "--rda_opt"), type = "logical", help = "Output RData file"),
    make_option(c("-plot_opt", "--plot_opt"), type = "logical", help = "Output plot"),
    make_option(c("-top_num", "--top_num"), type = "integer", help = "Top number of pathways to plot")
)
parser <- OptionParser(usage = "%prog [options] file", option_list = option_list)
args <- parse_args(parser)
# Unpack the parsed options. args$header is already logical, so the original
# 5-line if/else was redundant; isTRUE() additionally maps a missing option
# (NULL) to FALSE instead of crashing the condition.
rnk_file <- args$rnk_file
header <- isTRUE(args$header)
sets_file <- args$sets_file
gmt <- args$gmt
out_tab <- args$out_tab
min_size <- args$min_size
max_size <- args$max_size
n_perm <- args$n_perm
rda_opt <- args$rda_opt
plot_opt <- args$plot_opt
top_num <- args$top_num
## Basically using the steps from the fgsea vignette
# ranked gene list: first column gene IDs, second column the ranking statistic
rankTab <- read.table(rnk_file, header = header, colClasses = c("character", "numeric"))
ranks <- rankTab[, 2]
names(ranks) <- rankTab[, 1]
if (gmt) {
    pathways <- gmtPathways(sets_file)
} else {
    # an RData file stores the gene-set list under some name: load() returns
    # that name and get() then fetches the object itself
    pathways <- load(sets_file)
    pathways <- get(pathways)
}
fgseaRes <- fgsea(pathways, ranks, minSize = min_size, maxSize = max_size, nperm = n_perm)
# sort by p-value; data.table non-standard evaluation (pval is a column)
fgseaRes <- fgseaRes[order(pval), ]
# Convert leadingEdge column from list to character to output
fgseaRes$leadingEdge <- sapply(fgseaRes$leadingEdge, toString)
write.table(fgseaRes, out_tab, sep = "\t", row.names = FALSE, quote = FALSE)
## Optionally render a PDF with a summary table and per-pathway enrichment plots.
if (plot_opt) {
    pdf("fgsea_plots.pdf", width = 8)
    # fgseaRes is already sorted by p-value, so the first top_num rows are the
    # top pathways; extract just their names in one step.
    topPathways <- head(fgseaRes, n = top_num)$pathway
    ## Make summary table plot for top pathways
    plotGseaTable(pathways[topPathways], ranks, fgseaRes, gseaParam = 0.5,
        colwidths = c(5.3, 3, 0.7, 0.9, 0.9))
    # Make one enrichment plot per top pathway
    for (i in topPathways) {
        p <- plotEnrichment(pathways[[i]], ranks) + labs(title = i)
        print(p)
    }
    dev.off()
}
## Output RData file with the full workspace
if (rda_opt) {
    save.image(file = "fgsea_analysis.RData")
}
# You should create one R script called run_analysis.R that does the following.
# 1. Merges the training and the test sets to create one data set.
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
# 3. Uses descriptive activity names to name the activities in the data set
# 4. Appropriately labels the data set with descriptive variable names.
# 5. From the data set in step 4, creates a second, independent tidy data set with the
#    average of each variable for each activity and each subject.
if (!require("data.table")) {
  install.packages("data.table")
}
if (!require("reshape2")) {
  install.packages("reshape2")
}
library(data.table)
library(reshape2)
library(dplyr)
# Reading Data
X_test <- read.table("./data/UCI HAR Dataset/test/X_test.txt")
y_test <- read.table("./data/UCI HAR Dataset/test/y_test.txt")
subject_test <- read.table("./data/UCI HAR Dataset/test/subject_test.txt")
X_train <- read.table("./data/UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("./data/UCI HAR Dataset/train/y_train.txt")
subject_train <- read.table("./data/UCI HAR Dataset/train/subject_train.txt")
activity_labels <- read.table("./data/UCI HAR Dataset/activity_labels.txt")
features <- read.table("./data/UCI HAR Dataset/features.txt")
# 1. Merging training and test sets (train rows first, then test rows)
Merged_Data <- rbind(X_train, X_test)
# 2. Extracting only the measurements on the mean and standard deviation:
extract_features <- grep("mean()|std()", features[, 2])
Merged_Data <- Merged_Data[, extract_features]
# 3. Descriptive variable names: strip "()" from the feature labels
cleanFNames <- sapply(features[, 2], function(x) {
  gsub("[()]", "", x)
})
names(Merged_Data) <- cleanFNames[extract_features]
# Label the data set with descriptive labels.
# BUG FIX: subject must be stacked train-first to line up with
# rbind(X_train, X_test) above and bind_rows(y_train, y_test) below; the
# original stacked subject_test first, misaligning every subject ID with
# its measurements and activities.
subject <- bind_rows(subject_train, subject_test)
names(subject) <- 'subject'
activity <- bind_rows(y_train, y_test)
names(activity) <- 'activity'
# 4. Combining subject, activity, mean and std in only one dataset:
Merged_Data <- bind_cols(subject, activity, Merged_Data)
# Renaming labels of levels with activity_labels and applying it to Merged_Data
activity_ID <- factor(Merged_Data$activity)
levels(activity_ID) <- activity_labels[, 2]
Merged_Data$activity <- activity_ID
# View() is interactive-only; guard it so the script also runs under Rscript
if (interactive()) {
  View(Merged_Data)
}
# 5. Tidy data set with the average of each variable for each activity and each subject.
# (The original wrapped id.vars in parentheses — melt(..., (id.vars = ...)) —
# which passed the value positionally and silently created a global variable
# named id.vars; the named argument is the intended form.)
Data <- melt(Merged_Data, id.vars = c("subject", "activity"))
tidy_data <- dcast(Data, subject + activity ~ variable, mean)
names(tidy_data)[-c(1:2)] <- paste("[mean of]", names(tidy_data)[-c(1:2)])
# 6. Outputting a tidy data file:
write.table(tidy_data, file = "./tidy_data.txt", sep = ",")
| /run_analysis.R | no_license | HisraelPassarelli/Getting-and-Cleaning-Data | R | false | false | 2,848 | r | # You should create one R script called run_analysis.R that does the following.
# 1. Merges the training and the test sets to create one data set.
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
# 3. Uses descriptive activity names to name the activities in the data set
# 4. Appropriately labels the data set with descriptive variable names.
# 5. From the data set in step 4, creates a second, independent tidy data set with the
#    average of each variable for each activity and each subject.
if (!require("data.table")) {
  install.packages("data.table")
}
if (!require("reshape2")) {
  install.packages("reshape2")
}
library(data.table)
library(reshape2)
library(dplyr)
# Reading Data
X_test <- read.table("./data/UCI HAR Dataset/test/X_test.txt")
y_test <- read.table("./data/UCI HAR Dataset/test/y_test.txt")
subject_test <- read.table("./data/UCI HAR Dataset/test/subject_test.txt")
X_train <- read.table("./data/UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("./data/UCI HAR Dataset/train/y_train.txt")
subject_train <- read.table("./data/UCI HAR Dataset/train/subject_train.txt")
activity_labels <- read.table("./data/UCI HAR Dataset/activity_labels.txt")
features <- read.table("./data/UCI HAR Dataset/features.txt")
# 1. Merging training and test sets (train rows first, then test rows)
Merged_Data <- rbind(X_train, X_test)
# 2. Extracting only the measurements on the mean and standard deviation:
extract_features <- grep("mean()|std()", features[, 2])
Merged_Data <- Merged_Data[, extract_features]
# 3. Descriptive variable names: strip "()" from the feature labels
cleanFNames <- sapply(features[, 2], function(x) {
  gsub("[()]", "", x)
})
names(Merged_Data) <- cleanFNames[extract_features]
# Label the data set with descriptive labels.
# BUG FIX: subject must be stacked train-first to line up with
# rbind(X_train, X_test) above and bind_rows(y_train, y_test) below; the
# original stacked subject_test first, misaligning every subject ID with
# its measurements and activities.
subject <- bind_rows(subject_train, subject_test)
names(subject) <- 'subject'
activity <- bind_rows(y_train, y_test)
names(activity) <- 'activity'
# 4. Combining subject, activity, mean and std in only one dataset:
Merged_Data <- bind_cols(subject, activity, Merged_Data)
# Renaming labels of levels with activity_labels and applying it to Merged_Data
activity_ID <- factor(Merged_Data$activity)
levels(activity_ID) <- activity_labels[, 2]
Merged_Data$activity <- activity_ID
# View() is interactive-only; guard it so the script also runs under Rscript
if (interactive()) {
  View(Merged_Data)
}
# 5. Tidy data set with the average of each variable for each activity and each subject.
# (The original wrapped id.vars in parentheses — melt(..., (id.vars = ...)) —
# which passed the value positionally and silently created a global variable
# named id.vars; the named argument is the intended form.)
Data <- melt(Merged_Data, id.vars = c("subject", "activity"))
tidy_data <- dcast(Data, subject + activity ~ variable, mean)
names(tidy_data)[-c(1:2)] <- paste("[mean of]", names(tidy_data)[-c(1:2)])
# 6. Outputting a tidy data file:
write.table(tidy_data, file = "./tidy_data.txt", sep = ",")
|
#' Symmetric cardinal pH secondary growth model (Rosso et al., 1995)
#'
#' Evaluates the three-parameter cardinal pH model for the maximum specific
#' growth rate.  The model is symmetric around \code{pHopt}, so the maximal
#' growth pH is implied: \code{pHmax = 2 * pHopt - pHmin}.
#'
#' @param pH pH value(s) at which to evaluate the model (numeric).
#' @param pHmin minimal growth pH (numeric).
#' @param muopt optimal (maximum) specific growth rate, reached at
#'   \code{pHopt} (numeric).
#' @param pHopt optimal growth pH (numeric).
#' @return mumax, the maximum specific growth rate (numeric); 0 for pH
#'   outside \code{[pHmin, 2 * pHopt - pHmin]}.
#' @export
#'
#' @examples
Gamma_pH_3p <- function(pH, pHmin, muopt, pHopt) {
  # Implied maximal growth pH of the symmetric model.
  pHmax <- (2 * pHopt) - pHmin
  # 1 inside the admissible range [pHmin, pHmax], 0 outside; multiplying by
  # this zeroes the expression (and keeps sqrt() away from negative values).
  in_range <- (pH >= pHmin) & (pH <= pHmax)
  # Rosso cardinal pH term scaled by muopt; the model is expressed on the
  # square-root scale of mumax.
  sqrtmumax <- sqrt(in_range * muopt * (pH - pHmin) * (pH - pHmax) /
                      ((pH - pHmin) * (pH - pHmax) - (pH - pHopt)^2))
  # Square to report mumax itself.
  return(sqrtmumax^2)
}
| /R/Gamma_pH_3p.R | no_license | Subhasishbasak/predictive-microbiology | R | false | false | 820 | r | #' this function of secondary growth model describe the evolution of the square root of the maximum specific growth rate (sqrtmumax) as a function of pH, This is a symetric cardinal pH model developped by Rosso & al.in 1995 with three parameters (pHmin, pHopt, muopt), obtained by fixing pHmax =2
#'
#' @details Note the argument order: (pH, pHmin, muopt, pHopt).
#' @param pH pH value(s) at which to evaluate the model (numeric)
#' @param pHmin Minimal growth pH (numeric)
#' @param pHopt Optimal growth pH (numeric)
#' @param muopt Optimal (maximum) growth rate, reached at pHopt (numeric)
#' @return mumax = sqrtmumax^2, the maximum specific growth rate (numeric);
#'   0 for pH outside the admissible range [pHmin, 2 * pHopt - pHmin]
#' @export
#'
#' @examples
Gamma_pH_3p <- function(pH,pHmin,muopt,pHopt)
{sqrtmumax<-sqrt(((pH >= pHmin) & (pH <=(2 * pHopt- pHmin))) * muopt
* (pH - pHmin) * (pH - ((2 * pHopt) - pHmin)) / ((pH - pHmin) * (pH - ((2 * pHopt) - pHmin)) - (pH - pHopt)^2))
return((sqrtmumax^2))
}
|
## run_analysis.r -- reshape the UCI HAR smartphone data into a long, tidy
## table and write per-(activity, subject, feature-part) averages to
## "tidyData.txt".  Feature names are decomposed into descriptive columns
## (domain, acceleration type, instrument, jerk, magnitude, variable,
## direction) through a sequence of order-dependent gsub()/separate() calls.
library(dplyr)
library(tidyr)
features <- read.table("UCI HAR Dataset/features.txt")
testfeaturevalues <- read.table("UCI HAR Dataset/test/X_test.txt")
testactivityid <- read.table("UCI HAR Dataset/test/y_test.txt")
testsubjectid <- read.table("UCI HAR Dataset/test/subject_test.txt")
trainfeaturevalues <- read.table("UCI HAR Dataset/train/X_train.txt")
trainactivityid <- read.table("UCI HAR Dataset/train/y_train.txt")
trainsubjectid <- read.table("UCI HAR Dataset/train/subject_train.txt")
## Attach subject and activity ids to each measurement table, then put them
## in the first two columns ahead of the 561 feature columns.
testfeaturevalues <- testfeaturevalues %>% mutate(subjectid = testsubjectid[, 1], activityid = testactivityid$'V1') %>%
select(activityid, subjectid, 1:561)
trainfeaturevalues <- trainfeaturevalues %>% mutate(subjectid = trainsubjectid[, 1], activityid = trainactivityid$'V1') %>%
select(activityid, subjectid, 1:561)
joindata <- rbind(trainfeaturevalues, testfeaturevalues)
## this stores the indices of features that represent mean and standard deviation measurements
##from the 'features$V2' column. feature names that END with 'mean()' and 'std()' are taken
reqfeatures <- grep('mean\\()|std\\()', features$V2)
## in 'joindata' feature values start from third column since first two are subject and activity
##id; so 'reqfeatures' offset by 2. 'select' function from dplyr then used to select columns in
##'joindata' reperesenting mean and standard deviation measurements by using column indices
##values store in 'reqfeatures'
reqfeatures <- reqfeatures + 2
joindata <- joindata %>% select(1, 2, reqfeatures)
##names of columns 3 to 68 of joindata replaced by names of features they represent
reqfeatures0 <- reqfeatures - 2
names(joindata)[3:68] <- as.character(features$V2[reqfeatures0])
## Activity codes 1..6 mapped to descriptive names (single-digit codes, so
## the sequential gsub() calls cannot collide with each other).
joindata$activityid <- gsub('1', 'walking', joindata$activityid)
joindata$activityid <- gsub('2', 'walkupstair', joindata$activityid)
joindata$activityid <- gsub('3', 'walkdownstair', joindata$activityid)
joindata$activityid <- gsub('4', 'sitting', joindata$activityid)
joindata$activityid <- gsub('5', 'standing', joindata$activityid)
joindata$activityid <- gsub('6', 'lying', joindata$activityid)
##features are spread across columns 3:563. They are all put in one column called 'features' and
##their values put in a column called 'values'. wide data made longer
joindata <- gather(joindata, feature, value, -(1:2))
##first character of all feature names is either 't' or 'f' showing whether its a time domain or
##frequency feature value. A '.' is placed after first character and feature name then seperated
##and a new column 'domain' added.
## NOTE(review): the replacement references group \2 but the pattern defines
## only one group; '\\1.' appears to be the intent -- confirm this gsub()
## runs without error on the target R version.
joindata$feature <- gsub('^([a-z]{1})', '\\1.\\2', joindata$feature)
joindata <- separate(joindata, feature, into = c('domain', 'feature'), sep = '\\.')
##some feature names have "body" repeated twice in their names. extra 'body' string removed
joindata$feature <- gsub('^BodyBody', 'Body', joindata$feature)
##"feature" column further seperated into two new columns 'variable' and 'direction' and split
##using character '-' as marker
joindata <- separate(joindata, feature, into = c('feature', 'variable', 'direction'), sep = '-')
##a "." is placed in 'feature' column components wherever a lower case and then an upper case
##character occur together. Reason is that within every component diferent feature names start
##with an upper case character. then a split is made into columns 'accelerationtype',
##'instrument', 'jerk', 'euclideanmag' using the "." as a marker
joindata$feature <- gsub('(+[a-z])([A-Z]+)', '\\1.\\2', joindata$feature)
joindata <- separate(joindata, feature, into = c('accelerationtype', 'instrument', 'jerk', 'euclideanmag'), sep = '\\.')
##in the features where there was no Jerk but only Mag, the Mag got placed in 'jerk' column
##because of the way we seperated the features column. The two lines below first place the Mag
##back in 'euclideanmag' column and then place NA in coresponding locations in 'jerk' column
## NOTE(review): joindata$jerk may contain NA here (features with fewer than
## four name parts); NA values in a logical subscript assignment error in
## base R -- verify this line runs on real data.
joindata$euclideanmag[joindata$jerk == 'Mag'] <- "Mag"
## NOTE(review): a length-1 NA replacement in gsub() can NA-out the entire
## column rather than only the "Mag" entries;
## joindata$jerk[joindata$jerk == "Mag"] <- NA is the safer spelling --
## confirm intended behavior.
joindata$jerk <- gsub('^Mag', NA, joindata$jerk)
## two changes in domain column replacing 't' and 'f' by Time and Freq
joindata$domain <- gsub('t', 'Time', joindata$domain)
joindata$domain <- gsub('f', 'Freq', joindata$domain)
##accelerometer readings are of two types 'body' or 'gravity'. gyrometer has no acceleration
##typebut the way we have seperated features into columns 'body' appears in 'accelerationtype'
##columnof gyrometer readings. So indices of rows representing gyrometer readings are stored in
##'gyrolocations' vector and in those rows accelerationtype set to "NA"
gyrolocations <- grep("Gyro", joindata$instrument)
joindata$accelerationtype[gyrolocations] <- NA
##adjustments in instrument column
joindata$instrument <- gsub('Acc', 'Accelerometer', joindata$instrument)
joindata$instrument <- gsub('Gyro', 'Gyrometer', joindata$instrument)
##adjustments in variable column
joindata$variable <- gsub('mean\\()', 'Mean', joindata$variable)
joindata$variable <- gsub('std\\()', 'SD', joindata$variable)
## Final tidy table: one row per distinct combination of descriptive
## columns, with the count and mean of the measurement values.
tidy_data <- joindata %>% group_by(activityid, subjectid, domain, accelerationtype, instrument,
jerk, euclideanmag, variable, direction) %>%
summarize(occurance = n(), average = mean(value))
tidy_data$average <- round(tidy_data$average, 5)
write.table(tidy_data,"tidyData.txt",quote = FALSE, sep="\t\t", col.names = NA)
| /run_analysis.r | no_license | infinity73/Course-Project---Getting-and-Cleaning-Data | R | false | false | 5,502 | r | library(dplyr)
## run_analysis.r (continued) -- reshape the UCI HAR smartphone data into a
## long, tidy table and write per-(activity, subject, feature-part)
## averages to "tidyData.txt", decomposing feature names into descriptive
## columns via order-dependent gsub()/separate() calls.
library(tidyr)
features <- read.table("UCI HAR Dataset/features.txt")
testfeaturevalues <- read.table("UCI HAR Dataset/test/X_test.txt")
testactivityid <- read.table("UCI HAR Dataset/test/y_test.txt")
testsubjectid <- read.table("UCI HAR Dataset/test/subject_test.txt")
trainfeaturevalues <- read.table("UCI HAR Dataset/train/X_train.txt")
trainactivityid <- read.table("UCI HAR Dataset/train/y_train.txt")
trainsubjectid <- read.table("UCI HAR Dataset/train/subject_train.txt")
## Attach subject and activity ids to each measurement table, then put them
## in the first two columns ahead of the 561 feature columns.
testfeaturevalues <- testfeaturevalues %>% mutate(subjectid = testsubjectid[, 1], activityid = testactivityid$'V1') %>%
select(activityid, subjectid, 1:561)
trainfeaturevalues <- trainfeaturevalues %>% mutate(subjectid = trainsubjectid[, 1], activityid = trainactivityid$'V1') %>%
select(activityid, subjectid, 1:561)
joindata <- rbind(trainfeaturevalues, testfeaturevalues)
## this stores the indices of features that represent mean and standard deviation measurements
##from the 'features$V2' column. feature names that END with 'mean()' and 'std()' are taken
reqfeatures <- grep('mean\\()|std\\()', features$V2)
## in 'joindata' feature values start from third column since first two are subject and activity
##id; so 'reqfeatures' offset by 2. 'select' function from dplyr then used to select columns in
##'joindata' reperesenting mean and standard deviation measurements by using column indices
##values store in 'reqfeatures'
reqfeatures <- reqfeatures + 2
joindata <- joindata %>% select(1, 2, reqfeatures)
##names of columns 3 to 68 of joindata replaced by names of features they represent
reqfeatures0 <- reqfeatures - 2
names(joindata)[3:68] <- as.character(features$V2[reqfeatures0])
## Activity codes 1..6 mapped to descriptive names (single-digit codes, so
## the sequential gsub() calls cannot collide with each other).
joindata$activityid <- gsub('1', 'walking', joindata$activityid)
joindata$activityid <- gsub('2', 'walkupstair', joindata$activityid)
joindata$activityid <- gsub('3', 'walkdownstair', joindata$activityid)
joindata$activityid <- gsub('4', 'sitting', joindata$activityid)
joindata$activityid <- gsub('5', 'standing', joindata$activityid)
joindata$activityid <- gsub('6', 'lying', joindata$activityid)
##features are spread across columns 3:563. They are all put in one column called 'features' and
##their values put in a column called 'values'. wide data made longer
joindata <- gather(joindata, feature, value, -(1:2))
##first character of all feature names is either 't' or 'f' showing whether its a time domain or
##frequency feature value. A '.' is placed after first character and feature name then seperated
##and a new column 'domain' added.
## NOTE(review): the replacement references group \2 but the pattern defines
## only one group; '\\1.' appears to be the intent -- confirm this gsub()
## runs without error on the target R version.
joindata$feature <- gsub('^([a-z]{1})', '\\1.\\2', joindata$feature)
joindata <- separate(joindata, feature, into = c('domain', 'feature'), sep = '\\.')
##some feature names have "body" repeated twice in their names. extra 'body' string removed
joindata$feature <- gsub('^BodyBody', 'Body', joindata$feature)
##"feature" column further seperated into two new columns 'variable' and 'direction' and split
##using character '-' as marker
joindata <- separate(joindata, feature, into = c('feature', 'variable', 'direction'), sep = '-')
##a "." is placed in 'feature' column components wherever a lower case and then an upper case
##character occur together. Reason is that within every component diferent feature names start
##with an upper case character. then a split is made into columns 'accelerationtype',
##'instrument', 'jerk', 'euclideanmag' using the "." as a marker
joindata$feature <- gsub('(+[a-z])([A-Z]+)', '\\1.\\2', joindata$feature)
joindata <- separate(joindata, feature, into = c('accelerationtype', 'instrument', 'jerk', 'euclideanmag'), sep = '\\.')
##in the features where there was no Jerk but only Mag, the Mag got placed in 'jerk' column
##because of the way we seperated the features column. The two lines below first place the Mag
##back in 'euclideanmag' column and then place NA in coresponding locations in 'jerk' column
## NOTE(review): joindata$jerk may contain NA here (features with fewer than
## four name parts); NA values in a logical subscript assignment error in
## base R -- verify this line runs on real data.
joindata$euclideanmag[joindata$jerk == 'Mag'] <- "Mag"
## NOTE(review): a length-1 NA replacement in gsub() can NA-out the entire
## column rather than only the "Mag" entries;
## joindata$jerk[joindata$jerk == "Mag"] <- NA is the safer spelling --
## confirm intended behavior.
joindata$jerk <- gsub('^Mag', NA, joindata$jerk)
## two changes in domain column replacing 't' and 'f' by Time and Freq
joindata$domain <- gsub('t', 'Time', joindata$domain)
joindata$domain <- gsub('f', 'Freq', joindata$domain)
##accelerometer readings are of two types 'body' or 'gravity'. gyrometer has no acceleration
##typebut the way we have seperated features into columns 'body' appears in 'accelerationtype'
##columnof gyrometer readings. So indices of rows representing gyrometer readings are stored in
##'gyrolocations' vector and in those rows accelerationtype set to "NA"
gyrolocations <- grep("Gyro", joindata$instrument)
joindata$accelerationtype[gyrolocations] <- NA
##adjustments in instrument column
joindata$instrument <- gsub('Acc', 'Accelerometer', joindata$instrument)
joindata$instrument <- gsub('Gyro', 'Gyrometer', joindata$instrument)
##adjustments in variable column
joindata$variable <- gsub('mean\\()', 'Mean', joindata$variable)
joindata$variable <- gsub('std\\()', 'SD', joindata$variable)
## Final tidy table: one row per distinct combination of descriptive
## columns, with the count and mean of the measurement values.
tidy_data <- joindata %>% group_by(activityid, subjectid, domain, accelerationtype, instrument,
jerk, euclideanmag, variable, direction) %>%
summarize(occurance = n(), average = mean(value))
tidy_data$average <- round(tidy_data$average, 5)
write.table(tidy_data,"tidyData.txt",quote = FALSE, sep="\t\t", col.names = NA)
|
#' Recode factors, keeping only most frequent levels
#'
#' This function is a generic, with methods for `factor` and `character`
#' objects. It lists all unique values in the input, ranks them from the most to
#' the least frequent, and keeps the top `n` values. Other values are replaced
#' by the chosen replacement. Under the hood, this uses [forcats::fct_lump()]
#' and [forcats::fct_recode()].
#'
#' @author Thibaut Jombart, Zhian N. Kamvar
#'
#' @export
#'
#' @param x a `factor` or a `character` vector
#'
#' @param n the number of levels or values to keep
#'
#' @param replacement a single value to replace the less frequent values with
#'
#' @param ... further arguments passed to [forcats::fct_lump()].
#'
#' @examples
#'
#' ## make toy data
#' x <- sample(letters[1:10], 100, replace = TRUE)
#' sort(table(x), decreasing = TRUE)
#'
#' ## keep top values
#' top_values(x, 2) # top 2, others lumped into "other"
#' top_values(x, 2, NA) # top 2, others replaced with NA
#' top_values(x, 0) # extreme case, keep nothing
top_values <- function(x, n, ...) {
UseMethod("top_values")
}
#' @export
#' @rdname top_values
top_values.default <- function(x, n, ...) {
  # No applicable method: fail, reporting the full class vector of `x`.
  stop(sprintf("top_values has no method for the class: %s",
               paste(class(x), collapse = ", ")))
}
#' @export
#' @rdname top_values
#' @importFrom forcats fct_lump
top_values.factor <- function(x, n, replacement = "other", ...) {
  # fct_lump() rejects NA as `other_level`, so when the caller asks for NA
  # we lump into a timestamped sentinel level (very unlikely to collide
  # with a real level) and then recode that sentinel level to NA.
  if (is.na(replacement)) {
    sentinel <- sprintf("other%s", Sys.time())
    lumped <- forcats::fct_lump(x, n = n, other_level = sentinel, ...)
    forcats::fct_recode(lumped, NULL = sentinel)
  } else {
    forcats::fct_lump(x, n = n, other_level = replacement, ...)
  }
}
#' @export
#' @rdname top_values
top_values.character <- function(x, n, replacement = "other", ...) {
  # Round-trip through factor so the factor method does the lumping, then
  # return the result as a plain character vector.
  recoded <- top_values(factor(x), n = n, replacement = replacement, ...)
  as.character(recoded)
}
| /R/top_values.R | no_license | scottyaz/linelist | R | false | false | 2,168 | r | #' Recode factors, keeping only most frequent levels
#'
#' This function is a generic, with methods for `factor` and `character`
#' objects. It lists all unique values in the input, ranks them from the most to
#' the least frequent, and keeps the top `n` values. Other values are replaced
#' by the chosen replacement. Under the hood, this uses [forcats::fct_lump()]
#' and [forcats::fct_recode()].
#'
#' @author Thibaut Jombart, Zhian N. Kamvar
#'
#' @export
#'
#' @param x a `factor` or a `character` vector
#'
#' @param n the number of levels or values to keep
#'
#' @param replacement a single value to replace the less frequent values with
#'
#' @param ... further arguments passed to [forcats::fct_lump()].
#'
#' @examples
#'
#' ## make toy data
#' x <- sample(letters[1:10], 100, replace = TRUE)
#' sort(table(x), decreasing = TRUE)
#'
#' ## keep top values
#' top_values(x, 2) # top 2, others lumped into "other"
#' top_values(x, 2, NA) # top 2, others replaced with NA
#' top_values(x, 0) # extreme case, keep nothing
top_values <- function(x, n, ...) {
UseMethod("top_values")
}
#' @export
#' @rdname top_values
top_values.default <- function(x, n, ...) {
  # No applicable method: fail, reporting the full class vector of `x`.
  stop(sprintf("top_values has no method for the class: %s",
               paste(class(x), collapse = ", ")))
}
#' @export
#' @rdname top_values
#' @importFrom forcats fct_lump
top_values.factor <- function(x, n, replacement = "other", ...) {
  # fct_lump() rejects NA as `other_level`, so when the caller asks for NA
  # we lump into a timestamped sentinel level (very unlikely to collide
  # with a real level) and then recode that sentinel level to NA.
  if (is.na(replacement)) {
    sentinel <- sprintf("other%s", Sys.time())
    lumped <- forcats::fct_lump(x, n = n, other_level = sentinel, ...)
    forcats::fct_recode(lumped, NULL = sentinel)
  } else {
    forcats::fct_lump(x, n = n, other_level = replacement, ...)
  }
}
#' @export
#' @rdname top_values
top_values.character <- function(x, n, replacement = "other", ...) {
  # Round-trip through factor so the factor method does the lumping, then
  # return the result as a plain character vector.
  recoded <- top_values(factor(x), n = n, replacement = replacement, ...)
  as.character(recoded)
}
|
# seqsetvis vignette --------------------------------------------------------
## Extracted (purled) code chunks from the seqsetvis vignette; the
## "## ----name, options----" headers mirror the .Rmd chunk labels.
## ----load seqsetvis, message=FALSE-----------------------------------------
library(seqsetvis)
## ----load optional libs, message = FALSE-----------------------------------
library(GenomicRanges)
library(data.table)
library(cowplot)
theme_set(cowplot::theme_cowplot())
## ----overlap basic---------------------------------------------------------
## Merge the example CTCF narrowPeak calls into one GRanges with a logical
## membership column per input set.
olaps = ssvOverlapIntervalSets(CTCF_in_10a_narrowPeak_grs)
head(olaps)
## ----overlap GRangesList---------------------------------------------------
## The same overlap also works from a GRangesList input.
olaps_fromGRangesList = ssvOverlapIntervalSets(
GenomicRanges::GRangesList(CTCF_in_10a_narrowPeak_grs))
## ----ssvMakeMembTable basic------------------------------------------------
head(ssvMakeMembTable(olaps))
## ----ssvMakeMembTable numeric----------------------------------------------
## Membership tables can also be derived from plain lists of set members.
my_set_list = list(1:3, 2:3, 3:6)
ssvMakeMembTable(my_set_list)
## ----ssvMakeMembTable named numeric----------------------------------------
names(my_set_list) = c("first", "second", "third")
ssvMakeMembTable(my_set_list)
## ----ssvMakeMembTable character--------------------------------------------
my_set_list_char = lapply(my_set_list, function(x)letters[x])
ssvMakeMembTable(my_set_list_char)
## Set-overview plots of the same membership table, in several styles.
## ----barplot, fig.width=4, fig.height=3------------------------------------
ssvFeatureBars(olaps)
## ----pie, fig.width=5, fig.height=3----------------------------------------
ssvFeaturePie(olaps)
## ----venn, fig.width=4, fig.height=3---------------------------------------
ssvFeatureVenn(olaps)
## ----euler, fig.width=4, fig.height=3--------------------------------------
ssvFeatureEuler(olaps)
## ----binary heatmap, fig.width=3, fig.height=4-----------------------------
ssvFeatureBinaryHeatmap(olaps)
## ----ssvFetchBigwig, eval = FALSE------------------------------------------
## This chunk is eval = FALSE in the vignette: it shows how bw_gr would be
## fetched from the packaged bigwig files; the following chunk substitutes
## the precomputed package data instead.
bigwig_files = c(
system.file("extdata", "MCF10A_CTCF_FE_random100.bw",
package = "seqsetvis"),
system.file("extdata", "MCF10AT1_CTCF_FE_random100.bw",
package = "seqsetvis"),
system.file("extdata", "MCF10CA1_CTCF_FE_random100.bw",
package = "seqsetvis")
)
names(bigwig_files) = sub("_FE_random100.bw", "", basename(bigwig_files))
# names(bigwig_files) = letters[1:3]
olap_gr = CTCF_in_10a_overlaps_gr
## Round the 75th-percentile width to a multiple of the window size so the
## fetch windows tile the query regions evenly.
target_size = quantile(width(olap_gr), .75)
window_size = 50
target_size = round(target_size / window_size) * window_size
olap_gr = resize(olap_gr, target_size, fix = "center")
bw_gr = ssvFetchBigwig(bigwig_files, olap_gr, win_size = window_size)
bw_gr
## --------------------------------------------------------------------------
## Use the precomputed example data shipped with the package.
olap_gr = CTCF_in_10a_overlaps_gr
bw_gr = CTCF_in_10a_profiles_gr
## ----factorize-------------------------------------------------------------
## Turn the membership table into a single grouping factor per region.
olap_groups = ssvFactorizeMembTable(mcols(olap_gr))
## ----lineplot basic, fig.width=6, fig.height=2.5---------------------------
# facet labels will display better if split into multiple lines
bw_gr$facet_label = sub("_", "\n", bw_gr$sample)
ssvSignalLineplot(bw_data = subset(bw_gr, id %in% 1:12), facet_ = "facet_label")
## ----lineplot region facet, fig.width=5, fig.height=3----------------------
ssvSignalLineplot(bw_data = subset(bw_gr, id %in% 1:4), facet_ = "id")
## ----lineplot aggregated, fig.width=5, fig.height=2------------------------
ssvSignalLineplotAgg(bw_data = bw_gr)
## ----lineplot aggregated smoothed, fig.width=5, fig.height=2---------------
## spline_n smooths the aggregate profile with a 10-point spline.
ssvSignalLineplotAgg(bw_data = bw_gr, spline_n = 10)
## ----lineplot--------------------------------------------------------------
# append set info, modify aggregation group_ and add facet
olap_2groups = ssvFactorizeMembTable(ssvMakeMembTable(olap_gr)[, 1:2])
grouped_gr = GRanges(merge(bw_gr, olap_2groups))
grouped_gr = subset(grouped_gr, sample %in% c("MCF10A_CTCF", "MCF10AT1_CTCF"))
ssvSignalLineplotAgg(bw_data = grouped_gr, spline_n = 10,
group_ = c("sample", "group")) +
facet_wrap("group", ncol = 2) +
labs(title = "Aggregated by peak call set", y = "FE", x = "bp from center")
## ----scatterplot basic, fig.width=3, fig.height=3--------------------------
ssvSignalScatterplot(bw_gr, x_name = "MCF10A_CTCF", y_name = "MCF10AT1_CTCF")
## ----scatterplot all sets, fig.width=8, fig.height=3-----------------------
ssvSignalScatterplot(bw_gr, x_name = "MCF10A_CTCF", y_name = "MCF10AT1_CTCF",
color_table = olap_groups)
## ----scatterplot 2 sets, fig.width=6, fig.height=3-------------------------
# by subsetting the matrix returned by ssvMakeMembTable() we have a lot of
# control over the coloring.
olap_2groups = ssvFactorizeMembTable(ssvMakeMembTable(olap_gr)[, 1:2])
ssvSignalScatterplot(bw_gr, x_name = "MCF10A_CTCF", y_name = "MCF10AT1_CTCF",
color_table = olap_2groups)
## ----outside group, fig.width=5, fig.height=3------------------------------
## Color by membership in the third set only (drop = FALSE keeps a matrix).
olap_OutGroups = ssvFactorizeMembTable(
ssvMakeMembTable(olap_gr)[, 3, drop = FALSE])
ssvSignalScatterplot(bw_gr,
x_name = "MCF10A_CTCF",
y_name = "MCF10AT1_CTCF",
color_table = olap_OutGroups)
## ----scatterplot facet, fig.width=6, fig.height=4--------------------------
#tweaking group description will clean up plot labels a lot
olap_groups$group = gsub("_CTCF", "", olap_groups$group)
olap_groups$group = gsub(" & ", "\n", olap_groups$group)
ssvSignalScatterplot(bw_gr, x_name = "MCF10A_CTCF", y_name = "MCF10AT1_CTCF",
color_table = olap_groups) +
facet_wrap("group") + guides(color = "none") +
theme_linedraw()
## ----banded quantiles------------------------------------------------------
ssvSignalBandedQuantiles(bw_gr, by_ = "sample", hsv_grayscale = TRUE,
hsv_symmetric = TRUE,
hsv_reverse = TRUE)
## ----heatmap basic, message=FALSE, fig.width=5-----------------------------
ssvSignalHeatmap(bw_gr, nclust = 3, facet_ = "facet_label")
## ----heatmap perform pre-clustering----------------------------------------
## Clustering separately lets the cluster assignments be reused/inspected.
bw_clust = ssvSignalClustering(bw_gr, nclust = 3)
bw_clust
## ----heatmap cluster selection---------------------------------------------
subset(bw_clust, cluster_id == 3)
## ----heatmap use pre-cluster, message=FALSE, fig.width=5-------------------
ssvSignalHeatmap(bw_clust, facet_ = "facet_label")
## ----setup np_files bw_files-----------------------------------------------
## Resolve bigwig and narrowPeak file paths: either from a BiocFileCache of
## the full datasets (use_full_data) or from the small example files
## shipped in seqsetvis/extdata.
pkgdata_path = system.file("extdata",
package = "seqsetvis")
cache_path = paste0(pkgdata_path, "/.cache")
# the next line is enough to initialize the cache
# BiocFileCache(cache = cache_path)
## NOTE(review): this is a scalar condition, so `&&` would be conventional;
## require() is used (rather than library()) for its TRUE/FALSE return.
use_full_data = dir.exists(cache_path) & require(BiocFileCache)
## Hard override: the full-data branch below is disabled unconditionally.
use_full_data = FALSE
if(use_full_data){
library(BiocFileCache)
ssv_bfc = BiocFileCache(cache = cache_path)
## vapply template is a length-1 character, so each callback must return a
## single file path.
bw_files = vapply(seq_along(CTCF_in_10a_bigWig_urls), function(i){
rname = paste(names(CTCF_in_10a_bigWig_urls)[i],
"bigwig",
sep = ",")
fpath = CTCF_in_10a_bigWig_urls[i]
#bfcrpath calls bfcadd() if necessary and returns file path
bfcrpath(ssv_bfc, rname = rname, fpath = fpath)
}, "char")
names(bw_files) = names(CTCF_in_10a_bigWig_urls)
np_files = vapply(seq_along(CTCF_in_10a_narrowPeak_urls), function(i){
rname = paste(names(CTCF_in_10a_narrowPeak_urls)[i],
"narrowPeak",
sep = ",")
fpath = CTCF_in_10a_narrowPeak_urls[i]
#bfcrpath calls bfcadd() if necessary and returns file path
bfcrpath(ssv_bfc, rname = rname, fpath = fpath)
}, "a")
names(np_files) = names(CTCF_in_10a_narrowPeak_urls)
}else{
bw_files = vapply(c("MCF10A_CTCF", "MCF10AT1_CTCF", "MCF10CA1_CTCF"),
function(x){
system.file("extdata", paste0(x, "_FE_random100.bw"),
package = "seqsetvis")
}, "char")
# set filepaths
np_files = c(
system.file("extdata", "MCF10A_CTCF_random100.narrowPeak",
package = "seqsetvis"),
system.file("extdata", "MCF10AT1_CTCF_random100.narrowPeak",
package = "seqsetvis"),
system.file("extdata", "MCF10CA1_CTCF_random100.narrowPeak",
package = "seqsetvis")
)
names(np_files) = sub("_random100.narrowPeak", "",
x = basename(np_files))
}
## ----load package narrowPeak-----------------------------------------------
# load peak calls
np_grs = easyLoad_narrowPeak(np_files)
## ----overlap peaks---------------------------------------------------------
olaps = ssvOverlapIntervalSets(np_grs)
## ----ctcf fig1,hold=TRUE, fig.align='center', fig.height=4, fig.width = 8----
## Compose a bar plot, Venn, and Euler diagram into one cowplot figure.
p_bars = ssvFeatureBars(olaps, show_counts = FALSE) +
theme(legend.position = "left")
p_bars = p_bars + theme(axis.text.x = element_blank(),
axis.ticks.x = element_blank(),
legend.justification = "center") +
labs(fill = "cell line")
p_venn = ssvFeatureVenn(olaps, counts_txt_size = 4) +
guides(fill = "none", color = "none")
p_euler = ssvFeatureEuler(olaps) +
guides(fill = "none", color = "none")
cowplot::ggdraw() +
cowplot::draw_plot(p_bars, x = 0, y = 0, width = .4, height = .7) +
cowplot::draw_plot(p_venn, x = .4, y = .1, width = .3, height = .7) +
cowplot::draw_plot(p_euler, x = 0.7, y = .1, width = 0.3, height = .7) +
cowplot::draw_plot_label(c("CTCF binding in breast cancer cell lines",
"A", "B", "C"),
x = c(.04, .17, 0.4, .7),
y = c(.92, .75, .75, .75), size = 10, hjust = 0)
## ----color change, eval=FALSE,hold = TRUE, fig.align='center', fig.height=4, fig.width = 8----
## Not evaluated: demonstrates overriding the default fill/color scales.
# col_vals = c("MCF10A_CTCF" = 'red',
#              "MCF10AT1_CTCF" = "blue",
#              "MCF10CA1_CTCF" = "green")
# sf = scale_fill_manual(values = col_vals)
# sc = scale_color_manual(values = col_vals)
# cowplot::ggdraw() +
#   cowplot::draw_plot(p_bars + sf,
#                      x = 0, y = 0,
#                      width = .4, height = .7) +
#   cowplot::draw_plot(p_venn + sf + sc,
#                      x = .4, y = .1,
#                      width = .3, height = .7) +
#   cowplot::draw_plot(p_euler + sf + sc,
#                      x = 0.7, y = .1,
#                      width = 0.3, height = .7) +
#   cowplot::draw_plot_label(c("CTCF binding in breast cancer cell lines",
#                              "A", "B", "C"),
#                            x = c(.04, .17, 0.4, .7),
#                            y = c(.92, .75, .75, .75), size = 10, hjust = 0)
## ----load ChIPpeakAnno, message=FALSE--------------------------------------
## Annotate the merged peaks with the nearest hg38 TSS.
library(ChIPpeakAnno)
data(TSS.human.GRCh38)
macs.anno <- annotatePeakInBatch(olaps, AnnotationData=TSS.human.GRCh38)
## ----distance filter-------------------------------------------------------
## Keep only peaks within 1 kb of a TSS.
macs.anno = subset(macs.anno, distancetoFeature < 1000)
## ----subset 1--------------------------------------------------------------
## Genes near peaks shared by 10A and AT1 but absent from CA1.
subset(macs.anno, MCF10AT1_CTCF & MCF10A_CTCF & !MCF10CA1_CTCF)$feature
## ----subset 2--------------------------------------------------------------
## Genes near peaks unique to MCF10A.
subset(macs.anno, MCF10A_CTCF & !MCF10AT1_CTCF & !MCF10CA1_CTCF)$feature
## ----set fixed width, fig.height=3, fig.width=3----------------------------
## Choose a fixed region width (75th percentile, rounded up to a multiple
## of the window size) and visualize it against the width distribution.
window_size = 50
width_q75 = quantile(width(olaps), .75)
width_q75 = ceiling(width_q75 / window_size) * window_size
hist_res = hist(width(olaps))
lines(rep(width_q75, 2), c(0, max(hist_res$counts)), col = "red", lwd = 5)
text(width_q75, max(hist_res$counts), "fixedSize", adj = c(-.1, 1), col = "red")
## apply width
olaps_fixedSize = centerFixedSizeGRanges(olaps, width_q75)
## ----fetch package bw------------------------------------------------------
## Only fetched when the full cached data are available; otherwise bw_gr
## from the earlier chunks is reused.
if(use_full_data){
bw_gr = ssvFetchBigwig(file_paths = bw_files,
qgr = olaps_fixedSize,
win_size = 50)
}
## ----ctcf scatterplots, fig.width=10, fig.height=4, message=FALSE----------
# shortening colnames will make group names less cumbersome in plot legend
colnames(mcols(olaps_fixedSize)) = sub("_CTCF", "",
colnames(mcols(olaps_fixedSize)))
## One named color per possible membership group, drawn from RColorBrewer.
all_groups = levels(ssvFactorizeMembTable(
ssvMakeMembTable(olaps_fixedSize))$group)
all_colors = RColorBrewer::brewer.pal(length(all_groups), "Set1")
all_colors[5:7] = safeBrew(3, "Dark2")
names(all_colors) = all_groups
olap_groups_12 = ssvFactorizeMembTable(
ssvMakeMembTable(olaps_fixedSize)[, 1:2])
p_12 = ssvSignalScatterplot(bw_gr,
x_name = "MCF10A_CTCF",
y_name = "MCF10AT1_CTCF",
color_table = olap_groups_12) +
scale_color_manual(values = all_colors)
olap_groups_13 = ssvFactorizeMembTable(
ssvMakeMembTable(olaps_fixedSize)[, c(1,3)])
p_13 = ssvSignalScatterplot(bw_gr,
x_name = "MCF10A_CTCF",
y_name = "MCF10CA1_CTCF",
color_table = olap_groups_13) +
scale_color_manual(values = all_colors)
## With the full data, shrink points/alpha and add density contours.
if(use_full_data){
tp_12 = p_12 + scale_size_continuous(range = .1) +
scale_alpha_continuous(range = .1) +
geom_density2d(aes(color = group), h = 40, bins = 3)
tp_13 = p_13 + scale_size_continuous(range = .1) +
scale_alpha_continuous(range = .1) +
geom_density2d(aes(color = group), h = 40, bins = 3)
cowplot::plot_grid(tp_12 + labs(title = ""),
tp_13 + labs(title = ""),
label_y = .85, labels = "AUTO")
}else{
cowplot::plot_grid(p_12 + labs(title = ""),
p_13 + labs(title = ""),
label_y = .85, labels = "AUTO")
}
## ----ctcf heatmap, message=FALSE, fig.width=5------------------------------
bw_gr$facet_label = sub("_", "\n", bw_gr$sample)
clust_gr = ssvSignalClustering(bw_gr, nclust = 3, facet_ = "facet_label")
ssvSignalHeatmap(clust_gr, facet_ = "facet_label") + labs(fill = "FE",
y = "region",
x = "bp from center")
## ----ctcf recentered heatmap, message = FALSE, fig.width=10, fig.height=6----
## Re-center each region on its local signal maximum, then compare the
## original clustering to a fresh clustering of the recentered data.
center_gr = centerAtMax(clust_gr, view_size = 150,
by_ = "id", check_by_dupes = FALSE)
p_center_hmap = ssvSignalHeatmap(center_gr, facet_ = "facet_label") +
labs(fill = "FE",
y = "region",
x = "bp from center")
## since center_gr still retains clustering information, clustering is not
## repeated by default, the following reclusters the data.
clust_center_gr = ssvSignalClustering(center_gr, nclust = 3)
p_center_hmap_reclust = ssvSignalHeatmap(clust_center_gr,
facet_ = "facet_label") +
labs(fill = "FE",
y = "region",
x = "bp from center")
cowplot::plot_grid(p_center_hmap + labs(title = "original clustering"),
p_center_hmap_reclust + labs(title = "reclustered"))
## ----cluster annotation----------------------------------------------------
## Map cluster assignments back onto the overlap regions and annotate
## clusters 1-2 with the nearest TSS.
clust_df = as.data.frame(mcols(clust_gr))
clust_df = unique(clust_df[,c("id", "cluster_id")])
olap_clust_annot = olap_gr
mcols(olap_clust_annot) = data.frame(id = seq_along(olap_clust_annot))
olap_clust_annot = GRanges(merge(olap_clust_annot, clust_df))
olap_clust_annot = subset(olap_clust_annot, cluster_id %in% 1:2)
olap_clust_annot <- annotatePeakInBatch(olap_clust_annot,
AnnotationData=TSS.human.GRCh38)
olap_clust_annot$feature
## --------------------------------------------------------------------------
## chromHMM comparison setup (the use_full_data branch that consumes these
## variables continues below).
target_data = "MCF10A_CTCF"
chmm_win = 200 #window size is an important chromHMM parameter.
# 200 is the default window size and matches the state segmentation
if(use_full_data){
# set all file paths
chmm_bw_file = bfcrpath(ssv_bfc, rnames = paste(target_data, "bigwig",
sep = ","))
chmm_np_file = bfcrpath(ssv_bfc, rnames = paste(target_data, "narrowPeak",
sep = ","))
chmm_seg_file = bfcrpath(ssv_bfc, rnames = "MCF7,segmentation",
fpath = chromHMM_demo_segmentation_url)
query_chain = bfcquery(ssv_bfc, "hg19ToHg38,chain")
if(nrow(query_chain) == 0){
chain_hg19ToHg38_gz = bfcrpath(ssv_bfc, rnames = "hg19ToHg38,gz",
fpath = chromHMM_demo_chain_url)
ch_raw = readLines(gzfile(chain_hg19ToHg38_gz))
ch_file = bfcnew(ssv_bfc, rname = "hg19ToHg38,chain")
writeLines(ch_raw, con = ch_file)
}
chmm_chain_file = bfcrpath(ssv_bfc, rnames = "hg19ToHg38,chain")
ch = rtracklayer::import.chain(chmm_chain_file)
# load segmentation data
chmm_gr = rtracklayer::import.bed(chmm_seg_file)
#cleanup state names.
chmm_gr$name = gsub("\\+", " and ", chmm_gr$name)
chmm_gr$name = gsub("_", " ", chmm_gr$name)
#setup color to state mapping
colDF = unique(mcols(chmm_gr)[c("name", "itemRgb")])
state_colors = colDF$itemRgb
names(state_colors) = colDF$name
#liftover states from hg19 to hg38
ch = rtracklayer::import.chain(chmm_chain_file)
chmm_gr_hg38 = rtracklayer::liftOver(chmm_gr, ch)
chmm_gr_hg38 = unlist(chmm_gr_hg38)
chmm_grs_list = as.list(GenomicRanges::split(chmm_gr_hg38,
chmm_gr_hg38$name))
#transform narrowPeak ranges to summit positions
chmm_np_grs = easyLoad_narrowPeak(chmm_np_file, file_names = target_data)
chmm_summit_grs = lapply(chmm_np_grs, function(x){
start(x) = start(x) + x$relSummit
end(x) = start(x)
x
})
qlist = append(chmm_summit_grs[1], chmm_grs_list)
chmm_olaps = ssvOverlapIntervalSets(qlist, use_first = TRUE)
#discard the columns for peak call and no_hit, not informative here.
mcols(chmm_olaps)[[1]] = NULL
chmm_olaps$no_hit = NULL
#total width of genome assigned each state
state_total_widths = sapply(chmm_grs_list, function(my_gr){
sum(as.numeric(width(my_gr)))
})
#Expand state regions into 200 bp windows.
state_wingrs = lapply(chmm_grs_list, function(my_gr){
st = my_gr$name[1]
wgr = unlist(slidingWindows(my_gr, chmm_win, chmm_win))
wgr$state = st
wgr
})
state_wingrs = unlist(GRangesList(state_wingrs))
# fetch bw data for each state
# it probably isn't useful to grab every single window for each state
# so we can cap the number of each state carried forward
max_per_state = 5000
# flank size zooms out a bit from each chromHMM window
flank_size = 400
state_split = split(state_wingrs, state_wingrs$state)
state_split = lapply(state_split, function(my_gr){
samp_gr = sample(my_gr, min(length(my_gr), max_per_state))
samp_gr = sort(samp_gr)
names(samp_gr) = seq_along(samp_gr)
samp_gr
})
state_gr = unlist(GRangesList(state_split))
state_gr = resize(state_gr, width = chmm_win + 2 * flank_size,
fix = "center")
bw_states_gr = ssvFetchBigwig(file_paths = chmm_bw_file,
qgr = state_gr,
win_size = 50)
bw_states_gr$grp = sub("\\..+", "", bw_states_gr$id)
bw_states_gr$grp_id = sub(".+\\.", "", bw_states_gr$id)
}else{
max_per_state = 20
flank_size = 400
state_colors = chromHMM_demo_state_colors
bw_states_gr = chromHMM_demo_bw_states_gr
chmm_olaps = chromHMM_demo_overlaps_gr
state_total_widths = chromHMM_demo_state_total_widths
}
## ----state raw, message = FALSE, fig.width=3, fig.height=3-----------------
# Raw count of peak summits falling in each chromHMM state.
olaps_df = as.data.frame(mcols(chmm_olaps))
# state names picked up "." separators as data.frame column names; undo that
colnames(olaps_df) = gsub("\\.", " ", colnames(olaps_df))
p_state_raw_count = ssvFeatureBars(olaps_df, show_counts = FALSE) +
labs(fill = "state", x = "") +
scale_fill_manual(values = state_colors) +
theme_cowplot() + guides(fill = "none") +
theme(axis.text.x = element_text(angle = 90, size = 8,
hjust = 1, vjust = .5))
p_state_raw_count
## ----state enrichment, fig.width=3, fig.height=3---------------------------
# Enrichment = fraction of peaks in a state / fraction of genome in that
# state, so 1 means "as expected by chance".
state_width_fraction = state_total_widths / sum(state_total_widths)
state_peak_hits = colSums(olaps_df)
state_peak_fraction = state_peak_hits / sum(state_peak_hits)
enrichment = state_peak_fraction /
state_width_fraction[names(state_peak_fraction)]
enrich_df = data.frame(state = names(enrichment), enrichment = enrichment)
p_state_enrichment = ggplot(enrich_df) +
geom_bar(aes(x = state, fill = state, y = enrichment), stat = "identity") +
labs(x = "") +
theme_cowplot() + guides(fill = "none") +
scale_fill_manual(values = state_colors) +
theme(axis.text.x = element_text(angle = 90, size = 8,
hjust = 1, vjust = .5))
p_state_enrichment
## ---- message=FALSE, fig.width=6-------------------------------------------
# Average FE profile per state, with vertical marks at the chromHMM window
# edges (+/- chmm_win/2 around the center).
p_agg_tracks = ssvSignalLineplotAgg(bw_states_gr,
sample_ = "grp",
color_ = "grp")
# pull the plotted y-range back out of the built plot for the annotations
gb = ggplot2::ggplot_build(p_agg_tracks)
yrng = range(gb$data[[1]]$y)
p_agg_tracks = p_agg_tracks +
scale_color_manual(values = state_colors) +
annotate("line", x = rep(-chmm_win/2, 2), y = yrng) +
annotate("line", x = rep(chmm_win/2, 2), y = yrng) +
labs(y = "FE", x = "bp", color = "state",
title = paste("Average FE by state,", target_data),
subtitle = paste("states sampled to",
max_per_state,
chmm_win,
"bp windows each\n",
flank_size,
"bp flanking each side")) +
theme(plot.title = element_text(hjust = 0))
p_agg_tracks
## ----state heatmap, fig.width=8--------------------------------------------
# Heatmap of individual windows, sorted within each state by the signal at
# the bin closest to the window center.
pdt = as.data.table(mcols(bw_states_gr))
pdt$grp_id = as.integer(pdt$grp_id)
# reassign grp_id to sort within each state set
dt_list = lapply(unique(pdt$grp), function(state){
dt = pdt[grp == state]
# ymax = y value at the x bin nearest 0 (the window center), per window
dtmax = dt[, .(ymax = y[which(x == x[order(abs(x))][1])]), by = grp_id]
dtmax = dtmax[order(ymax, decreasing = TRUE)]
dtmax[, grp_o := seq_len(.N)]
dtmax$ymax = NULL
dt = merge(dtmax, dt)
# overwrite grp_id with the rank so plotting order reflects signal strength
dt[, grp_id := grp_o]
dt$grp_o = NULL
dt
})
# reassemble
pdt = rbindlist(dt_list)
# heatmap facetted by state and sorted in decreasing order
p_state_hmap = ggplot(pdt) +
geom_raster(aes(x = x, y = grp_id, fill = y)) +
scale_y_reverse() +
facet_wrap("grp", nrow = 2) +
theme(axis.text.y = element_blank(),
axis.ticks.y = element_blank(),
axis.line.y = element_blank(),
axis.text.x = element_text(angle = 90, hjust = 1, vjust = .5),
strip.text = element_text(size= 8)) +
scale_fill_gradientn(colors = c("white", "orange", "red")) +
labs(y = "", fill = "FE",
x = "bp from local summit",
title = paste(max_per_state, "random regions per state"))
p_state_hmap
## ---- fig.height=12, fig.width=8-------------------------------------------
# Final composite figure: count + enrichment bars on top, the aggregate
# profiles in the middle, and the per-window heatmap filling the remainder.
bar_height = .25
line_height = .3
ggdraw() +
draw_plot(p_state_raw_count + guides(fill = "none"),
x = 0,
y = 1 - bar_height,
width = .5,
height = bar_height) +
draw_plot(p_state_enrichment + guides(fill = "none"),
x = .5,
y = 1 - bar_height,
width = .5,
height = bar_height) +
draw_plot(p_agg_tracks,
x = 0,
y = 1 - bar_height - line_height,
width = 1,
height = line_height) +
draw_plot(p_state_hmap,
x = 0,
y = 0,
width = 1,
height = 1 - bar_height - line_height) +
draw_plot_label(LETTERS[1:4],
c(0, 0.48, 0, 0),
c(1, 1, 1 - bar_height, 1 - bar_height - line_height),
size = 15)
| /learn/seqsetvis.R | no_license | tsoleary/rna_seq | R | false | false | 24,006 | r | # seqsetvis vignette --------------------------------------------------------
## ----load seqsetvis, message=FALSE-----------------------------------------
library(seqsetvis)
## ----load optional libs, message = FALSE-----------------------------------
library(GenomicRanges)
library(data.table)
library(cowplot)
theme_set(cowplot::theme_cowplot())
## ----overlap basic---------------------------------------------------------
olaps = ssvOverlapIntervalSets(CTCF_in_10a_narrowPeak_grs)
head(olaps)
## ----overlap GRangesList---------------------------------------------------
olaps_fromGRangesList = ssvOverlapIntervalSets(
GenomicRanges::GRangesList(CTCF_in_10a_narrowPeak_grs))
## ----ssvMakeMembTable basic------------------------------------------------
head(ssvMakeMembTable(olaps))
## ----ssvMakeMembTable numeric----------------------------------------------
my_set_list = list(1:3, 2:3, 3:6)
ssvMakeMembTable(my_set_list)
## ----ssvMakeMembTable named numeric----------------------------------------
names(my_set_list) = c("first", "second", "third")
ssvMakeMembTable(my_set_list)
## ----ssvMakeMembTable character--------------------------------------------
my_set_list_char = lapply(my_set_list, function(x)letters[x])
ssvMakeMembTable(my_set_list_char)
## ----barplot, fig.width=4, fig.height=3------------------------------------
ssvFeatureBars(olaps)
## ----pie, fig.width=5, fig.height=3----------------------------------------
ssvFeaturePie(olaps)
## ----venn, fig.width=4, fig.height=3---------------------------------------
ssvFeatureVenn(olaps)
## ----euler, fig.width=4, fig.height=3--------------------------------------
ssvFeatureEuler(olaps)
## ----binary heatmap, fig.width=3, fig.height=4-----------------------------
ssvFeatureBinaryHeatmap(olaps)
## ----ssvFetchBigwig, eval = FALSE------------------------------------------
bigwig_files = c(
system.file("extdata", "MCF10A_CTCF_FE_random100.bw",
package = "seqsetvis"),
system.file("extdata", "MCF10AT1_CTCF_FE_random100.bw",
package = "seqsetvis"),
system.file("extdata", "MCF10CA1_CTCF_FE_random100.bw",
package = "seqsetvis")
)
names(bigwig_files) = sub("_FE_random100.bw", "", basename(bigwig_files))
# names(bigwig_files) = letters[1:3]
olap_gr = CTCF_in_10a_overlaps_gr
target_size = quantile(width(olap_gr), .75)
window_size = 50
target_size = round(target_size / window_size) * window_size
olap_gr = resize(olap_gr, target_size, fix = "center")
bw_gr = ssvFetchBigwig(bigwig_files, olap_gr, win_size = window_size)
bw_gr
## --------------------------------------------------------------------------
olap_gr = CTCF_in_10a_overlaps_gr
bw_gr = CTCF_in_10a_profiles_gr
## ----factorize-------------------------------------------------------------
olap_groups = ssvFactorizeMembTable(mcols(olap_gr))
## ----lineplot basic, fig.width=6, fig.height=2.5---------------------------
# facet labels will display better if split into multiple lines
bw_gr$facet_label = sub("_", "\n", bw_gr$sample)
ssvSignalLineplot(bw_data = subset(bw_gr, id %in% 1:12), facet_ = "facet_label")
## ----lineplot region facet, fig.width=5, fig.height=3----------------------
ssvSignalLineplot(bw_data = subset(bw_gr, id %in% 1:4), facet_ = "id")
## ----lineplot aggregated, fig.width=5, fig.height=2------------------------
ssvSignalLineplotAgg(bw_data = bw_gr)
## ----lineplot aggregated smoothed, fig.width=5, fig.height=2---------------
ssvSignalLineplotAgg(bw_data = bw_gr, spline_n = 10)
## ----lineplot--------------------------------------------------------------
# append set info, modify aggregation group_ and add facet
olap_2groups = ssvFactorizeMembTable(ssvMakeMembTable(olap_gr)[, 1:2])
grouped_gr = GRanges(merge(bw_gr, olap_2groups))
grouped_gr = subset(grouped_gr, sample %in% c("MCF10A_CTCF", "MCF10AT1_CTCF"))
ssvSignalLineplotAgg(bw_data = grouped_gr, spline_n = 10,
group_ = c("sample", "group")) +
facet_wrap("group", ncol = 2) +
labs(title = "Aggregated by peak call set", y = "FE", x = "bp from center")
## ----scatterplot basic, fig.width=3, fig.height=3--------------------------
# per-region signal in one sample plotted against another
ssvSignalScatterplot(bw_gr, x_name = "MCF10A_CTCF", y_name = "MCF10AT1_CTCF")
## ----scatterplot all sets, fig.width=8, fig.height=3-----------------------
ssvSignalScatterplot(bw_gr, x_name = "MCF10A_CTCF", y_name = "MCF10AT1_CTCF",
color_table = olap_groups)
## ----scatterplot 2 sets, fig.width=6, fig.height=3-------------------------
# by subsetting the matrix returned by ssvMakeMembTable() we have a lot of
# control over the coloring.
olap_2groups = ssvFactorizeMembTable(ssvMakeMembTable(olap_gr)[, 1:2])
ssvSignalScatterplot(bw_gr, x_name = "MCF10A_CTCF", y_name = "MCF10AT1_CTCF",
color_table = olap_2groups)
## ----outside group, fig.width=5, fig.height=3------------------------------
# drop = FALSE keeps the single selected column as a matrix, not a vector
olap_OutGroups = ssvFactorizeMembTable(
ssvMakeMembTable(olap_gr)[, 3, drop = FALSE])
ssvSignalScatterplot(bw_gr,
x_name = "MCF10A_CTCF",
y_name = "MCF10AT1_CTCF",
color_table = olap_OutGroups)
## ----scatterplot facet, fig.width=6, fig.height=4--------------------------
#tweaking group description will clean up plot labels a lot
olap_groups$group = gsub("_CTCF", "", olap_groups$group)
olap_groups$group = gsub(" & ", "\n", olap_groups$group)
ssvSignalScatterplot(bw_gr, x_name = "MCF10A_CTCF", y_name = "MCF10AT1_CTCF",
color_table = olap_groups) +
facet_wrap("group") + guides(color = "none") +
theme_linedraw()
## ----banded quantiles------------------------------------------------------
ssvSignalBandedQuantiles(bw_gr, by_ = "sample", hsv_grayscale = TRUE,
hsv_symmetric = TRUE,
hsv_reverse = TRUE)
## ----heatmap basic, message=FALSE, fig.width=5-----------------------------
ssvSignalHeatmap(bw_gr, nclust = 3, facet_ = "facet_label")
## ----heatmap perform pre-clustering----------------------------------------
# clustering can be run once and the result reused across heatmaps
bw_clust = ssvSignalClustering(bw_gr, nclust = 3)
bw_clust
## ----heatmap cluster selection---------------------------------------------
subset(bw_clust, cluster_id == 3)
## ----heatmap use pre-cluster, message=FALSE, fig.width=5-------------------
ssvSignalHeatmap(bw_clust, facet_ = "facet_label")
## ----setup np_files bw_files-----------------------------------------------
# Decide between the full (cached, downloaded) data and the small demo files
# shipped with the package, then define bw_files and np_files accordingly.
pkgdata_path = system.file("extdata",
                           package = "seqsetvis")
cache_path = paste0(pkgdata_path, "/.cache")
# the next line is enough to initialize the cache
# BiocFileCache(cache = cache_path)
# && short-circuits so BiocFileCache is only probed when the cache exists;
# requireNamespace() tests availability without attaching the package.
use_full_data = dir.exists(cache_path) &&
    requireNamespace("BiocFileCache", quietly = TRUE)
# hard override: always use the packaged demo data; delete this line to
# enable the full cached-data path detected above.
use_full_data = FALSE
if(use_full_data){
    library(BiocFileCache)
    ssv_bfc = BiocFileCache(cache = cache_path)
    # character(1) templates make vapply's expected return type explicit
    bw_files = vapply(seq_along(CTCF_in_10a_bigWig_urls), function(i){
        rname = paste(names(CTCF_in_10a_bigWig_urls)[i],
                      "bigwig",
                      sep = ",")
        fpath = CTCF_in_10a_bigWig_urls[i]
        # bfcrpath calls bfcadd() if necessary and returns file path
        bfcrpath(ssv_bfc, rname = rname, fpath = fpath)
    }, character(1))
    names(bw_files) = names(CTCF_in_10a_bigWig_urls)
    np_files = vapply(seq_along(CTCF_in_10a_narrowPeak_urls), function(i){
        rname = paste(names(CTCF_in_10a_narrowPeak_urls)[i],
                      "narrowPeak",
                      sep = ",")
        fpath = CTCF_in_10a_narrowPeak_urls[i]
        # bfcrpath calls bfcadd() if necessary and returns file path
        bfcrpath(ssv_bfc, rname = rname, fpath = fpath)
    }, character(1))
    names(np_files) = names(CTCF_in_10a_narrowPeak_urls)
}else{
    # fall back to the 100-region demo files installed with seqsetvis
    bw_files = vapply(c("MCF10A_CTCF", "MCF10AT1_CTCF", "MCF10CA1_CTCF"),
                      function(x){
                          system.file("extdata", paste0(x, "_FE_random100.bw"),
                                      package = "seqsetvis")
                      }, character(1))
    # set filepaths
    np_files = c(
        system.file("extdata", "MCF10A_CTCF_random100.narrowPeak",
                    package = "seqsetvis"),
        system.file("extdata", "MCF10AT1_CTCF_random100.narrowPeak",
                    package = "seqsetvis"),
        system.file("extdata", "MCF10CA1_CTCF_random100.narrowPeak",
                    package = "seqsetvis")
    )
    names(np_files) = sub("_random100.narrowPeak", "",
                          x = basename(np_files))
}
## ----load package narrowPeak-----------------------------------------------
# load peak calls
np_grs = easyLoad_narrowPeak(np_files)
## ----overlap peaks---------------------------------------------------------
olaps = ssvOverlapIntervalSets(np_grs)
## ----ctcf fig1,hold=TRUE, fig.align='center', fig.height=4, fig.width = 8----
# assemble bar, venn and euler panels into a single labelled figure
p_bars = ssvFeatureBars(olaps, show_counts = FALSE) +
theme(legend.position = "left")
p_bars = p_bars + theme(axis.text.x = element_blank(),
axis.ticks.x = element_blank(),
legend.justification = "center") +
labs(fill = "cell line")
p_venn = ssvFeatureVenn(olaps, counts_txt_size = 4) +
guides(fill = "none", color = "none")
p_euler = ssvFeatureEuler(olaps) +
guides(fill = "none", color = "none")
cowplot::ggdraw() +
cowplot::draw_plot(p_bars, x = 0, y = 0, width = .4, height = .7) +
cowplot::draw_plot(p_venn, x = .4, y = .1, width = .3, height = .7) +
cowplot::draw_plot(p_euler, x = 0.7, y = .1, width = 0.3, height = .7) +
cowplot::draw_plot_label(c("CTCF binding in breast cancer cell lines",
"A", "B", "C"),
x = c(.04, .17, 0.4, .7),
y = c(.92, .75, .75, .75), size = 10, hjust = 0)
## ----color change, eval=FALSE,hold = TRUE, fig.align='center', fig.height=4, fig.width = 8----
# (kept commented: demonstrates recoloring all three panels consistently)
# col_vals = c("MCF10A_CTCF" = 'red',
# "MCF10AT1_CTCF" = "blue",
# "MCF10CA1_CTCF" = "green")
# sf = scale_fill_manual(values = col_vals)
# sc = scale_color_manual(values = col_vals)
# cowplot::ggdraw() +
# cowplot::draw_plot(p_bars + sf,
# x = 0, y = 0,
# width = .4, height = .7) +
# cowplot::draw_plot(p_venn + sf + sc,
# x = .4, y = .1,
# width = .3, height = .7) +
# cowplot::draw_plot(p_euler + sf + sc,
# x = 0.7, y = .1,
# width = 0.3, height = .7) +
# cowplot::draw_plot_label(c("CTCF binding in breast cancer cell lines",
# "A", "B", "C"),
# x = c(.04, .17, 0.4, .7),
# y = c(.92, .75, .75, .75), size = 10, hjust = 0)
## ----load ChIPpeakAnno, message=FALSE--------------------------------------
library(ChIPpeakAnno)
data(TSS.human.GRCh38)
macs.anno <- annotatePeakInBatch(olaps, AnnotationData=TSS.human.GRCh38)
## ----distance filter-------------------------------------------------------
# keep only peaks within 1 kb of a TSS
macs.anno = subset(macs.anno, distancetoFeature < 1000)
## ----subset 1--------------------------------------------------------------
# genes near peaks shared by 10A and AT1 but absent from CA1
subset(macs.anno, MCF10AT1_CTCF & MCF10A_CTCF & !MCF10CA1_CTCF)$feature
## ----subset 2--------------------------------------------------------------
# genes near peaks unique to MCF10A
subset(macs.anno, MCF10A_CTCF & !MCF10AT1_CTCF & !MCF10CA1_CTCF)$feature
## ----set fixed width, fig.height=3, fig.width=3----------------------------
# choose a common width: 75th percentile, rounded up to whole windows;
# the red line marks the chosen size on the width histogram
window_size = 50
width_q75 = quantile(width(olaps), .75)
width_q75 = ceiling(width_q75 / window_size) * window_size
hist_res = hist(width(olaps))
lines(rep(width_q75, 2), c(0, max(hist_res$counts)), col = "red", lwd = 5)
text(width_q75, max(hist_res$counts), "fixedSize", adj = c(-.1, 1), col = "red")
## apply width
olaps_fixedSize = centerFixedSizeGRanges(olaps, width_q75)
if(use_full_data){
bw_gr = ssvFetchBigwig(file_paths = bw_files,
qgr = olaps_fixedSize,
win_size = 50)
}
## ----ctcf scatterplots, fig.width=10, fig.height=4, message=FALSE----------
# shortening colnames will make group names less cumbersome in plot legend
colnames(mcols(olaps_fixedSize)) = sub("_CTCF", "",
colnames(mcols(olaps_fixedSize)))
all_groups = levels(ssvFactorizeMembTable(
ssvMakeMembTable(olaps_fixedSize))$group)
all_colors = RColorBrewer::brewer.pal(length(all_groups), "Set1")
all_colors[5:7] = safeBrew(3, "Dark2")
names(all_colors) = all_groups
olap_groups_12 = ssvFactorizeMembTable(
ssvMakeMembTable(olaps_fixedSize)[, 1:2])
p_12 = ssvSignalScatterplot(bw_gr,
x_name = "MCF10A_CTCF",
y_name = "MCF10AT1_CTCF",
color_table = olap_groups_12) +
scale_color_manual(values = all_colors)
olap_groups_13 = ssvFactorizeMembTable(
ssvMakeMembTable(olaps_fixedSize)[, c(1,3)])
p_13 = ssvSignalScatterplot(bw_gr,
x_name = "MCF10A_CTCF",
y_name = "MCF10CA1_CTCF",
color_table = olap_groups_13) +
scale_color_manual(values = all_colors)
if(use_full_data){
tp_12 = p_12 + scale_size_continuous(range = .1) +
scale_alpha_continuous(range = .1) +
geom_density2d(aes(color = group), h = 40, bins = 3)
tp_13 = p_13 + scale_size_continuous(range = .1) +
scale_alpha_continuous(range = .1) +
geom_density2d(aes(color = group), h = 40, bins = 3)
cowplot::plot_grid(tp_12 + labs(title = ""),
tp_13 + labs(title = ""),
label_y = .85, labels = "AUTO")
}else{
cowplot::plot_grid(p_12 + labs(title = ""),
p_13 + labs(title = ""),
label_y = .85, labels = "AUTO")
}
## ----ctcf heatmap, message=FALSE, fig.width=5------------------------------
bw_gr$facet_label = sub("_", "\n", bw_gr$sample)
clust_gr = ssvSignalClustering(bw_gr, nclust = 3, facet_ = "facet_label")
ssvSignalHeatmap(clust_gr, facet_ = "facet_label") + labs(fill = "FE",
y = "region",
x = "bp from center")
## ----ctcf recentered heatmap, message = FALSE, fig.width=10, fig.height=6----
center_gr = centerAtMax(clust_gr, view_size = 150,
by_ = "id", check_by_dupes = FALSE)
p_center_hmap = ssvSignalHeatmap(center_gr, facet_ = "facet_label") +
labs(fill = "FE",
y = "region",
x = "bp from center")
## since center_gr still retains clustering information, clustering is not
## repeated by default, the following reclusters the data.
clust_center_gr = ssvSignalClustering(center_gr, nclust = 3)
p_center_hmap_reclust = ssvSignalHeatmap(clust_center_gr,
facet_ = "facet_label") +
labs(fill = "FE",
y = "region",
x = "bp from center")
cowplot::plot_grid(p_center_hmap + labs(title = "original clustering"),
p_center_hmap_reclust + labs(title = "reclustered"))
## ----cluster annotation----------------------------------------------------
# Annotate clustered regions with their nearest TSS (ChIPpeakAnno).
# Profiles repeat id/cluster_id across windows, so dedupe to one row per id.
clust_df = as.data.frame(mcols(clust_gr))
clust_df = unique(clust_df[,c("id", "cluster_id")])
olap_clust_annot = olap_gr
# attach a region id matching the profile ids, then merge in cluster labels
mcols(olap_clust_annot) = data.frame(id = seq_along(olap_clust_annot))
olap_clust_annot = GRanges(merge(olap_clust_annot, clust_df))
# restrict annotation to the first two clusters
olap_clust_annot = subset(olap_clust_annot, cluster_id %in% 1:2)
olap_clust_annot <- annotatePeakInBatch(olap_clust_annot,
AnnotationData=TSS.human.GRCh38)
olap_clust_annot$feature
## --------------------------------------------------------------------------
# chromHMM state analysis setup: gather a bigwig, a narrowPeak call set and a
# chromHMM segmentation, then prepare the objects used by later chunks.
# Both branches define: state_colors, bw_states_gr, chmm_olaps,
# state_total_widths, max_per_state, flank_size.
target_data = "MCF10A_CTCF"
chmm_win = 200 #window size is an important chromHMM parameter.
# 200 is the default window size and matches the state segmentation
if(use_full_data){
    # set all file paths (BiocFileCache: bfcrpath() downloads on first use)
    chmm_bw_file = bfcrpath(ssv_bfc, rnames = paste(target_data, "bigwig",
                                                    sep = ","))
    chmm_np_file = bfcrpath(ssv_bfc, rnames = paste(target_data, "narrowPeak",
                                                    sep = ","))
    chmm_seg_file = bfcrpath(ssv_bfc, rnames = "MCF7,segmentation",
                             fpath = chromHMM_demo_segmentation_url)
    query_chain = bfcquery(ssv_bfc, "hg19ToHg38,chain")
    if(nrow(query_chain) == 0){
        # chain is distributed gzipped; cache an uncompressed copy once
        chain_hg19ToHg38_gz = bfcrpath(ssv_bfc, rnames = "hg19ToHg38,gz",
                                       fpath = chromHMM_demo_chain_url)
        ch_raw = readLines(gzfile(chain_hg19ToHg38_gz))
        ch_file = bfcnew(ssv_bfc, rname = "hg19ToHg38,chain")
        writeLines(ch_raw, con = ch_file)
    }
    chmm_chain_file = bfcrpath(ssv_bfc, rnames = "hg19ToHg38,chain")
    # load the liftOver chain once (the original imported it twice)
    ch = rtracklayer::import.chain(chmm_chain_file)
    # load segmentation data
    chmm_gr = rtracklayer::import.bed(chmm_seg_file)
    # cleanup state names.
    chmm_gr$name = gsub("\\+", " and ", chmm_gr$name)
    chmm_gr$name = gsub("_", " ", chmm_gr$name)
    # setup color to state mapping from the segmentation's itemRgb column
    colDF = unique(mcols(chmm_gr)[c("name", "itemRgb")])
    state_colors = colDF$itemRgb
    names(state_colors) = colDF$name
    # liftover states from hg19 to hg38
    chmm_gr_hg38 = rtracklayer::liftOver(chmm_gr, ch)
    chmm_gr_hg38 = unlist(chmm_gr_hg38)
    chmm_grs_list = as.list(GenomicRanges::split(chmm_gr_hg38,
                                                 chmm_gr_hg38$name))
    # transform narrowPeak ranges to summit positions (1 bp ranges)
    chmm_np_grs = easyLoad_narrowPeak(chmm_np_file, file_names = target_data)
    chmm_summit_grs = lapply(chmm_np_grs, function(x){
        start(x) = start(x) + x$relSummit
        end(x) = start(x)
        x
    })
    qlist = append(chmm_summit_grs[1], chmm_grs_list)
    chmm_olaps = ssvOverlapIntervalSets(qlist, use_first = TRUE)
    # discard the columns for peak call and no_hit, not informative here.
    mcols(chmm_olaps)[[1]] = NULL
    chmm_olaps$no_hit = NULL
    # total width of genome assigned each state; vapply with an explicit
    # numeric(1) template is safer than sapply's type-guessing
    state_total_widths = vapply(chmm_grs_list, function(my_gr){
        sum(as.numeric(width(my_gr)))
    }, numeric(1))
    # Expand state regions into 200 bp windows.
    state_wingrs = lapply(chmm_grs_list, function(my_gr){
        st = my_gr$name[1]
        wgr = unlist(slidingWindows(my_gr, chmm_win, chmm_win))
        wgr$state = st
        wgr
    })
    state_wingrs = unlist(GRangesList(state_wingrs))
    # fetch bw data for each state
    # it probably isn't useful to grab every single window for each state
    # so we can cap the number of each state carried forward
    max_per_state = 5000
    # flank size zooms out a bit from each chromHMM window
    flank_size = 400
    state_split = split(state_wingrs, state_wingrs$state)
    state_split = lapply(state_split, function(my_gr){
        # NOTE(review): sample() here is not seeded, so window selection
        # varies between runs of the full-data path.
        samp_gr = sample(my_gr, min(length(my_gr), max_per_state))
        samp_gr = sort(samp_gr)
        names(samp_gr) = seq_along(samp_gr)
        samp_gr
    })
    state_gr = unlist(GRangesList(state_split))
    state_gr = resize(state_gr, width = chmm_win + 2 * flank_size,
                      fix = "center")
    bw_states_gr = ssvFetchBigwig(file_paths = chmm_bw_file,
                                  qgr = state_gr,
                                  win_size = 50)
    # ids are "<state>.<n>": split into state label (grp) and index (grp_id)
    bw_states_gr$grp = sub("\\..+", "", bw_states_gr$id)
    bw_states_gr$grp_id = sub(".+\\.", "", bw_states_gr$id)
}else{
    # small precomputed demo objects mirroring the full computation above
    max_per_state = 20
    flank_size = 400
    state_colors = chromHMM_demo_state_colors
    bw_states_gr = chromHMM_demo_bw_states_gr
    chmm_olaps = chromHMM_demo_overlaps_gr
    state_total_widths = chromHMM_demo_state_total_widths
}
## ----state raw, message = FALSE, fig.width=3, fig.height=3-----------------
# Raw count of peak summits falling in each chromHMM state.
olaps_df = as.data.frame(mcols(chmm_olaps))
# state names picked up "." separators as data.frame column names; undo that
colnames(olaps_df) = gsub("\\.", " ", colnames(olaps_df))
p_state_raw_count = ssvFeatureBars(olaps_df, show_counts = FALSE) +
labs(fill = "state", x = "") +
scale_fill_manual(values = state_colors) +
theme_cowplot() + guides(fill = "none") +
theme(axis.text.x = element_text(angle = 90, size = 8,
hjust = 1, vjust = .5))
p_state_raw_count
## ----state enrichment, fig.width=3, fig.height=3---------------------------
# Enrichment = fraction of peaks in a state / fraction of genome in that
# state, so 1 means "as expected by chance".
state_width_fraction = state_total_widths / sum(state_total_widths)
state_peak_hits = colSums(olaps_df)
state_peak_fraction = state_peak_hits / sum(state_peak_hits)
enrichment = state_peak_fraction /
state_width_fraction[names(state_peak_fraction)]
enrich_df = data.frame(state = names(enrichment), enrichment = enrichment)
p_state_enrichment = ggplot(enrich_df) +
geom_bar(aes(x = state, fill = state, y = enrichment), stat = "identity") +
labs(x = "") +
theme_cowplot() + guides(fill = "none") +
scale_fill_manual(values = state_colors) +
theme(axis.text.x = element_text(angle = 90, size = 8,
hjust = 1, vjust = .5))
p_state_enrichment
## ---- message=FALSE, fig.width=6-------------------------------------------
# Average FE profile per state, with vertical marks at the chromHMM window
# edges (+/- chmm_win/2 around the center).
p_agg_tracks = ssvSignalLineplotAgg(bw_states_gr,
sample_ = "grp",
color_ = "grp")
# pull the plotted y-range back out of the built plot for the annotations
gb = ggplot2::ggplot_build(p_agg_tracks)
yrng = range(gb$data[[1]]$y)
p_agg_tracks = p_agg_tracks +
scale_color_manual(values = state_colors) +
annotate("line", x = rep(-chmm_win/2, 2), y = yrng) +
annotate("line", x = rep(chmm_win/2, 2), y = yrng) +
labs(y = "FE", x = "bp", color = "state",
title = paste("Average FE by state,", target_data),
subtitle = paste("states sampled to",
max_per_state,
chmm_win,
"bp windows each\n",
flank_size,
"bp flanking each side")) +
theme(plot.title = element_text(hjust = 0))
p_agg_tracks
## ----state heatmap, fig.width=8--------------------------------------------
# Heatmap of individual windows, sorted within each state by the signal at
# the bin closest to the window center.
pdt = as.data.table(mcols(bw_states_gr))
pdt$grp_id = as.integer(pdt$grp_id)
# reassign grp_id to sort within each state set
dt_list = lapply(unique(pdt$grp), function(state){
dt = pdt[grp == state]
# ymax = y value at the x bin nearest 0 (the window center), per window
dtmax = dt[, .(ymax = y[which(x == x[order(abs(x))][1])]), by = grp_id]
dtmax = dtmax[order(ymax, decreasing = TRUE)]
dtmax[, grp_o := seq_len(.N)]
dtmax$ymax = NULL
dt = merge(dtmax, dt)
# overwrite grp_id with the rank so plotting order reflects signal strength
dt[, grp_id := grp_o]
dt$grp_o = NULL
dt
})
# reassemble
pdt = rbindlist(dt_list)
# heatmap facetted by state and sorted in decreasing order
p_state_hmap = ggplot(pdt) +
geom_raster(aes(x = x, y = grp_id, fill = y)) +
scale_y_reverse() +
facet_wrap("grp", nrow = 2) +
theme(axis.text.y = element_blank(),
axis.ticks.y = element_blank(),
axis.line.y = element_blank(),
axis.text.x = element_text(angle = 90, hjust = 1, vjust = .5),
strip.text = element_text(size= 8)) +
scale_fill_gradientn(colors = c("white", "orange", "red")) +
labs(y = "", fill = "FE",
x = "bp from local summit",
title = paste(max_per_state, "random regions per state"))
p_state_hmap
## ---- fig.height=12, fig.width=8-------------------------------------------
# Final composite figure: count + enrichment bars on top, the aggregate
# profiles in the middle, and the per-window heatmap filling the remainder.
bar_height = .25
line_height = .3
ggdraw() +
draw_plot(p_state_raw_count + guides(fill = "none"),
x = 0,
y = 1 - bar_height,
width = .5,
height = bar_height) +
draw_plot(p_state_enrichment + guides(fill = "none"),
x = .5,
y = 1 - bar_height,
width = .5,
height = bar_height) +
draw_plot(p_agg_tracks,
x = 0,
y = 1 - bar_height - line_height,
width = 1,
height = line_height) +
draw_plot(p_state_hmap,
x = 0,
y = 0,
width = 1,
height = 1 - bar_height - line_height) +
draw_plot_label(LETTERS[1:4],
c(0, 0.48, 0, 0),
c(1, 1, 1 - bar_height, 1 - bar_height - line_height),
size = 15)
|
source("getData.R")
# Join pollen observations with their locations, concentrations and
# allergens from the polen.sepa.gov.rs open-data API.
#
# pollens: data.frame of pollen records from /api/opendata/pollens/; must
#          contain at least the columns `id` and `location`.
# Returns a data.frame with one row per concentration measurement, carrying
# numeric lat/long coordinates and allergen descriptions.
joinData <- function(pollens){
  # Reference tables are small; fetch them up front.
  locations <- getAndParse("http://polen.sepa.gov.rs", "/api/opendata/locations/",
                           c("id", "location_name", "lat", "long", "desc") )
  allergens <- getAndParse("http://polen.sepa.gov.rs", "/api/opendata/allergens/",
                           c("id", "allergen_name", "localized_name", "margine_top", "margine_bottom", "type",
                             "allergenitcity", "allergenitcity_display") )
  # Nothing to join against; return the empty input unchanged.
  if(nrow(pollens) == 0)
  {
    return(pollens)
  }
  # One concentrations page per distinct pollen record.
  pagesToParse <- paste("/api/opendata/concentrations/?pollen=", unique(pollens$id) ,sep = "")
  concentrations <- parsePage("http://polen.sepa.gov.rs", pagesToParse, parseConcentrations)
  pollen_location <- merge(pollens, locations, by.x = "location", by.y="id")
  concentration_allergen <- merge(concentrations, allergens, by.x = "allergen", by.y="id")
  pollendf <- merge(pollen_location, concentration_allergen, by.x = "concentration_id", by.y="id")
  # Coordinates may arrive as factors; convert via as.character() so factor
  # levels are not collapsed to their integer codes. (BUG FIX: `long`
  # previously used as.numeric(as.numeric(...)), which yields level codes
  # instead of coordinates when the column is a factor.)
  pollendf$lat <- as.numeric(as.character(pollendf$lat) )
  pollendf$long <- as.numeric(as.character(pollendf$long) )
  # Delete unnecessary columns
  pollendf <- subset(pollendf, select = -c(concentration_id, location, desc, allergen, margine_top, margine_bottom, type, pollen, allergenitcity, allergenitcity_display) )
  return(pollendf)
}
| /scripts/joinData.R | no_license | MomirMilutinovic/pollen-forecast | R | false | false | 1,469 | r | source("getData.R")
joinData <- function(pollens){
# Joins all tables from the API with
# an api/opendata/pollens table
# and returns the resulting data.frame
locations <- getAndParse("http://polen.sepa.gov.rs", "/api/opendata/locations/",
c("id", "location_name", "lat", "long", "desc") )
allergens <- getAndParse("http://polen.sepa.gov.rs", "/api/opendata/allergens/",
c("id", "allergen_name", "localized_name", "margine_top", "margine_bottom", "type",
"allergenitcity", "allergenitcity_display") )
if(nrow(pollens) == 0)
{
return(pollens)
}
pagesToParse <- paste("/api/opendata/concentrations/?pollen=", unique(pollens$id) ,sep = "")
concentrations <- parsePage("http://polen.sepa.gov.rs", pagesToParse, parseConcentrations)
pollen_location <- merge(pollens, locations, by.x = "location", by.y="id")
concentration_allergen <- merge(concentrations, allergens, by.x = "allergen", by.y="id")
pollendf <- merge(pollen_location, concentration_allergen, by.x = "concentration_id", by.y="id")
pollendf$lat <- as.numeric(as.character(pollendf$lat) )
pollendf$long <- as.numeric(as.numeric(pollendf$long) )
#Delete unnecessary columns
pollendf <- subset(pollendf, select = -c(concentration_id, location, desc, allergen, margine_top, margine_bottom, type, pollen, allergenitcity, allergenitcity_display) )
return(pollendf)
}
|
# Package loading for the Eddie cluster. The shared group library path is
# kept in one constant instead of being repeated on every call.
# library() (not require) is used so a missing package fails loudly.
r_lib <- "/exports/csce/eddie/inf/groups/mamode_prendergast/R_packages/"
# Attach order matters for masking (e.g. plyr must precede dplyr) and is the
# order of first occurrence in the original script. Duplicate loads of
# tidyverse, ggplot2 and caret were harmless no-ops and have been removed.
pkgs <- c("crayon", "rstudioapi", "cli", "withr", "readr", "tidyverse",
          "BiocGenerics", "S4Vectors", "IRanges", "GenomeInfoDb",
          "GenomicRanges", "R.methodsS3", "R.oo", "R.utils", "data.table",
          "ggplot2", "plyr", "caret", "dplyr",
          "AppliedPredictiveModeling", "reshape2", "kernlab", "pryr")
for (pkg in pkgs) {
  library(pkg, character.only = TRUE, lib.loc = r_lib)
}
# Parallel backends were tried and abandoned; kept for reference.
#library("doParallel", lib.loc = "/exports/csce/eddie/inf/groups/mamode_prendergast/R_packages/")
#library("doSNOW", lib.loc = "/exports/csce/eddie/inf/groups/mamode_prendergast/R_packages/")
#cl <- makeForkCluster(3)
#registerDoParallel(cl)
#cl <- makeCluster(3, type = "FORKS")
#registerDoSNOW(cl)
#clusterCall(cl, function(x) .libPaths(x), .libPaths())
#clusterEvalQ(cl, .libPaths("/exports/csce/eddie/inf/groups/mamode_prendergast/R_packages/"))
print("This is using svmSpectrumString and tunelength")
print(Sys.time())
setwd("/exports/eddie/scratch/s1772751/Prepared_Data/index")
#print(mem_used())
print("reading full dataset")
x <-fread("/exports/eddie/scratch/s1772751/Prepared_Data/index/dataset_downsampled_cleaned_caret_5sf_0_015neg_0_5pos_0_05.csv", data.table=FALSE)
print("finished reading full dataset")
#print(mem_used())
dropped <- c("row_number", "chr", "variant_pos","TISSUE", "REF", "ALT")
x <- x[ , !(names(x) %in% dropped)]
#dropped_class <- subset(x, select=c("CLASS"))
#drops <- c("row_number", "chr", "variant_pos", "CLASS", "TISSUE", "REF", "ALT")
#x <- x[ , !(names(x) %in% drops)]
#x <- log(x+1)
#x <- cbind(x, dropped_class)
#fwrite(x, file="dataset_downsampled_cleaned_caret.csv", sep=",")
i <- 0.80
set.seed(3456)
trainIndex <- createDataPartition(x$CLASS, p = i,
list = FALSE, times = 1)
x_train <- x[as.vector(trainIndex),]
x_test <- x[as.vector(-trainIndex),]
fitControl <- trainControl(
method = "cv",
## repeated ten times
number = 3, verboseIter = TRUE,
returnResamp = "final",
savePredictions = "final",
# classProbs = TRUE,
summaryFunction = twoClassSummary,
sampling = "down")
print(mem_used())
print(Sys.time())
#gbmGrid <- expand.grid(interaction.depth = c(1, 10),
# n.trees = c(100,500),
# shrinkage = c(.1, .25),
# n.minobsinnode = 10)
print("just about to train")
print(Sys.time())
set.seed(825)
Fit <- train(CLASS ~ ., data = x_train,
method = "svmSpectrumString",
trControl = fitControl,
## This last option is actually one
## for gbm() that passes through
preProc = c("center", "scale"),
verbose = FALSE,
tuneLength = 5)
# metric = "ROC")
print("just about to save")
print(Sys.time())
#stopCluster(cl)
saveRDS(Fit, "svmSpectrumString_tunelength_down_0_05.RDS")
#predicted <- predict(gbmFit, x_test)
#print(head(predicted))
| /ML_models/0_05/caret_svmSpectrumString.R | permissive | HIM003/edinburgh-university-dissertation | R | false | false | 5,001 | r | library("crayon", lib.loc = "/exports/csce/eddie/inf/groups/mamode_prendergast/R_packages/")
library("rstudioapi", lib.loc = "/exports/csce/eddie/inf/groups/mamode_prendergast/R_packages/")
library("cli", lib.loc = "/exports/csce/eddie/inf/groups/mamode_prendergast/R_packages/")
library("withr", lib.loc = "/exports/csce/eddie/inf/groups/mamode_prendergast/R_packages/")
library("readr", lib.loc = "/exports/csce/eddie/inf/groups/mamode_prendergast/R_packages/")
library("tidyverse", lib.loc = "/exports/csce/eddie/inf/groups/mamode_prendergast/R_packages/")
library("BiocGenerics", lib.loc = "/exports/csce/eddie/inf/groups/mamode_prendergast/R_packages/")
library("S4Vectors", lib.loc = "/exports/csce/eddie/inf/groups/mamode_prendergast/R_packages/")
library("IRanges", lib.loc = "/exports/csce/eddie/inf/groups/mamode_prendergast/R_packages/")
library("GenomeInfoDb", lib.loc = "/exports/csce/eddie/inf/groups/mamode_prendergast/R_packages/")
library("GenomicRanges", lib.loc = "/exports/csce/eddie/inf/groups/mamode_prendergast/R_packages/")
library("R.methodsS3", lib.loc = "/exports/csce/eddie/inf/groups/mamode_prendergast/R_packages/")
library("R.oo", lib.loc = "/exports/csce/eddie/inf/groups/mamode_prendergast/R_packages/")
library("R.utils", lib.loc = "/exports/csce/eddie/inf/groups/mamode_prendergast/R_packages/")
library("data.table", lib.loc = "/exports/csce/eddie/inf/groups/mamode_prendergast/R_packages/")
library("ggplot2", lib.loc = "/exports/csce/eddie/inf/groups/mamode_prendergast/R_packages/")
library("plyr", lib.loc = "/exports/csce/eddie/inf/groups/mamode_prendergast/R_packages/")
library("caret", lib.loc = "/exports/csce/eddie/inf/groups/mamode_prendergast/R_packages/")
library("dplyr", lib.loc = "/exports/csce/eddie/inf/groups/mamode_prendergast/R_packages/")
library("tidyverse", lib.loc = "/exports/csce/eddie/inf/groups/mamode_prendergast/R_packages/")
library("AppliedPredictiveModeling", lib.loc = "/exports/csce/eddie/inf/groups/mamode_prendergast/R_packages/")
library("ggplot2", lib.loc = "/exports/csce/eddie/inf/groups/mamode_prendergast/R_packages/")
library("reshape2", lib.loc = "/exports/csce/eddie/inf/groups/mamode_prendergast/R_packages/")
library("kernlab", lib.loc = "/exports/csce/eddie/inf/groups/mamode_prendergast/R_packages/")
library("pryr", lib.loc = "/exports/csce/eddie/inf/groups/mamode_prendergast/R_packages/")
library("caret", lib.loc = "/exports/csce/eddie/inf/groups/mamode_prendergast/R_packages/")
#library("doParallel", lib.loc = "/exports/csce/eddie/inf/groups/mamode_prendergast/R_packages/")
#library("doSNOW", lib.loc = "/exports/csce/eddie/inf/groups/mamode_prendergast/R_packages/")
#cl <- makeForkCluster(3)
#registerDoParallel(cl)
#cl <- makeCluster(3, type = "FORKS")
#registerDoSNOW(cl)
#clusterCall(cl, function(x) .libPaths(x), .libPaths())
#clusterEvalQ(cl, .libPaths("/exports/csce/eddie/inf/groups/mamode_prendergast/R_packages/"))
print("This is using svmSpectrumString and tunelength")
print(Sys.time())
setwd("/exports/eddie/scratch/s1772751/Prepared_Data/index")
#print(mem_used())
print("reading full dataset")
x <-fread("/exports/eddie/scratch/s1772751/Prepared_Data/index/dataset_downsampled_cleaned_caret_5sf_0_015neg_0_5pos_0_05.csv", data.table=FALSE)
print("finished reading full dataset")
#print(mem_used())
dropped <- c("row_number", "chr", "variant_pos","TISSUE", "REF", "ALT")
x <- x[ , !(names(x) %in% dropped)]
#dropped_class <- subset(x, select=c("CLASS"))
#drops <- c("row_number", "chr", "variant_pos", "CLASS", "TISSUE", "REF", "ALT")
#x <- x[ , !(names(x) %in% drops)]
#x <- log(x+1)
#x <- cbind(x, dropped_class)
#fwrite(x, file="dataset_downsampled_cleaned_caret.csv", sep=",")
i <- 0.80
set.seed(3456)
trainIndex <- createDataPartition(x$CLASS, p = i,
list = FALSE, times = 1)
x_train <- x[as.vector(trainIndex),]
x_test <- x[as.vector(-trainIndex),]
fitControl <- trainControl(
method = "cv",
## repeated ten times
number = 3, verboseIter = TRUE,
returnResamp = "final",
savePredictions = "final",
# classProbs = TRUE,
summaryFunction = twoClassSummary,
sampling = "down")
print(mem_used())
print(Sys.time())
#gbmGrid <- expand.grid(interaction.depth = c(1, 10),
# n.trees = c(100,500),
# shrinkage = c(.1, .25),
# n.minobsinnode = 10)
print("just about to train")
print(Sys.time())
set.seed(825)
Fit <- train(CLASS ~ ., data = x_train,
method = "svmSpectrumString",
trControl = fitControl,
## This last option is actually one
## for gbm() that passes through
preProc = c("center", "scale"),
verbose = FALSE,
tuneLength = 5)
# metric = "ROC")
print("just about to save")
print(Sys.time())
#stopCluster(cl)
saveRDS(Fit, "svmSpectrumString_tunelength_down_0_05.RDS")
#predicted <- predict(gbmFit, x_test)
#print(head(predicted))
|
# Extracted example code for normtest::ajb.norm.test, the adjusted
# Jarque-Bera test for normality.
library(normtest)
### Name: ajb.norm.test
### Title: Adjusted Jarque-Bera test for normality
### Aliases: ajb.norm.test
### Keywords: htest
### ** Examples
# Normal sample: the test should typically NOT reject normality.
# NOTE(review): no set.seed(), so results differ between runs.
ajb.norm.test(rnorm(100))
# Skewed, non-normal sample: the test should typically reject normality.
ajb.norm.test(abs(runif(100,-2,5)))
| /data/genthat_extracted_code/normtest/examples/ajb.norm.test.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 225 | r | library(normtest)
# Extracted example code for normtest::ajb.norm.test, the adjusted
# Jarque-Bera test for normality.
### Name: ajb.norm.test
### Title: Adjusted Jarque-Bera test for normality
### Aliases: ajb.norm.test
### Keywords: htest
### ** Examples
# Normal sample: the test should typically NOT reject normality.
# NOTE(review): no set.seed(), so results differ between runs.
ajb.norm.test(rnorm(100))
# Skewed, non-normal sample: the test should typically reject normality.
ajb.norm.test(abs(runif(100,-2,5)))
|
#' Filter endPoints based on groups and assays.
#'
#' This function provides a mechanism to specify 3 levels of information in the
#' supplied data frame \code{\link{end_point_info}} to be used in subsequent analysis steps.
#' First, the user specifies the ToxCast assay annotation using the 'groupCol'
#' argument, which is a column header in 'end_point_info'. Second, the user
#' specifies the families of assays to use. Finally, the user can choose to
#' remove specific group(s) from the category. The default is to remove
#' 'Background Measurement' and 'Undefined'. Choices for this should be
#' reconsidered based on individual study objectives.
#'
#' The default category ('groupCol') is 'intended_target_family'. Depending
#' on the study, other categories may be more relevant. The best resource on these
#' groupings is the "ToxCast Assay Annotation Data User Guide" directly from
#' EPA \url{https://www.epa.gov/chemical-research/toxcast-assay-annotation-data-user-guide}.
#' Following that link, it defines "intended_target_family" as "the target family of the
#' objective target for the assay". Much more detail can be discovered in that documentation.
#'
#' @param ep Data frame containing Endpoint information from ToxCast
#' @param groupCol Character name of ToxCast annotation column to use as a group category
#' @param assays Vector of assays to use in the data analysis. Possible values are "ACEA", "APR", "ATG",
#' "NVS", "OT", "TOX21", "CEETOX", "LTEA", "CLD", "TANGUAY", "CCTE_PADILLA", "BSK" ,
#' "CCTE", "STM", "ARUNA", "CCTE_SHAFER", "CPHEA_STOKER", "CCTE_GLTED", "UPITT", "UKN",
#' "ERF", "TAMU", "IUF", "CCTE_MUNDY", "UTOR", "VALA". By default, the
#' "BSK" (BioSeek) assay is removed.
#' @param remove_groups Vector of groups within the selected 'groupCol' to remove.
#' \code{NA} and empty-string entries are ignored.
#' @export
#' @examples
#' end_point_info <- end_point_info
#' cleaned_ep <- clean_endPoint_info(end_point_info)
#' filtered_ep <- filter_groups(cleaned_ep)
#' head(filtered_ep)
filter_groups <- function(ep,
                          groupCol = "intended_target_family",
                          assays = c(
                            "ACEA", "APR", "ATG",
                            "NVS", "OT", "TOX21", "CEETOX",
                            "LTEA", "CLD", "TANGUAY", "CCTE_PADILLA",
                            "CCTE", "STM", "ARUNA", "CCTE_SHAFER",
                            "CPHEA_STOKER", "CCTE_GLTED", "UPITT", "UKN",
                            "ERF", "TAMU", "IUF", "CCTE_MUNDY",
                            "UTOR", "VALA"
                          ),
                          remove_groups = c("Background Measurement", "Undefined")) {
  # Validate the requested assays against those present in the packaged
  # ToxCast endpoint annotation table; errors on unknown assay names.
  possible_assays <- unique(end_point_info$assay_source_name)
  match.arg(assays, possible_assays, several.ok = TRUE)

  # Getting rid of NSE warnings:
  assay_source_name <- assay_component_endpoint_name <- ".dplyr"

  # Keep only the endpoint name, the chosen annotation column, and the
  # assay source, with standardized column names.
  ep <- ep[, c("assay_component_endpoint_name", groupCol, "assay_source_name")] %>%
    rename(
      endPoint = assay_component_endpoint_name,
      assaysFull = assay_source_name
    )
  names(ep)[names(ep) == groupCol] <- "groupCol"

  # Restrict to the requested assays and drop rows without a group value.
  ep <- ep[(ep$assaysFull %in% assays), ]
  ep <- ep[!is.na(ep$groupCol), ]

  # Drop unwanted groups. NA and "" entries are discarded first so that,
  # e.g., remove_groups = c(NA, "") cannot produce an NA condition (the
  # previous any(remove_groups != "") errored with `if (NA)` in that case).
  remove_groups <- remove_groups[!is.na(remove_groups) & remove_groups != ""]
  if (length(remove_groups) > 0) {
    ep <- ep[!(ep$groupCol %in% remove_groups), ]
  }

  return(ep)
}
| /R/filter_endPoint_info.R | no_license | cran/toxEval | R | false | false | 3,399 | r | #' Filter endPoints based on groups and assays.
#'
#' This function provides a mechanism to specify 3 levels of information in the
#' supplied data frame \code{\link{end_point_info}} to be used in subsequent analysis steps.
#' First, the user specifies the ToxCast assay annotation using the 'groupCol'
#' argument, which is a column header in 'end_point_info'. Second, the user
#' specifies the families of assays to use. Finally, the user can choose to
#' remove specific group(s) from the category. The default is to remove
#' 'Background Measurement' and 'Undefined'. Choices for this should be
#' reconsidered based on individual study objectives.
#'
#' The default category ('groupCol') is 'intended_target_family'. Depending
#' on the study, other categories may be more relevant. The best resource on these
#' groupings is the "ToxCast Assay Annotation Data User Guide" directly from
#' EPA \url{https://www.epa.gov/chemical-research/toxcast-assay-annotation-data-user-guide}.
#' Following that link, it defines "intended_target_family" as "the target family of the
#' objective target for the assay". Much more detail can be discovered in that documentation.
#'
#' @param ep Data frame containing Endpoint information from ToxCast
#' @param groupCol Character name of ToxCast annotation column to use as a group category
#' @param assays Vector of assays to use in the data analysis. Possible values are "ACEA", "APR", "ATG",
#' "NVS", "OT", "TOX21", "CEETOX", "LTEA", "CLD", "TANGUAY", "CCTE_PADILLA", "BSK" ,
#' "CCTE", "STM", "ARUNA", "CCTE_SHAFER", "CPHEA_STOKER", "CCTE_GLTED", "UPITT", "UKN",
#' "ERF", "TAMU", "IUF", "CCTE_MUNDY", "UTOR", "VALA". By default, the
#' "BSK" (BioSeek) assay is removed.
#' @param remove_groups Vector of groups within the selected 'groupCol' to remove.
#' @export
#' @examples
#' end_point_info <- end_point_info
#' cleaned_ep <- clean_endPoint_info(end_point_info)
#' filtered_ep <- filter_groups(cleaned_ep)
#' head(filtered_ep)
filter_groups <- function(ep,
                          groupCol = "intended_target_family",
                          assays = c(
                            "ACEA", "APR", "ATG",
                            "NVS", "OT", "TOX21", "CEETOX",
                            "LTEA", "CLD", "TANGUAY", "CCTE_PADILLA",
                            "CCTE", "STM", "ARUNA", "CCTE_SHAFER",
                            "CPHEA_STOKER", "CCTE_GLTED", "UPITT", "UKN",
                            "ERF", "TAMU", "IUF", "CCTE_MUNDY",
                            "UTOR", "VALA"
                          ),
                          remove_groups = c("Background Measurement", "Undefined")) {
  # Error early if any requested assay is not a known ToxCast assay source.
  known_assays <- unique(end_point_info$assay_source_name)
  match.arg(assays, known_assays, several.ok = TRUE)

  # Reduce to the three columns of interest and standardize their names.
  wanted <- c("assay_component_endpoint_name", groupCol, "assay_source_name")
  ep <- ep[, wanted]
  names(ep)[names(ep) == "assay_component_endpoint_name"] <- "endPoint"
  names(ep)[names(ep) == "assay_source_name"] <- "assaysFull"
  names(ep)[names(ep) == groupCol] <- "groupCol"

  # Keep only the requested assays, then rows with a defined group value.
  ep <- ep[ep$assaysFull %in% assays, ]
  ep <- ep[!is.na(ep$groupCol), ]

  # Remove unwanted groups, if any were requested.
  if (any(!is.na(remove_groups))) {
    if (any(remove_groups != "")) {
      ep <- ep[!(ep$groupCol %in% remove_groups), ]
    }
  }

  ep
}
|
# Global parameters: 12 pt base font for all plot text.
par(ps=12)
## Data must be downloaded and extracted to the relative path ./data from:
## https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip
## Read Data
## NOTE(review): colClasses supplies 8 classes but the semicolon-separated
## file has 9 columns, "Time" is not a standard colClasses value, and the
## "Date" colClass expects ISO "%Y-%m-%d" while this file stores "d/m/yyyy".
## Confirm this read actually succeeds against the raw file.
wholedata <- read.table("data/household_power_consumption.txt", sep=";", header = TRUE, colClasses = c("Date", "Time", "factor", "factor", "factor", "factor", "factor", "factor"))
## Subset to only show data from 1.2.2007 and 2.2.2007
## NOTE(review): if Date were parsed to class Date, comparing against
## "1/2/2007" strings would never match; verify the subset is non-empty.
plotdata <- subset(wholedata, Date %in% c("1/2/2007","2/2/2007"))
# Convert the variable of interest to numeric (factor -> character -> numeric).
globalActivePower <- as.numeric(as.character(plotdata$Global_active_power))
# Render the histogram to plot1.png (device is closed by dev.off() below).
png(filename="plot1.png")
hist(globalActivePower, breaks=12, col="#FF2500", main="Global Active Power", xlab="Global Active Power (kilowatts)", ylim=c(0,1200), xlim=c(0,6), xaxt="n")
axis(1, at=c(0,2,4,6), hadj=0.75)
dev.off() | /plot1.R | no_license | rohmux/ExData_Plotting1 | R | false | false | 892 | r | # Global parameters
# Use a 12 pt base font for all plot text.
par(ps = 12)

## Data must be downloaded and extracted to the relative path ./data from:
## https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip
## Read the data. Dates in the file are "d/m/yyyy", which the "Date"
## colClass cannot parse (it expects ISO "%Y-%m-%d"), and the original call
## supplied only 8 classes for 9 columns. Read everything as character and
## convert after subsetting; "?" marks missing values in the measurements.
wholedata <- read.table("data/household_power_consumption.txt",
                        sep = ";", header = TRUE, na.strings = "?",
                        colClasses = "character")

## Keep only the observations from 1 and 2 February 2007 (dates are stored
## as "d/m/yyyy" strings in the file).
plotdata <- subset(wholedata, Date %in% c("1/2/2007", "2/2/2007"))

## Convert the variable of interest to numeric ("?" already became NA).
globalActivePower <- as.numeric(plotdata$Global_active_power)

## Render the histogram to plot1.png (device closed by dev.off() below).
png(filename = "plot1.png")
hist(globalActivePower, breaks = 12, col = "#FF2500",
     main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)",
     ylim = c(0, 1200), xlim = c(0, 6), xaxt = "n")
axis(1, at = c(0, 2, 4, 6), hadj = 0.75)
dev.off() |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visit.R
\name{context_to_terminals}
\alias{context_to_terminals}
\title{Propagate context to terminals}
\usage{
context_to_terminals(
pd_nested,
outer_lag_newlines,
outer_indent,
outer_spaces,
outer_indention_refs
)
}
\arguments{
\item{pd_nested}{A nested parse table.}
\item{outer_lag_newlines}{The lag_newlines to be propagated inwards.}
\item{outer_indent}{The indention depth to be propagated inwards.}
\item{outer_spaces}{The number of spaces to be propagated inwards.}
\item{outer_indention_refs}{The reference pos id that should be propagated
inwards.}
}
\value{
An updated parse table.
}
\description{
Implements a very specific pre-visiting scheme, namely to propagate
indention, spaces and lag_newlines from inner tokens to terminals. This means
that information regarding indention, line breaks and spaces (which is
relative in \code{pd_nested}) will be converted into absolute.
}
\seealso{
context_towards_terminals visitors
}
\keyword{internal}
| /man/context_to_terminals.Rd | permissive | r-lib/styler | R | false | true | 1,047 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visit.R
\name{context_to_terminals}
\alias{context_to_terminals}
\title{Propagate context to terminals}
\usage{
context_to_terminals(
pd_nested,
outer_lag_newlines,
outer_indent,
outer_spaces,
outer_indention_refs
)
}
\arguments{
\item{pd_nested}{A nested parse table.}
\item{outer_lag_newlines}{The lag_newlines to be propagated inwards.}
\item{outer_indent}{The indention depth to be propagated inwards.}
\item{outer_spaces}{The number of spaces to be propagated inwards.}
\item{outer_indention_refs}{The reference pos id that should be propagated
inwards.}
}
\value{
An updated parse table.
}
\description{
Implements a very specific pre-visiting scheme, namely to propagate
indention, spaces and lag_newlines to inner token to terminals. This means
that information regarding indention, line breaks and spaces (which is
relative in \code{pd_nested}) will be converted into absolute.
}
\seealso{
context_towards_terminals visitors
}
\keyword{internal}
|
# End-to-end test of the pkgcheck pipeline: build an 'srr' package skeleton,
# document it with roxygen2, run pkgcheck() on it, and verify the structure
# of the returned checks and of the markdown rendering.
test_that("pkgcheck", {
# A CRAN mirror is needed so package metadata can be resolved non-interactively.
options (repos = c (CRAN = "https://cloud.r-project.org"))
# Create a throwaway package skeleton containing srr stats tags.
d <- srr::srr_stats_pkg_skeleton ()
# roxygenise() emits messages; capture them to assert on their content.
x <- capture.output (
    roxygen2::roxygenise (d),
    type = "message")
expect_true (length (x) > 10)
expect_true (any (grepl ("srrstats", x)))
# pkgcheck() is expected to print progress output while returning a result.
expect_output (
    chk <- pkgcheck (d)
    )
expect_type (chk, "list")
# All documented top-level components must be present in the result.
items <- c ("package",
            "version",
            "license",
            "summary",
            "git",
            "srr",
            "file_list",
            "fns_have_exs",
            "left_assigns",
            "pkgstats",
            "network_file",
            "badges",
            "gp",
            "pkg_versions")
expect_true (all (items %in% names (chk)))
# Rendering the checks to markdown yields a long character vector whose
# attributes carry metadata about the check outcome.
md <- checks_to_markdown (chk, render = FALSE)
expect_type (md, "character")
expect_true (length (md) > 100L)
a <- attributes (md)
expect_true (length (a) > 0L)
expect_true (all (c ("checks_okay",
                     "is_noteworthy",
                     "network_file",
                     "srr_report_file") %in% names (a)))
})
| /tests/testthat/test-pkgcheck.R | no_license | annakrystalli/pkgcheck | R | false | false | 1,552 | r | test_that("pkgcheck", {
# Set a CRAN mirror so dependency metadata can be resolved during the check.
options(repos = c(CRAN = "https://cloud.r-project.org"))

# Build a throwaway srr package skeleton and roxygenise it, capturing the
# messages emitted during documentation generation.
pkg_dir <- srr::srr_stats_pkg_skeleton()
roxy_msgs <- capture.output(
    roxygen2::roxygenise(pkg_dir),
    type = "message")
expect_true(length(roxy_msgs) > 10)
expect_true(any(grepl("srrstats", roxy_msgs)))

# Running pkgcheck should print progress output and return a list.
expect_output(
    chk <- pkgcheck(pkg_dir)
)
expect_type(chk, "list")

# The result must contain all of the documented top-level components.
expected_items <- c(
    "package", "version", "license", "summary", "git", "srr",
    "file_list", "fns_have_exs", "left_assigns", "pkgstats",
    "network_file", "badges", "gp", "pkg_versions"
)
expect_true(all(expected_items %in% names(chk)))

# Rendering the checks to markdown yields a long character vector carrying
# check metadata in its attributes.
md_report <- checks_to_markdown(chk, render = FALSE)
expect_type(md_report, "character")
expect_true(length(md_report) > 100L)

md_attrs <- attributes(md_report)
expect_true(length(md_attrs) > 0L)
expect_true(all(
    c("checks_okay", "is_noteworthy", "network_file", "srr_report_file") %in%
        names(md_attrs)
))
})
|
\name{print.haplin}
\alias{print.haplin}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Print a haplin object}
\description{
Print basic information about a haplin object
}
\usage{
\method{print}{haplin}(x, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{A \code{haplin} object, i.e. the result of running \code{haplin}.}
\item{...}{Other arguments, passed on to \code{print}.}
}
%\details{}
\references{ Gjessing HK and Lie RT. Case-parent triads: Estimating single- and double-dose effects of fetal and maternal disease gene haplotypes. Annals of Human Genetics (2006) 70, pp. 382-396.\cr\cr
Web Site: \url{https://people.uib.no/gjessing/genetics/software/haplin/}}
\author{Hakon K. Gjessing\cr
Professor of Biostatistics\cr
Division of Epidemiology\cr
Norwegian Institute of Public Health\cr
\email{hakon.gjessing@uib.no}}
\note{Further information is found on the web page listed in the references above.
}
\seealso{\code{\link{haplin}}}
| /man/print.haplin.Rd | no_license | cran/Haplin | R | false | false | 982 | rd | \name{print.haplin}
\alias{print.haplin}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Print a haplin object}
\description{
Print basic information about a haplin object
}
\usage{
\method{print}{haplin}(x, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{A \code{haplin} object, i.e. the result of running \code{haplin}.}
\item{...}{Other arguments, passed on to \code{print}.}
}
%\details{}
\references{ Gjessing HK and Lie RT. Case-parent triads: Estimating single- and double-dose effects of fetal and maternal disease gene haplotypes. Annals of Human Genetics (2006) 70, pp. 382-396.\cr\cr
Web Site: \url{https://people.uib.no/gjessing/genetics/software/haplin/}}
\author{Hakon K. Gjessing\cr
Professor of Biostatistics\cr
Division of Epidemiology\cr
Norwegian Institute of Public Health\cr
\email{hakon.gjessing@uib.no}}
\note{Further information is found on the web page listed in the references above.
}
\seealso{\code{\link{haplin}}}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mapgist.R
\name{map_gist}
\alias{map_gist}
\title{Publish an interactive map as a GitHub gist}
\usage{
map_gist(
input,
lat = "lat",
lon = "long",
geometry = "point",
group = NULL,
type = "FeatureCollection",
file = "myfile.geojson",
description = "",
public = TRUE,
browse = TRUE,
...
)
}
\arguments{
\item{input}{Input object}
\item{lat}{Name of latitude variable}
\item{lon}{Name of longitude variable}
\item{geometry}{(character) Are polygons in the object}
\item{group}{(character) A grouping variable to perform grouping for
polygons - doesn't apply for points}
\item{type}{(character) One of FeatureCollection or GeometryCollection}
\item{file}{File name to use to put up as the gist file}
\item{description}{Description for the GitHub gist, or leave to default
(=no description)}
\item{public}{(logical) Want gist to be public or not? Default: \code{TRUE}}
\item{browse}{If \code{TRUE} (default) the map opens in your default browser.}
\item{...}{Further arguments passed on to \code{httr::POST}}
}
\description{
There are two ways to authorize to work with your GitHub
account:
\itemize{
\item PAT - Generate a personal access token (PAT) at
https://help.github.com/articles/creating-an-access-token-for-command-line-use
and record it in the \code{GITHUB_PAT} envar in your \code{.Renviron} file.
\item Interactive - Interactively log in to your GitHub account and authorise
with OAuth.
}
Using the PAT method is recommended.
Using the \code{gist_auth()} function you can authenticate separately first, or
if you're not authenticated, this function will run internally with each
function call. If you have a PAT, that will be used, if not, OAuth will
be used.
}
\examples{
\dontrun{
if (!identical(Sys.getenv("GITHUB_PAT"), "")) {
# From file
file <- "myfile.geojson"
geojson_write(us_cities[1:20, ], lat='lat', lon='long', file = file)
map_gist(file=as.location(file))
# From SpatialPoints class
library("sp")
x <- c(1,2,3,4,5)
y <- c(3,2,5,1,4)
s <- SpatialPoints(cbind(x,y))
map_gist(s)
# from SpatialPointsDataFrame class
x <- c(1,2,3,4,5)
y <- c(3,2,5,1,4)
s <- SpatialPointsDataFrame(cbind(x,y), mtcars[1:5,])
map_gist(s)
# from SpatialPolygons class
poly1 <- Polygons(list(Polygon(cbind(c(-100,-90,-85,-100),
c(40,50,45,40)))), "1")
poly2 <- Polygons(list(Polygon(cbind(c(-90,-80,-75,-90),
c(30,40,35,30)))), "2")
sp_poly <- SpatialPolygons(list(poly1, poly2), 1:2)
map_gist(sp_poly)
# From SpatialPolygonsDataFrame class
sp_polydf <- as(sp_poly, "SpatialPolygonsDataFrame")
map_gist(sp_poly)
# From SpatialLines class
c1 <- cbind(c(1,2,3), c(3,2,2))
c2 <- cbind(c1[,1]+.05,c1[,2]+.05)
c3 <- cbind(c(1,2,3),c(1,1.5,1))
L1 <- Line(c1)
L2 <- Line(c2)
L3 <- Line(c3)
Ls1 <- Lines(list(L1), ID = "a")
Ls2 <- Lines(list(L2, L3), ID = "b")
sl1 <- SpatialLines(list(Ls1))
sl12 <- SpatialLines(list(Ls1, Ls2))
map_gist(sl1)
# From SpatialLinesDataFrame class
dat <- data.frame(X = c("Blue", "Green"),
Y = c("Train", "Plane"),
Z = c("Road", "River"), row.names = c("a", "b"))
sldf <- SpatialLinesDataFrame(sl12, dat)
map_gist(sldf)
# From SpatialGrid
x <- GridTopology(c(0,0), c(1,1), c(5,5))
y <- SpatialGrid(x)
map_gist(y)
# From SpatialGridDataFrame
sgdim <- c(3,4)
sg <- SpatialGrid(GridTopology(rep(0,2), rep(10,2), sgdim))
sgdf <- SpatialGridDataFrame(sg, data.frame(val = 1:12))
map_gist(sgdf)
# from data.frame
## to points
map_gist(us_cities)
## to polygons
head(states)
map_gist(states[1:351, ], lat='lat', lon='long', geometry="polygon", group='group')
## From a list
mylist <- list(list(lat=30, long=120, marker="red"),
list(lat=30, long=130, marker="blue"))
map_gist(mylist, lat="lat", lon="long")
# From a numeric vector
## of length 2 to a point
vec <- c(-99.74,32.45)
map_gist(vec)
## this requires numeric class input, so inputting a list will dispatch on the list method
poly <- c(c(-114.345703125,39.436192999314095),
c(-114.345703125,43.45291889355468),
c(-106.61132812499999,43.45291889355468),
c(-106.61132812499999,39.436192999314095),
c(-114.345703125,39.436192999314095))
map_gist(poly, geometry = "polygon")
# From a json object
(x <- geojson_json(c(-99.74,32.45)))
map_gist(x)
## another example
map_gist(geojson_json(us_cities[1:10,], lat='lat', lon='long'))
# From a geo_list object
(res <- geojson_list(us_cities[1:2,], lat='lat', lon='long'))
map_gist(res)
# From SpatialPixels
pixels <- suppressWarnings(SpatialPixels(SpatialPoints(us_cities[c("long", "lat")])))
summary(pixels)
map_gist(pixels)
# From SpatialPixelsDataFrame
pixelsdf <- suppressWarnings(
SpatialPixelsDataFrame(points = canada_cities[c("long", "lat")], data = canada_cities)
)
map_gist(pixelsdf)
# From SpatialRings
library("rgeos")
r1 <- Ring(cbind(x=c(1,1,2,2,1), y=c(1,2,2,1,1)), ID="1")
r2 <- Ring(cbind(x=c(1,1,2,2,1), y=c(1,2,2,1,1)), ID="2")
r1r2 <- SpatialRings(list(r1, r2))
map_gist(r1r2)
# From SpatialRingsDataFrame
dat <- data.frame(id = c(1,2), value = 3:4)
r1r2df <- SpatialRingsDataFrame(r1r2, data = dat)
map_gist(r1r2df)
}
}
}
| /man/map_gist.Rd | permissive | ChrisJones687/geojsonio | R | false | true | 5,182 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mapgist.R
\name{map_gist}
\alias{map_gist}
\title{Publish an interactive map as a GitHub gist}
\usage{
map_gist(
input,
lat = "lat",
lon = "long",
geometry = "point",
group = NULL,
type = "FeatureCollection",
file = "myfile.geojson",
description = "",
public = TRUE,
browse = TRUE,
...
)
}
\arguments{
\item{input}{Input object}
\item{lat}{Name of latitude variable}
\item{lon}{Name of longitude variable}
\item{geometry}{(character) Are polygons in the object}
\item{group}{(character) A grouping variable to perform grouping for
polygons - doesn't apply for points}
\item{type}{(character) One of FeatureCollection or GeometryCollection}
\item{file}{File name to use to put up as the gist file}
\item{description}{Description for the GitHub gist, or leave to default
(=no description)}
\item{public}{(logical) Want gist to be public or not? Default: \code{TRUE}}
\item{browse}{If \code{TRUE} (default) the map opens in your default browser.}
\item{...}{Further arguments passed on to \code{httr::POST}}
}
\description{
There are two ways to authorize to work with your GitHub
account:
\itemize{
\item PAT - Generate a personal access token (PAT) at
https://help.github.com/articles/creating-an-access-token-for-command-line-use
and record it in the \code{GITHUB_PAT} envar in your \code{.Renviron} file.
\item Interactive - Interactively log in to your GitHub account and authorise
with OAuth.
}
Using the PAT method is recommended.
Using the \code{gist_auth()} function you can authenticate separately first, or
if you're not authenticated, this function will run internally with each
function call. If you have a PAT, that will be used, if not, OAuth will
be used.
}
\examples{
\dontrun{
if (!identical(Sys.getenv("GITHUB_PAT"), "")) {
# From file
file <- "myfile.geojson"
geojson_write(us_cities[1:20, ], lat='lat', lon='long', file = file)
map_gist(file=as.location(file))
# From SpatialPoints class
library("sp")
x <- c(1,2,3,4,5)
y <- c(3,2,5,1,4)
s <- SpatialPoints(cbind(x,y))
map_gist(s)
# from SpatialPointsDataFrame class
x <- c(1,2,3,4,5)
y <- c(3,2,5,1,4)
s <- SpatialPointsDataFrame(cbind(x,y), mtcars[1:5,])
map_gist(s)
# from SpatialPolygons class
poly1 <- Polygons(list(Polygon(cbind(c(-100,-90,-85,-100),
c(40,50,45,40)))), "1")
poly2 <- Polygons(list(Polygon(cbind(c(-90,-80,-75,-90),
c(30,40,35,30)))), "2")
sp_poly <- SpatialPolygons(list(poly1, poly2), 1:2)
map_gist(sp_poly)
# From SpatialPolygonsDataFrame class
sp_polydf <- as(sp_poly, "SpatialPolygonsDataFrame")
map_gist(sp_polydf)
# From SpatialLines class
c1 <- cbind(c(1,2,3), c(3,2,2))
c2 <- cbind(c1[,1]+.05,c1[,2]+.05)
c3 <- cbind(c(1,2,3),c(1,1.5,1))
L1 <- Line(c1)
L2 <- Line(c2)
L3 <- Line(c3)
Ls1 <- Lines(list(L1), ID = "a")
Ls2 <- Lines(list(L2, L3), ID = "b")
sl1 <- SpatialLines(list(Ls1))
sl12 <- SpatialLines(list(Ls1, Ls2))
map_gist(sl1)
# From SpatialLinesDataFrame class
dat <- data.frame(X = c("Blue", "Green"),
Y = c("Train", "Plane"),
Z = c("Road", "River"), row.names = c("a", "b"))
sldf <- SpatialLinesDataFrame(sl12, dat)
map_gist(sldf)
# From SpatialGrid
x <- GridTopology(c(0,0), c(1,1), c(5,5))
y <- SpatialGrid(x)
map_gist(y)
# From SpatialGridDataFrame
sgdim <- c(3,4)
sg <- SpatialGrid(GridTopology(rep(0,2), rep(10,2), sgdim))
sgdf <- SpatialGridDataFrame(sg, data.frame(val = 1:12))
map_gist(sgdf)
# from data.frame
## to points
map_gist(us_cities)
## to polygons
head(states)
map_gist(states[1:351, ], lat='lat', lon='long', geometry="polygon", group='group')
## From a list
mylist <- list(list(lat=30, long=120, marker="red"),
list(lat=30, long=130, marker="blue"))
map_gist(mylist, lat="lat", lon="long")
# From a numeric vector
## of length 2 to a point
vec <- c(-99.74,32.45)
map_gist(vec)
## this requires numeric class input, so inputting a list will dispatch on the list method
poly <- c(c(-114.345703125,39.436192999314095),
c(-114.345703125,43.45291889355468),
c(-106.61132812499999,43.45291889355468),
c(-106.61132812499999,39.436192999314095),
c(-114.345703125,39.436192999314095))
map_gist(poly, geometry = "polygon")
# From a json object
(x <- geojson_json(c(-99.74,32.45)))
map_gist(x)
## another example
map_gist(geojson_json(us_cities[1:10,], lat='lat', lon='long'))
# From a geo_list object
(res <- geojson_list(us_cities[1:2,], lat='lat', lon='long'))
map_gist(res)
# From SpatialPixels
pixels <- suppressWarnings(SpatialPixels(SpatialPoints(us_cities[c("long", "lat")])))
summary(pixels)
map_gist(pixels)
# From SpatialPixelsDataFrame
pixelsdf <- suppressWarnings(
SpatialPixelsDataFrame(points = canada_cities[c("long", "lat")], data = canada_cities)
)
map_gist(pixelsdf)
# From SpatialRings
library("rgeos")
r1 <- Ring(cbind(x=c(1,1,2,2,1), y=c(1,2,2,1,1)), ID="1")
r2 <- Ring(cbind(x=c(1,1,2,2,1), y=c(1,2,2,1,1)), ID="2")
r1r2 <- SpatialRings(list(r1, r2))
map_gist(r1r2)
# From SpatialRingsDataFrame
dat <- data.frame(id = c(1,2), value = 3:4)
r1r2df <- SpatialRingsDataFrame(r1r2, data = dat)
map_gist(r1r2df)
}
}
}
|
library(reshape2)
library(dplyr)
library(xts)
library(dygraphs)
library(ggplot2)

# Pre-aggregated metrics, one dataset per breakdown (by speciality / by city).
speciality_wise <- readRDS("speciality_wise.rda")
city_wise <- readRDS("city_wise.rda")

# Missing counts mean "nothing recorded": treat them as zero.
speciality_wise[is.na(speciality_wise)] <- 0
city_wise[is.na(city_wise)] <- 0

# Reshape to long format: one row per Date / group / Category / metric.
df1 <- melt(speciality_wise, id.vars = c("Date", "Speciality", "Category"))
df2 <- melt(city_wise, id.vars = c("Date", "City_Name", "Category"))
df1$Date <- as.Date(df1$Date, format = "%m/%d/%Y")
df2$Date <- as.Date(df2$Date, format = "%m/%d/%Y")
df2$City_Name <- as.factor(df2$City_Name)
df2$Category <- as.factor(df2$Category)

# Split out one subset per metric so each reactive below only has to
# filter on the user's Speciality/City and Category selections.
request <- df1 %>% filter(variable == "Request")
booking <- df1 %>% filter(variable == "Booking")
opd <- df1 %>% filter(variable == "OPD")
ipd <- df1 %>% filter(variable == "IPD")
request2 <- df2 %>% filter(variable == "Request")
booking2 <- df2 %>% filter(variable == "Booking")
opd2 <- df2 %>% filter(variable == "OPD")
ipd2 <- df2 %>% filter(variable == "IPD")

shinyServer(
  function(input, output) {
    # ---- Speciality-wise reactives: one Date/n table per metric ----
    selected1 <- reactive({
      request %>%
        filter(Speciality == input$Speciality, Category == input$Type) %>%
        group_by(Date) %>%
        summarise(n = value)
    })
    selected2 <- reactive({
      booking %>%
        filter(Speciality == input$Speciality, Category == input$Type) %>%
        group_by(Date) %>%
        summarise(n = value)
    })
    selected3 <- reactive({
      opd %>%
        filter(Speciality == input$Speciality, Category == input$Type) %>%
        group_by(Date) %>%
        summarise(n = value)
    })
    selected4 <- reactive({
      ipd %>%
        filter(Speciality == input$Speciality, Category == input$Type) %>%
        group_by(Date) %>%
        summarise(n = value)
    })
    # ---- City-wise reactives: one Date/n table per metric ----
    selected5 <- reactive({
      request2 %>%
        filter(City_Name == input$City, Category == input$Category) %>%
        group_by(Date) %>%
        summarise(n = value)
    })
    selected6 <- reactive({
      booking2 %>%
        filter(City_Name == input$City, Category == input$Category) %>%
        group_by(Date) %>%
        summarise(n = value)
    })
    selected7 <- reactive({
      opd2 %>%
        filter(City_Name == input$City, Category == input$Category) %>%
        group_by(Date) %>%
        summarise(n = value)
    })
    selected8 <- reactive({
      ipd2 %>%
        filter(City_Name == input$City, Category == input$Category) %>%
        group_by(Date) %>%
        summarise(n = value)
    })
    # Full long tables (all four metrics) for the grouped bar charts.
    bar <- reactive({
      df1 %>%
        filter(Speciality == input$Speciality, Category == input$Type)
    })
    bar2 <- reactive({
      df2 %>%
        filter(City_Name == input$City, Category == input$Category)
    })

    # Time series of all four metrics for the chosen speciality.
    output$dygraph <- renderDygraph({
      spe_xts <- xts(cbind(selected1()$n, selected2()$n, selected3()$n, selected4()$n),
                     order.by = as.Date(selected1()$Date))
      dygraph(spe_xts, xlab = "Month (Plot For Speciality_wise)", ylab = "Value") %>%
        dySeries("V1", label = "Request", color = "red", fillGraph = F, strokeWidth = 3, drawPoints = T, pointSize = 3) %>%
        dySeries("V2", label = "Booking", color = "green", fillGraph = F, strokeWidth = 3, drawPoints = T, pointSize = 3) %>%
        dySeries("V3", label = "OPD", color = "purple", fillGraph = F, strokeWidth = 3, drawPoints = T, pointSize = 3) %>%
        dySeries("V4", label = "IPD", color = "orange", fillGraph = F, strokeWidth = 3, drawPoints = T, pointSize = 3) %>%
        dyLegend(labelsDiv = "legendDivID", labelsSeparateLines = T)
    })
    # Time series of all four metrics for the chosen city.
    output$dygraph2 <- renderDygraph({
      spe_xts2 <- xts(cbind(selected5()$n, selected6()$n, selected7()$n, selected8()$n),
                      order.by = as.Date(selected5()$Date))
      dygraph(spe_xts2, xlab = "Month (Plot For City_wise)", ylab = "Value") %>%
        dySeries("V1", label = "Request", color = "red", fillGraph = F, strokeWidth = 3, drawPoints = T, pointSize = 3) %>%
        dySeries("V2", label = "Booking", color = "green", fillGraph = F, strokeWidth = 3, drawPoints = T, pointSize = 3) %>%
        dySeries("V3", label = "OPD", color = "purple", fillGraph = F, strokeWidth = 3, drawPoints = T, pointSize = 3) %>%
        dySeries("V4", label = "IPD", color = "orange", fillGraph = F, strokeWidth = 3, drawPoints = T, pointSize = 3) %>%
        dyLegend(labelsDiv = "legendDivID", labelsSeparateLines = T)
    })

    # ---- Speciality-wise histograms + summaries ----
    # NOTE: stray top-level br() calls removed; htmltools tags are no-ops
    # inside server logic (they only have effect in the UI definition).
    output$plotgraph1 <- renderPlot({
      hist(selected1()$n, col = "red", main = paste("Histogram of Request for", input$Speciality, sep = " "), xlab = "Range", ylab = "Number")
    })
    output$plotgraph2 <- renderPlot({
      hist(selected2()$n, col = "green", main = paste("Histogram of Booking for", input$Speciality, sep = " "), xlab = "Range", ylab = "Number")
    })
    output$plotgraph3 <- renderPlot({
      hist(selected3()$n, col = "purple", main = paste("Histogram of OPD for", input$Speciality, sep = " "), xlab = "Range", ylab = "Number")
    })
    output$plotgraph4 <- renderPlot({
      hist(selected4()$n, col = "orange", main = paste("Histogram of IPD for", input$Speciality, sep = " "), xlab = "Range", ylab = "Number")
    })
    output$text1 <- renderPrint({
      summary(selected1()$n)
    })
    output$text2 <- renderPrint({
      summary(selected2()$n)
    })
    output$text3 <- renderPrint({
      summary(selected3()$n)
    })
    output$text4 <- renderPrint({
      summary(selected4()$n)
    })

    # ---- City-wise histograms + summaries ----
    # FIX: titles previously interpolated input$Speciality although the
    # underlying data is filtered by input$City.
    output$plotgraph5 <- renderPlot({
      hist(selected5()$n, col = "red", main = paste("Histogram of Request for", input$City, sep = " "), xlab = "Range", ylab = "Number")
    })
    output$plotgraph6 <- renderPlot({
      hist(selected6()$n, col = "green", main = paste("Histogram of Booking for", input$City, sep = " "), xlab = "Range", ylab = "Number")
    })
    output$plotgraph7 <- renderPlot({
      hist(selected7()$n, col = "purple", main = paste("Histogram of OPD for", input$City, sep = " "), xlab = "Range", ylab = "Number")
    })
    output$plotgraph8 <- renderPlot({
      hist(selected8()$n, col = "orange", main = paste("Histogram of IPD for", input$City, sep = " "), xlab = "Range", ylab = "Number")
    })
    output$text5 <- renderPrint({
      summary(selected5()$n)
    })
    output$text6 <- renderPrint({
      summary(selected6()$n)
    })
    output$text7 <- renderPrint({
      summary(selected7()$n)
    })
    output$text8 <- renderPrint({
      summary(selected8()$n)
    })

    # Grouped bar chart, all metrics by date, for the chosen speciality.
    output$barplot <- renderPlot({
      ggplot(bar(), aes(x = as.factor(Date), y = value, fill = variable)) +
        geom_bar(position = "dodge", stat = "identity") +
        xlab("Date") + ylab("Value") +
        geom_text(aes(label = round(value, digits = 1)), position = position_dodge(width = 1),
                  vjust = -0.25, color = "blue", size = 4) +
        ggtitle(paste("Bar Plot of", input$Speciality, "For Request, Booking, OPD, and IPD", sep = " ")) +
        theme(plot.title = element_text(lineheight = 1, face = "bold", colour = "red", size = 26), legend.position = "top")
    }, height = 500, width = 1200)
    # Grouped bar chart for the chosen city.
    # FIX: title previously used input$Speciality for city-filtered data.
    output$barplot2 <- renderPlot({
      ggplot(bar2(), aes(x = as.factor(Date), y = value, fill = variable)) +
        geom_bar(position = "dodge", stat = "identity") +
        xlab("Date") + ylab("Value") +
        geom_text(aes(label = round(value, digits = 1)), position = position_dodge(width = 1),
                  vjust = -0.25, color = "blue", size = 4) +
        ggtitle(paste("Bar Plot of", input$City, "For Request, Booking, OPD, and IPD", sep = " ")) +
        theme(plot.title = element_text(lineheight = 1, face = "bold", colour = "red", size = 26), legend.position = "top")
    }, height = 500, width = 1200)
  }
)
| /App2/server.R | no_license | mithunkmsg/speciality_city_project | R | false | false | 7,635 | r | library(reshape2)
library(dplyr)
library(xts)
library(dygraphs)
library(ggplot2)
speciality_wise<-readRDS("speciality_wise.rda")
city_wise<-readRDS("city_wise.rda")
speciality_wise[is.na(speciality_wise)]<-0
city_wise[is.na(city_wise)]<-0
df1<-melt(speciality_wise,id.vars = c("Date","Speciality","Category"))
df2<-melt(city_wise,id.vars = c("Date","City_Name","Category"))
df1$Date<-as.Date(df1$Date, format="%m/%d/%Y")
df2$Date<-as.Date(df2$Date, format="%m/%d/%Y")
df2$City_Name<-as.factor(df2$City_Name)
df2$Category<-as.factor(df2$Category)
request<-df1 %>% filter(variable=="Request")
booking<-df1 %>% filter(variable=="Booking")
opd<-df1 %>% filter(variable=="OPD")
ipd<-df1 %>% filter(variable=="IPD")
request2<-df2 %>% filter(variable=="Request")
booking2<-df2 %>% filter(variable=="Booking")
opd2<-df2 %>% filter(variable=="OPD")
ipd2<-df2 %>% filter(variable=="IPD")
shinyServer(
function(input,output){
selected1 <- reactive({request %>%
filter(Speciality==input$Speciality,Category == input$Type) %>%
group_by(Date) %>%
summarise(n = value)})
selected2 <- reactive({booking %>%
filter(Speciality==input$Speciality,Category == input$Type) %>%
group_by(Date) %>%
summarise(n = value)})
selected3 <- reactive({opd %>%
filter(Speciality==input$Speciality,Category == input$Type) %>%
group_by(Date) %>%
summarise(n = value)})
selected4 <- reactive({ipd %>%
filter(Speciality==input$Speciality,Category == input$Type) %>%
group_by(Date) %>%
summarise(n = value)})
selected5 <- reactive({request2 %>%
filter(City_Name==input$City,Category == input$Category) %>%
group_by(Date) %>%
summarise(n = value)})
selected6 <- reactive({booking2 %>%
filter(City_Name==input$City,Category == input$Category) %>%
group_by(Date) %>%
summarise(n = value)})
selected7 <- reactive({opd2 %>%
filter(City_Name==input$City,Category == input$Category) %>%
group_by(Date) %>%
summarise(n = value)})
selected8 <- reactive({ipd2 %>%
filter(City_Name==input$City,Category == input$Category) %>%
group_by(Date) %>%
summarise(n = value)})
bar<-reactive({df1 %>%
filter(Speciality==input$Speciality,Category==input$Type)
})
bar2<-reactive({df2 %>%
filter(City_Name==input$City,Category==input$Category)
})
output$dygraph<-renderDygraph({
spe_xts <- xts(cbind(selected1()$n,selected2()$n,selected3()$n,selected4()$n), order.by = as.Date(selected1()$Date))
dygraph(spe_xts,xlab = "Month (Plot For Speciality_wise)",ylab = "Value")%>%
dySeries("V1",label="Request",color="red", fillGraph = F, strokeWidth = 3, drawPoints = T,pointSize=3)%>%
dySeries("V2",label="Booking",color="green",fillGraph = F, strokeWidth = 3, drawPoints = T,pointSize=3)%>%
dySeries("V3",label="OPD",color="purple",fillGraph = F, strokeWidth = 3, drawPoints = T,pointSize=3)%>%
dySeries("V4",label="IPD",color="orange",fillGraph = F, strokeWidth = 3, drawPoints = T,pointSize=3)%>%
dyLegend(labelsDiv = "legendDivID",labelsSeparateLines = T)
})
output$dygraph2<-renderDygraph({
spe_xts2 <- xts(cbind(selected5()$n,selected6()$n,selected7()$n,selected8()$n), order.by = as.Date(selected5()$Date))
dygraph(spe_xts2,xlab = "Month (Plot For City_wise)",ylab = "Value")%>%
dySeries("V1",label="Request",color="red", fillGraph = F, strokeWidth = 3, drawPoints = T,pointSize=3)%>%
dySeries("V2",label="Booking",color="green",fillGraph = F, strokeWidth = 3, drawPoints = T,pointSize=3)%>%
dySeries("V3",label="OPD",color="purple",fillGraph = F, strokeWidth = 3, drawPoints = T,pointSize=3)%>%
dySeries("V4",label="IPD",color="orange",fillGraph = F, strokeWidth = 3, drawPoints = T,pointSize=3)%>%
dyLegend(labelsDiv = "legendDivID",labelsSeparateLines = T)
})
output$plotgraph1<-renderPlot({
hist(selected1()$n,col = "red",main = paste("Histogram of Request for",input$Speciality,sep = " "),xlab = "Range",ylab = "Number")
})
br()
output$plotgraph2<-renderPlot({
hist(selected2()$n,col = "green",main = paste("Histogram of Booking for",input$Speciality,sep = " "),xlab = "Range",ylab = "Number")
})
output$plotgraph3<-renderPlot({
hist(selected3()$n,col = "purple",main = paste("Histogram of OPD for",input$Speciality,sep = " "),xlab = "Range",ylab = "Number")
})
output$plotgraph4<-renderPlot({
hist(selected4()$n,col = "orange",main = paste("Histogram of IPD for",input$Speciality,sep = " "),xlab = "Range",ylab = "Number")
})
output$text1<-renderPrint({
summary(selected1()$n)
})
output$text2<-renderPrint({
summary(selected2()$n)
})
output$text3<-renderPrint({
summary(selected3()$n)
})
output$text4<-renderPrint({
summary(selected4()$n)
})
output$plotgraph5<-renderPlot({
hist(selected5()$n,col = "red",main = paste("Histogram of Request for",input$Speciality,sep = " "),xlab = "Range",ylab = "Number")
})
br()
output$plotgraph6<-renderPlot({
hist(selected6()$n,col = "green",main = paste("Histogram of Booking for",input$Speciality,sep = " "),xlab = "Range",ylab = "Number")
})
output$plotgraph7<-renderPlot({
hist(selected7()$n,col = "purple",main = paste("Histogram of OPD for",input$Speciality,sep = " "),xlab = "Range",ylab = "Number")
})
output$plotgraph8<-renderPlot({
hist(selected8()$n,col = "orange",main = paste("Histogram of IPD for",input$Speciality,sep = " "),xlab = "Range",ylab = "Number")
})
output$text5<-renderPrint({
summary(selected5()$n)
})
output$text6<-renderPrint({
summary(selected6()$n)
})
output$text7<-renderPrint({
summary(selected7()$n)
})
output$text8<-renderPrint({
summary(selected8()$n)
})
output$barplot<-renderPlot({
ggplot(bar(), aes(x = as.factor(Date), y = value, fill = variable)) +
geom_bar(position = "dodge", stat = "identity") +
xlab("Date") + ylab("Value") +
geom_text(aes(label = round(value, digits = 1)), position = position_dodge(width = 1),
vjust = -0.25, color = "blue", size = 4) + ggtitle(paste("Bar Plot of",input$Speciality,"For Request, Booking, OPD, and IPD",sep=" ")) +
theme(plot.title = element_text(lineheight = 1, face = "bold",colour = "red",size = 26),legend.position ="top")
},height=500,width=1200)
output$barplot2<-renderPlot({
ggplot(bar2(), aes(x = as.factor(Date), y = value, fill = variable)) +
geom_bar(position = "dodge", stat = "identity") +
xlab("Date") + ylab("Value") +
geom_text(aes(label = round(value, digits = 1)), position = position_dodge(width = 1),
vjust = -0.25, color = "blue", size = 4) + ggtitle(paste("Bar Plot of",input$Speciality,"For Request, Booking, OPD, and IPD",sep=" ")) +
theme(plot.title = element_text(lineheight = 1, face = "bold",colour = "red",size = 26),legend.position ="top")
},height=500,width=1200)
}
)
|
# nolint start
#' Shortcut to avoid specifying origin
#'
#' Wraps [as.Date()] with `origin` pinned to the Unix epoch
#' ("1970-01-01") so numeric inputs convert without boilerplate.
#'
#' @param x an object to be converted
#' @param \dots further arguments to be passed from or to other methods
#' @export
as_date <- function(x, ...) {
  as.Date(x, ..., origin = "1970-01-01")
}
#' Month of a date object
#'
#' @param x anything [as.POSIXlt()] can interpret as a date-time
#' @param abb if `TRUE` (default) return the abbreviated month name
#'   ("Jan"); otherwise the full name ("January"). Only the first
#'   element is consulted.
#' @return character vector of month names, same length as `x`
#' @export
month <- \(x, abb = TRUE) {
# POSIXlt months are 0-based; shift to 1..12 for indexing the
# built-in month-name constants.
m <- as.POSIXlt(x)$mon + 1
if (abb[1]) month.abb[m] else month.name[m]
}
#' Year of a date object
#'
#' @param x anything [as.POSIXlt()] can interpret as a date-time
#' @return numeric vector of 4-digit years, same length as `x`
#' @export
year <- function(x) {
  # POSIXlt stores years as an offset from 1900.
  1900 + as.POSIXlt(x)$year
}
# nolint end | /R/date.r | permissive | hrbrmstr/hrbrmisc | R | false | false | 540 | r | # nolint start
#' Shortcut to avoid specifying origin
#'
#' @param x an object to be converted
#' @param \dots further arguments to be passed from or to other methods
#' @export
as_date <- function(x, ...) {
as.Date(x, origin="1970-01-01", ...)
}
#' Mont of date object
#'
#' @param x date thingy
#' @export
month <- \(x, abb = TRUE) {
m <- as.POSIXlt(x)$mon + 1
if (abb[1]) month.abb[m] else month.name[m]
}
#' Year of date object
#'
#' @param x date thingy
#' @export
year <- \(x) {
as.POSIXlt(x)$year + 1900
}
# nolint end |
shinyServer(function(input, output, session) {
  # Restrict iris to the two columns the user picked in the UI.
  selectedData <- reactive({
    iris[, c(input$xcol, input$ycol)]
  })

  # Re-run k-means whenever the column choice or k changes.
  k <- reactive({
    kmeans(selectedData(), input$k)
  })

  # Scatter plot colored by cluster assignment, centers overlaid.
  output$clusterplot <- renderPlot({
    par(mar = c(5.1, 4.1, 0, 1))
    plot(selectedData(), col = k()$cluster, pch = 10, cex = 2)
    points(k()$centers, pch = 7, cex = 3, lwd = 3)
  })

  # Average number of observations that would land in each cluster.
  output$text <- renderText({
    paste0("Average neighbors by cluster: ",
           length(iris$Sepal.Length) / input$k)
  })
})
}) | /Server.R | no_license | felixds/dataproduct | R | false | false | 621 | r | shinyServer(function(input, output, session) {
# new data frame with input data
selectedData <- reactive({
iris[, c(input$xcol, input$ycol)]
})
# clustering by number of clusters
k <- reactive({kmeans(selectedData(), input$k)})
output$clusterplot <- renderPlot({
par(mar = c(5.1, 4.1, 0, 1))
plot(selectedData(),
col = k()$cluster,
pch = 10, cex = 2)
points(k()$centers, pch = 7, cex = 3, lwd = 3)
})
# Average by K value
output$text <- renderText({
paste0("Average neighbors by cluster: ",
(length(iris$Sepal.Length) / input$k))
})
}) |
################################################################
# >eR-BioStat #
# #
# GLM #
# CAHPTER 7 #
# GLM #
# #
# 2018 #
# Ziv Shkedy & Fetene Tekle #
################################################################
################################################################
## Example 1: Budworm data dose response data of the binomial #
################################################################
# Dose levels (log2 scale, 0..5), replicated once per sex group.
ldose <- rep(0:5, 2)
# Number of dead budworms out of 20 at each dose, males then females.
numdead <- c(1, 4, 9, 13, 18, 20, 0, 2, 6, 10, 12, 16)
sex <- factor(rep(c("M", "F"), c(6, 6)))
# Two-column success/failure response matrix required by glm() for
# grouped binomial data.
SF <- cbind(numdead, numalive=20-numdead)
# Observed kill proportion per dose (20 insects per group).
p<-numdead/20
# Full model: sex, dose, and their interaction on the logit scale.
budworm.lg <- glm(SF ~ sex*ldose, family=binomial)
summary(budworm.lg)
# Visual check of the dose-response relationship on raw and log dose.
par(mfrow=c(1,2))
plot(p ~ ldose)
plot(p ~ log(ldose))
# Nested sequence of models: dose only, additive, with interaction.
budworm.lg1 <- glm(SF ~ ldose, family=binomial)
budworm.lg2 <- glm(SF ~ sex + ldose, family=binomial)
budworm.lg3<- glm(SF ~ sex*ldose, family=binomial)
summary(budworm.lg3)
################################################################
## Example 2: beetles data                                     #
################################################################
# NOTE(review): hard-coded Windows path; adjust to the local data
# location before running.
beetle<-read.table("C:/projects/GLM/data4glm/beetle.txt", header = TRUE)
# attach() puts the columns (killed, beetles, Dose) on the search
# path; the lines below rely on it.
attach(beetle)
p<-killed/beetles
unkilled<-beetles-killed
Proportionkilled<-p
plot(Proportionkilled~Dose, main="Proportion of the killed beetles")
plot(Dose, Proportionkilled, pch=16, type="o")
# Mean kill proportion per dose level.
tapply(Proportionkilled, list(Dose), mean)
library(MASS)
# Binomial GLM with complementary log-log link; confint() gives
# profile-likelihood 95% intervals (MASS method).
model.conf <-glm(cbind(killed,unkilled)~Dose, family=binomial("cloglog"), data=beetle)
confint(model.conf, level=0.95)
################################################################
## Example 3: mice data                                        #
################################################################
# Reconstruct individual-level 2x2 data: response (0/1) by treatment.
resp<-as.factor(c(rep(0,21),rep(1,2),rep(0,19),rep(1,13)))
trti<-as.factor(c(rep(1,21),rep(1,2),rep(2,19),rep(2,13)))
cbind(resp,trti)
table(trti,resp)
library(MASS)
# Logistic regression of response on treatment group.
fit.mice<-glm(resp~trti,family=binomial(link = "logit"))
summary(fit.mice)
confint(fit.mice, level=0.95)
################################################################
## Example 4: HIV data                                         #
################################################################
# NOTE(review): hard-coded Windows path; adjust before running.
hivdat <- read.csv("C:/projects/VLIR/CrossCutting/CoursesUpdated/BinaryKasim/data/data 2_1_8.csv", header=TRUE)
hivdat
# Overall HIV prevalence in the sample.
phiv<-mean(hivdat$hiv)
pie(c(phiv,1-phiv),labels=c("Positive","Negative"),col=c("1","2"))
par(mfrow=c(1,1))
plot(hivdat$age,hivdat$hiv)
# Logistic regression of HIV status on age.
hiv.fit1<-glm(hiv~age,family=binomial(link = "logit"),data=hivdat)
summary(hiv.fit1)
confint(hiv.fit1 , level=0.95)
#re=as.factor(c(rep(1,21),rep(0,2),rep(1,19),rep(0,13)))
#tr=as.factor(c(rep(1,21),rep(1,2),rep(2,19),rep(2,13)))
#cbind(re,tr)
#table(tr,re)
library(MASS)
#########################################################
# END OF CHAPTER 7 #
#########################################################
| /Statistical modeling (1)/glm/R programs/GLMchapter7_2018.R | no_license | eR-Biostat/Courses | R | false | false | 3,439 | r | ################################################################
# >eR-BioStat #
# #
# GLM #
# CAHPTER 7 #
# GLM #
# #
# 2018 #
# Ziv Shkedy & Fetene Tekle #
################################################################
################################################################
## Example 1: Budworm data dose response data of the binomial #
################################################################
ldose <- rep(0:5, 2)
numdead <- c(1, 4, 9, 13, 18, 20, 0, 2, 6, 10, 12, 16)
sex <- factor(rep(c("M", "F"), c(6, 6)))
SF <- cbind(numdead, numalive=20-numdead)
p<-numdead/20
budworm.lg <- glm(SF ~ sex*ldose, family=binomial)
summary(budworm.lg)
par(mfrow=c(1,2))
plot(p ~ ldose)
plot(p ~ log(ldose))
budworm.lg1 <- glm(SF ~ ldose, family=binomial)
budworm.lg2 <- glm(SF ~ sex + ldose, family=binomial)
budworm.lg3<- glm(SF ~ sex*ldose, family=binomial)
summary(budworm.lg3)
################################################################
## Example 2: beetles data #
################################################################
beetle<-read.table("C:/projects/GLM/data4glm/beetle.txt", header = TRUE)
attach(beetle)
p<-killed/beetles
unkilled<-beetles-killed
Proportionkilled<-p
plot(Proportionkilled~Dose, main="Proportion of the killed beetles")
plot(Dose, Proportionkilled, pch=16, type="o")
tapply(Proportionkilled, list(Dose), mean)
library(MASS)
model.conf <-glm(cbind(killed,unkilled)~Dose, family=binomial("cloglog"), data=beetle)
confint(model.conf, level=0.95)
################################################################
## Example 3: mice data #
################################################################
resp<-as.factor(c(rep(0,21),rep(1,2),rep(0,19),rep(1,13)))
trti<-as.factor(c(rep(1,21),rep(1,2),rep(2,19),rep(2,13)))
cbind(resp,trti)
table(trti,resp)
library(MASS)
fit.mice<-glm(resp~trti,family=binomial(link = "logit"))
summary(fit.mice)
confint(fit.mice, level=0.95)
################################################################
## Example 4: HIV data #
################################################################
hivdat <- read.csv("C:/projects/VLIR/CrossCutting/CoursesUpdated/BinaryKasim/data/data 2_1_8.csv", header=TRUE)
hivdat
phiv<-mean(hivdat$hiv)
pie(c(phiv,1-phiv),labels=c("Positive","Negative"),col=c("1","2"))
par(mfrow=c(1,1))
plot(hivdat$age,hivdat$hiv)
hiv.fit1<-glm(hiv~age,family=binomial(link = "logit"),data=hivdat)
summary(hiv.fit1)
confint(hiv.fit1 , level=0.95)
#re=as.factor(c(rep(1,21),rep(0,2),rep(1,19),rep(0,13)))
#tr=as.factor(c(rep(1,21),rep(1,2),rep(2,19),rep(2,13)))
#cbind(re,tr)
#table(tr,re)
library(MASS)
#########################################################
# END OF CHAPTER 7 #
#########################################################
|
# Dispatch plugin for crossref full-text retrieval.
# NOTE(review): despite the name, this function matches "entrez" in
# `sources` and fetches via entrez_get() -- looks like a copy/paste from
# plugin_get_entrez; confirm whether it should match "crossref" and use
# a crossref-specific fetcher instead.
plugin_get_crossref <- function(sources, ids, opts, ...){
# Extra args are captured but not forwarded -- presumably kept for
# signature parity with the other plugin_get_* functions.
callopts <- list(...)
if(any(grepl("entrez", sources))){
opts$ids <- ids
# Fetch raw XML records and tag the serialization format so
# downstream parsers know how to read the payload.
out <- do.call(entrez_get, opts)
attr(out, "format") <- "xml"
list(found = length(out), dois = NULL, data = out, opts = opts)
} else {
# Source not requested by the caller: return an empty result shell.
list(found = NULL, dois = NULL, data = NULL, opts = opts)
}
}
# Dispatch plugin for PLOS full-text retrieval. If "plos" is not among
# the requested sources, an empty result shell (echoing opts) is
# returned without touching the network.
plugin_get_plos <- function(sources, ids, opts, ...) {
  callopts <- list(...)
  if (!any(grepl("plos", sources))) {
    return(list(found = NULL, dois = NULL, data = NULL, opts = opts))
  }
  opts$doi <- ids
  opts$callopts <- callopts
  # Fetch full text, tag the payload as XML, and route it through the
  # caching layer before returning.
  res <- do.call(plos_fulltext, opts)
  attr(res, "format") <- "xml"
  list(found = length(res), dois = names(res),
       data = construct_paths(cache_options_get(), res), opts = opts)
}
# Decide where fetched documents live. With caching disabled the raw
# data travels in-memory ("session"); with caching enabled the data is
# persisted via cache_save() and only the path is returned.
construct_paths <- function(co, x) {
  if (co$cache) {
    list(backend = co$backend,
         path = cache_save(obj = x, backend = co$backend, path = co$path),
         data = NULL)
  } else {
    list(backend = NULL,
         path = "session",
         data = x)
  }
}
# Dispatch plugin for Entrez/PMC full-text retrieval. Returns an empty
# result shell when "entrez" is not among the requested sources.
plugin_get_entrez <- function(sources, ids, opts, ...) {
  callopts <- list(...)
  if (!any(grepl("entrez", sources))) {
    return(list(found = NULL, dois = NULL, data = NULL, opts = opts))
  }
  opts$ids <- ids
  # Fetch XML records, tag the format, and route through the cache.
  res <- as.list(do.call(entrez_get, opts))
  attr(res, "format") <- "xml"
  list(found = length(res), dois = names(res),
       data = construct_paths(cache_options_get(), res), opts = opts)
}
# Look up the given DOIs in PubMed Central and fetch each matching
# record as raw XML (character vector, one document per hit).
# NOTE(review): terms are joined with "|"; Entrez query syntax normally
# uses " OR " -- confirm "|" behaves as OR against the pmc database.
entrez_get <- function(ids){
res <- entrez_search(db="pmc", term=paste0(sprintf('%s[doi]', ids), collapse = "|"))
# One fetch per matched PMC id; vapply guarantees a character result.
vapply(res$ids, function(z) entrez_fetch(db = 'pmc', id=z, rettype = "xml"), character(1))
}
# Dispatch plugin for BMC full-text retrieval. `query` holds the URIs
# to fetch. Returns an empty result shell when "bmc" is not among the
# requested sources.
plugin_get_bmc <- function(sources, query, opts, ...) {
  callopts <- list(...)
  if (!any(grepl("bmc", sources))) {
    return(list(found = NULL, dois = NULL, data = NULL, opts = opts))
  }
  opts$uris <- query
  opts$raw <- TRUE
  # Fetch raw XML and tag the serialization format for downstream use.
  res <- do.call(bmc_xml, opts)
  attr(res, "format") <- "xml"
  list(found = length(res), dois = NULL, data = res, opts = opts)
}
# Dispatch plugin for eLife full-text retrieval. Fetches one XML
# document per DOI via elife_paper(). Returns an empty result shell
# when "elife" is not among the requested sources.
plugin_get_elife <- function(sources, ids, opts, ...) {
  callopts <- list(...)
  if (!any(grepl("elife", sources))) {
    return(list(found = NULL, dois = NULL, data = NULL, opts = opts))
  }
  opts$doi <- ids
  papers <- lapply(ids, elife_paper)
  names(papers) <- ids
  attr(papers, "format") <- "xml"
  list(found = length(papers), dois = NULL, data = papers, opts = opts)
}
# Fetch the full-text source XML for one eLife article.
#
# doi: article DOI, interpolated into the eLife source-xml endpoint URL.
# Returns the response body as a single character string of XML.
elife_paper <- function(doi) {
  url <- sprintf("http://elife.elifesciences.org/elife-source-xml/%s", doi)
  # Namespace-qualify GET for consistency with httr::content, so the
  # function also works when httr is not attached to the search path.
  httr::content(httr::GET(url), as = "text")
}
| /R/plugins_get.R | permissive | emhart/fulltext | R | false | false | 2,575 | r | plugin_get_crossref <- function(sources, ids, opts, ...){
callopts <- list(...)
if(any(grepl("entrez", sources))){
opts$ids <- ids
out <- do.call(entrez_get, opts)
attr(out, "format") <- "xml"
list(found = length(out), dois = NULL, data = out, opts = opts)
} else {
list(found = NULL, dois = NULL, data = NULL, opts = opts)
}
}
plugin_get_plos <- function(sources, ids, opts, ...){
callopts <- list(...)
if(any(grepl("plos", sources))){
opts$doi <- ids
opts$callopts <- callopts
out <- do.call(plos_fulltext, opts)
attr(out, "format") <- "xml"
list(found = length(out), dois = names(out), data = construct_paths(cache_options_get(), out), opts = opts)
} else {
list(found = NULL, dois = NULL, data = NULL, opts = opts)
}
}
construct_paths <- function(co, x){
if(!co$cache){
list(backend = NULL,
path = "session",
data = x)
} else {
list(backend = co$backend,
path = cache_save(obj = x, backend = co$backend, path = co$path),
data = NULL)
}
}
plugin_get_entrez <- function(sources, ids, opts, ...){
callopts <- list(...)
if(any(grepl("entrez", sources))){
opts$ids <- ids
out <- as.list(do.call(entrez_get, opts))
attr(out, "format") <- "xml"
list(found = length(out), dois = names(out), data = construct_paths(cache_options_get(), out), opts = opts)
} else {
list(found = NULL, dois = NULL, data = NULL, opts = opts)
}
}
entrez_get <- function(ids){
res <- entrez_search(db="pmc", term=paste0(sprintf('%s[doi]', ids), collapse = "|"))
vapply(res$ids, function(z) entrez_fetch(db = 'pmc', id=z, rettype = "xml"), character(1))
}
plugin_get_bmc <- function(sources, query, opts, ...){
callopts <- list(...)
if(any(grepl("bmc", sources))){
opts$uris <- query
opts$raw <- TRUE
out <- do.call(bmc_xml, opts)
attr(out, "format") <- "xml"
list(found = length(out), dois = NULL, data = out, opts = opts)
} else {
list(found = NULL, dois = NULL, data = NULL, opts = opts)
}
}
plugin_get_elife <- function(sources, ids, opts, ...){
callopts <- list(...)
if(any(grepl("elife", sources))){
opts$doi <- ids
out2 <- lapply(ids, elife_paper)
names(out2) <- ids
attr(out2, "format") <- "xml"
list(found = length(out2), dois = NULL, data = out2, opts = opts)
} else {
list(found = NULL, dois = NULL, data = NULL, opts = opts)
}
}
elife_paper <- function(doi) {
url <- sprintf("http://elife.elifesciences.org/elife-source-xml/%s", doi)
httr::content(GET(url), as="text")
}
|
# Household power consumption dataset (zip) hosted on CloudFront.
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
filename <- "power_consumption.zip"
# Download only once; reuse the local copy on later runs.
if(!file.exists(filename)) {
download.file(fileUrl, filename)
}
unzip(filename)
# Semicolon-separated file; "?" marks missing values. First two columns
# (Date, Time) are read as character, the remaining seven as numeric.
epc <- read.csv("household_power_consumption.txt", sep=";", na.strings="?",
colClasses = c(rep("character", times=2),rep("numeric", times=7)))
library(dplyr)
# Keep only 2007-02-01 and 2007-02-02, and build a POSIXct timestamp
# from the separate Date/Time columns (day/month/year format).
epcsub <- epc %>% filter(Date=="1/2/2007" | Date=="2/2/2007") %>%
mutate(datetime = as.POSIXct(strptime(paste(Date, Time), "%d/%m/%Y %H:%M:%S")))
# 2x2 panel of time-series plots written to plot4.png.
png(filename="plot4.png")
par(mfrow=c(2,2))
with(epcsub, plot(datetime, Global_active_power, type="l", xlab="", ylab="Global Active Power"))
with(epcsub, plot(datetime, Voltage, type="l"))
# Third panel overlays the three sub-metering series with a legend.
with(epcsub, plot(datetime, Sub_metering_1, type="l", xlab="", ylab="Energy sub metering"))
with(epcsub, lines(datetime, Sub_metering_2, col="red"))
with(epcsub, lines(datetime, Sub_metering_3, col="blue"))
legend("topright", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
col=c("black", "red","blue"), lwd=1, bty="n")
with(epcsub, plot(datetime, Global_reactive_power, type="l"))
# Close the device to flush the PNG to disk.
dev.off()
filename <- "power_consumption.zip"
if(!file.exists(filename)) {
download.file(fileUrl, filename)
}
unzip(filename)
epc <- read.csv("household_power_consumption.txt", sep=";", na.strings="?",
colClasses = c(rep("character", times=2),rep("numeric", times=7)))
library(dplyr)
epcsub <- epc %>% filter(Date=="1/2/2007" | Date=="2/2/2007") %>%
mutate(datetime = as.POSIXct(strptime(paste(Date, Time), "%d/%m/%Y %H:%M:%S")))
png(filename="plot4.png")
par(mfrow=c(2,2))
with(epcsub, plot(datetime, Global_active_power, type="l", xlab="", ylab="Global Active Power"))
with(epcsub, plot(datetime, Voltage, type="l"))
with(epcsub, plot(datetime, Sub_metering_1, type="l", xlab="", ylab="Energy sub metering"))
with(epcsub, lines(datetime, Sub_metering_2, col="red"))
with(epcsub, lines(datetime, Sub_metering_3, col="blue"))
legend("topright", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
col=c("black", "red","blue"), lwd=1, bty="n")
with(epcsub, plot(datetime, Global_reactive_power, type="l"))
dev.off() |
#setting up a function to extract efficiency data
library(qpcR)
# Extract a qPCR efficiency statistic for several fluorescence columns.
#
# dat:    raw qPCR data frame passed to qpcR::pcrfit()
# fluor:  column indices of the fluorescence traces to fit
# eftype: name of the efficiency() statistic to extract, e.g. "cpD1"
#         (first-derivative maximum) or "cpD2" (second-derivative max)
# Returns a numeric vector with one value per entry of `fluor`.
#
# Rewritten to use vapply instead of growing a vector with c() in a
# loop (O(n^2) copying), and `<-` instead of `=` for assignment.
efdat <- function(dat, fluor, eftype) {
  vapply(fluor, function(col) {
    # Fit a 4-parameter log-logistic sigmoid to this trace, then read
    # off the requested statistic (assumed to be a single number).
    fit <- pcrfit(data = dat, fluo = col, model = l4)
    efficiency(fit, plot = FALSE, type = eftype)[[eftype]]
  }, numeric(1))
}
# First/second-derivative-max cycles for the FAM and HEX channels.
# NOTE(review): f517/h517 (amplification data) and fluof/fluoh (column
# index vectors) are defined elsewhere in this file -- confirm upstream.
fcpD1 = efdat(f517,fluof,"cpD1")
hcpD1 = efdat(h517,fluoh,"cpD1")
fcpD2 = efdat(f517,fluof,"cpD2")
hcpD2 = efdat(h517,fluoh,"cpD2")
# Dilution series: 5 replicates at each treatment level, 300000 down
# to 3 copies plus a no-template control (0).
values= c(rep(300000,5),
rep(30000,5),
rep(3000,5),
rep(300,5),
rep(30,5),
rep(3,5),
rep(0,5))
#cpD1 plots
# Point-of-interest cycle vs treatment level (log x-axis) per channel.
famframe = data.frame(PoI = fcpD1, Treatment = values)
hexframe = data.frame(PoI = hcpD1, Treatment = values)
plot(x = famframe[,2], y = famframe[,1], log = "x",xlab = "Treatment Level", ylab = "PoI Cycle", main = "FAM Main Trials")
plot(x = hexframe[,2], y = hexframe[,1], log = "x",xlab = "Treatment Level", ylab = "PoI Cycle", main = "HEX Main Trials")
#cpD2 plots
# Same plots using the second-derivative-max statistic; note famframe
# and hexframe are reassigned here.
famframe = data.frame(PoI = fcpD2, Treatment = values)
hexframe = data.frame(PoI = hcpD2, Treatment = values)
plot(x = famframe[,2], y = famframe[,1], log = "x",xlab = "Treatment Level", ylab = "PoI Cycle", main = "FAM Main Trials")
plot(x = hexframe[,2], y = hexframe[,1], log = "x",xlab = "Treatment Level", ylab = "PoI Cycle", main = "HEX Main Trials")
# #now with multiplex data
# fluom <- c(12,13,24,25,36,37,48,49,60,61,72,73,84,85,86:94,96,97)
# values<- c(3000,3000,300,300,30,30,3,3,3000,3000,300,300,30,30,3000,300,30,3,rep(0,5),3,3)
# hexmcpD1 = efdat(h517,fluom,"cpD1")
# fammcpD1 = efdat(f517,fluom,"cpD1")
# fammframe = data.frame(PoI = fammcpD1, Treatment = values)
# hexmframe = data.frame(PoI = hexmcpD1, Treatment = values)
# plot(x = fammframe[,2], y = fammframe[,1], log = "x",xlab = "Treatment Level", ylab = "PoI Cycle", main = "FAM Main Trials")
# plot(x = hexmframe[,2], y = hexmframe[,1], log = "x",xlab = "Treatment Level", ylab = "PoI Cycle", main = "HEX Main Trials")
# #trying without the NTC data
# fluom <- c(12,13,24,25,36,37,48,49,60,61,72,73,84,85,86:89,96,97)
# values<- c(3000,3000,300,300,30,30,3,3,3000,3000,300,300,30,30,3000,300,30,3,3,3)
# hexmcpD1 = efdat(h517,fluom,"cpD1")
# fammcpD1 = efdat(f517,fluom,"cpD1")
# fammframe = data.frame(PoI = fammcpD1, Treatment = values)
# hexmframe = data.frame(PoI = hexmcpD1, Treatment = values)
# plot(x = fammframe[,2], y = fammframe[,1], log = "x",xlab = "Treatment Level", ylab = "PoI Cycle", main = "FAM Multiplex w/ Treatment")
# plot(x = hexmframe[,2], y = hexmframe[,1], log = "x",xlab = "Treatment Level", ylab = "PoI Cycle", main = "HEX Multiplex w/ Treatment")
# #seems that col 85 (G12) has a curve that can't be fit
plot(x = f517[,1], y = f517[,85])
#now without 85, but with NTC
fluom <- c(12,13,24,25,36,37,48,49,60,61,72,73,84,86:94,96,97)
values<- c(3000,3000,300,300,30,30,3,3,3000,3000,300,300,30,3000,300,30,3,rep(0,5),3,3)
hexmcpD1 = efdat(h517,fluom,"cpD1")
fammcpD1 = efdat(f517,fluom,"cpD1")
fammframe = data.frame(PoI = fammcpD1, Treatment = values)
hexmframe = data.frame(PoI = hexmcpD1, Treatment = values)
plot(x = fammframe[,2], y = fammframe[,1], log = "x",xlab = "Treatment Level", ylab = "PoI Cycle", main = "FAM Multiplex")
plot(x = hexmframe[,2], y = hexmframe[,1], log = "x",xlab = "Treatment Level", ylab = "PoI Cycle", main = "HEX Multiplex")
#also checkin cpD2
fluom <- c(12,13,24,25,36,37,48,49,60,61,72,73,84,86:94,96,97)
values<- c(3000,3000,300,300,30,30,3,3,3000,3000,300,300,30,3000,300,30,3,rep(0,5),3,3)
hexmcpD1 = efdat(h517,fluom,"cpD2")
fammcpD1 = efdat(f517,fluom,"cpD2")
fammframe = data.frame(PoI = fammcpD1, Treatment = values)
hexmframe = data.frame(PoI = hexmcpD1, Treatment = values)
plot(x = fammframe[,2], y = fammframe[,1], log = "x",xlab = "Treatment Level", ylab = "PoI Cycle", main = "FAM Multiplex")
plot(x = hexmframe[,2], y = hexmframe[,1], log = "x",xlab = "Treatment Level", ylab = "PoI Cycle", main = "HEX Multiplex")
#new 627 data
f627 <- read.csv(file = "f627.csv", header = TRUE, sep = ",")
h627<- read.csv(file = "h627.csv", header = TRUE, sep = ",")
#Generally what we want to use
#A8 is a FAM NTC
# use f627[,9]
mdl<-pcrfit(f627,1,9,l4)
efficiency(mdl)
# looks like last half of curve (sqrt looking)
#A9 is also
# use f627[,10]
mdl<-pcrfit(f627,1,10,l4)
efficiency(mdl)
# looks like above curve
#B2 is a 1:10 FAM: HEX multiplex
# use f627[,15] and h627[,15]
mdl<-pcrfit(f627,1,15,l4)
efficiency(mdl)
# fam looks like first half of curve (exponential section)
mdl<-pcrfit(h627,1,15,l4)
efficiency(mdl)
# hex looks the same, but has cpD1,cpD2 < 40
#C2 is a 1:100 multiplex
# use f627[,27] and h627[,27]
mdl<-pcrfit(f627,1,27,l4)
efficiency(mdl)
# fam looks like start up, level off, then exponential cpD1/D2= 40
mdl<-pcrfit(h627,1,27,l4)
efficiency(mdl)
# hex looks very exponental, but with cpD1/D2 < 40
#B5 is the same as B2
# use f627[,18] and h627[,18]
mdl<-pcrfit(f627,1,18,l4)
efficiency(mdl)
# fam looks the same as C2
mdl<-pcrfit(h627,1,18,l4)
efficiency(mdl)
# hex looks like C2
#D7 is .03 copies of FAM
# use f627[,44]
mdl<-pcrfit(f627,1,44,l4)
efficiency(mdl)
# just starts taking off, CpD1/D2 = 40
| /qpcR/POIcalc.R | no_license | jdc5884/Stapleton-Lab | R | false | false | 5,186 | r | #setting up a function to extract efficiency data
library(qpcR)
efdat <- function(dat, fluor, eftype) {
final = numeric()
for (x in fluor) {
ft<-pcrfit(data = dat , fluo = x, model = l4)
ef<- efficiency(ft, plot = FALSE, type = eftype)
final = c(final,ef[[eftype]])
}
return(final)
}
fcpD1 = efdat(f517,fluof,"cpD1")
hcpD1 = efdat(h517,fluoh,"cpD1")
fcpD2 = efdat(f517,fluof,"cpD2")
hcpD2 = efdat(h517,fluoh,"cpD2")
values= c(rep(300000,5),
rep(30000,5),
rep(3000,5),
rep(300,5),
rep(30,5),
rep(3,5),
rep(0,5))
#cpD1 plots
famframe = data.frame(PoI = fcpD1, Treatment = values)
hexframe = data.frame(PoI = hcpD1, Treatment = values)
plot(x = famframe[,2], y = famframe[,1], log = "x",xlab = "Treatment Level", ylab = "PoI Cycle", main = "FAM Main Trials")
plot(x = hexframe[,2], y = hexframe[,1], log = "x",xlab = "Treatment Level", ylab = "PoI Cycle", main = "HEX Main Trials")
#cpD2 plots
famframe = data.frame(PoI = fcpD2, Treatment = values)
hexframe = data.frame(PoI = hcpD2, Treatment = values)
plot(x = famframe[,2], y = famframe[,1], log = "x",xlab = "Treatment Level", ylab = "PoI Cycle", main = "FAM Main Trials")
plot(x = hexframe[,2], y = hexframe[,1], log = "x",xlab = "Treatment Level", ylab = "PoI Cycle", main = "HEX Main Trials")
# #now with multiplex data
# fluom <- c(12,13,24,25,36,37,48,49,60,61,72,73,84,85,86:94,96,97)
# values<- c(3000,3000,300,300,30,30,3,3,3000,3000,300,300,30,30,3000,300,30,3,rep(0,5),3,3)
# hexmcpD1 = efdat(h517,fluom,"cpD1")
# fammcpD1 = efdat(f517,fluom,"cpD1")
# fammframe = data.frame(PoI = fammcpD1, Treatment = values)
# hexmframe = data.frame(PoI = hexmcpD1, Treatment = values)
# plot(x = fammframe[,2], y = fammframe[,1], log = "x",xlab = "Treatment Level", ylab = "PoI Cycle", main = "FAM Main Trials")
# plot(x = hexmframe[,2], y = hexmframe[,1], log = "x",xlab = "Treatment Level", ylab = "PoI Cycle", main = "HEX Main Trials")
# #trying without the NTC data
# fluom <- c(12,13,24,25,36,37,48,49,60,61,72,73,84,85,86:89,96,97)
# values<- c(3000,3000,300,300,30,30,3,3,3000,3000,300,300,30,30,3000,300,30,3,3,3)
# hexmcpD1 = efdat(h517,fluom,"cpD1")
# fammcpD1 = efdat(f517,fluom,"cpD1")
# fammframe = data.frame(PoI = fammcpD1, Treatment = values)
# hexmframe = data.frame(PoI = hexmcpD1, Treatment = values)
# plot(x = fammframe[,2], y = fammframe[,1], log = "x",xlab = "Treatment Level", ylab = "PoI Cycle", main = "FAM Multiplex w/ Treatment")
# plot(x = hexmframe[,2], y = hexmframe[,1], log = "x",xlab = "Treatment Level", ylab = "PoI Cycle", main = "HEX Multiplex w/ Treatment")
# #seems that col 85 (G12) has a curve that can't be fit
plot(x = f517[,1], y = f517[,85])
#now without 85, but with NTC
fluom <- c(12,13,24,25,36,37,48,49,60,61,72,73,84,86:94,96,97)
values<- c(3000,3000,300,300,30,30,3,3,3000,3000,300,300,30,3000,300,30,3,rep(0,5),3,3)
hexmcpD1 = efdat(h517,fluom,"cpD1")
fammcpD1 = efdat(f517,fluom,"cpD1")
fammframe = data.frame(PoI = fammcpD1, Treatment = values)
hexmframe = data.frame(PoI = hexmcpD1, Treatment = values)
plot(x = fammframe[,2], y = fammframe[,1], log = "x",xlab = "Treatment Level", ylab = "PoI Cycle", main = "FAM Multiplex")
plot(x = hexmframe[,2], y = hexmframe[,1], log = "x",xlab = "Treatment Level", ylab = "PoI Cycle", main = "HEX Multiplex")
#also checkin cpD2
fluom <- c(12,13,24,25,36,37,48,49,60,61,72,73,84,86:94,96,97)
values<- c(3000,3000,300,300,30,30,3,3,3000,3000,300,300,30,3000,300,30,3,rep(0,5),3,3)
hexmcpD1 = efdat(h517,fluom,"cpD2")
fammcpD1 = efdat(f517,fluom,"cpD2")
fammframe = data.frame(PoI = fammcpD1, Treatment = values)
hexmframe = data.frame(PoI = hexmcpD1, Treatment = values)
plot(x = fammframe[,2], y = fammframe[,1], log = "x",xlab = "Treatment Level", ylab = "PoI Cycle", main = "FAM Multiplex")
plot(x = hexmframe[,2], y = hexmframe[,1], log = "x",xlab = "Treatment Level", ylab = "PoI Cycle", main = "HEX Multiplex")
#new 627 data
f627 <- read.csv(file = "f627.csv", header = TRUE, sep = ",")
h627<- read.csv(file = "h627.csv", header = TRUE, sep = ",")
#Generally what we want to use
#A8 is a FAM NTC
# use f627[,9]
mdl<-pcrfit(f627,1,9,l4)
efficiency(mdl)
# looks like last half of curve (sqrt looking)
#A9 is also
# use f627[,10]
mdl<-pcrfit(f627,1,10,l4)
efficiency(mdl)
# looks like above curve
#B2 is a 1:10 FAM: HEX multiplex
# use f627[,15] and h627[,15]
mdl<-pcrfit(f627,1,15,l4)
efficiency(mdl)
# fam looks like first half of curve (exponential section)
mdl<-pcrfit(h627,1,15,l4)
efficiency(mdl)
# hex looks the same, but has cpD1,cpD2 < 40
#C2 is a 1:100 multiplex
# use f627[,27] and h627[,27]
mdl<-pcrfit(f627,1,27,l4)
efficiency(mdl)
# fam looks like start up, level off, then exponential cpD1/D2= 40
mdl<-pcrfit(h627,1,27,l4)
efficiency(mdl)
# hex looks very exponental, but with cpD1/D2 < 40
#B5 is the same as B2
# use f627[,18] and h627[,18]
mdl<-pcrfit(f627,1,18,l4)
efficiency(mdl)
# fam looks the same as C2
mdl<-pcrfit(h627,1,18,l4)
efficiency(mdl)
# hex looks like C2
#D7 is .03 copies of FAM
# use f627[,44]
mdl<-pcrfit(f627,1,44,l4)
efficiency(mdl)
# just starts taking off, CpD1/D2 = 40
|
#
# Copyright 2007-2016 The OpenMx Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
mxRun <- function(model, ..., intervals=NULL, silent = FALSE,
suppressWarnings = FALSE, unsafe = FALSE,
checkpoint = FALSE, useSocket = FALSE, onlyFrontend = FALSE,
useOptimizer = TRUE){
if (.hasSlot(model, '.version')) {
mV <- model@.version
curV <- packageVersion('OpenMx')
if (curV$major != mV$major ||
curV$minor != mV$minor) {
warning(paste0("You are using OpenMx version ", curV,
" with a model created by OpenMx version ",
mV, ". This may work fine (fingers crossed), but if you run into ",
"trouble then please recreate your model with the ",
"current version of OpenMx."))
}
}
if (is.null(intervals)) {
# OK
} else if (length(intervals) != 1 ||
typeof(intervals) != "logical" ||
is.na(intervals)) {
stop(paste("'intervals' argument",
"must be TRUE or FALSE in",
deparse(width.cutoff = 400L, sys.call())), call. = FALSE)
}
frontendStart <- Sys.time()
garbageArguments <- list(...)
if (length(garbageArguments) > 0) {
stop("mxRun does not accept values for the '...' argument")
}
runHelper(model, frontendStart, intervals,
silent, suppressWarnings, unsafe,
checkpoint, useSocket, onlyFrontend, useOptimizer)
}
runHelper <- function(model, frontendStart,
intervals, silent, suppressWarnings,
unsafe, checkpoint, useSocket, onlyFrontend, useOptimizer, parentData = NULL) {
Rcpp::Module # ensure Rcpp is loaded
model <- imxPreprocessModel(model)
model <- eliminateObjectiveFunctions(model)
model <- zapExtraneousMatrices(model)
imxCheckMatrices(model)
imxVerifyModel(model)
model <- processParentData(model, parentData)
if (modelIsHollow(model)) {
independents <- getAllIndependents(model)
indepTimeStart <- Sys.time()
independents <- omxLapply(independents, runHelper,
frontendStart = frontendStart,
intervals = intervals, silent = silent,
suppressWarnings = suppressWarnings, unsafe = unsafe,
checkpoint = checkpoint, useSocket = useSocket,
onlyFrontend = onlyFrontend, useOptimizer = useOptimizer, parentData = model@data)
indepTimeStop <- Sys.time()
indepElapsed <- indepTimeStop - indepTimeStart
return(processHollowModel(model, independents,
frontendStart, indepElapsed))
}
dataList <- generateDataList(model)
dshare <- shareData(model)
independents <- getAllIndependents(dshare)
indepTimeStart <- Sys.time()
independents <- omxLapply(independents, mxRun,
intervals = intervals, silent = silent,
suppressWarnings = suppressWarnings, unsafe = unsafe,
checkpoint = checkpoint, useSocket = useSocket,
onlyFrontend = onlyFrontend, useOptimizer = useOptimizer)
indepTimeStop <- Sys.time()
indepElapsed <- indepTimeStop - indepTimeStart
if (modelIsHollow(model)) {
return(processHollowModel(model, independents,
frontendStart, indepElapsed))
}
frozen <- lapply(independents, imxFreezeModel)
model <- imxReplaceModels(model, frozen)
namespace <- imxGenerateNamespace(model)
flatModel <- imxFlattenModel(model, namespace)
options <- generateOptionsList(model, length(flatModel@constraints), useOptimizer)
options[['intervals']] <- intervals
if (!is.null(model@compute) && (!.hasSlot(model@compute, '.persist') || !model@compute@.persist)) {
model@compute <- NULL
}
if (!is.null(model@expectation) && is.null(model@fitfunction) && is.null(model@compute)) {
# The purpose of this check is to prevent analysts new to OpenMx
# from running nonsensical models.
stop(paste(model@name, " has expectation ", class(model@expectation),
", but there is no fitfunction given, and no default.\n",
"To fix, see, e.g. help(mxFitFunctionML) for an example fit function, and how these pair with the expectation", sep = ""))
}
defaultComputePlan <- (is.null(model@compute) || is(model@compute, 'MxComputeDefault'))
if (!useOptimizer && !defaultComputePlan) {
warning("mxRun(..., useOptimizer=FALSE) ignored due to custom compute plan")
}
if (!is.null(model@fitfunction) && defaultComputePlan) {
compute <- NULL
fitNum <- paste(model@name, 'fitfunction', sep=".")
if (!useOptimizer) {
compute <- mxComputeOnce(from=fitNum, 'fit', .is.bestfit=TRUE)
} else {
steps = list(GD=mxComputeGradientDescent(fitfunction=fitNum))
if (length(intervals) && intervals) {
ciOpt <- mxComputeGradientDescent(
fitfunction=fitNum, nudgeZeroStarts=FALSE, maxMajorIter=150)
cType <- 'ineq'
if (ciOpt$engine == "NPSOL") cType <- 'none'
steps <- c(steps, CI=mxComputeConfidenceInterval(
fitfunction=fitNum, constraintType=cType, plan=ciOpt))
}
if (options[["Calculate Hessian"]] == "Yes") {
steps <- c(steps, ND=mxComputeNumericDeriv(fitfunction=fitNum))
}
if (options[["Standard Errors"]] == "Yes") {
steps <- c(steps, SE=mxComputeStandardError(), HQ=mxComputeHessianQuality())
}
compute <- mxComputeSequence(c(steps,
RD=mxComputeReportDeriv(),
RE=mxComputeReportExpectation()))
}
compute@.persist <- FALSE
model@compute <- compute
}
if (!is.null(model@compute)) model@compute <- assignId(model@compute, 1L, '.')
flatModelCompute <- safeQualifyNames(model@compute, model@name, namespace)
omxCheckNamespace(model, namespace)
convertArguments <- imxCheckVariables(flatModel, namespace)
flatModel <- constraintsToAlgebras(flatModel)
flatModel <- eliminateObjectiveFunctions(flatModel)
flatModel <- convertAlgebras(flatModel, convertArguments)
defVars <- generateDefinitionList(flatModel, list())
model <- expectationFunctionAddEntities(model, flatModel, labelsData)
model <- preprocessDatasets(model, defVars, model@options) # DEPRECATED
flatModel@datasets <- collectDatasets(model) # done in imxFlattenModel, but confusingly do it again
labelsData <- imxGenerateLabels(model)
model <- fitFunctionAddEntities(model, flatModel, labelsData)
if (model@.newobjects) {
namespace <- imxGenerateNamespace(model)
flatModel <- imxFlattenModel(model, namespace)
labelsData <- imxGenerateLabels(model)
}
flatModel <- expectationFunctionConvertEntities(flatModel, namespace, labelsData)
if (model@.newobjects) {
convertArguments <- imxCheckVariables(flatModel, namespace)
flatModel <- constraintsToAlgebras(flatModel)
flatModel <- eliminateObjectiveFunctions(flatModel)
flatModel <- convertAlgebras(flatModel, convertArguments)
}
dependencies <- cycleDetection(flatModel)
dependencies <- transitiveClosure(flatModel, dependencies)
flatModel <- populateDefInitialValues(flatModel)
flatModel <- checkEvaluation(model, flatModel)
flatModel@compute <- flatModelCompute
freeVarGroups <- buildFreeVarGroupList(flatModel)
flatModel <- generateParameterList(flatModel, dependencies, freeVarGroups)
matrices <- generateMatrixList(flatModel)
algebras <- generateAlgebraList(flatModel)
if (length(defVars)) {
# We're only going to find them if we found them the first time
defVars <- generateDefinitionList(flatModel, dependencies)
}
expectations <- convertExpectationFunctions(flatModel, model, labelsData, dependencies)
if (length(expectations)) {
prec <- lapply(expectations, genericExpGetPrecision)
functionPrecision <- Reduce(max, c(as.numeric(options[['Function precision']]),
sapply(prec, function(x) x[['functionPrecision']])))
options[['Function precision']] <- as.character(functionPrecision)
if (defaultComputePlan && is(model@compute, "MxComputeSequence")) {
iterations <- Reduce(min, c(4L, sapply(prec, function(x) x[['iterations']])))
stepSize <- Reduce(max, c(1e-4, sapply(prec, function(x) x[['stepSize']])))
model <- adjustDefaultNumericDeriv(model, iterations, stepSize)
flatModel <- adjustDefaultNumericDeriv(flatModel, iterations, stepSize)
}
}
fitfunctions <- convertFitFunctions(flatModel, model, labelsData, dependencies)
data <- convertDatasets(flatModel@datasets, model, flatModel)
numAlgebras <- length(algebras)
algebras <- append(algebras, fitfunctions)
constraints <- convertConstraints(flatModel)
parameters <- flatModel@parameters
numParam <- length(parameters)
if (numParam == 0 && defaultComputePlan && !is.null(model@fitfunction)) {
compute <- mxComputeOnce(from=paste(model@name, 'fitfunction', sep="."),
'fit', .is.bestfit=TRUE)
compute@.persist <- FALSE
compute <- assignId(compute, 1L, '.')
model@compute <- compute
flatModel@compute <- compute
}
intervalList <- generateIntervalList(flatModel, model@name, parameters, labelsData)
communication <- generateCommunicationList(model, checkpoint, useSocket, model@options)
useOptimizer <- useOptimizer && PPML.Check.UseOptimizer(model@options$UsePPML)
options <- limitMajorIterations(options, numParam, length(constraints))
computes <- convertComputes(flatModel, model)
frontendStop <- Sys.time()
frontendElapsed <- (frontendStop - frontendStart) - indepElapsed
if(!silent) message("Running ", model@name, " with ", numParam, " parameter",
ifelse(numParam==1, "", "s"))
if (onlyFrontend) return(model)
output <- .Call(backend,
constraints, matrices, parameters,
algebras, expectations, computes,
data, intervalList, communication, options, defVars, PACKAGE = "OpenMx")
backendStop <- Sys.time()
backendElapsed <- backendStop - frontendStop
model <- updateModelMatrices(model, flatModel, output$matrices)
model <- updateModelAlgebras(model, flatModel, output$algebras)
model <- updateModelExpectations(model, flatModel, output$expectations)
model <- updateModelExpectationDims(model, expectations)
model <- updateModelData(model, flatModel, output$data)
model@compute <-updateModelCompute(model, output$computes)
output[['computes']] <- NULL
if (!is.null(output[['bounds']])) {
model <- omxSetParameters(model, names(parameters),
lbound=output[['bounds']][['l']],
ubound=output[['bounds']][['u']])
output[['bounds']] <- NULL
}
independents <- lapply(independents, undoDataShare, dataList)
model <- imxReplaceModels(model, independents)
model@output <- nameOptimizerOutput(suppressWarnings, flatModel,
names(matrices), names(algebras),
names(parameters), output)
theFitUnits <- model$output$fitUnits
if( length(theFitUnits) > 0 && theFitUnits %in% "r'Wr" ){
wlsSEs <- imxWlsStandardErrors(model)
model@output$standardErrors <- wlsSEs$SE
model@output$hessian <- 2*solve(wlsSEs$Cov) #puts in same units as m2ll Hessian
wlsChi <- imxWlsChiSquare(model, J=wlsSEs$Jac)
model@output$chi <- wlsChi$Chi
model@output$chiDoF <- wlsChi$ChiDoF
}
if (model@output$status$code < 5 && !is.null(model@output[['infoDefinite']]) &&
!is.na(model@output[['infoDefinite']]) && !model@output[['infoDefinite']]) {
model@output$status$code <- 5
}
# Currently runstate preserves the pre-backend state of the model.
# Eventually this needs to capture the post-backend state,
# but we need tests in place for summary output to ensure that we
# don't cause regressions.
runstate <- model@runstate
runstate$parameters <- parameters
runstate$matrices <- matrices
runstate$fitfunctions <- fitfunctions
runstate$expectations <- expectations
runstate$datalist <- data
runstate$constraints <- flatModel@constraints
runstate$independents <- independents
runstate$defvars <- names(defVars)
runstate$compute <- computes
model@runstate <- runstate
frontendStop <- Sys.time()
frontendElapsed <- frontendElapsed + (frontendStop - backendStop)
model@output <- calculateTiming(model@output, frontendElapsed,
backendElapsed, indepElapsed, frontendStop, independents)
processErrorConditions(model, unsafe, suppressWarnings)
model <- clearModifiedSinceRunRecursive(model)
return(model)
}
updateModelExpectationDims <- function(model, expectations){
expectationNames <- names(expectations)
for(aname in expectationNames){
if(!is.null(model[[aname]])){
model[[aname]]@.runDims <- expectations[[aname]]@dims
}
}
return(model)
}
| /OpenMx/R/MxRun.R | no_license | ingted/R-Examples | R | false | false | 12,491 | r | #
# Copyright 2007-2016 The OpenMx Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
mxRun <- function(model, ..., intervals=NULL, silent = FALSE,
suppressWarnings = FALSE, unsafe = FALSE,
checkpoint = FALSE, useSocket = FALSE, onlyFrontend = FALSE,
useOptimizer = TRUE){
if (.hasSlot(model, '.version')) {
mV <- model@.version
curV <- packageVersion('OpenMx')
if (curV$major != mV$major ||
curV$minor != mV$minor) {
warning(paste0("You are using OpenMx version ", curV,
" with a model created by OpenMx version ",
mV, ". This may work fine (fingers crossed), but if you run into ",
"trouble then please recreate your model with the ",
"current version of OpenMx."))
}
}
if (is.null(intervals)) {
# OK
} else if (length(intervals) != 1 ||
typeof(intervals) != "logical" ||
is.na(intervals)) {
stop(paste("'intervals' argument",
"must be TRUE or FALSE in",
deparse(width.cutoff = 400L, sys.call())), call. = FALSE)
}
frontendStart <- Sys.time()
garbageArguments <- list(...)
if (length(garbageArguments) > 0) {
stop("mxRun does not accept values for the '...' argument")
}
runHelper(model, frontendStart, intervals,
silent, suppressWarnings, unsafe,
checkpoint, useSocket, onlyFrontend, useOptimizer)
}
runHelper <- function(model, frontendStart,
intervals, silent, suppressWarnings,
unsafe, checkpoint, useSocket, onlyFrontend, useOptimizer, parentData = NULL) {
Rcpp::Module # ensure Rcpp is loaded
model <- imxPreprocessModel(model)
model <- eliminateObjectiveFunctions(model)
model <- zapExtraneousMatrices(model)
imxCheckMatrices(model)
imxVerifyModel(model)
model <- processParentData(model, parentData)
if (modelIsHollow(model)) {
independents <- getAllIndependents(model)
indepTimeStart <- Sys.time()
independents <- omxLapply(independents, runHelper,
frontendStart = frontendStart,
intervals = intervals, silent = silent,
suppressWarnings = suppressWarnings, unsafe = unsafe,
checkpoint = checkpoint, useSocket = useSocket,
onlyFrontend = onlyFrontend, useOptimizer = useOptimizer, parentData = model@data)
indepTimeStop <- Sys.time()
indepElapsed <- indepTimeStop - indepTimeStart
return(processHollowModel(model, independents,
frontendStart, indepElapsed))
}
dataList <- generateDataList(model)
dshare <- shareData(model)
independents <- getAllIndependents(dshare)
indepTimeStart <- Sys.time()
independents <- omxLapply(independents, mxRun,
intervals = intervals, silent = silent,
suppressWarnings = suppressWarnings, unsafe = unsafe,
checkpoint = checkpoint, useSocket = useSocket,
onlyFrontend = onlyFrontend, useOptimizer = useOptimizer)
indepTimeStop <- Sys.time()
indepElapsed <- indepTimeStop - indepTimeStart
if (modelIsHollow(model)) {
return(processHollowModel(model, independents,
frontendStart, indepElapsed))
}
frozen <- lapply(independents, imxFreezeModel)
model <- imxReplaceModels(model, frozen)
namespace <- imxGenerateNamespace(model)
flatModel <- imxFlattenModel(model, namespace)
options <- generateOptionsList(model, length(flatModel@constraints), useOptimizer)
options[['intervals']] <- intervals
if (!is.null(model@compute) && (!.hasSlot(model@compute, '.persist') || !model@compute@.persist)) {
model@compute <- NULL
}
if (!is.null(model@expectation) && is.null(model@fitfunction) && is.null(model@compute)) {
# The purpose of this check is to prevent analysts new to OpenMx
# from running nonsensical models.
stop(paste(model@name, " has expectation ", class(model@expectation),
", but there is no fitfunction given, and no default.\n",
"To fix, see, e.g. help(mxFitFunctionML) for an example fit function, and how these pair with the expectation", sep = ""))
}
defaultComputePlan <- (is.null(model@compute) || is(model@compute, 'MxComputeDefault'))
if (!useOptimizer && !defaultComputePlan) {
warning("mxRun(..., useOptimizer=FALSE) ignored due to custom compute plan")
}
if (!is.null(model@fitfunction) && defaultComputePlan) {
compute <- NULL
fitNum <- paste(model@name, 'fitfunction', sep=".")
if (!useOptimizer) {
compute <- mxComputeOnce(from=fitNum, 'fit', .is.bestfit=TRUE)
} else {
steps = list(GD=mxComputeGradientDescent(fitfunction=fitNum))
if (length(intervals) && intervals) {
ciOpt <- mxComputeGradientDescent(
fitfunction=fitNum, nudgeZeroStarts=FALSE, maxMajorIter=150)
cType <- 'ineq'
if (ciOpt$engine == "NPSOL") cType <- 'none'
steps <- c(steps, CI=mxComputeConfidenceInterval(
fitfunction=fitNum, constraintType=cType, plan=ciOpt))
}
if (options[["Calculate Hessian"]] == "Yes") {
steps <- c(steps, ND=mxComputeNumericDeriv(fitfunction=fitNum))
}
if (options[["Standard Errors"]] == "Yes") {
steps <- c(steps, SE=mxComputeStandardError(), HQ=mxComputeHessianQuality())
}
compute <- mxComputeSequence(c(steps,
RD=mxComputeReportDeriv(),
RE=mxComputeReportExpectation()))
}
compute@.persist <- FALSE
model@compute <- compute
}
if (!is.null(model@compute)) model@compute <- assignId(model@compute, 1L, '.')
flatModelCompute <- safeQualifyNames(model@compute, model@name, namespace)
omxCheckNamespace(model, namespace)
convertArguments <- imxCheckVariables(flatModel, namespace)
flatModel <- constraintsToAlgebras(flatModel)
flatModel <- eliminateObjectiveFunctions(flatModel)
flatModel <- convertAlgebras(flatModel, convertArguments)
defVars <- generateDefinitionList(flatModel, list())
model <- expectationFunctionAddEntities(model, flatModel, labelsData)
model <- preprocessDatasets(model, defVars, model@options) # DEPRECATED
flatModel@datasets <- collectDatasets(model) # done in imxFlattenModel, but confusingly do it again
labelsData <- imxGenerateLabels(model)
model <- fitFunctionAddEntities(model, flatModel, labelsData)
if (model@.newobjects) {
namespace <- imxGenerateNamespace(model)
flatModel <- imxFlattenModel(model, namespace)
labelsData <- imxGenerateLabels(model)
}
flatModel <- expectationFunctionConvertEntities(flatModel, namespace, labelsData)
if (model@.newobjects) {
convertArguments <- imxCheckVariables(flatModel, namespace)
flatModel <- constraintsToAlgebras(flatModel)
flatModel <- eliminateObjectiveFunctions(flatModel)
flatModel <- convertAlgebras(flatModel, convertArguments)
}
dependencies <- cycleDetection(flatModel)
dependencies <- transitiveClosure(flatModel, dependencies)
flatModel <- populateDefInitialValues(flatModel)
flatModel <- checkEvaluation(model, flatModel)
flatModel@compute <- flatModelCompute
freeVarGroups <- buildFreeVarGroupList(flatModel)
flatModel <- generateParameterList(flatModel, dependencies, freeVarGroups)
matrices <- generateMatrixList(flatModel)
algebras <- generateAlgebraList(flatModel)
if (length(defVars)) {
# We're only going to find them if we found them the first time
defVars <- generateDefinitionList(flatModel, dependencies)
}
expectations <- convertExpectationFunctions(flatModel, model, labelsData, dependencies)
if (length(expectations)) {
prec <- lapply(expectations, genericExpGetPrecision)
functionPrecision <- Reduce(max, c(as.numeric(options[['Function precision']]),
sapply(prec, function(x) x[['functionPrecision']])))
options[['Function precision']] <- as.character(functionPrecision)
if (defaultComputePlan && is(model@compute, "MxComputeSequence")) {
iterations <- Reduce(min, c(4L, sapply(prec, function(x) x[['iterations']])))
stepSize <- Reduce(max, c(1e-4, sapply(prec, function(x) x[['stepSize']])))
model <- adjustDefaultNumericDeriv(model, iterations, stepSize)
flatModel <- adjustDefaultNumericDeriv(flatModel, iterations, stepSize)
}
}
fitfunctions <- convertFitFunctions(flatModel, model, labelsData, dependencies)
data <- convertDatasets(flatModel@datasets, model, flatModel)
numAlgebras <- length(algebras)
algebras <- append(algebras, fitfunctions)
constraints <- convertConstraints(flatModel)
parameters <- flatModel@parameters
numParam <- length(parameters)
if (numParam == 0 && defaultComputePlan && !is.null(model@fitfunction)) {
compute <- mxComputeOnce(from=paste(model@name, 'fitfunction', sep="."),
'fit', .is.bestfit=TRUE)
compute@.persist <- FALSE
compute <- assignId(compute, 1L, '.')
model@compute <- compute
flatModel@compute <- compute
}
intervalList <- generateIntervalList(flatModel, model@name, parameters, labelsData)
communication <- generateCommunicationList(model, checkpoint, useSocket, model@options)
useOptimizer <- useOptimizer && PPML.Check.UseOptimizer(model@options$UsePPML)
options <- limitMajorIterations(options, numParam, length(constraints))
computes <- convertComputes(flatModel, model)
frontendStop <- Sys.time()
frontendElapsed <- (frontendStop - frontendStart) - indepElapsed
if(!silent) message("Running ", model@name, " with ", numParam, " parameter",
ifelse(numParam==1, "", "s"))
if (onlyFrontend) return(model)
output <- .Call(backend,
constraints, matrices, parameters,
algebras, expectations, computes,
data, intervalList, communication, options, defVars, PACKAGE = "OpenMx")
backendStop <- Sys.time()
backendElapsed <- backendStop - frontendStop
model <- updateModelMatrices(model, flatModel, output$matrices)
model <- updateModelAlgebras(model, flatModel, output$algebras)
model <- updateModelExpectations(model, flatModel, output$expectations)
model <- updateModelExpectationDims(model, expectations)
model <- updateModelData(model, flatModel, output$data)
model@compute <-updateModelCompute(model, output$computes)
output[['computes']] <- NULL
if (!is.null(output[['bounds']])) {
model <- omxSetParameters(model, names(parameters),
lbound=output[['bounds']][['l']],
ubound=output[['bounds']][['u']])
output[['bounds']] <- NULL
}
independents <- lapply(independents, undoDataShare, dataList)
model <- imxReplaceModels(model, independents)
model@output <- nameOptimizerOutput(suppressWarnings, flatModel,
names(matrices), names(algebras),
names(parameters), output)
theFitUnits <- model$output$fitUnits
if( length(theFitUnits) > 0 && theFitUnits %in% "r'Wr" ){
wlsSEs <- imxWlsStandardErrors(model)
model@output$standardErrors <- wlsSEs$SE
model@output$hessian <- 2*solve(wlsSEs$Cov) #puts in same units as m2ll Hessian
wlsChi <- imxWlsChiSquare(model, J=wlsSEs$Jac)
model@output$chi <- wlsChi$Chi
model@output$chiDoF <- wlsChi$ChiDoF
}
if (model@output$status$code < 5 && !is.null(model@output[['infoDefinite']]) &&
!is.na(model@output[['infoDefinite']]) && !model@output[['infoDefinite']]) {
model@output$status$code <- 5
}
# Currently runstate preserves the pre-backend state of the model.
# Eventually this needs to capture the post-backend state,
# but we need tests in place for summary output to ensure that we
# don't cause regressions.
runstate <- model@runstate
runstate$parameters <- parameters
runstate$matrices <- matrices
runstate$fitfunctions <- fitfunctions
runstate$expectations <- expectations
runstate$datalist <- data
runstate$constraints <- flatModel@constraints
runstate$independents <- independents
runstate$defvars <- names(defVars)
runstate$compute <- computes
model@runstate <- runstate
frontendStop <- Sys.time()
frontendElapsed <- frontendElapsed + (frontendStop - backendStop)
model@output <- calculateTiming(model@output, frontendElapsed,
backendElapsed, indepElapsed, frontendStop, independents)
processErrorConditions(model, unsafe, suppressWarnings)
model <- clearModifiedSinceRunRecursive(model)
return(model)
}
updateModelExpectationDims <- function(model, expectations){
expectationNames <- names(expectations)
for(aname in expectationNames){
if(!is.null(model[[aname]])){
model[[aname]]@.runDims <- expectations[[aname]]@dims
}
}
return(model)
}
|
#use this vector to scale the data so that the sum of squares for each column is equal to 1. This isn't written into the code but is useful for comparing to the given example.
# Per-column centered root-sum-of-squares (sd * sqrt(n-1)) over the active columns.
# NOTE(review): `wines` and `sets` are only created in the test section at the
# bottom of this file, so this line errors if the file is sourced top-to-bottom
# before that section has run -- confirm intended execution order.
scaling_vec <- apply(subset(wines, select = unlist(sets)), 2, function(x) sqrt(sum((x - mean(x))^2)))
#helper function for function that separates data into tables and preprocesses
# Re-index the column sets so that the smallest referenced column becomes 1.
#
# sets -- list of integer vectors of column positions in the raw data table
#
# Returns the same list with every position shifted down by a common offset,
# preserving the relative layout of the blocks.
shift_sets <- function(sets) {
  offset <- min(unlist(sets)) - 1
  lapply(sets, function(cols) cols - offset)
}
#separate data into individual tables, preprocess and store in a list
#should this list be an object?
# Split the raw data into per-block tables and preprocess each one.
#
# data   -- data frame (or matrix) holding all active variables
# sets   -- list of column-position vectors, one entry per block
# center -- logical flag or numeric vector forwarded to base::scale()
# scale  -- either a single logical (applied to every block) or a numeric
#           vector with one entry per active variable, indexed after the sets
#           have been re-based to start at column 1
#
# Returns a list of preprocessed matrices, one per block, in the order of `sets`.
data_tables <- function(data, sets, center, scale) {
  new_sets <- shift_sets(sets)
  # Preallocate instead of growing the list inside the loop.
  tables <- vector("list", length(sets))
  for (i in seq_along(sets)) {
    block <- subset(data, select = sets[[i]])
    # A logical `scale` is forwarded as-is; a numeric vector is subset to the
    # columns belonging to this block.  is.logical() is robust where
    # `class(scale) == "logical"` would break for any multi-class object.
    block_scale <- if (is.logical(scale)) scale else scale[new_sets[[i]]]
    tables[[i]] <- scale(block, center = center, scale = block_scale)
  }
  tables
}
#determine the weights for each individual table
#should this be a method?
# Compute the MFA weight for each data table: the inverse square of the
# table's first (largest) singular value, so every table contributes a first
# principal component of unit inertia to the joint analysis.
#
# list -- list of (preprocessed) numeric data tables
#         (the name shadows base::list; kept for call compatibility)
#
# Returns a numeric vector of weights, one per table, in input order.
weights <- function(list) {
  # vapply replaces the grow-in-a-loop pattern and guarantees a numeric result,
  # and only the singular values of each table are needed here.
  vapply(list, function(tbl) {
    first_sv <- svd(tbl)$d[1]
    1 / first_sv^2
  }, numeric(1))
}
#create matrix of weightings
# Build the diagonal weighting matrix A used in the MFA "simple PCA" step.
# Each table's weight is repeated once for every column that table
# contributes, so A is (total #variables) x (total #variables).
#
# weights -- numeric vector of table weights (one per block)
# sets    -- list of column-position vectors defining the blocks
#
# Returns the diagonal matrix of per-column weights.
weighting_matrix <- function(weights, sets) {
  # rep(..., times = lengths(sets)) replaces the grow-in-a-loop pattern.
  weight_vec <- rep(weights, times = lengths(sets))
  # nrow is given explicitly: diag(x) on a length-1 numeric would otherwise
  # build an identity matrix of that size instead of a 1x1 weight matrix.
  diag(x = weight_vec, nrow = length(weight_vec))
}
#MFA as simple PCA
#should this also be a method?
# Run the MFA "simple PCA": weight each block, concatenate, take the SVD, and
# derive eigenvalues, loadings, partial loadings and partial factor scores.
#
# list   -- list of preprocessed data tables (equal row counts)
# sets   -- list of column-position vectors defining the blocks
# ncomps -- number of components to keep for the truncated results;
#           NULL keeps all components
#
# Returns a list with the weighting matrix, the raw SVD, the eigenvalues, the
# full loading matrix, and per-block partial loadings / partial factor scores.
pca_func <- function(list, sets, ncomps) {
  new_sets <- shift_sets(sets)
  tbl_weights <- weights(list)  # renamed so the weights() helper is not shadowed
  A <- weighting_matrix(tbl_weights, sets)
  n_obs <- nrow(list[[1]])
  # Concatenate the blocks column-wise and apply the square-root weights.
  # A is diagonal, so the elementwise power A^(1/2) equals its matrix square root.
  x_tilde <- matrix(unlist(list), n_obs) %*% A^(1/2)
  x_svd <- svd(x_tilde)
  eigen_values <- (sqrt(1 / n_obs) * x_svd$d)^2
  # Undo the column weighting to express loadings on the original variable scale.
  negative_root_A <- weighting_matrix(tbl_weights^(-1/2), sets)
  q <- t(t(x_svd$v) %*% negative_root_A)
  # NULL is documented as "keep all components" by the mfa() front end;
  # without this guard 1:NULL below would error.
  if (is.null(ncomps)) ncomps <- ncol(q)
  # Per-block partial loadings, truncated to the requested components.
  partial_loadings <- vector("list", length(sets))
  for (i in seq_along(sets)) {
    block_q <- t(subset(t(q), select = new_sets[[i]]))
    # drop = FALSE keeps a one-column result as a matrix when ncomps == 1.
    partial_loadings[[i]] <- block_q[, 1:ncomps, drop = FALSE]
  }
  # Per-block partial factor scores, scaled by the number of tables.
  F_partial <- vector("list", length(list))
  for (i in seq_along(list)) {
    scores <- length(list) * tbl_weights[i] * list[[i]] %*% partial_loadings[[i]]
    F_partial[[i]] <- scores[, 1:ncomps, drop = FALSE]
  }
  # Everything is returned while the implementation is being validated against
  # the published worked example; downstream code only needs a subset.
  list(
    weightings_matrix = A,
    simple_pca = x_svd,
    eigen_values = eigen_values,
    loadings = q,
    partial_loadings = partial_loadings,
    partial_factor_scores = F_partial
  )
}
#constructor function
# Assemble the public "mfa" result object from the raw pca_func() output.
#
# pca    -- list produced by pca_func()
# ncomps -- number of components to keep; NULL keeps all available components
#
# Returns an object of class "mfa" holding the eigenvalues, the common factor
# scores, the per-block partial factor scores, and the loadings.
make_mfa <- function(pca, ncomps) {
  # NULL is documented as "keep all components"; 1:NULL would otherwise error.
  if (is.null(ncomps)) ncomps <- ncol(pca$loadings)
  res <- list(
    eigen_values = pca$eigen_values,                    # numeric vector
    # Common factor scores F = U %*% Delta, truncated to ncomps columns.
    # drop = FALSE keeps the matrix shape even when ncomps == 1.
    common_factor_scores =
      (pca$simple_pca$u %*% diag(pca$simple_pca$d))[, 1:ncomps, drop = FALSE],
    partial_factor_scores = pca$partial_factor_scores,  # list of matrices
    loadings = pca$loadings[, 1:ncomps, drop = FALSE]
  )
  class(res) <- "mfa"
  res
}
#' Multiple Factor Analysis (MFA).
#'
#' @param data matrix or data frame containing all active variables.
#' @param sets list of vectors identifying the blocks of variables: either
#'   character vectors of column names or numeric vectors of column positions.
#' @param ncomps integer number of components/factors to extract; NULL means
#'   extract all components.
#' @param center logical, or numeric vector with one entry per active
#'   variable; numeric entries are subtracted column-wise, TRUE subtracts
#'   column means.
#' @param scale logical, or numeric vector with one entry per active
#'   variable, forwarded to the per-block preprocessing.
#' @return An object of class "mfa": vector of eigenvalues, matrix of common
#'   factor scores, list of partial factor score matrices, and the loadings.
#'   NOTE(review): with the default ncomps = NULL the downstream 1:ncomps
#'   subsetting fails -- confirm NULL handling before relying on the default.
mfa <- function(data, sets, ncomps = NULL, center = TRUE, scale = TRUE) {
# Preprocess each block, run the weighted "simple PCA", then package the
# pieces into the public result object.
tables <- data_tables(data, sets, center, scale)
pca <- pca_func(tables, sets, ncomps)
make_mfa(pca, ncomps)
}
### test code
# Smoke test against the published MFA wine-tasting example; requires network
# access to fetch the data set.
wines <- read.csv("https://raw.githubusercontent.com/ucb-stat243/stat243-fall-2016/master/problem-sets/final-project/data/wines.csv", stringsAsFactors = FALSE)
# Ten assessor blocks; column 1 of the CSV is left out of the active variables.
sets <- list(2:7, 8:13, 14:19, 20:24, 25:30, 31:35, 36:39, 40:45, 46:50, 51:54)
# Per-column centered root-sum-of-squares, so each scaled column has unit sum of squares.
scaling_vec <- apply(subset(wines, select = unlist(sets)), 2, function(x) sqrt(sum((x - mean(x))^2)))
tables <- data_tables(wines, sets, TRUE, scaling_vec)
round(tables[[1]], 2) #compare this output to (54) in the paper
wghts <- weights(tables)
wghts #compare to (61) in the paper
results <- pca_func(tables, sets, ncomps = 2)
round(results$eigen_values, 3) #compare to table 2 in the paper
round(results$partial_factor_scores[[1]], 3) #compare first 2 columns to (66) in the paper
# NOTE(review): `T` is a reassignable alias -- prefer TRUE here.
mymfa <- mfa(wines, sets, ncomps = 2, T, scaling_vec)
| /MFA-Function/final_project_mainfunc_v09.R | no_license | BeaGir/JLNXB_243 | R | false | false | 5,193 | r | #use this vector to scale the data so that the sum of squares for each column is equal to 1. This isn't written into the code but is useful for comparing to the given example.
# Per-column centered root-sum-of-squares over the active columns, so that
# after scaling each column has unit sum of squares.
# NOTE(review): `wines` and `sets` are only defined in the test section at the
# bottom of this file -- confirm intended execution order.
scaling_vec <- apply(subset(wines, select = unlist(sets)), 2, function(x) sqrt(sum((x - mean(x))^2)))
#helper function for function that separates data into tables and preprocesses
# Re-index the column sets so that the smallest referenced column becomes 1.
#
# sets -- list of integer vectors of column positions in the raw data table
#
# Returns the same list with every position shifted down by a common offset,
# preserving the relative layout of the blocks.
shift_sets <- function(sets) {
  offset <- min(unlist(sets)) - 1
  lapply(sets, function(cols) cols - offset)
}
#separate data into individual tables, preprocess and store in a list
#should this list be an object?
# Split the raw data into per-block tables and preprocess each one.
#
# data   -- data frame (or matrix) holding all active variables
# sets   -- list of column-position vectors, one entry per block
# center -- logical flag or numeric vector forwarded to base::scale()
# scale  -- either a single logical (applied to every block) or a numeric
#           vector with one entry per active variable, indexed after the sets
#           have been re-based to start at column 1
#
# Returns a list of preprocessed matrices, one per block, in the order of `sets`.
data_tables <- function(data, sets, center, scale) {
  new_sets <- shift_sets(sets)
  # Preallocate instead of growing the list inside the loop.
  tables <- vector("list", length(sets))
  for (i in seq_along(sets)) {
    block <- subset(data, select = sets[[i]])
    # A logical `scale` is forwarded as-is; a numeric vector is subset to the
    # columns belonging to this block.  is.logical() is robust where
    # `class(scale) == "logical"` would break for any multi-class object.
    block_scale <- if (is.logical(scale)) scale else scale[new_sets[[i]]]
    tables[[i]] <- scale(block, center = center, scale = block_scale)
  }
  tables
}
#determine the weights for each individual table
#should this be a method?
# Compute the MFA weight for each data table: the inverse square of the
# table's first (largest) singular value, so every table contributes a first
# principal component of unit inertia to the joint analysis.
#
# list -- list of (preprocessed) numeric data tables
#         (the name shadows base::list; kept for call compatibility)
#
# Returns a numeric vector of weights, one per table, in input order.
weights <- function(list) {
  # vapply replaces the grow-in-a-loop pattern and guarantees a numeric result,
  # and only the singular values of each table are needed here.
  vapply(list, function(tbl) {
    first_sv <- svd(tbl)$d[1]
    1 / first_sv^2
  }, numeric(1))
}
#create matrix of weightings
# Build the diagonal weighting matrix A used in the MFA "simple PCA" step.
# Each table's weight is repeated once for every column that table
# contributes, so A is (total #variables) x (total #variables).
#
# weights -- numeric vector of table weights (one per block)
# sets    -- list of column-position vectors defining the blocks
#
# Returns the diagonal matrix of per-column weights.
weighting_matrix <- function(weights, sets) {
  # rep(..., times = lengths(sets)) replaces the grow-in-a-loop pattern.
  weight_vec <- rep(weights, times = lengths(sets))
  # nrow is given explicitly: diag(x) on a length-1 numeric would otherwise
  # build an identity matrix of that size instead of a 1x1 weight matrix.
  diag(x = weight_vec, nrow = length(weight_vec))
}
#MFA as simple PCA
#should this also be a method?
# Run the MFA "simple PCA": weight each block, concatenate, take the SVD, and
# derive eigenvalues, loadings, partial loadings and partial factor scores.
#
# list   -- list of preprocessed data tables (equal row counts)
# sets   -- list of column-position vectors defining the blocks
# ncomps -- number of components to keep for the truncated results;
#           NULL keeps all components
#
# Returns a list with the weighting matrix, the raw SVD, the eigenvalues, the
# full loading matrix, and per-block partial loadings / partial factor scores.
pca_func <- function(list, sets, ncomps) {
  new_sets <- shift_sets(sets)
  tbl_weights <- weights(list)  # renamed so the weights() helper is not shadowed
  A <- weighting_matrix(tbl_weights, sets)
  n_obs <- nrow(list[[1]])
  # Concatenate the blocks column-wise and apply the square-root weights.
  # A is diagonal, so the elementwise power A^(1/2) equals its matrix square root.
  x_tilde <- matrix(unlist(list), n_obs) %*% A^(1/2)
  x_svd <- svd(x_tilde)
  eigen_values <- (sqrt(1 / n_obs) * x_svd$d)^2
  # Undo the column weighting to express loadings on the original variable scale.
  negative_root_A <- weighting_matrix(tbl_weights^(-1/2), sets)
  q <- t(t(x_svd$v) %*% negative_root_A)
  # NULL is documented as "keep all components" by the mfa() front end;
  # without this guard 1:NULL below would error.
  if (is.null(ncomps)) ncomps <- ncol(q)
  # Per-block partial loadings, truncated to the requested components.
  partial_loadings <- vector("list", length(sets))
  for (i in seq_along(sets)) {
    block_q <- t(subset(t(q), select = new_sets[[i]]))
    # drop = FALSE keeps a one-column result as a matrix when ncomps == 1.
    partial_loadings[[i]] <- block_q[, 1:ncomps, drop = FALSE]
  }
  # Per-block partial factor scores, scaled by the number of tables.
  F_partial <- vector("list", length(list))
  for (i in seq_along(list)) {
    scores <- length(list) * tbl_weights[i] * list[[i]] %*% partial_loadings[[i]]
    F_partial[[i]] <- scores[, 1:ncomps, drop = FALSE]
  }
  # Everything is returned while the implementation is being validated against
  # the published worked example; downstream code only needs a subset.
  list(
    weightings_matrix = A,
    simple_pca = x_svd,
    eigen_values = eigen_values,
    loadings = q,
    partial_loadings = partial_loadings,
    partial_factor_scores = F_partial
  )
}
#constructor function
# Assemble the public "mfa" result object from the raw pca_func() output.
#
# pca    -- list produced by pca_func()
# ncomps -- number of components to keep; NULL keeps all available components
#
# Returns an object of class "mfa" holding the eigenvalues, the common factor
# scores, the per-block partial factor scores, and the loadings.
make_mfa <- function(pca, ncomps) {
  # NULL is documented as "keep all components"; 1:NULL would otherwise error.
  if (is.null(ncomps)) ncomps <- ncol(pca$loadings)
  res <- list(
    eigen_values = pca$eigen_values,                    # numeric vector
    # Common factor scores F = U %*% Delta, truncated to ncomps columns.
    # drop = FALSE keeps the matrix shape even when ncomps == 1.
    common_factor_scores =
      (pca$simple_pca$u %*% diag(pca$simple_pca$d))[, 1:ncomps, drop = FALSE],
    partial_factor_scores = pca$partial_factor_scores,  # list of matrices
    loadings = pca$loadings[, 1:ncomps, drop = FALSE]
  )
  class(res) <- "mfa"
  res
}
#' Multiple Factor Analysis (MFA).
#'
#' @param data matrix or data frame containing all active variables.
#' @param sets list of vectors identifying the blocks of variables: either
#'   character vectors of column names or numeric vectors of column positions.
#' @param ncomps integer number of components/factors to extract; NULL means
#'   extract all components.
#' @param center logical, or numeric vector with one entry per active
#'   variable; numeric entries are subtracted column-wise, TRUE subtracts
#'   column means.
#' @param scale logical, or numeric vector with one entry per active
#'   variable, forwarded to the per-block preprocessing.
#' @return An object of class "mfa": vector of eigenvalues, matrix of common
#'   factor scores, list of partial factor score matrices, and the loadings.
#'   NOTE(review): with the default ncomps = NULL the downstream 1:ncomps
#'   subsetting fails -- confirm NULL handling before relying on the default.
mfa <- function(data, sets, ncomps = NULL, center = TRUE, scale = TRUE) {
# Preprocess each block, run the weighted "simple PCA", then package the
# pieces into the public result object.
tables <- data_tables(data, sets, center, scale)
pca <- pca_func(tables, sets, ncomps)
make_mfa(pca, ncomps)
}
### test code
# Smoke test against the published MFA wine-tasting example; requires network
# access to fetch the data set.
wines <- read.csv("https://raw.githubusercontent.com/ucb-stat243/stat243-fall-2016/master/problem-sets/final-project/data/wines.csv", stringsAsFactors = FALSE)
# Ten assessor blocks; column 1 of the CSV is left out of the active variables.
sets <- list(2:7, 8:13, 14:19, 20:24, 25:30, 31:35, 36:39, 40:45, 46:50, 51:54)
# Per-column centered root-sum-of-squares, so each scaled column has unit sum of squares.
scaling_vec <- apply(subset(wines, select = unlist(sets)), 2, function(x) sqrt(sum((x - mean(x))^2)))
tables <- data_tables(wines, sets, TRUE, scaling_vec)
round(tables[[1]], 2) #compare this output to (54) in the paper
wghts <- weights(tables)
wghts #compare to (61) in the paper
results <- pca_func(tables, sets, ncomps = 2)
round(results$eigen_values, 3) #compare to table 2 in the paper
round(results$partial_factor_scores[[1]], 3) #compare first 2 columns to (66) in the paper
# NOTE(review): `T` is a reassignable alias -- prefer TRUE here.
mymfa <- mfa(wines, sets, ncomps = 2, T, scaling_vec)
|
####### Figure 2: Setsize Effect on Accuracy #######
# Reanalysis script: assembles set-size accuracy benchmarks from several
# published data sets and draws them as a six-panel figure.
# NOTE(review): rm(list=ls()) and setwd() inside a script are side effects on
# the caller's session; confirm this script is only ever run standalone.
rm(list=ls())
graphics.off()
library("Hmisc")
library("readxl")
library("stats")
setwd(dirname(rstudioapi::getSourceEditorContext()$path)) # sets the directory of location of this script as the current directory
# Project helpers from the sibling functions/ folder: CI plotting and a
# within-subject correction (presumably Bakeman-style -- see functions/Bakeman.R).
source(paste(dirname(getwd()), "/functions/plot.confint.R", sep=""))
source(paste(dirname(getwd()), "/functions/lineplot.ci.R", sep=""))
source(paste(dirname(getwd()), "/functions/Confint.R", sep=""))
source(paste(dirname(getwd()), "/functions/Bakeman.R", sep=""))
# Shared plotting symbols and fill colors used by every panel below.
ptypes <- c(21:25, 21:25)
bgcolors <- c("black", "grey", "white", "grey80", "grey20", "black", "white")
# Load data for simple and complex span
d = read_excel("Unsworth.Engle.Listlength.xlsx") # data from Unsworth & Engle 2006
# Columns are picked by substring match on their names -- presumably word span,
# letter span, operation span and reading span; verify the patterns do not
# also match unrelated columns in the sheet.
wordspan <- d[,which(grepl("wor", colnames(d)))]
letterspan <- d[,which(grepl("let", colnames(d)))]
opspan <- d[,which(grepl("op", colnames(d)))]
rspan <- d[,which(grepl("rsp", colnames(d)))]
simplespan <- (wordspan + letterspan[,1:6])/2 #average the 2 simple spans for each subject and set size
complexspan <- (opspan + rspan)/2 #same for complex span
# Within-subject correction, then means with confidence intervals per set size.
simple <- Confint(Bakeman(simplespan))
complex <- Confint(Bakeman(complexspan))
# Load data for running memory span
d = read_excel("Bunting.Cowan.Running.xls", sheet=3) # data from Bunting & Cowan, Exp. 1
# Accuracy columns run from the set-size-7 block down to the set-size-1 block,
# separately for the fast (f*) and slow (s*) presentation conditions.
RSPCfast <- d[which(names(d)=="f7sp7_ac"):which(names(d)=="f1sp1_ac")]
RSPCslow <- d[which(names(d)=="s7sp7_ac"):which(names(d)=="s1sp1_ac")]
runningfast <- matrix(0,dim(RSPCfast)[1],7)
runningslow <- matrix(0,dim(RSPCslow)[1],7)
# Walk through the column blocks: set size 7 contributes 7 columns, size 6
# contributes 6, etc.; average within each block to one accuracy per set size.
pointer <- 1
for (setsize in 7:2) {
allsp <- pointer:(pointer+setsize-1)
runningfast[,setsize] <- rowMeans(RSPCfast[,allsp])
runningslow[,setsize] <- rowMeans(RSPCslow[,allsp])
pointer <- pointer+setsize
}
# Set size 1 is a single column, so no rowMeans is needed.
runningfast[,1] <- as.matrix(RSPCfast[,pointer])
runningslow[,1] <- as.matrix(RSPCslow[,pointer])
runningfast <- Bakeman(runningfast)
runningslow <- Bakeman(runningslow)
#Load data for Memory Updating, Oberauer & Kliegl (2006)
# Raw whitespace-delimited files; supplying col.names makes read.table create
# exactly this many columns (short rows are NA-filled via fill=T).
colnames1 <- c("id", "setsize", "trial", "pt0", "pt1", "ptcat", "crit",
"corrval1", "resp1", "correct1", "rt1",
"corrval2", "resp2", "correct2", "rt2",
"corrval3", "resp3", "correct3", "rt3",
"corrval4", "resp4", "correct4", "rt4")
mutaf1 <- read.table("Oberauer.Kliegl.MU1.DAT", header=F, fill=T, col.names=colnames1) #with col.names given, read.table reads in as many columsn as there are names
colnames2 <- c("id", "setsize", "trial", "pt0", "pt1", "ptcat", "crit",
"corrval1", "resp1", "correct1", "rt1",
"corrval2", "resp2", "correct2", "rt2",
"corrval3", "resp3", "correct3", "rt3",
"corrval4", "resp4", "correct4", "rt4",
"corrval5", "resp5", "correct5", "rt5",
"corrval6", "resp6", "correct6", "rt6")
mutaf2 <- read.table("Oberauer.Kliegl.MU2.dat", header=F, fill=T, col.names=colnames2) #with col.names given, read.table reads in as many columsn as there are names
# Tag each data set with its experiment number.
mutaf1$exp = 1
mutaf2$exp = 2
# NOTE(review): only mutaf1 is filtered to setsize > 0 -- confirm mutaf2
# needs no such filter.
mutaf1 <- mutaf1[mutaf1$setsize>0,]
# Column indices of the per-position correctness flags and of the set size.
pcidx1 <- which(grepl("correct", colnames(mutaf1)))
pcidx2 <- which(grepl("correct", colnames(mutaf2)))
ssidx <- which(colnames(mutaf1)=="setsize")
# Mean accuracy for one trial row: element 1 holds the set size n, elements
# 2..(n+1) hold the per-position correctness scores to be averaged.
# Works on plain vectors and on single data-frame rows (both coerce cleanly
# through as.numeric()).
computePC <- function(x) {
  n_items <- as.numeric(x[1])
  mean(as.numeric(x[2:(n_items + 1)]))
}
# Per-trial proportion correct, averaged over the first `setsize` positions.
# NOTE(review): the commented-out apply() below used MARGIN=2 (columns);
# MARGIN=1 (rows) is what would match this row-wise loop.
mutaf1$PC <- NULL
for (j in 1:dim(mutaf1)[1]) {
mutaf1[j,"PC"] <- computePC(mutaf1[j,c(ssidx, pcidx1)])
}
#mutaf1$pc <- apply(mutaf1[,c(ssidx, pcidx1)], MARGIN=2, FUN=computePC) # should do the same in theory, but does not work
mutaf2$PC <- NULL
for (j in 1:dim(mutaf2)[1]) {
mutaf2[j,"PC"] <- computePC(mutaf2[j,c(ssidx, pcidx2)])
}
# Keep only the identifying columns plus the computed accuracy, and stack.
mt1 <- mutaf1[, which(colnames(mutaf1) %in% c("id", "exp", "setsize", "pt0", "PC"))]
mt2 <- mutaf2[, which(colnames(mutaf2) %in% c("id", "exp", "setsize", "pt0", "PC"))]
mutaf <- rbind(mt1, mt2)
# Subset used for the figure: id < 30 and pt0 > 5999 -- presumably the young
# group with long presentation times; confirm the coding of these variables.
mt1.y.long <- subset(mt1, id < 30 & pt0 > 5999)
mt2.y.long <- subset(mt2, id < 30 & pt0 > 5999)
# Per-subject mean accuracy for each set size, packed into the
# (setsize x 1 x subject) arrays expected by lineplot.ci().
nsubj <- length(unique(mt1.y.long$id))
MUarray1 <- array(NA,dim=c(4,1,nsubj))
for (ss in 1:4) {
d <- subset(mt1.y.long, setsize==ss)
aggdat <- aggregate(PC ~ id, data=d, FUN=mean)
MUarray1[ss,1,] <- aggdat$PC
}
nsubj <- length(unique(mt2.y.long$id))
MUarray2 <- array(NA,dim=c(3,1,nsubj))
for (ss in 4:6) {
d <- subset(mt2.y.long, setsize==ss)
aggdat <- aggregate(PC ~ id, data=d, FUN=mean)
MUarray2[ss-3,1,] <- aggdat$PC
}
#Item Recognition: McElree 1989 Exp 2
# Error proportions per serial position for set sizes 3-6, averaged to one
# error rate per set size.
ss3 <- c(.09, .06, .05)
ss4 <- c(.13, .15, .08, .06)
ss5 <- c(.23, .26, .14, .08, .06)
ss6 <- c(.35, .30, .28, .13, .10, .05)
RecPE <- c(mean(ss3), mean(ss4), mean(ss5), mean(ss6))
#N-back: Jonides et al 1997
NbackPE <- c(0.03, 0.05, 0.065, 0.115)
#Nback: Verhaeghen & Basak 2005 young
NbackVerhaeghenY <- c(0.97, 0.96, 0.945, 0.92, 0.86)
# Change detection: Adam et al (2015)
CD <- read.table("Adam.ChangeDet.dat", header=F)
names(CD) <- c("id", "setsize", "change", "correct")
# Mean accuracy per subject and set size, then one column per set size.
CDagg <- aggregate(correct ~ id+setsize, data=CD, FUN=mean)
library("tidyr")
CDwide <- CDagg %>% spread(setsize, correct) # takes variable correct and writes into separate variables for each level of setsize
############# Start Plotting #####################
# NOTE(review): x11() fails on headless systems; consider a pdf()/png()
# fallback when this script is run non-interactively.
x11()
# Six panels (A-F), filled row by row.
layout(matrix(1:6, 3, 2, byrow=T))
# Panel A: serial recall -- simple vs complex span, means with error bars.
errbar(2:7, y=simple[1,], yplus=simple[2,], yminus=simple[3,], type="b", pch=ptypes[1],
bg=bgcolors[1], errbar.col=bgcolors[1], xlim=c(1,7), ylim=c(0,1), xlab="Set Size", ylab="P(correct)")
par(new=T)
errbar(2:5, y=complex[1,], yplus=complex[2,], yminus=complex[3,], type="b", pch=ptypes[2],
bg=bgcolors[2], errbar.col=bgcolors[2], xlim=c(1,7), ylim=c(0,1), xlab="Set Size", ylab="P(correct)")
legend(1, 0, c("Simple Span", "Complex Span"), pch=ptypes, pt.bg=bgcolors, yjust=0)
title("Serial Recall")
mtext("A", side=3, adj=0, line=1)
# Panel B: running span, fast vs slow presentation.
plot.confint(runningfast, 1:7, 1:7, type="b", pch=ptypes[1], bg=bgcolors[1], xlim=c(1,7), ylim=c(0,1),
xlab="Set Size", ylab="P(correct)")
par(new=T)
plot.confint(runningslow, 1:7, 1:7, type="b", pch=ptypes[2], bg=bgcolors[2], xlim=c(1,7), ylim=c(0,1),
xlab="Set Size", ylab="P(correct)")
title("Running Span")
legend(1, 0, c("Fast", "Slow"), pch=ptypes, pt.bg=bgcolors, yjust=0)
mtext("B", side=3, adj=0, line=1)
# Panel C: item recognition; hit rate = 1 - mean error proportion.
plot(3:6, 1-RecPE, type="b", pch=ptypes[1], bg=bgcolors[1], xlim=c(1,6), ylim=c(0.5,1),
xlab="Set Size", ylab="P(hits)")
title("Item Recognition (Words)")
mtext("C", side=3, adj=0, line=1)
# Panel D: n-back accuracy from two studies.
plot(0:3, 1-NbackPE, type="b", pch=ptypes[1], bg=bgcolors[1], xlim=c(0,5), ylim=c(0.5,1),
xlab="N", ylab="P(correct)")
par(new=T)
plot(1:5, NbackVerhaeghenY, type="b", pch=ptypes[2], bg=bgcolors[2], xlim=c(0,5), ylim=c(0.5,1),
xlab="N", ylab="P(correct)")
legend(0, 0.5, c("Standard", "Columns"), pch=ptypes, pt.bg=bgcolors, yjust=0)
title("N-back")
mtext("D", side=3, adj=0, line=1)
# Panel E: memory updating, both experiments (set sizes 1-4 and 4-6).
lineplot.ci(1:4, data=MUarray1, off=0, xlim=c(1,6), ylim=c(0.5,1),
pt=ptypes, ptcol=bgcolors[1], xlab="Set Size", ylab="P(correct)")
par(new=T)
lineplot.ci(4:6, data=MUarray2, off=0.05, xlim=c(1,6), ylim=c(0.5,1),
pt=ptypes, ptcol=bgcolors[2], xlab="Set Size", ylab="P(correct)")
legend(1,0.5,c("Study 1", "Study 2"), pch=ptypes, pt.bg=bgcolors, yjust=0)
title("Memory Updating")
mtext("E", side=3, adj=0, line=1)
# Panel F: change detection accuracy for set sizes 2-6.
plot.confint(CDwide, 2:6, 2:6, off=0, xlim=c(1,6.5), ylim=c(0.5,1), xlab="Set Size", ylab="P(correct)")
title("Change Detection")
mtext("F", side=3, adj=0, line=1)
| /BenchmarksWM.Data/BM1.1.SetsizeAccuracy/BM1.1.SetsizeAccuracy.R | no_license | ajwills72/BenchmarksWM | R | false | false | 7,457 | r | ####### Figure 2: Setsize Effect on Accuracy #######
# Reanalysis script: assembles set-size accuracy benchmarks from several
# published data sets and draws them as a six-panel figure.
# NOTE(review): rm(list=ls()) and setwd() inside a script are side effects on
# the caller's session; confirm this script is only ever run standalone.
rm(list=ls())
graphics.off()
library("Hmisc")
library("readxl")
library("stats")
setwd(dirname(rstudioapi::getSourceEditorContext()$path)) # sets the directory of location of this script as the current directory
# Project helpers from the sibling functions/ folder: CI plotting and a
# within-subject correction (presumably Bakeman-style -- see functions/Bakeman.R).
source(paste(dirname(getwd()), "/functions/plot.confint.R", sep=""))
source(paste(dirname(getwd()), "/functions/lineplot.ci.R", sep=""))
source(paste(dirname(getwd()), "/functions/Confint.R", sep=""))
source(paste(dirname(getwd()), "/functions/Bakeman.R", sep=""))
# Shared plotting symbols and fill colors used by every panel below.
ptypes <- c(21:25, 21:25)
bgcolors <- c("black", "grey", "white", "grey80", "grey20", "black", "white")
# Load data for simple and complex span
d = read_excel("Unsworth.Engle.Listlength.xlsx") # data from Unsworth & Engle 2006
# Columns are picked by substring match on their names -- presumably word span,
# letter span, operation span and reading span; verify the patterns do not
# also match unrelated columns in the sheet.
wordspan <- d[,which(grepl("wor", colnames(d)))]
letterspan <- d[,which(grepl("let", colnames(d)))]
opspan <- d[,which(grepl("op", colnames(d)))]
rspan <- d[,which(grepl("rsp", colnames(d)))]
simplespan <- (wordspan + letterspan[,1:6])/2 #average the 2 simple spans for each subject and set size
complexspan <- (opspan + rspan)/2 #same for complex span
# Within-subject correction, then means with confidence intervals per set size.
simple <- Confint(Bakeman(simplespan))
complex <- Confint(Bakeman(complexspan))
# Load data for running memory span
d = read_excel("Bunting.Cowan.Running.xls", sheet=3) # data from Bunting & Cowan, Exp. 1
# Accuracy columns run from the set-size-7 block down to the set-size-1 block,
# separately for the fast (f*) and slow (s*) presentation conditions.
RSPCfast <- d[which(names(d)=="f7sp7_ac"):which(names(d)=="f1sp1_ac")]
RSPCslow <- d[which(names(d)=="s7sp7_ac"):which(names(d)=="s1sp1_ac")]
runningfast <- matrix(0,dim(RSPCfast)[1],7)
runningslow <- matrix(0,dim(RSPCslow)[1],7)
# Walk through the column blocks: set size 7 contributes 7 columns, size 6
# contributes 6, etc.; average within each block to one accuracy per set size.
pointer <- 1
for (setsize in 7:2) {
allsp <- pointer:(pointer+setsize-1)
runningfast[,setsize] <- rowMeans(RSPCfast[,allsp])
runningslow[,setsize] <- rowMeans(RSPCslow[,allsp])
pointer <- pointer+setsize
}
# Set size 1 is a single column, so no rowMeans is needed.
runningfast[,1] <- as.matrix(RSPCfast[,pointer])
runningslow[,1] <- as.matrix(RSPCslow[,pointer])
runningfast <- Bakeman(runningfast)
runningslow <- Bakeman(runningslow)
#Load data for Memory Updating, Oberauer & Kliegl (2006)
# Raw whitespace-delimited files; supplying col.names makes read.table create
# exactly this many columns (short rows are NA-filled via fill=T).
colnames1 <- c("id", "setsize", "trial", "pt0", "pt1", "ptcat", "crit",
"corrval1", "resp1", "correct1", "rt1",
"corrval2", "resp2", "correct2", "rt2",
"corrval3", "resp3", "correct3", "rt3",
"corrval4", "resp4", "correct4", "rt4")
mutaf1 <- read.table("Oberauer.Kliegl.MU1.DAT", header=F, fill=T, col.names=colnames1) #with col.names given, read.table reads in as many columsn as there are names
colnames2 <- c("id", "setsize", "trial", "pt0", "pt1", "ptcat", "crit",
"corrval1", "resp1", "correct1", "rt1",
"corrval2", "resp2", "correct2", "rt2",
"corrval3", "resp3", "correct3", "rt3",
"corrval4", "resp4", "correct4", "rt4",
"corrval5", "resp5", "correct5", "rt5",
"corrval6", "resp6", "correct6", "rt6")
mutaf2 <- read.table("Oberauer.Kliegl.MU2.dat", header=F, fill=T, col.names=colnames2) #with col.names given, read.table reads in as many columsn as there are names
# Tag each data set with its experiment number.
mutaf1$exp = 1
mutaf2$exp = 2
# NOTE(review): only mutaf1 is filtered to setsize > 0 -- confirm mutaf2
# needs no such filter.
mutaf1 <- mutaf1[mutaf1$setsize>0,]
# Column indices of the per-position correctness flags and of the set size.
pcidx1 <- which(grepl("correct", colnames(mutaf1)))
pcidx2 <- which(grepl("correct", colnames(mutaf2)))
ssidx <- which(colnames(mutaf1)=="setsize")
# Mean accuracy for one trial row: element 1 holds the set size n, elements
# 2..(n+1) hold the per-position correctness scores to be averaged.
# Works on plain vectors and on single data-frame rows (both coerce cleanly
# through as.numeric()).
computePC <- function(x) {
  n_items <- as.numeric(x[1])
  mean(as.numeric(x[2:(n_items + 1)]))
}
# Per-trial proportion correct, averaged over the first `setsize` positions.
# NOTE(review): the commented-out apply() below used MARGIN=2 (columns);
# MARGIN=1 (rows) is what would match this row-wise loop.
mutaf1$PC <- NULL
for (j in 1:dim(mutaf1)[1]) {
mutaf1[j,"PC"] <- computePC(mutaf1[j,c(ssidx, pcidx1)])
}
#mutaf1$pc <- apply(mutaf1[,c(ssidx, pcidx1)], MARGIN=2, FUN=computePC) # should do the same in theory, but does not work
mutaf2$PC <- NULL
for (j in 1:dim(mutaf2)[1]) {
mutaf2[j,"PC"] <- computePC(mutaf2[j,c(ssidx, pcidx2)])
}
# Keep only the identifying columns plus the computed accuracy, and stack.
mt1 <- mutaf1[, which(colnames(mutaf1) %in% c("id", "exp", "setsize", "pt0", "PC"))]
mt2 <- mutaf2[, which(colnames(mutaf2) %in% c("id", "exp", "setsize", "pt0", "PC"))]
mutaf <- rbind(mt1, mt2)
# Subset used for the figure: id < 30 and pt0 > 5999 -- presumably the young
# group with long presentation times; confirm the coding of these variables.
mt1.y.long <- subset(mt1, id < 30 & pt0 > 5999)
mt2.y.long <- subset(mt2, id < 30 & pt0 > 5999)
# Per-subject mean accuracy for each set size, packed into the
# (setsize x 1 x subject) arrays expected by lineplot.ci().
nsubj <- length(unique(mt1.y.long$id))
MUarray1 <- array(NA,dim=c(4,1,nsubj))
for (ss in 1:4) {
d <- subset(mt1.y.long, setsize==ss)
aggdat <- aggregate(PC ~ id, data=d, FUN=mean)
MUarray1[ss,1,] <- aggdat$PC
}
nsubj <- length(unique(mt2.y.long$id))
MUarray2 <- array(NA,dim=c(3,1,nsubj))
for (ss in 4:6) {
d <- subset(mt2.y.long, setsize==ss)
aggdat <- aggregate(PC ~ id, data=d, FUN=mean)
MUarray2[ss-3,1,] <- aggdat$PC
}
#Item Recognition: McElree 1989 Exp 2
# Error proportions per serial position for set sizes 3-6, averaged to one
# error rate per set size.
ss3 <- c(.09, .06, .05)
ss4 <- c(.13, .15, .08, .06)
ss5 <- c(.23, .26, .14, .08, .06)
ss6 <- c(.35, .30, .28, .13, .10, .05)
RecPE <- c(mean(ss3), mean(ss4), mean(ss5), mean(ss6))
#N-back: Jonides et al 1997
NbackPE <- c(0.03, 0.05, 0.065, 0.115)
#Nback: Verhaeghen & Basak 2005 young
NbackVerhaeghenY <- c(0.97, 0.96, 0.945, 0.92, 0.86)
# Change detection: Adam et al (2015)
CD <- read.table("Adam.ChangeDet.dat", header=F)
names(CD) <- c("id", "setsize", "change", "correct")
# Mean accuracy per subject and set size, then one column per set size.
CDagg <- aggregate(correct ~ id+setsize, data=CD, FUN=mean)
library("tidyr")
CDwide <- CDagg %>% spread(setsize, correct) # takes variable correct and writes into separate variables for each level of setsize
############# Start Plotting #####################
# NOTE(review): x11() fails on headless systems; consider a pdf()/png()
# fallback when this script is run non-interactively.
x11()
# Six panels (A-F), filled row by row.
layout(matrix(1:6, 3, 2, byrow=T))
# Panel A: serial recall -- simple vs complex span, means with error bars.
errbar(2:7, y=simple[1,], yplus=simple[2,], yminus=simple[3,], type="b", pch=ptypes[1],
bg=bgcolors[1], errbar.col=bgcolors[1], xlim=c(1,7), ylim=c(0,1), xlab="Set Size", ylab="P(correct)")
par(new=T)
errbar(2:5, y=complex[1,], yplus=complex[2,], yminus=complex[3,], type="b", pch=ptypes[2],
bg=bgcolors[2], errbar.col=bgcolors[2], xlim=c(1,7), ylim=c(0,1), xlab="Set Size", ylab="P(correct)")
legend(1, 0, c("Simple Span", "Complex Span"), pch=ptypes, pt.bg=bgcolors, yjust=0)
title("Serial Recall")
mtext("A", side=3, adj=0, line=1)
# Panel B: running span, fast vs slow presentation.
plot.confint(runningfast, 1:7, 1:7, type="b", pch=ptypes[1], bg=bgcolors[1], xlim=c(1,7), ylim=c(0,1),
xlab="Set Size", ylab="P(correct)")
par(new=T)
plot.confint(runningslow, 1:7, 1:7, type="b", pch=ptypes[2], bg=bgcolors[2], xlim=c(1,7), ylim=c(0,1),
xlab="Set Size", ylab="P(correct)")
title("Running Span")
legend(1, 0, c("Fast", "Slow"), pch=ptypes, pt.bg=bgcolors, yjust=0)
mtext("B", side=3, adj=0, line=1)
# Panel C: item recognition; hit rate = 1 - mean error proportion.
plot(3:6, 1-RecPE, type="b", pch=ptypes[1], bg=bgcolors[1], xlim=c(1,6), ylim=c(0.5,1),
xlab="Set Size", ylab="P(hits)")
title("Item Recognition (Words)")
mtext("C", side=3, adj=0, line=1)
# Panel D: n-back accuracy from two studies.
plot(0:3, 1-NbackPE, type="b", pch=ptypes[1], bg=bgcolors[1], xlim=c(0,5), ylim=c(0.5,1),
xlab="N", ylab="P(correct)")
par(new=T)
plot(1:5, NbackVerhaeghenY, type="b", pch=ptypes[2], bg=bgcolors[2], xlim=c(0,5), ylim=c(0.5,1),
xlab="N", ylab="P(correct)")
legend(0, 0.5, c("Standard", "Columns"), pch=ptypes, pt.bg=bgcolors, yjust=0)
title("N-back")
mtext("D", side=3, adj=0, line=1)
# Panel E: memory updating, both experiments (set sizes 1-4 and 4-6).
lineplot.ci(1:4, data=MUarray1, off=0, xlim=c(1,6), ylim=c(0.5,1),
pt=ptypes, ptcol=bgcolors[1], xlab="Set Size", ylab="P(correct)")
par(new=T)
lineplot.ci(4:6, data=MUarray2, off=0.05, xlim=c(1,6), ylim=c(0.5,1),
pt=ptypes, ptcol=bgcolors[2], xlab="Set Size", ylab="P(correct)")
legend(1,0.5,c("Study 1", "Study 2"), pch=ptypes, pt.bg=bgcolors, yjust=0)
title("Memory Updating")
mtext("E", side=3, adj=0, line=1)
# Panel F: change detection accuracy for set sizes 2-6.
plot.confint(CDwide, 2:6, 2:6, off=0, xlim=c(1,6.5), ylim=c(0.5,1), xlab="Set Size", ylab="P(correct)")
title("Change Detection")
mtext("F", side=3, adj=0, line=1)
|
#' Cobweb plot of the cubic map x_{t+1} = r * x_t^2 * (1 - x_t).
#'
#' Draws two panels: the trajectory x_t over time (top) and the map itself
#' with the cobweb construction starting from x_o (bottom).
#'
#' @param r        map parameter.
#' @param x_o      initial condition in [0, 1].
#' @param N        number of iterations for the trace.
#' @param burn_in  number of leading iterations to omit from the trace plot.
#' @param ...      unused; kept for call compatibility.
#' @return Invisibly NULL; called for its plotting side effects.
q_map <- function(r = 5, x_o = runif(1, 0, 1), N = 100, burn_in = 0, ...) {
  # The iterated map, defined once instead of re-typing the formula everywhere.
  f <- function(x) r * x^2 * (1 - x)
  # Restore the caller's graphics settings when we are done.
  old_par <- par(mfrow = c(2, 1), mar = c(4, 4, 1, 2), lwd = 2)
  on.exit(par(old_par), add = TRUE)
  ############# Trace #############
  x <- numeric(N)
  x[1] <- x_o
  # seq_len(N)[-1] is empty for N == 1, avoiding the backwards 2:N trap.
  for (i in seq_len(N)[-1]) {
    x[i] <- f(x[i - 1])
  }
  plot(x[(burn_in + 1):N], type = 'l', xlab = 't', ylab = 'x', ylim = c(0, 1))
  ########## Quadratic Map ########
  # The map is vectorized, so no loop is needed to evaluate it on a grid.
  grid_x <- seq(from = 0, to = 1, length.out = 100)
  plot(grid_x, f(grid_x), type = 'l',
       xlab = expression(x[t]), ylab = expression(x[t + 1]))
  abline(0, 1)  # identity line: fixed points lie on it
  # Cobweb construction: alternate vertical steps to the curve and horizontal
  # steps to the identity line.
  start <- x_o
  vert <- FALSE
  lines(x = c(start, start), y = c(0, f(start)))
  for (i in seq_len(2 * N)) {
    if (vert) {
      lines(x = c(start, start), y = c(start, f(start)))
      vert <- FALSE
    } else {
      lines(x = c(start, f(start)), y = c(f(start), f(start)))
      vert <- TRUE
      start <- f(start)
    }
  }
  invisible(NULL)
}
# Example: draw the cobweb for r = 6.4 from a fixed starting value.
q_map(r=6.4,x_o=0.80001)
| /Lesson 1/cobweb.R | no_license | itaguas/Modelizaci-n | R | false | false | 1,101 | r | q_map<-function(r=5,x_o=runif(1,0,1),N=100,burn_in=0,...)
{
par(mfrow=c(2,1),mar=c(4,4,1,2),lwd=2)
############# Trace #############
x<-array(dim=N)
x[1]<-x_o
for(i in 2:N)
x[i]<-r*x[i-1]**2*(1-x[i-1])
plot(x[(burn_in+1):N],type='l',xlab='t',ylab='x', ylim=c(0,1))
#################################
########## Quadradic Map ########
x<-seq(from=0,to=1,length.out=100)
x_np1<-array(dim=100)
for(i in 1:length(x))
x_np1[i]<-r*x[i]**2*(1-x[i])
plot(x,x_np1,type='l',xlab=expression(x[t]),ylab=expression(x[t+1]))
abline(0,1)
start=x_o
vert=FALSE
lines(x=c(start,start),y=c(0,r*start**2*(1-start)) )
for(i in 1:(2*N))
{
if(vert)
{
lines(x=c(start,start),y=c(start,r*start**2*(1-start)) )
vert=FALSE
}
else
{
lines(x=c(start,
r*start**2*(1-start)),
y=c(r*start**2*(1-start),
r*start**2*(1-start)) )
vert=TRUE
start=r*start**2*(1-start)
}
}
#################################
}
# Example: draw the cobweb for r = 6.4 from a fixed starting value.
q_map(r=6.4,x_o=0.80001)
|
# One task of a cluster array job: extract allele-specific read counts for the
# i-th sample.  The index is hard-coded per submitted copy of this script
# (this file is the i = 548 copy; the commented-out loop below shows the
# original serial version).
i = 548
library(asSeq, lib="/nas02/home/w/e/weisun/R/Rlibs/")
# -------------------------------------------------------------------------
# read in the list of the SNP to be excluded
# NOTE(review): despite this header, the code below *uses* the heterozygous-SNP
# list to extract allele-specific reads -- confirm the wording.
# -------------------------------------------------------------------------
setwd("/lustre/scr/w/e/weisun/TCGA/hetSNP_EA/")
# One hetSNP_<sample>.txt file per sample; sample names are recovered by
# stripping the prefix and the .txt suffix from the file names.
files = list.files(path = ".", pattern="hetSNP_")
sams = gsub("hetSNP_", "", files)
sams = gsub(".txt", "", sams, fixed=TRUE)
#for(i in 1:length(files)){
f1 = files[i]
sam1 = sams[i]
# Progress marker for the job log.
cat("\n", sam1, date(), "\n")
# Input BAM and output prefix for this sample.
input = sprintf("../bam/%s_sorted_by_name_uniq_filtered.bam", sam1)
outputTag = sprintf("../bam/%s_asCounts_hetSNP_EA", sam1)
snpList = f1
if(! file.exists(f1)){
stop("snpList file does not exist")
}
extractAsReads(input, snpList, outputTag)
#}
| /data_preparation/R_batch3/_step2/step2_filter_asCounts.547.R | no_license | jasa-acs/Mapping-Tumor-Specific-Expression-QTLs-in-Impure-Tumor-Samples | R | false | false | 809 | r | i = 548
# One task of a cluster array job: extract allele-specific read counts for the
# i-th sample.  `i` is hard-coded on the preceding line of this file; the
# commented-out loop below shows the original serial version.
library(asSeq, lib="/nas02/home/w/e/weisun/R/Rlibs/")
# -------------------------------------------------------------------------
# read in the list of the SNP to be excluded
# NOTE(review): despite this header, the code below *uses* the heterozygous-SNP
# list to extract allele-specific reads -- confirm the wording.
# -------------------------------------------------------------------------
setwd("/lustre/scr/w/e/weisun/TCGA/hetSNP_EA/")
# One hetSNP_<sample>.txt file per sample; sample names are recovered by
# stripping the prefix and the .txt suffix from the file names.
files = list.files(path = ".", pattern="hetSNP_")
sams = gsub("hetSNP_", "", files)
sams = gsub(".txt", "", sams, fixed=TRUE)
#for(i in 1:length(files)){
f1 = files[i]
sam1 = sams[i]
# Progress marker for the job log.
cat("\n", sam1, date(), "\n")
# Input BAM and output prefix for this sample.
input = sprintf("../bam/%s_sorted_by_name_uniq_filtered.bam", sam1)
outputTag = sprintf("../bam/%s_asCounts_hetSNP_EA", sam1)
snpList = f1
if(! file.exists(f1)){
stop("snpList file does not exist")
}
extractAsReads(input, snpList, outputTag)
#}
|
library(dplyr)
library(tibble)
library(ggplot2)
# FIX: data_grid(), seq_range(), add_predictions() and add_residuals() used
# below come from modelr, which was previously only attached much further
# down this file; attach it before first use.
library(modelr)
# Install hexbin (required by geom_hex) only when it is missing, rather than
# unconditionally re-installing on every run of the script.
if (!requireNamespace("hexbin", quietly = TRUE)) install.packages("hexbin")
diamonds
## A subset of data, log transform for linear relationship
diamonds_2 <- diamonds %>%
  filter(carat < 2.5) %>%
  mutate(lprice = log2(price), lcarat = log2(carat))
diamonds_2 %>% ggplot(aes(lcarat, lprice)) +
  geom_hex(bins = 50)
## implement the intuition as a model => lprice ~ lcarat, remove the obvious confounding
mod_diamond <- lm(lprice ~ lcarat, data = diamonds_2)
# Predictions on an evenly spaced carat grid, mapped back to the price scale.
grid <- diamonds_2 %>%
  data_grid(carat = seq_range(carat, 20)) %>%
  mutate(lcarat = log2(carat)) %>%
  add_predictions(mod_diamond, "lprice") %>%
  mutate(price = 2 ^ lprice)
grid
ggplot(diamonds_2, aes(carat, price)) +
  geom_hex(bins = 50) +
  geom_line(data = grid, color = "red", size = 1)
## Now, we can focus on the residual
diamonds2 <- diamonds_2 %>%
  add_residuals(mod_diamond, "lresid")
ggplot(diamonds2, aes(lcarat, lresid)) +
  geom_hex(bins = 50)
# Residuals against the remaining categorical predictors.
ggplot(diamonds2, aes(cut, lresid)) + geom_boxplot()
ggplot(diamonds2, aes(color, lresid)) + geom_boxplot()
ggplot(diamonds2, aes(clarity, lresid)) + geom_boxplot()
## A model with multiple predictors, without interactions
mod_diamond2 <- lm(
  lprice ~ lcarat + color + cut + clarity,
  data = diamonds2
)
## data_grid(x_focus, .model = fitted_model) <== gives you the "typical" value for other predictors, very useful for visualization
grid <- diamonds2 %>%
  data_grid(cut, .model = mod_diamond2) %>%
  add_predictions(mod_diamond2)
grid
ggplot(grid, aes(cut, pred)) +
  geom_point()
#####
#####
##### Example 2 NYCFlight
library(nycflights13)
library(dplyr)
library(tibble)
library(ggplot2)
library(lubridate)
library(modelr)
# Daily departure counts from the flights table.
daily <- flights %>%
  mutate(date = make_date(year, month, day)) %>%
  group_by(date) %>%
  summarise(n = n())
daily <- daily %>%
  mutate(wday = wday(date, label = TRUE))
daily
ggplot(daily, aes(date, n)) +
  geom_line()
ggplot(daily, aes(wday, n)) +
  geom_boxplot()
# Model 1: day-of-week effect only; overlay its predictions on the data.
mod <- lm(n ~ wday, data = daily)
grid <- daily %>%
  data_grid(wday) %>%
  add_predictions(mod, "n")
grid
ggplot(daily, aes(x=wday, y=n)) +
  geom_boxplot() +
  geom_point(data = grid, color = "red")
# Inspect what the weekday model fails to explain.
daily <- daily %>%
  add_residuals(mod)
daily %>% ggplot(aes(x=date, y=resid)) +
  geom_ref_line(h=0) +
  geom_line()
daily %>% ggplot(aes(x=date, y=resid, color = wday)) +
  geom_ref_line(h=0) +
  geom_line()
# Largest negative / positive residual days (holidays etc.).
daily %>%
  filter(resid < -100)
daily %>%
  filter(resid > 100)
daily %>%
  ggplot(aes(date, resid)) + geom_ref_line(h = 0) + geom_line(color = "grey50") + geom_smooth(se = FALSE, span = 0.20) # span is a parameter for loess
daily %>%
  filter(wday == "Sat") %>% ## only look at SAT
  ggplot(aes(date, n)) +
  geom_point() +
  geom_line() +
  scale_x_date(
    NULL,
    date_breaks = "1 month", date_labels = "%b"
  )
## create a function to determine term
# Bins a Date into school-term labels using fixed 2013 break points.
term <- function(date) {
  cut(date,
      breaks = ymd(20130101, 20130605, 20130825, 20140101),
      labels = c("spring", "summer", "fall")
  )
}
daily <- daily %>%
  mutate(term = term(date))
daily
daily %>%
  filter(wday == "Sat") %>%
  ggplot(aes(date, n, color = term)) +
  geom_point(alpha = 1/3) +
  geom_line() +
  scale_x_date(
    NULL,
    date_breaks = "1 month", date_labels = "%b"
  )
daily %>%
  ggplot(aes(wday, n, color = term)) +
  geom_boxplot()
## many outliners in FALL
# Model 2 adds a weekday x term interaction; compare residuals side by side.
mod1 <- lm(n ~ wday, data = daily)
mod2 <- lm(n ~ wday * term, data = daily)
daily %>%
  gather_residuals(without_term = mod1, with_term = mod2) %>%
  ggplot(aes(date, resid, color = model)) +
  geom_line(alpha = 0.75) +
  geom_smooth(span = 0.2, se = FALSE)
grid <- daily %>%
  data_grid(wday, term) %>%
  add_predictions(mod2, "n")
## A generic technique => overlay the model on the original data points
ggplot(daily, aes(wday, n)) +
  geom_boxplot() +
  geom_point(data = grid, color = "red") +
  facet_wrap(~ term)
# Robust regression: outlier days have less influence on the fit.
mod3 <- MASS::rlm(n ~ wday * term, data = daily)
daily %>%
  add_residuals(mod3, "resid") %>%
  ggplot(aes(date, resid)) +
  geom_hline(yintercept = 0, size = 2, color = "white") +
  geom_line()
# Replace the hand-made term factor with a natural spline over the year.
library(splines)
mod4 <- MASS::rlm(n ~ wday * ns(date, 5), data = daily)
daily %>%
  data_grid(wday, date = seq_range(date, n = 13)) %>%
  add_predictions(mod4) %>%
  ggplot(aes(date, pred, color = wday)) +
  geom_line() +
  geom_point()
# --- Assorted ggplot2 labelling / scale examples (mpg, diamonds, presidential).
ggplot(mpg, aes(displ, hwy)) + geom_point(aes(color = class)) +
  geom_smooth(se = FALSE) + theme(legend.position = "bottom") +
  labs(title = "hwy decreases with displ") +
  guides(
    color = guide_legend(
      nrow = 1,
      override.aes = list(size = 4)
    ) )
ggplot(diamonds, aes(log10(carat), log10(price))) +
  geom_bin2d()
ggplot(diamonds, aes(carat, price)) +
  geom_bin2d() +
  scale_x_log10() +
  scale_y_log10()
ggplot(mpg, aes(displ, hwy)) +
  geom_point(aes(color = drv, shape = drv)) +
  scale_color_brewer(palette = "Set1")
presidential %>%
  mutate(id = 33 + row_number()) %>%
  ggplot(aes(start, id, color = party)) +
  geom_point() +
  geom_segment(aes(xend = end, yend = id)) +
  scale_colour_manual(
    values = c(Republican = "red", Democratic = "blue")
  )
# Reusable scale objects keep two subset plots on identical axes/colors.
suv <- mpg %>% filter(class == "suv")
compact <- mpg %>% filter(class == "compact")
x_scale <- scale_x_continuous(limits = range(mpg$displ))
y_scale <- scale_y_continuous(limits = range(mpg$hwy))
col_scale <- scale_color_discrete(limits = unique(mpg$drv))
ggplot(suv, aes(displ, hwy, color = drv)) +
  geom_point() +
  x_scale +
  y_scale +
  col_scale
ggplot(compact, aes(displ, hwy, color = drv)) +
  geom_point() +
  x_scale +
  y_scale +
  col_scale
| /R/RForDataScience/R for Data Science_note7_real_data.R | no_license | terrylaw888/code_recipe | R | false | false | 5,540 | r | library(dplyr)
library(tibble)
library(ggplot2)
install.packages("hexbin")
diamonds
## A subset of data, log transform for linear relationship
diamonds_2 <- diamonds %>%
filter(carat < 2.5) %>%
mutate(lprice = log2(price), lcarat = log2(carat))
diamonds_2 %>% ggplot(aes(lcarat, lprice)) +
geom_hex(bins =50)
## implement the intuition as a model => lprice ~ lcarat, remove the obvious confounding
mod_diamond <- lm(lprice ~ lcarat, data = diamonds_2)
grid <- diamonds_2 %>%
data_grid(carat = seq_range(carat, 20)) %>%
mutate(lcarat = log2(carat)) %>%
add_predictions(mod_diamond, "lprice") %>%
mutate(price = 2 ^ lprice)
grid
ggplot(diamonds_2, aes(carat, price)) +
geom_hex(bins = 50) +
geom_line(data = grid, color = "red", size = 1)
## Now, we can focus on the residual
diamonds2 <- diamonds_2 %>%
add_residuals(mod_diamond, "lresid")
ggplot(diamonds2, aes(lcarat, lresid)) +
geom_hex(bins = 50)
ggplot(diamonds2, aes(cut, lresid)) + geom_boxplot()
ggplot(diamonds2, aes(color, lresid)) + geom_boxplot()
ggplot(diamonds2, aes(clarity, lresid)) + geom_boxplot()
## A model with multiple predictors, without interactions
mod_diamond2 <- lm(
lprice ~ lcarat + color + cut + clarity,
data = diamonds2
)
## data_grid(x_focus, .model = fitted_model) <== gives you the "typical" value for other predictors, very useful for visualizaiton
grid <- diamonds2 %>%
data_grid(cut, .model = mod_diamond2) %>%
add_predictions(mod_diamond2)
grid
ggplot(grid, aes(cut, pred)) +
geom_point()
#####
#####
##### Example 2 NYCFlight
library(nycflights13)
library(dplyr)
library(tibble)
library(ggplot2)
library(lubridate)
library(modelr)
daily <- flights %>%
mutate(date = make_date(year, month, day)) %>%
group_by(date) %>%
summarise(n = n())
daily <- daily %>%
mutate(wday = wday(date, label = TRUE))
daily
ggplot(daily, aes(date, n)) +
geom_line()
ggplot(daily, aes(wday, n)) +
geom_boxplot()
mod <- lm(n ~ wday, data = daily)
grid <- daily %>%
data_grid(wday) %>%
add_predictions(mod, "n")
grid
ggplot(daily, aes(x=wday, y=n)) +
geom_boxplot() +
geom_point(data = grid, color = "red")
daily <- daily %>%
add_residuals(mod)
daily %>% ggplot(aes(x=date, y=resid)) +
geom_ref_line(h=0) +
geom_line()
daily %>% ggplot(aes(x=date, y=resid, color = wday)) +
geom_ref_line(h=0) +
geom_line()
daily %>%
filter(resid < -100)
daily %>%
filter(resid > 100)
daily %>%
ggplot(aes(date, resid)) + geom_ref_line(h = 0) + geom_line(color = "grey50") + geom_smooth(se = FALSE, span = 0.20) # span is a parameter for loess
daily %>%
filter(wday == "Sat") %>% ## only look at SAT
ggplot(aes(date, n)) +
geom_point() +
geom_line() +
scale_x_date(
NULL,
date_breaks = "1 month", date_labels = "%b"
)
## create a function to determine term
term <- function(date) {
cut(date,
breaks = ymd(20130101, 20130605, 20130825, 20140101),
labels = c("spring", "summer", "fall")
)
}
daily <- daily %>%
mutate(term = term(date))
daily
daily %>%
filter(wday == "Sat") %>%
ggplot(aes(date, n, color = term)) +
geom_point(alpha = 1/3) +
geom_line() +
scale_x_date(
NULL,
date_breaks = "1 month", date_labels = "%b"
)
daily %>%
ggplot(aes(wday, n, color = term)) +
geom_boxplot()
## many outliners in FALL
mod1 <- lm(n ~ wday, data = daily)
mod2 <- lm(n ~ wday * term, data = daily)
daily %>%
gather_residuals(without_term = mod1, with_term = mod2) %>%
ggplot(aes(date, resid, color = model)) +
geom_line(alpha = 0.75) +
geom_smooth(span = 0.2, se = FALSE)
grid <- daily %>%
data_grid(wday, term) %>%
add_predictions(mod2, "n")
## A generic technique => overlay the model on the original data points
ggplot(daily, aes(wday, n)) +
geom_boxplot() +
geom_point(data = grid, color = "red") +
facet_wrap(~ term)
mod3 <- MASS::rlm(n ~ wday * term, data = daily)
daily %>%
add_residuals(mod3, "resid") %>%
ggplot(aes(date, resid)) +
geom_hline(yintercept = 0, size = 2, color = "white") +
geom_line()
library(splines)
mod4 <- MASS::rlm(n ~ wday * ns(date, 5), data = daily)
daily %>%
data_grid(wday, date = seq_range(date, n = 13)) %>%
add_predictions(mod4) %>%
ggplot(aes(date, pred, color = wday)) +
geom_line() +
geom_point()
ggplot(mpg, aes(displ, hwy)) + geom_point(aes(color = class)) +
geom_smooth(se = FALSE) + theme(legend.position = "bottom") +
labs(title = "hwy decreases with displ") +
guides(
color = guide_legend(
nrow = 1,
override.aes = list(size = 4)
) )
ggplot(diamonds, aes(log10(carat), log10(price))) +
geom_bin2d()
ggplot(diamonds, aes(carat, price)) +
geom_bin2d() +
scale_x_log10() +
scale_y_log10()
ggplot(mpg, aes(displ, hwy)) +
geom_point(aes(color = drv, shape = drv)) +
scale_color_brewer(palette = "Set1")
presidential %>%
mutate(id = 33 + row_number()) %>%
ggplot(aes(start, id, color = party)) +
geom_point() +
geom_segment(aes(xend = end, yend = id)) +
scale_colour_manual(
values = c(Republican = "red", Democratic = "blue")
)
suv <- mpg %>% filter(class == "suv")
compact <- mpg %>% filter(class == "compact")
x_scale <- scale_x_continuous(limits = range(mpg$displ))
y_scale <- scale_y_continuous(limits = range(mpg$hwy))
col_scale <- scale_color_discrete(limits = unique(mpg$drv))
ggplot(suv, aes(displ, hwy, color = drv)) +
geom_point() +
x_scale +
y_scale +
col_scale
ggplot(compact, aes(displ, hwy, color = drv)) +
geom_point() +
x_scale +
y_scale +
col_scale
|
# basic reactive functions for accessing PIA
# last update: 2016-10-29
# Shared reactive helpers for connecting a Shiny module to a PIA
# (personal data store) backend.
#
# Args:
#   input, output, session: standard Shiny module server arguments.
#   tr: translation function, tr(key [, lang]) -> localized string.
#
# Returns a named list:
#   currApp                     - reactive: configured app handle (list);
#                                 empty list when the PIA connection is
#                                 missing or invalid.
#   currData                    - reactive: items read from the default repo.
#   currDataDateSelectTimestamp - reactive: currData() restricted to the
#                                 input$dateRange window.
#   checkInconsistencyWrite     - builds a confirmation modal warning about
#                                 mixed encrypted/unencrypted writes.
#
# Last update: 2016-10-29
baseReactive <- function(input, output, session, tr){
  # FIX: ns was previously defined only inside currApp() and
  # checkInconsistencyWrite(), but it is also used inside
  # currDataDateSelectTimestamp(), which failed with
  # "object 'ns' not found". Define it once for the whole module.
  ns <- session$ns
  # Reactive: build and validate the PIA app handle from the stored
  # connection settings. Shows an alert and returns list() on failure.
  currApp <- reactive({
    # Dependencies that should trigger a reconnect attempt.
    input$p2next
    input$disconnectPIA
    input$lang
    input$addKeyItem
    input$updateKeyItem
    input$delKeyItem
    rv$v  # NOTE(review): rv comes from an enclosing/global scope - confirm.
    shinyBS::closeAlert(session, 'alertPiaStatus')
    app <- list()
    piaMsg <- ''
    pia_url <- session$userData$piaUrl
    app_key <- session$userData$appKey
    app_secret <- session$userData$appSecret
    keyItems <- session$userData$keyItems
    if(is.null(keyItems)) {
      keyItems <- data.frame()
    }
    if(is.null(pia_url) |
       is.null(app_key) |
       is.null(app_secret)) {
      piaMsg <- tr('missingIncompletePiaData')
    } else {
      if((nchar(pia_url) > 0) &
         (nchar(app_key) > 0) &
         (nchar(app_secret) > 0)) {
        app <- setupApp(pia_url,
                        app_key,
                        app_secret,
                        keyItems)
        if(length(app) == 0){
          piaMsg <- tr('invalidPiaData')
        } else {
          if(is.na(app$token)){
            piaMsg <- tr('invalidPiaData')
          }
        }
      } else {
        piaMsg <- tr('missingIncompletePiaData')
      }
    }
    if(nchar(piaMsg) > 0){
      # Connection failed: surface the reason and return an empty handle.
      shinyBS::createAlert(session, 'piaStatus',
                           alertId = 'alertPiaStatus',
                           style = 'warning', append = FALSE,
                           title = tr('piaConnectionMsgTitle'),
                           content = piaMsg)
      app <- list()
    } else {
      shinyBS::closeAlert(session, 'alertPiaStatus')
      # Warn when the repository holds encrypted items but no key is set.
      url <- itemsUrl(app$url, appRepoDefault)
      retVal <- readRawItems(app, url)
      if(nrow(retVal) > 0){
        if(checkItemEncryption(retVal)){
          if(nrow(keyItems) == 0){
            shinyBS::createAlert(
              session, 'piaStatus',
              alertId = 'alertPiaStatus',
              style = 'warning', append = FALSE,
              title = tr('piaEncryptedMsgTitle', input$lang),
              content = tr('piaEncryptedMsg', input$lang))
          }
        }
      }
    }
    app
  })
  # Reactive: read all items from the default repository of the current app.
  currData <- reactive({
    # list any input controls that effect currData
    input$modalPiaUrl
    input$modalPiaId
    input$modalPiaSecret
    input$p2next
    app <- currApp()
    retVal <- data.frame()
    if(length(app) > 0) {
      url <- itemsUrl(app$url, appRepoDefault)
      retVal <- readItems(app, url)
    }
    retVal
  })
  # Reactive: currData() restricted to the input$dateRange window. Shows a
  # (German) user-facing alert and returns an empty data.frame when there is
  # no data, no data in the window, or the window is inverted.
  currDataDateSelectTimestamp <- reactive({
    # NOTE(review): createAlert() below uses alertId = 'myDataStatus' while
    # this closeAlert() uses the namespaced ns('myDataStatus'); one of the
    # two looks wrong - confirm against shinyBS usage elsewhere.
    shinyBS::closeAlert(session, ns('myDataStatus'))
    data <- currData()
    if(nrow(data) > 0){
      mymin <- as.Date(input$dateRange[1], '%d.%m.%Y')
      mymax <- as.Date(input$dateRange[2], '%d.%m.%Y')
      if(mymax > mymin){
        daterange <- seq(mymin, mymax, 'days')
        # time is stored as milliseconds since the Unix epoch.
        data$dat <- as.Date(as.POSIXct(data$time/1000, origin='1970-01-01'))
        data <- data[data$dat %in% daterange, ]
        if(nrow(data) > 0){
          data
        } else {
          shinyBS::createAlert(session, ns('dataStatus'),
                               alertId = 'myDataStatus',
                               style = 'warning', append = FALSE,
                               title = 'Keine Daten im gewählten Zeitfenster',
                               content = 'Für das ausgewählte Zeitfenster sind keine Daten vorhanden.')
          data.frame()
        }
      } else {
        shinyBS::createAlert(session, ns('dataStatus'),
                             alertId = 'myDataStatus',
                             style = 'warning', append = FALSE,
                             title = 'Ungültiges Zeitfenster',
                             content = 'Im ausgewählten Zeitfenster liegt das End-Datum vor dem Beginn-Datum. Korriege die Eingabe!')
        data.frame()
      }
    } else {
      shinyBS::createAlert(session, ns('dataStatus'),
                           alertId = 'myDataStatus',
                           style = 'warning', append = FALSE,
                           title = 'Keine Website-Daten im Datentresor vorhanden',
                           content = 'Derzeit sind noch keine Website-Daten im Datentresor gespeichert. Wechsle zu "Datenquellen" und installiere das passende Plugin für deinen Browser!')
      data.frame()
    }
  })
  # Build a confirmation dialog shown before writing an item whose encryption
  # state differs from the repository's current state.
  checkInconsistencyWrite <- function(repoEncrypted){
    msg <- ''
    if(repoEncrypted){
      msg <- tr('checkInconsistencyWriteUnencryptedTxt')
    } else {
      msg <- tr('checkInconsistencyWriteEncryptedTxt')
    }
    shiny::modalDialog(
      shiny::span(msg),
      footer = shiny::tagList(
        shiny::actionButton(
          ns('writeInconsistencyCancelBtn'),
          tr('cancelLbl')),
        shiny::actionButton(
          ns('writeInconsistencyBtn'),
          tr('okLbl'))),
      size = 's'
    )
  }
  return(list(currApp=currApp,
              currData=currData,
              currDataDateSelectTimestamp=currDataDateSelectTimestamp,
              checkInconsistencyWrite=checkInconsistencyWrite))
}
| /R/srvBaseReactive.R | permissive | OwnYourData/oydapp | R | false | false | 7,555 | r | # basic reactive functions for accessing PIA
# last update: 2016-10-29
baseReactive <- function(input, output, session, tr){
currApp <- reactive({
ns <- session$ns
input$p2next
input$disconnectPIA
input$lang
input$addKeyItem
input$updateKeyItem
input$delKeyItem
rv$v
shinyBS::closeAlert(session, 'alertPiaStatus')
app <- list()
piaMsg <- ''
pia_url <- session$userData$piaUrl
app_key <- session$userData$appKey
app_secret <- session$userData$appSecret
keyItems <- session$userData$keyItems
if(is.null(keyItems)) {
keyItems <- data.frame()
}
if(is.null(pia_url) |
is.null(app_key) |
is.null(app_secret)) {
piaMsg <- tr('missingIncompletePiaData')
} else {
if((nchar(pia_url) > 0) &
(nchar(app_key) > 0) &
(nchar(app_secret) > 0)) {
app <- setupApp(pia_url,
app_key,
app_secret,
keyItems)
if(length(app) == 0){
piaMsg <- tr('invalidPiaData')
} else {
if(is.na(app$token)){
piaMsg <- tr('invalidPiaData')
}
}
} else {
piaMsg <- tr('missingIncompletePiaData')
}
}
if(nchar(piaMsg) > 0){
shinyBS::createAlert(session, 'piaStatus',
alertId = 'alertPiaStatus',
style = 'warning', append = FALSE,
title = tr('piaConnectionMsgTitle'),
content = piaMsg)
app <- list()
} else {
shinyBS::closeAlert(session, 'alertPiaStatus')
url <- itemsUrl(app$url, appRepoDefault)
retVal <- readRawItems(app, url)
if(nrow(retVal) > 0){
if(checkItemEncryption(retVal)){
if(nrow(keyItems) == 0){
shinyBS::createAlert(
session, 'piaStatus',
alertId = 'alertPiaStatus',
style = 'warning', append = FALSE,
title = tr('piaEncryptedMsgTitle', input$lang),
content = tr('piaEncryptedMsg', input$lang))
}
}
}
}
app
})
currData <- reactive({
# list any input controls that effect currData
input$modalPiaUrl
input$modalPiaId
input$modalPiaSecret
input$p2next
app <- currApp()
retVal <- data.frame()
if(length(app) > 0) {
url <- itemsUrl(app$url, appRepoDefault)
retVal <- readItems(app, url)
}
retVal
})
currDataDateSelectTimestamp <- reactive({
shinyBS::closeAlert(session, ns('myDataStatus'))
data <- currData()
if(nrow(data) > 0){
mymin <- as.Date(input$dateRange[1], '%d.%m.%Y')
mymax <- as.Date(input$dateRange[2], '%d.%m.%Y')
if(mymax > mymin){
daterange <- seq(mymin, mymax, 'days')
data$dat <- as.Date(as.POSIXct(data$time/1000, origin='1970-01-01'))
data <- data[data$dat %in% daterange, ]
if(nrow(data) > 0){
data
} else {
shinyBS::createAlert(session, ns('dataStatus'),
alertId = 'myDataStatus',
style = 'warning', append = FALSE,
title = 'Keine Daten im gewählten Zeitfenster',
content = 'Für das ausgewählte Zeitfenster sind keine Daten vorhanden.')
data.frame()
}
} else {
shinyBS::createAlert(session, ns('dataStatus'),
alertId = 'myDataStatus',
style = 'warning', append = FALSE,
title = 'Ungültiges Zeitfenster',
content = 'Im ausgewählten Zeitfenster liegt das End-Datum vor dem Beginn-Datum. Korriege die Eingabe!')
data.frame()
}
} else {
shinyBS::createAlert(session, ns('dataStatus'),
alertId = 'myDataStatus',
style = 'warning', append = FALSE,
title = 'Keine Website-Daten im Datentresor vorhanden',
content = 'Derzeit sind noch keine Website-Daten im Datentresor gespeichert. Wechsle zu "Datenquellen" und installiere das passende Plugin für deinen Browser!')
data.frame()
}
})
checkInconsistencyWrite <- function(repoEncrypted){
ns <- session$ns
msg <- ''
if(repoEncrypted){
msg <- tr('checkInconsistencyWriteUnencryptedTxt')
} else {
msg <- tr('checkInconsistencyWriteEncryptedTxt')
}
shiny::modalDialog(
shiny::span(msg),
footer = shiny::tagList(
shiny::actionButton(
ns('writeInconsistencyCancelBtn'),
tr('cancelLbl')),
shiny::actionButton(
ns('writeInconsistencyBtn'),
tr('okLbl'))),
size = 's'
)
}
return(list(currApp=currApp,
currData=currData,
currDataDateSelectTimestamp=currDataDateSelectTimestamp,
checkInconsistencyWrite=checkInconsistencyWrite))
}
|
# August 30, 2021. Mon
# Analyzing categories
library(tidyverse)
library(xtable)
# Sampling frame: one row per sampled software mention, restricted to
# articles published after 2015; two stratum/density labels are normalized
# to the names used in the paper.
frame <- read_csv("draft/data/sample_frame.csv") %>%
  mutate(issue_dates = str_sub(issue_dates, 3, -3)) %>%
  mutate(issue_year = as.numeric(str_sub(issue_dates, 1, 4))) %>%
  filter(issue_year > 2015) %>%
  mutate(mention_density_group = if_else(mention_density_group == "[0,1]",
                                         "(0,1]", mention_density_group)) %>%
  mutate(stratum = if_else(stratum == "[1001,12895]", "[1001,12982]",
                           stratum))
frame$stratum <- factor(frame$stratum, levels=c("[1,10]", "[11,100]",
                                                "[101,1000]","[1001,12982]",
                                                "No Impact Factor"))
# Interactive check: highest-density mentions in the lowest impact stratum.
frame %>%
  distinct(doc_key, mention_density, stratum) %>%
  mutate(mention_density = as.integer(mention_density)) %>%
  filter(stratum == "[1,10]") %>%
  arrange(desc(mention_density)) %>%
  View()
# FIX: the violin plot below was previously chained after View, leaving
# ggplot(aes(...)) with no data attached (errors at print time). Feed it the
# distinct per-article table directly (presumably the full table rather than
# the one-stratum subset above - TODO confirm intent).
frame %>%
  distinct(doc_key, mention_density, stratum) %>%
  ggplot(aes(x = stratum, y = mention_density)) +
  # geom_boxplot(notch=T, outlier.alpha=0.1) +
  geom_violin(draw_quantiles = 0.5, trim = TRUE, fill = "darkgrey") +
  scale_y_log10(limits = c(0.5, 400), breaks = c(1, 10, 100, 300)) +
  labs(x = "Journal impact factor stratum", y = "Mention density per article")
ggsave(filename = "draft/output/mention_dist_by_strata.png", width = 6, height = 4)
# Coding results from three coders across several coding-sheet versions,
# plus a separate inter-coder agreement round; all sheets share one schema.
james_a <- read_csv("cord19-sw-analysis/data/sample_coding/coding_sheet_v1_james-coded.csv")
james_b <- read_csv("cord19-sw-analysis/data/sample_coding/coding_sheet_v1.1_james.csv")
hannah <- read_csv("cord19-sw-analysis/data/sample_coding/coding_sheet_v1_hannah-updated.csv")
fan <- read_csv("cord19-sw-analysis/data/sample_coding/coding_sheet_v1_fan.csv")
agreement <- read_csv("cord19-sw-analysis/data/agreement_coding/agreement_coding.csv")
james_c <- read_csv("cord19-sw-analysis/data/sample_coding/coding_sheet_v1.2_james-updated.csv")
hannah_b <- read_csv("cord19-sw-analysis/data/sample_coding/coding_sheet_v1.2_hannah-updated.csv")
fan_b <- read_csv("cord19-sw-analysis/data/sample_coding/coding_sheet_v1.2_fan-updated.csv")
fan_c <- read_csv("cord19-sw-analysis/data/sample_coding/coding_sheet_v1.2_fan_2.csv")
fan_d <- read_csv("cord19-sw-analysis/data/sample_coding/coding_sheet_v1.2_fan_3.csv")
# Stack all sheets into one combined coding table.
all <- rbind(james_a, james_b, hannah, fan, agreement, james_c, hannah_b,
             fan_b, fan_c, fan_d)
all %>% distinct(sample_id)
# NOTE(review): written to data/ but read back from draft/data/ - these are
# presumably kept in sync manually; confirm the paths.
all %>% write_csv("data/full_coding_results.csv")
all <- read_csv("draft/data/full_coding_results.csv")
# sanity check
all %>%
filter(coding_id=="A1" & coding_result==1) %>%
distinct(sample_id) %>%
mutate(sample_id=str_extract(sample_id, "\\d+-\\d+-\\d+")) %>%
left_join(frame, by="sample_id") %>% drop_na(anno_key) %>%
distinct(sample_id, anno_key, doc_key, group_num, doc_num, anno_num, issue_year) %>%
filter(issue_year > 2015) %>%
group_by(group_num) %>%
summarise(doc_count=n_distinct(doc_key)) %>% View
frame %>%
distinct(mention_density, doc_key) %>%
mutate(mention_density_group = cut(mention_density, breaks=c(0,1,8,350),
labels=c("(0,1]","[2,8]","[9,350]"))) %>%
group_by(mention_density_group) %>%
summarise(doc_count=n_distinct(doc_key)) %>%
ggplot(aes(x=mention_density_group, y=doc_count)) +
geom_bar(stat='identity', fill='darkgray') +
geom_text(aes(label=doc_count, x=mention_density_group, y=doc_count),
position=position_dodge(width=0.4), vjust=-.8, size=3) +
scale_x_discrete(name="Mention density per article") +
scale_y_continuous(name="Number of articles mentioning software", limits=c(0,30000))
ggsave(filename="output/mention_density_group.png", width=6, height=4)
frame %>%
distinct(doc_key, mention_density_group, stratum) %>%
group_by(mention_density_group, stratum) %>%
summarise(doc_count=n_distinct(doc_key)) %>%
ungroup() %>%
pivot_wider(names_from=stratum, values_from=doc_count) %>%
mutate(`[1,10]`=replace_na(`[1,10]`, 0)) %>%
rename(`Impact stratum\nMention density`="mention_density_group") %>%
xtable(., type='latex')
all_valid <- all %>%
filter(coding_id=="A1" & coding_result==1) %>%
distinct(sample_id) %>%
mutate(sample_id=str_extract(sample_id, "\\d+-\\d+-\\d+")) %>%
left_join(frame, by="sample_id") %>% drop_na(anno_key) %>%
distinct(sample_id, anno_key, doc_key, group_num, doc_num, anno_num,
issue_year, stratum, mention_density_group) %>%
filter(issue_year > 2015)
all_valid_id <- all_valid %>% distinct(sample_id) %>% pull()
all_for_join <- all %>%
mutate(sample_id=str_extract(sample_id, "\\d+-\\d+-\\d+"))
all %>% group_by(sample_id) %>%
summarise(row_n = n()) %>%
arrange(desc(row_n)) %>% View
all_cleaned <- all_for_join %>%
filter(sample_id %in% all_valid_id) %>%
mutate(coding_result=replace_na(coding_result, 0)) %>%
filter(!coding_id %in% c("A12", "A13", "B1", "B8", "D15", "D16", "E4"))
# calculate false positive rate
false_pos <- all %>%
filter(coding_id == "A1") %>%
mutate(coding_result=replace_na(coding_result, 0)) %>%
distinct(sample_id, coding_id, coding_result) %>%
group_by(coding_result) %>%
summarise(mention_count = n_distinct(sample_id)) %>% pull(mention_count)
conf_int <- prop.test(false_pos[1], false_pos[2])$conf.int
round(conf_int[1], 3)
round(conf_int[2], 3)
# categorizing
categories <- all_cleaned %>%
filter(coding_id == "A1" & coding_result == 1) %>%
distinct(sample_id) %>%
left_join(all_cleaned, by="sample_id") %>% distinct() %>%
select(-hint, -memo, -explanation) %>%
filter(!coding_id %in% c("A1", "A4", "A6", "A8", "A10", "B1", "B2")) %>%
mutate(category = case_when(
coding_id == "A2" ~ "like instrument",
coding_id == "A3" ~ "in-text name",
coding_id == "A5" ~ "in-text version",
coding_id == "A7" ~ "in-text publisher",
coding_id == "A9" ~ "in-text URL",
coding_id == "A11" ~ "configuration details",
coding_id == "A12" ~ "software used",
coding_id == "A13" ~ "software not used",
coding_id == "B3" ~ "cite to software publication",
coding_id == "B4" ~ "cite to software",
coding_id == "B5" ~ "cite to domain publication",
coding_id == "B6" ~ "cite to user manual/guide",
coding_id == "B7" ~ "cite to a project",
coding_id == "B9" ~ "in-reference name",
coding_id == "B10" ~ "in-reference version",
coding_id == "B11" ~ "in-reference URL",
coding_id == "B12" ~ "in-reference publisher",
coding_id == "C1" ~ "identifiable",
coding_id == "C2" ~ "findable",
coding_id == "C3" ~ "findable version",
coding_id == "C4" ~ "cite to a unique, persistent identifier that points to software",
coding_id == "C5" ~ "cite to a commit hash",
coding_id == "C6" ~ "no access",
coding_id == "C7" ~ "proprietary",
coding_id == "C8" ~ "free access",
coding_id == "C9" ~ "source code accessible",
coding_id == "C10" ~ "modifiable",
coding_id == "C11" ~ "open source licensed",
coding_id == "D1" ~ "matched to citation request",
coding_id == "D2" ~ "plain text citation request",
coding_id == "D3" ~ "BibTex citation request",
coding_id == "D4" ~ "citation request in repo README",
coding_id == "D5" ~ "citation request on webpage",
coding_id == "D6" ~ "CITATION file",
coding_id == "D7" ~ "CITATION.cff",
coding_id == "D8" ~ "CodeMeta",
coding_id == "D9" ~ "domain-specific citation request",
coding_id == "D10" ~ "request to cite software",
coding_id == "D11" ~ "request to cite software publication",
coding_id == "D12" ~ "request to cite domain science publication",
coding_id == "D13" ~ "request to cite project",
coding_id == "D14" ~ "request to cite other research product",
coding_id == "E1" ~ "software is archived",
coding_id == "E2" ~ "software has unique, persistent identifier",
coding_id == "E3" ~ "software has publicly accessible metadata",
TRUE ~ as.character(coding_id)
))
# types of software mentions
mention_types <- categories %>%
filter(coding_id %in% c("A2", "A3", "A5", "A7", "A9", "A11",
"B3", "B4", "B5", "B6", "B7", "B9",
"B10", "B11", "B12", "C4", "C5")) %>%
# note that 157 true positive software mentions here
group_by(category, coding_result) %>%
summarise(mention_count = n_distinct(sample_id)) %>%
# ungroup() %>%
pivot_wider(names_from=coding_result,
values_from=mention_count) %>%
rename(c(true="1", false="0")) %>%
mutate(false = replace_na(false, 0),
true = replace_na(true, 0),
sum = false + true) %>%
mutate(proportion = round(true/sum,3)) %>%
rowwise() %>%
mutate(conf_int_low=prop.test(true,sum)$conf.int[1],
conf_int_high=prop.test(true,sum)$conf.int[2])
# we don't have non-named software this time
# but this could be biased by the extraction?
# The utility of them for detection would mostly be:
# if they are software publications, they provide names etc. of the software
categories %>%
filter(coding_id %in% c("B3", "B4", "B5", "B6", "B7", "B9", "B10", "B11",
"B12")) %>%
group_by(sample_id) %>%
summarise(reference_check = sum(coding_result)) %>%
filter(reference_check > 0) %>% View
# 38 formal citations (18%), 172 informal mentions (82%)
categories %>%
filter(category=="cite to software" & coding_result==1) %>%
distinct(sample_id) %>%
left_join(categories, by="sample_id") %>%
View
mention_type_plot <- categories %>%
distinct(sample_id, category, coding_result) %>%
pivot_wider(names_from=category, values_from=coding_result) %>%
mutate(label = case_when(
`cite to software` == 1 ~ "Cite to software",
`cite to software publication` == 1 ~ "Software publication",
`cite to domain publication` == 1 ~ "Domain publication",
`like instrument` == 1 ~ "Like instrument",
`in-text URL` == 1 ~ "URL in text",
`in-text name` == 1 & `in-text version` == 0 &
`in-text publisher` == 0 & `in-text URL` == 0 ~ "Name only",
# `in-text name` == 1 & `in-text version` == 1 &
# `in-text publisher` == 0 & `in-text URL` == 0 ~ "In-text version",
TRUE ~ as.character("Other")
)) %>%
select(sample_id, label) %>%
group_by(label) %>%
summarise(mention_count = as.numeric(n_distinct(sample_id))) %>%
ungroup() %>%
filter(label != "NA") %>%
mutate(sum = 210) %>%
mutate(proportion = round(mention_count/sum, 3)) %>%
rowwise() %>%
mutate(conf_int_low=prop.test(mention_count, sum)$conf.int[1],
conf_int_high=prop.test(mention_count, sum)$conf.int[2]) %>%
mutate(type=if_else(
label %in% c("Name only", "URL in text", "Like instrument", "Other"),
"Informal mention", "Formal citation")) %>%
group_by(type) %>%
mutate(type_prop=if_else(type=="Informal mention", 0.82, 0.18))
# mention_type_plot$type <- factor(mention_type_plot$type,
# leveels=c("Informal mention", "Formal citation"))
mention_type_plot$label <- factor(mention_type_plot$label,
levels=c("Cite to software",
"Domain publication",
"Software publication",
"Like instrument",
"URL in text",
"Name only",
"Other"))
mention_type_plot %>%
ggplot(aes(x=type, y=proportion, fill=label)) +
geom_bar(position="dodge", stat='identity') +
geom_errorbar(aes(x=type, ymin=conf_int_low, ymax=conf_int_high), width=.2,
position=position_dodge(.9)) +
geom_col(data=mention_type_plot %>% distinct(type, type_prop),
aes(x=type, y=type_prop), fill=NA, colour="darkgrey") +
geom_text(aes(label=label), position=position_dodge(.9), vjust=-4, hjust=.5,
size=3, colour="black") +
# geom_hline(yintercept=0.82, linetype="dotted", colour="darkgrey", size=1) +
scale_fill_grey(start=0.4, end=0.8) +
scale_x_discrete(name="") +
scale_y_continuous(limits=c(-0,0.9), breaks=c(0, 0.2, 0.4, 0.6, 0.8),
name="Proportion") +
theme(legend.position="none",
axis.text.y=element_text(angle=90, hjust=0.5))
ggsave(filename="draft/output/mention_class.png", width=6.4, height=4)
mention_type_plot %>%
ggplot(aes(x=label, y=proportion)) +
geom_bar(stat="identity", fill="darkgray") +
geom_errorbar(aes(ymin=conf_int_low, ymax=conf_int_high), width=.2,
position=position_dodge(.9)) +
scale_x_discrete(name="") +
scale_y_continuous(limits=c(0, 0.7),
name="Proportion") +
theme(panel.grid.major.x = element_blank(),
panel.grid.minor.y = element_blank(),
panel.border = element_blank(),
text = element_text(size=10),
axis.title.y = element_text(vjust=0.3),
axis.text.x = element_text(angle=30, hjust=1))
ggsave(filename="draft/output/mention_types.png", width=6.4, height=4)
# Condense the detailed mention categories into four mutually exclusive labels.
# The case_when() order encodes precedence: a cite-to-software beats
# like-instrument, which beats cite-to-publication, etc. prop.test() gives a
# binomial 95% CI for each label's proportion.
condensed_categories <- categories %>%
filter(category == "like instrument") %>%
bind_rows(categories %>% filter(category == "cite to domain publication")) %>%
bind_rows(categories %>% filter(category == "cite to software publication")) %>%
bind_rows(categories %>% filter(category == "cite to software")) %>%
bind_rows(categories %>% filter(category == "in-text name")) %>%
select(-coding_id, -coding_scheme) %>%
pivot_wider(names_from="category", values_from="coding_result") %>%
mutate(label = case_when(
`cite to software` == 1 ~ "Cite to software",
`like instrument` == 1 ~ "Like instrument",
`cite to domain publication` == 1 ~ "Cite to publication",
`cite to software publication` == 1 ~ "Cite to publication",
`in-text name` == 1 ~ "Informal",
TRUE ~ as.character(sample_id),
)) %>%
group_by(label) %>%
summarise(mention_count=n_distinct(sample_id)) %>%
mutate(sum=sum(mention_count)) %>%
mutate(proportion = mention_count/sum) %>%
rowwise() %>%
mutate(conf_int_low=prop.test(mention_count, sum)$conf.int[1],
conf_int_high=prop.test(mention_count, sum)$conf.int[2])
# Fix the display order of the labels for plotting.
condensed_categories$label <- factor(condensed_categories$label,
levels=c("Cite to software",
"Cite to publication",
"Like instrument",
"Informal"))
# Bar chart of condensed mention types with 95% CIs.
condensed_categories %>%
ggplot(aes(x=label, y=proportion)) +
geom_bar(stat="identity", fill="darkgray") +
geom_errorbar(aes(ymin=conf_int_low, ymax=conf_int_high), width=.2,
position=position_dodge(.9)) +
scale_x_discrete(name="") +
scale_y_continuous(limits=c(0, 0.7),
name="Proportion") +
theme(panel.grid.major.x = element_blank(),
panel.grid.minor.y = element_blank(),
panel.border = element_blank(),
text = element_text(size=10),
axis.title.y = element_text(vjust=0.3),
axis.text.x = element_text(angle=30, hjust=1))
ggsave(filename="draft/output/mention_types_condensed.png", width=6, height=4)
# sample_id -> software_name lookup, used to count distinct software products
# rather than distinct mentions.
name_list <- frame %>%
distinct(sample_id, software_name)
# Accessibility properties per software product (cumulative, not exclusive).
# NOTE(review): the label mapping sends both the derived `accessible` column
# and `proprietary` to the same label "accessible", so that bar counts
# software that is accessible OR proprietary -- confirm this is intended.
# The denominator 155 (distinct software products) is hard-coded.
accessibility <- categories %>%
filter(category %in% c("no access", "proprietary",
"free access", "source code accessible", "modifiable")) %>%
select(-coding_id, -coding_scheme) %>%
pivot_wider(names_from="category", values_from="coding_result") %>%
mutate(accessible=if_else(`no access`==0, 1, 0)) %>%
select(-`no access`) %>%
mutate(proprietary=as.numeric(proprietary),
`free access`=as.numeric(`free access`),
`source code accessible`=as.numeric(`source code accessible`),
modifiable=as.numeric(modifiable)) %>%
pivot_longer(!sample_id, names_to='label', values_to='value') %>%
mutate(label=case_when(
label=="proprietary" ~ "accessible",
label=="free access" ~ "free",
label=="modifiable" ~ "source code modifiable",
TRUE ~ as.character(label)
)) %>%
distinct() %>%
left_join(name_list, by="sample_id") %>%
group_by(label, value) %>%
summarise(software_count = n_distinct(software_name)) %>%
filter(value==1) %>%
mutate(sum=155) %>%
mutate(proportion = round(software_count/sum,3)) %>%
rowwise() %>%
mutate(conf_int_low=prop.test(software_count,sum)$conf.int[1],
conf_int_high=prop.test(software_count,sum)$conf.int[2])
# now plotting citation functions
# (superseded draft kept for reference)
# access <- categories %>%
#   filter(category %in% c("no access", "proprietary", "free access")) %>%
#   select(-coding_id, -coding_scheme) %>%
#   pivot_wider(names_from="category", values_from="coding_result") %>%
#   mutate(category = case_when(
#     `no access` == 0 ~ "accessible",
#     `proprietary` == 1 ~ "accessible",
#     `free access` == 1 ~ "accessible",
#     TRUE ~ as.character("not accessible")
#   )) %>%
#   select(-`no access`, -proprietary, -`free access`) %>%
#   mutate(coding_result = if_else(category=="accessible", "1", "0")) %>%
#   mutate(category ="accessible") %>%
#   select(sample_id, coding_result, category)
#
# mention_functions <- categories %>%
#   filter(category %in% c("identifiable", "findable", "source code accessible",
#                          "modifiable")) %>%
#   select(-coding_id, -coding_scheme) %>%
#   bind_rows(access) %>%
#   arrange(sample_id) %>%
#   group_by(category, coding_result) %>%
#   summarise(mention_count = n_distinct(sample_id)) %>%
#   filter(coding_result == "1") %>%
#   mutate(sum = 210) %>%
#   mutate(proportion = mention_count/sum) %>%
#   select(-coding_result) %>%
#   rowwise() %>%
#   mutate(conf_int_low=prop.test(mention_count,sum)$conf.int[1],
#          conf_int_high=prop.test(mention_count,sum)$conf.int[2])
# Order accessibility labels from least to most open for plotting.
accessibility$label <- factor(accessibility$label,
levels=c("accessible", "free",
"source code accessible",
"source code modifiable"))
# Bar chart of accessibility properties with 95% CIs.
accessibility %>%
ggplot(aes(x=label, y=proportion)) +
geom_bar(stat='identity', fill='darkgray') +
geom_errorbar(aes(ymin=conf_int_low, ymax=conf_int_high), width=.2,
position=position_dodge(.9)) +
scale_x_discrete(name="") +
scale_y_continuous(name="Proportion") +
theme(panel.grid.major.x = element_blank(),
panel.grid.minor.y = element_blank(),
panel.border = element_blank(),
text = element_text(size=14),
axis.title.y = element_text(vjust=0.3),
axis.text.x = element_text(angle=30, hjust=1))
ggsave(filename="draft/output/accessibility.png", width=6, height=4)
# Classify each mentioned software product into one exclusive access type;
# "non commercial" is the residual (accessible, neither proprietary nor
# open source licensed). Denominator is the total across categories.
mentioned_access <- categories %>%
filter(category %in% c("no access", "proprietary", "free access",
"source code accessible", "modifiable",
"open source licensed")) %>%
select(-coding_id, -coding_scheme) %>%
pivot_wider(names_from=category, values_from=coding_result) %>%
rename(c(`not accessible`="no access",
`open source`="open source licensed")) %>%
mutate(`non commercial`=if_else(`not accessible`=="0" &
`open source`=="0" & `proprietary`=="0",
1, 0)) %>%
select(-`free access`, -`source code accessible`, -modifiable) %>%
pivot_longer(!sample_id, names_to="category", values_to="coding_result") %>%
filter(coding_result==1) %>%
select(-coding_result) %>%
left_join(name_list, by="sample_id") %>%
group_by(category) %>%
summarise(software_count=n_distinct(software_name)) %>%
mutate(sum = sum(software_count)) %>%
mutate(proportion=software_count/sum) %>%
rowwise() %>%
mutate(conf_int_low=prop.test(software_count,sum)$conf.int[1],
conf_int_high=prop.test(software_count,sum)$conf.int[2])
# Order access types from closed to open.
mentioned_access$category <- factor(mentioned_access$category,
levels=c("not accessible", "proprietary",
"non commercial", "open source"))
# Bar chart of software access types with 95% CIs.
mentioned_access %>%
ggplot(aes(x=category, y=proportion)) +
geom_bar(stat='identity', fill='darkgray') +
geom_errorbar(aes(ymin=conf_int_low, ymax=conf_int_high), width=.2,
position=position_dodge(.9)) +
scale_x_discrete(name="") +
scale_y_continuous(name="Proportion") +
theme(panel.grid.major.x = element_blank(),
panel.grid.minor.y = element_blank(),
panel.border = element_blank(),
text = element_text(size=14),
axis.title.y = element_text(vjust=0.3),
axis.text.x = element_text(angle=30, hjust=1))
ggsave(filename="draft/output/mentioned_software_types.png", width=6, height=4)
# software access X mention type
# Classify each mention's software into one exclusive access type, kept as a
# sample_id -> category table for the joins below.
# FIX: the residual category was misspelled "on commercial" and the renamed
# columns were capitalized ("Not accessible", "Proprietary", "Open source"),
# so they never matched the lowercase factor levels later set on
# type_cite$software_type, turning every software_type into NA. The names
# now match mentioned_access and the factor levels.
a <- categories %>%
  filter(category %in% c("no access", "proprietary", "free access",
                         "source code accessible", "modifiable",
                         "open source licensed")) %>%
  select(-coding_id, -coding_scheme) %>%
  pivot_wider(names_from=category, values_from=coding_result) %>%
  rename(c(`not accessible`="no access",
           `open source`="open source licensed")) %>%
  # "non commercial" is the residual: accessible, but neither proprietary
  # nor open source licensed.
  mutate(`non commercial`=if_else(`not accessible`=="0" &
                                    `open source`=="0" & proprietary=="0",
                                  1, 0)) %>%
  select(-`free access`, -`source code accessible`, -modifiable) %>%
  pivot_longer(!sample_id, names_to="category", values_to="coding_result") %>%
  filter(coding_result==1) %>%
  select(-coding_result)
# Interactive check: distinct software products per access type (View is a
# debugging leftover; it only opens a data viewer).
a %>%
left_join(name_list, by="sample_id") %>%
group_by(category) %>%
summarise(software_n = n_distinct(software_name)) %>% View
# Per-mention label, same precedence logic as condensed_categories above,
# kept as a sample_id -> label lookup for the joins below.
b <- categories %>%
filter(category == "like instrument") %>%
bind_rows(categories %>% filter(category == "cite to domain publication")) %>%
bind_rows(categories %>% filter(category == "cite to software publication")) %>%
bind_rows(categories %>% filter(category == "cite to software")) %>%
bind_rows(categories %>% filter(category == "in-text name")) %>%
select(-coding_id, -coding_scheme) %>%
pivot_wider(names_from="category", values_from="coding_result") %>%
mutate(label = case_when(
`cite to software` == 1 ~ "cite to software",
`like instrument` == 1 ~ "like instrument",
`cite to domain publication` == 1 ~ "cite to publication",
`cite to software publication` == 1 ~ "cite to publication",
`in-text name` == 1 ~ "informal",
TRUE ~ as.character(sample_id),
)) %>%
select(sample_id, label)
# Contingency table of software access type x mention type.
# FIX: cells with no observations come out of pivot_wider() as NA, which
# breaks chisq.test(); the original only zero-filled two of the four mention
# type columns, so zero-fill all count columns instead.
# NOTE(review): the name `c` shadows base::c(); kept to avoid breaking the
# later uses of `c` in this script.
c <- a %>%
  left_join(b, by="sample_id") %>%
  rename(c(software_type="category", mention_type="label")) %>%
  group_by(software_type, mention_type) %>%
  summarise(mention_count=n_distinct(sample_id)) %>%
  pivot_wider(names_from="mention_type", values_from="mention_count") %>%
  ungroup() %>%
  mutate(across(-software_type, ~ replace_na(.x, 0)))
# Total mention count, then a chi-squared test of independence between
# software access type (rows) and mention type (the four count columns).
sum(c[, 2:5])
chisq.test(c[, 2:5])
# Proportion of each mention type within each software access type, with
# binomial CIs (denominator = total mentions of that access type).
type_cite <- a %>%
left_join(b, by="sample_id") %>%
rename(c(software_type="category", mention_type="label")) %>%
group_by(software_type, mention_type) %>%
summarise(mention_count=n_distinct(sample_id)) %>%
ungroup() %>%
group_by(software_type) %>%
mutate(mention_type_count=sum(mention_count)) %>%
mutate(proportion=mention_count/mention_type_count) %>%
rowwise() %>%
mutate(conf_int_low=prop.test(mention_count, mention_type_count)$conf.int[1],
conf_int_high=prop.test(mention_count, mention_type_count)$conf.int[2])
# NOTE(review): these levels are lowercase; they only match if `a` produces
# lowercase category names ("not accessible", ..., "non commercial") --
# otherwise factor() yields NA.
type_cite$software_type <- factor(type_cite$software_type,
levels=c("not accessible", "proprietary",
"non commercial", "open source"))
type_cite$mention_type <- factor(type_cite$mention_type,
levels=c("cite to software", "cite to publication",
"like instrument", "informal"))
# Faceted bar chart: mention-type proportions within each software access
# type, with 95% CIs.
# FIX: the original piped the contingency table `c` into ggplot, but `c` has
# no proportion/conf_int columns and no factor ordering; the plot data is
# type_cite, computed above.
type_cite %>%
  ggplot(aes(x=mention_type, y=proportion)) +
  geom_bar(stat='identity', fill='darkgray') +
  facet_wrap(vars(software_type), nrow=1) +
  geom_errorbar(aes(ymin=conf_int_low, ymax=conf_int_high), width=.2,
                position=position_dodge(.9)) +
  scale_x_discrete(name="") +
  scale_y_continuous(name="Proportion") +
  theme(panel.grid.major.x = element_blank(),
        panel.grid.minor.y = element_blank(),
        panel.border = element_blank(),
        text = element_text(size=10),
        axis.title.y = element_text(vjust=0.3),
        axis.text.x = element_text(angle=30, hjust=1))
# Save under draft/output/ like every other figure in this script.
ggsave(filename="draft/output/software_x_mention_type.png", width=6.4, height=4)
| /code/categories_a.R | permissive | caifand/cord19-sw-analysis | R | false | false | 24,861 | r | # August 30, 2021. Mon
# Analyzing categories
library(tidyverse)
library(xtable)
# Sampling frame of software mentions. Clean up issue dates, keep post-2015
# articles, and normalize two stratum/density labels that changed between
# drafts.
frame <- read_csv("draft/data/sample_frame.csv") %>%
mutate(issue_dates=str_sub(issue_dates, 3, -3)) %>%
mutate(issue_year=as.numeric(str_sub(issue_dates, 1, 4))) %>%
filter(issue_year>2015) %>%
mutate(mention_density_group=if_else(mention_density_group=="[0,1]",
"(0,1]", mention_density_group)) %>%
mutate(stratum=if_else(stratum=="[1001,12895]", "[1001,12982]",
stratum))
# Order the journal impact factor strata for tables and plots.
frame$stratum <- factor(frame$stratum, levels=c("[1,10]", "[11,100]",
"[101,1000]","[1001,12982]",
"No Impact Factor"))
# NOTE(review): the pipe below ends at View (interactive inspection of the
# lowest stratum); the following ggplot chain is detached from any data --
# it looks like debugging residue. To render the violin plot, pipe `frame`
# (without the filter/arrange/View lines) into ggplot.
frame %>%
distinct(doc_key, mention_density, stratum) %>%
mutate(mention_density=as.integer(mention_density)) %>%
filter(stratum=="[1,10]") %>%
arrange(desc(mention_density)) %>% View
ggplot(aes(x=stratum, y=mention_density)) +
# geom_boxplot(notch=T, outlier.alpha=0.1) +
geom_violin(draw_quantiles=0.5, trim=T, fill="darkgrey") +
scale_y_log10(limits=c(0.5, 400), breaks=c(1, 10, 100, 300)) +
labs(x="Journal impact factor stratum", y="Mention density per article")
ggsave(filename="draft/output/mention_dist_by_strata.png", width=6, height=4)
# Load the individual coders' sheets plus the agreement-coding round.
james_a <- read_csv("cord19-sw-analysis/data/sample_coding/coding_sheet_v1_james-coded.csv")
james_b <- read_csv("cord19-sw-analysis/data/sample_coding/coding_sheet_v1.1_james.csv")
hannah <- read_csv("cord19-sw-analysis/data/sample_coding/coding_sheet_v1_hannah-updated.csv")
fan <- read_csv("cord19-sw-analysis/data/sample_coding/coding_sheet_v1_fan.csv")
agreement <- read_csv("cord19-sw-analysis/data/agreement_coding/agreement_coding.csv")
james_c <- read_csv("cord19-sw-analysis/data/sample_coding/coding_sheet_v1.2_james-updated.csv")
hannah_b <- read_csv("cord19-sw-analysis/data/sample_coding/coding_sheet_v1.2_hannah-updated.csv")
fan_b <- read_csv("cord19-sw-analysis/data/sample_coding/coding_sheet_v1.2_fan-updated.csv")
fan_c <- read_csv("cord19-sw-analysis/data/sample_coding/coding_sheet_v1.2_fan_2.csv")
fan_d <- read_csv("cord19-sw-analysis/data/sample_coding/coding_sheet_v1.2_fan_3.csv")
# Stack all coding rounds into one long table and persist the combined file;
# subsequent runs reload it from draft/data/.
all <- rbind(james_a, james_b, hannah, fan, agreement, james_c, hannah_b,
fan_b, fan_c, fan_d)
all %>% distinct(sample_id)
all %>% write_csv("data/full_coding_results.csv")
all <- read_csv("draft/data/full_coding_results.csv")
# sanity check: true-positive mentions per sampling group, post-2015 only
# (View is interactive inspection).
all %>%
filter(coding_id=="A1" & coding_result==1) %>%
distinct(sample_id) %>%
mutate(sample_id=str_extract(sample_id, "\\d+-\\d+-\\d+")) %>%
left_join(frame, by="sample_id") %>% drop_na(anno_key) %>%
distinct(sample_id, anno_key, doc_key, group_num, doc_num, anno_num, issue_year) %>%
filter(issue_year > 2015) %>%
group_by(group_num) %>%
summarise(doc_count=n_distinct(doc_key)) %>% View
# Articles per mention-density bucket.
# NOTE(review): this figure saves to output/ while the rest of the script
# saves to draft/output/ -- confirm which directory is intended.
frame %>%
distinct(mention_density, doc_key) %>%
mutate(mention_density_group = cut(mention_density, breaks=c(0,1,8,350),
labels=c("(0,1]","[2,8]","[9,350]"))) %>%
group_by(mention_density_group) %>%
summarise(doc_count=n_distinct(doc_key)) %>%
ggplot(aes(x=mention_density_group, y=doc_count)) +
geom_bar(stat='identity', fill='darkgray') +
geom_text(aes(label=doc_count, x=mention_density_group, y=doc_count),
position=position_dodge(width=0.4), vjust=-.8, size=3) +
scale_x_discrete(name="Mention density per article") +
scale_y_continuous(name="Number of articles mentioning software", limits=c(0,30000))
ggsave(filename="output/mention_density_group.png", width=6, height=4)
# LaTeX table: article counts by mention-density group x impact stratum.
frame %>%
distinct(doc_key, mention_density_group, stratum) %>%
group_by(mention_density_group, stratum) %>%
summarise(doc_count=n_distinct(doc_key)) %>%
ungroup() %>%
pivot_wider(names_from=stratum, values_from=doc_count) %>%
mutate(`[1,10]`=replace_na(`[1,10]`, 0)) %>%
rename(`Impact stratum\nMention density`="mention_density_group") %>%
xtable(., type='latex')
# Valid (true positive, post-2015) sampled mentions joined to frame metadata.
all_valid <- all %>%
filter(coding_id=="A1" & coding_result==1) %>%
distinct(sample_id) %>%
mutate(sample_id=str_extract(sample_id, "\\d+-\\d+-\\d+")) %>%
left_join(frame, by="sample_id") %>% drop_na(anno_key) %>%
distinct(sample_id, anno_key, doc_key, group_num, doc_num, anno_num,
issue_year, stratum, mention_density_group) %>%
filter(issue_year > 2015)
all_valid_id <- all_valid %>% distinct(sample_id) %>% pull()
# Normalize sample_id format so coding rows join against the frame.
all_for_join <- all %>%
mutate(sample_id=str_extract(sample_id, "\\d+-\\d+-\\d+"))
# Interactive duplicate check: rows per sample_id.
all %>% group_by(sample_id) %>%
summarise(row_n = n()) %>%
arrange(desc(row_n)) %>% View
# Keep only valid mentions, treat un-coded cells as 0, and drop coding items
# excluded from analysis.
all_cleaned <- all_for_join %>%
filter(sample_id %in% all_valid_id) %>%
mutate(coding_result=replace_na(coding_result, 0)) %>%
filter(!coding_id %in% c("A12", "A13", "B1", "B8", "D15", "D16", "E4"))
# Calculate the false positive rate of mention extraction: the share of
# sampled mentions coded as not actually software (A1 == 0) among all coded
# mentions, with a binomial 95% CI.
false_pos <- all %>%
  filter(coding_id == "A1") %>%
  mutate(coding_result=replace_na(coding_result, 0)) %>%
  distinct(sample_id, coding_id, coding_result) %>%
  group_by(coding_result) %>%
  summarise(mention_count = n_distinct(sample_id)) %>% pull(mention_count)
# false_pos is c(n_false, n_true): groups sort ascending, 0 before 1.
# FIX: prop.test()'s trial count must be the total number of coded mentions,
# not the true-positive count alone, so use sum(false_pos) rather than
# false_pos[2].
conf_int <- prop.test(false_pos[1], sum(false_pos))$conf.int
round(conf_int[1], 3)
round(conf_int[2], 3)
# categorizing: keep only true-positive mentions, drop free-text columns, and
# map each coding_id to a human-readable category label.
categories <- all_cleaned %>%
filter(coding_id == "A1" & coding_result == 1) %>%
distinct(sample_id) %>%
left_join(all_cleaned, by="sample_id") %>% distinct() %>%
select(-hint, -memo, -explanation) %>%
filter(!coding_id %in% c("A1", "A4", "A6", "A8", "A10", "B1", "B2")) %>%
mutate(category = case_when(
coding_id == "A2" ~ "like instrument",
coding_id == "A3" ~ "in-text name",
coding_id == "A5" ~ "in-text version",
coding_id == "A7" ~ "in-text publisher",
coding_id == "A9" ~ "in-text URL",
coding_id == "A11" ~ "configuration details",
coding_id == "A12" ~ "software used",
coding_id == "A13" ~ "software not used",
coding_id == "B3" ~ "cite to software publication",
coding_id == "B4" ~ "cite to software",
coding_id == "B5" ~ "cite to domain publication",
coding_id == "B6" ~ "cite to user manual/guide",
coding_id == "B7" ~ "cite to a project",
coding_id == "B9" ~ "in-reference name",
coding_id == "B10" ~ "in-reference version",
coding_id == "B11" ~ "in-reference URL",
coding_id == "B12" ~ "in-reference publisher",
coding_id == "C1" ~ "identifiable",
coding_id == "C2" ~ "findable",
coding_id == "C3" ~ "findable version",
coding_id == "C4" ~ "cite to a unique, persistent identifier that points to software",
coding_id == "C5" ~ "cite to a commit hash",
coding_id == "C6" ~ "no access",
coding_id == "C7" ~ "proprietary",
coding_id == "C8" ~ "free access",
coding_id == "C9" ~ "source code accessible",
coding_id == "C10" ~ "modifiable",
coding_id == "C11" ~ "open source licensed",
coding_id == "D1" ~ "matched to citation request",
coding_id == "D2" ~ "plain text citation request",
coding_id == "D3" ~ "BibTex citation request",
coding_id == "D4" ~ "citation request in repo README",
coding_id == "D5" ~ "citation request on webpage",
coding_id == "D6" ~ "CITATION file",
coding_id == "D7" ~ "CITATION.cff",
coding_id == "D8" ~ "CodeMeta",
coding_id == "D9" ~ "domain-specific citation request",
coding_id == "D10" ~ "request to cite software",
coding_id == "D11" ~ "request to cite software publication",
coding_id == "D12" ~ "request to cite domain science publication",
coding_id == "D13" ~ "request to cite project",
coding_id == "D14" ~ "request to cite other research product",
coding_id == "E1" ~ "software is archived",
coding_id == "E2" ~ "software has unique, persistent identifier",
coding_id == "E3" ~ "software has publicly accessible metadata",
TRUE ~ as.character(coding_id)
))
# types of software mentions: for each mention-form category, the share of
# mentions where it applies, with binomial CIs.
mention_types <- categories %>%
filter(coding_id %in% c("A2", "A3", "A5", "A7", "A9", "A11",
"B3", "B4", "B5", "B6", "B7", "B9",
"B10", "B11", "B12", "C4", "C5")) %>%
# note that 157 true positive software mentions here
group_by(category, coding_result) %>%
summarise(mention_count = n_distinct(sample_id)) %>%
# ungroup() %>%
pivot_wider(names_from=coding_result,
values_from=mention_count) %>%
rename(c(true="1", false="0")) %>%
mutate(false = replace_na(false, 0),
true = replace_na(true, 0),
sum = false + true) %>%
mutate(proportion = round(true/sum,3)) %>%
rowwise() %>%
mutate(conf_int_low=prop.test(true,sum)$conf.int[1],
conf_int_high=prop.test(true,sum)$conf.int[2])
# we don't have non-named software this time
# but this could be biased by the extraction?
# The utility of them for detection would mostly be:
# if they are software publications, they provide names etc. of the software
# Interactive check: mentions with at least one reference-list feature
# (i.e. formal citations).
categories %>%
filter(coding_id %in% c("B3", "B4", "B5", "B6", "B7", "B9", "B10", "B11",
"B12")) %>%
group_by(sample_id) %>%
summarise(reference_check = sum(coding_result)) %>%
filter(reference_check > 0) %>% View
# 38 formal citations (18%), 172 informal mentions (82%)
# Interactive check: all codings for mentions that cite software directly.
categories %>%
filter(category=="cite to software" & coding_result==1) %>%
distinct(sample_id) %>%
left_join(categories, by="sample_id") %>%
View
# Exclusive mention-form labels per mention, then proportions with binomial
# CIs. NOTE(review): the denominator 210 (total mentions) and the 0.82/0.18
# formal/informal split are hard-coded from the counts above -- update them
# if the sample changes.
mention_type_plot <- categories %>%
distinct(sample_id, category, coding_result) %>%
pivot_wider(names_from=category, values_from=coding_result) %>%
mutate(label = case_when(
`cite to software` == 1 ~ "Cite to software",
`cite to software publication` == 1 ~ "Software publication",
`cite to domain publication` == 1 ~ "Domain publication",
`like instrument` == 1 ~ "Like instrument",
`in-text URL` == 1 ~ "URL in text",
`in-text name` == 1 & `in-text version` == 0 &
`in-text publisher` == 0 & `in-text URL` == 0 ~ "Name only",
# `in-text name` == 1 & `in-text version` == 1 &
# `in-text publisher` == 0 & `in-text URL` == 0 ~ "In-text version",
TRUE ~ as.character("Other")
)) %>%
select(sample_id, label) %>%
group_by(label) %>%
summarise(mention_count = as.numeric(n_distinct(sample_id))) %>%
ungroup() %>%
filter(label != "NA") %>%
mutate(sum = 210) %>%
mutate(proportion = round(mention_count/sum, 3)) %>%
rowwise() %>%
mutate(conf_int_low=prop.test(mention_count, sum)$conf.int[1],
conf_int_high=prop.test(mention_count, sum)$conf.int[2]) %>%
mutate(type=if_else(
label %in% c("Name only", "URL in text", "Like instrument", "Other"),
"Informal mention", "Formal citation")) %>%
group_by(type) %>%
mutate(type_prop=if_else(type=="Informal mention", 0.82, 0.18))
# mention_type_plot$type <- factor(mention_type_plot$type,
#                                  leveels=c("Informal mention", "Formal citation"))
# Fix the display order of the labels for plotting.
mention_type_plot$label <- factor(mention_type_plot$label,
levels=c("Cite to software",
"Domain publication",
"Software publication",
"Like instrument",
"URL in text",
"Name only",
"Other"))
# Grouped bar chart: mention-form proportions nested inside the
# formal/informal split (the outline columns show the 0.82/0.18 totals).
mention_type_plot %>%
ggplot(aes(x=type, y=proportion, fill=label)) +
geom_bar(position="dodge", stat='identity') +
geom_errorbar(aes(x=type, ymin=conf_int_low, ymax=conf_int_high), width=.2,
position=position_dodge(.9)) +
geom_col(data=mention_type_plot %>% distinct(type, type_prop),
aes(x=type, y=type_prop), fill=NA, colour="darkgrey") +
geom_text(aes(label=label), position=position_dodge(.9), vjust=-4, hjust=.5,
size=3, colour="black") +
# geom_hline(yintercept=0.82, linetype="dotted", colour="darkgrey", size=1) +
scale_fill_grey(start=0.4, end=0.8) +
scale_x_discrete(name="") +
scale_y_continuous(limits=c(-0,0.9), breaks=c(0, 0.2, 0.4, 0.6, 0.8),
name="Proportion") +
theme(legend.position="none",
axis.text.y=element_text(angle=90, hjust=0.5))
ggsave(filename="draft/output/mention_class.png", width=6.4, height=4)
# Simple bar chart of the same mention-form proportions with 95% CIs.
mention_type_plot %>%
ggplot(aes(x=label, y=proportion)) +
geom_bar(stat="identity", fill="darkgray") +
geom_errorbar(aes(ymin=conf_int_low, ymax=conf_int_high), width=.2,
position=position_dodge(.9)) +
scale_x_discrete(name="") +
scale_y_continuous(limits=c(0, 0.7),
name="Proportion") +
theme(panel.grid.major.x = element_blank(),
panel.grid.minor.y = element_blank(),
panel.border = element_blank(),
text = element_text(size=10),
axis.title.y = element_text(vjust=0.3),
axis.text.x = element_text(angle=30, hjust=1))
ggsave(filename="draft/output/mention_types.png", width=6.4, height=4)
# Condense the detailed mention categories into four mutually exclusive
# labels; case_when() order encodes precedence. prop.test() gives a binomial
# 95% CI per label.
condensed_categories <- categories %>%
filter(category == "like instrument") %>%
bind_rows(categories %>% filter(category == "cite to domain publication")) %>%
bind_rows(categories %>% filter(category == "cite to software publication")) %>%
bind_rows(categories %>% filter(category == "cite to software")) %>%
bind_rows(categories %>% filter(category == "in-text name")) %>%
select(-coding_id, -coding_scheme) %>%
pivot_wider(names_from="category", values_from="coding_result") %>%
mutate(label = case_when(
`cite to software` == 1 ~ "Cite to software",
`like instrument` == 1 ~ "Like instrument",
`cite to domain publication` == 1 ~ "Cite to publication",
`cite to software publication` == 1 ~ "Cite to publication",
`in-text name` == 1 ~ "Informal",
TRUE ~ as.character(sample_id),
)) %>%
group_by(label) %>%
summarise(mention_count=n_distinct(sample_id)) %>%
mutate(sum=sum(mention_count)) %>%
mutate(proportion = mention_count/sum) %>%
rowwise() %>%
mutate(conf_int_low=prop.test(mention_count, sum)$conf.int[1],
conf_int_high=prop.test(mention_count, sum)$conf.int[2])
# Fix the display order of the labels for plotting.
condensed_categories$label <- factor(condensed_categories$label,
levels=c("Cite to software",
"Cite to publication",
"Like instrument",
"Informal"))
# Bar chart of condensed mention types with 95% CIs.
condensed_categories %>%
ggplot(aes(x=label, y=proportion)) +
geom_bar(stat="identity", fill="darkgray") +
geom_errorbar(aes(ymin=conf_int_low, ymax=conf_int_high), width=.2,
position=position_dodge(.9)) +
scale_x_discrete(name="") +
scale_y_continuous(limits=c(0, 0.7),
name="Proportion") +
theme(panel.grid.major.x = element_blank(),
panel.grid.minor.y = element_blank(),
panel.border = element_blank(),
text = element_text(size=10),
axis.title.y = element_text(vjust=0.3),
axis.text.x = element_text(angle=30, hjust=1))
ggsave(filename="draft/output/mention_types_condensed.png", width=6, height=4)
# sample_id -> software_name lookup, used to count distinct software products
# rather than distinct mentions.
name_list <- frame %>%
distinct(sample_id, software_name)
# Accessibility properties per software product (cumulative, not exclusive).
# NOTE(review): the label mapping sends both the derived `accessible` column
# and `proprietary` to the same label "accessible", so that bar counts
# software that is accessible OR proprietary -- confirm this is intended.
# The denominator 155 (distinct software products) is hard-coded.
accessibility <- categories %>%
filter(category %in% c("no access", "proprietary",
"free access", "source code accessible", "modifiable")) %>%
select(-coding_id, -coding_scheme) %>%
pivot_wider(names_from="category", values_from="coding_result") %>%
mutate(accessible=if_else(`no access`==0, 1, 0)) %>%
select(-`no access`) %>%
mutate(proprietary=as.numeric(proprietary),
`free access`=as.numeric(`free access`),
`source code accessible`=as.numeric(`source code accessible`),
modifiable=as.numeric(modifiable)) %>%
pivot_longer(!sample_id, names_to='label', values_to='value') %>%
mutate(label=case_when(
label=="proprietary" ~ "accessible",
label=="free access" ~ "free",
label=="modifiable" ~ "source code modifiable",
TRUE ~ as.character(label)
)) %>%
distinct() %>%
left_join(name_list, by="sample_id") %>%
group_by(label, value) %>%
summarise(software_count = n_distinct(software_name)) %>%
filter(value==1) %>%
mutate(sum=155) %>%
mutate(proportion = round(software_count/sum,3)) %>%
rowwise() %>%
mutate(conf_int_low=prop.test(software_count,sum)$conf.int[1],
conf_int_high=prop.test(software_count,sum)$conf.int[2])
# now plotting citation functions
# (superseded draft kept for reference)
# access <- categories %>%
#   filter(category %in% c("no access", "proprietary", "free access")) %>%
#   select(-coding_id, -coding_scheme) %>%
#   pivot_wider(names_from="category", values_from="coding_result") %>%
#   mutate(category = case_when(
#     `no access` == 0 ~ "accessible",
#     `proprietary` == 1 ~ "accessible",
#     `free access` == 1 ~ "accessible",
#     TRUE ~ as.character("not accessible")
#   )) %>%
#   select(-`no access`, -proprietary, -`free access`) %>%
#   mutate(coding_result = if_else(category=="accessible", "1", "0")) %>%
#   mutate(category ="accessible") %>%
#   select(sample_id, coding_result, category)
#
# mention_functions <- categories %>%
#   filter(category %in% c("identifiable", "findable", "source code accessible",
#                          "modifiable")) %>%
#   select(-coding_id, -coding_scheme) %>%
#   bind_rows(access) %>%
#   arrange(sample_id) %>%
#   group_by(category, coding_result) %>%
#   summarise(mention_count = n_distinct(sample_id)) %>%
#   filter(coding_result == "1") %>%
#   mutate(sum = 210) %>%
#   mutate(proportion = mention_count/sum) %>%
#   select(-coding_result) %>%
#   rowwise() %>%
#   mutate(conf_int_low=prop.test(mention_count,sum)$conf.int[1],
#          conf_int_high=prop.test(mention_count,sum)$conf.int[2])
# Order accessibility labels from least to most open for plotting.
accessibility$label <- factor(accessibility$label,
levels=c("accessible", "free",
"source code accessible",
"source code modifiable"))
# Bar chart of accessibility properties with 95% CIs.
accessibility %>%
ggplot(aes(x=label, y=proportion)) +
geom_bar(stat='identity', fill='darkgray') +
geom_errorbar(aes(ymin=conf_int_low, ymax=conf_int_high), width=.2,
position=position_dodge(.9)) +
scale_x_discrete(name="") +
scale_y_continuous(name="Proportion") +
theme(panel.grid.major.x = element_blank(),
panel.grid.minor.y = element_blank(),
panel.border = element_blank(),
text = element_text(size=14),
axis.title.y = element_text(vjust=0.3),
axis.text.x = element_text(angle=30, hjust=1))
ggsave(filename="draft/output/accessibility.png", width=6, height=4)
# Classify each mentioned software product into one exclusive access type;
# "non commercial" is the residual (accessible, neither proprietary nor
# open source licensed). Denominator is the total across categories.
mentioned_access <- categories %>%
filter(category %in% c("no access", "proprietary", "free access",
"source code accessible", "modifiable",
"open source licensed")) %>%
select(-coding_id, -coding_scheme) %>%
pivot_wider(names_from=category, values_from=coding_result) %>%
rename(c(`not accessible`="no access",
`open source`="open source licensed")) %>%
mutate(`non commercial`=if_else(`not accessible`=="0" &
`open source`=="0" & `proprietary`=="0",
1, 0)) %>%
select(-`free access`, -`source code accessible`, -modifiable) %>%
pivot_longer(!sample_id, names_to="category", values_to="coding_result") %>%
filter(coding_result==1) %>%
select(-coding_result) %>%
left_join(name_list, by="sample_id") %>%
group_by(category) %>%
summarise(software_count=n_distinct(software_name)) %>%
mutate(sum = sum(software_count)) %>%
mutate(proportion=software_count/sum) %>%
rowwise() %>%
mutate(conf_int_low=prop.test(software_count,sum)$conf.int[1],
conf_int_high=prop.test(software_count,sum)$conf.int[2])
# Order access types from closed to open.
mentioned_access$category <- factor(mentioned_access$category,
levels=c("not accessible", "proprietary",
"non commercial", "open source"))
# Bar chart of software access types with 95% CIs.
mentioned_access %>%
ggplot(aes(x=category, y=proportion)) +
geom_bar(stat='identity', fill='darkgray') +
geom_errorbar(aes(ymin=conf_int_low, ymax=conf_int_high), width=.2,
position=position_dodge(.9)) +
scale_x_discrete(name="") +
scale_y_continuous(name="Proportion") +
theme(panel.grid.major.x = element_blank(),
panel.grid.minor.y = element_blank(),
panel.border = element_blank(),
text = element_text(size=14),
axis.title.y = element_text(vjust=0.3),
axis.text.x = element_text(angle=30, hjust=1))
ggsave(filename="draft/output/mentioned_software_types.png", width=6, height=4)
# software access X mention type
# Classify each mention's software into one exclusive access type, kept as a
# sample_id -> category table for the joins below.
# FIX: the residual category was misspelled "on commercial" and the renamed
# columns were capitalized ("Not accessible", "Proprietary", "Open source"),
# so they never matched the lowercase factor levels later set on
# type_cite$software_type, turning every software_type into NA. The names
# now match mentioned_access and the factor levels.
a <- categories %>%
  filter(category %in% c("no access", "proprietary", "free access",
                         "source code accessible", "modifiable",
                         "open source licensed")) %>%
  select(-coding_id, -coding_scheme) %>%
  pivot_wider(names_from=category, values_from=coding_result) %>%
  rename(c(`not accessible`="no access",
           `open source`="open source licensed")) %>%
  # "non commercial" is the residual: accessible, but neither proprietary
  # nor open source licensed.
  mutate(`non commercial`=if_else(`not accessible`=="0" &
                                    `open source`=="0" & proprietary=="0",
                                  1, 0)) %>%
  select(-`free access`, -`source code accessible`, -modifiable) %>%
  pivot_longer(!sample_id, names_to="category", values_to="coding_result") %>%
  filter(coding_result==1) %>%
  select(-coding_result)
# Interactive check: distinct software products per access type (View is a
# debugging leftover; it only opens a data viewer).
a %>%
left_join(name_list, by="sample_id") %>%
group_by(category) %>%
summarise(software_n = n_distinct(software_name)) %>% View
# Per-mention label, same precedence logic as condensed_categories above,
# kept as a sample_id -> label lookup for the joins below.
b <- categories %>%
filter(category == "like instrument") %>%
bind_rows(categories %>% filter(category == "cite to domain publication")) %>%
bind_rows(categories %>% filter(category == "cite to software publication")) %>%
bind_rows(categories %>% filter(category == "cite to software")) %>%
bind_rows(categories %>% filter(category == "in-text name")) %>%
select(-coding_id, -coding_scheme) %>%
pivot_wider(names_from="category", values_from="coding_result") %>%
mutate(label = case_when(
`cite to software` == 1 ~ "cite to software",
`like instrument` == 1 ~ "like instrument",
`cite to domain publication` == 1 ~ "cite to publication",
`cite to software publication` == 1 ~ "cite to publication",
`in-text name` == 1 ~ "informal",
TRUE ~ as.character(sample_id),
)) %>%
select(sample_id, label)
# Contingency table of software access type x mention type.
# FIX: cells with no observations come out of pivot_wider() as NA, which
# breaks chisq.test(); the original only zero-filled two of the four mention
# type columns, so zero-fill all count columns instead.
# NOTE(review): the name `c` shadows base::c(); kept to avoid breaking the
# later uses of `c` in this script.
c <- a %>%
  left_join(b, by="sample_id") %>%
  rename(c(software_type="category", mention_type="label")) %>%
  group_by(software_type, mention_type) %>%
  summarise(mention_count=n_distinct(sample_id)) %>%
  pivot_wider(names_from="mention_type", values_from="mention_count") %>%
  ungroup() %>%
  mutate(across(-software_type, ~ replace_na(.x, 0)))
# Total mention count, then a chi-squared test of independence between
# software access type (rows) and mention type (the four count columns).
sum(c[, 2:5])
chisq.test(c[, 2:5])
# Proportion of each mention type within each software access type, with
# binomial CIs (denominator = total mentions of that access type).
type_cite <- a %>%
left_join(b, by="sample_id") %>%
rename(c(software_type="category", mention_type="label")) %>%
group_by(software_type, mention_type) %>%
summarise(mention_count=n_distinct(sample_id)) %>%
ungroup() %>%
group_by(software_type) %>%
mutate(mention_type_count=sum(mention_count)) %>%
mutate(proportion=mention_count/mention_type_count) %>%
rowwise() %>%
mutate(conf_int_low=prop.test(mention_count, mention_type_count)$conf.int[1],
conf_int_high=prop.test(mention_count, mention_type_count)$conf.int[2])
# NOTE(review): these levels are lowercase; they only match if `a` produces
# lowercase category names ("not accessible", ..., "non commercial") --
# otherwise factor() yields NA.
type_cite$software_type <- factor(type_cite$software_type,
levels=c("not accessible", "proprietary",
"non commercial", "open source"))
type_cite$mention_type <- factor(type_cite$mention_type,
levels=c("cite to software", "cite to publication",
"like instrument", "informal"))
# Faceted bar chart: mention-type proportions within each software access
# type, with 95% CIs.
# FIX: the original piped the contingency table `c` into ggplot, but `c` has
# no proportion/conf_int columns and no factor ordering; the plot data is
# type_cite, computed above.
type_cite %>%
  ggplot(aes(x=mention_type, y=proportion)) +
  geom_bar(stat='identity', fill='darkgray') +
  facet_wrap(vars(software_type), nrow=1) +
  geom_errorbar(aes(ymin=conf_int_low, ymax=conf_int_high), width=.2,
                position=position_dodge(.9)) +
  scale_x_discrete(name="") +
  scale_y_continuous(name="Proportion") +
  theme(panel.grid.major.x = element_blank(),
        panel.grid.minor.y = element_blank(),
        panel.border = element_blank(),
        text = element_text(size=10),
        axis.title.y = element_text(vjust=0.3),
        axis.text.x = element_text(angle=30, hjust=1))
# Save under draft/output/ like every other figure in this script.
ggsave(filename="draft/output/software_x_mention_type.png", width=6.4, height=4)
|
# TODO: group the completions into different catagories according to
# https://github.com/wch/r-source/blob/trunk/src/library/utils/R/completion.R
# LSP CompletionItemKind codes, as defined by the Language Server Protocol
# specification.
CompletionItemKind <- list(
  Text = 1,
  Method = 2,
  Function = 3,
  Constructor = 4,
  Field = 5,
  Variable = 6,
  Class = 7,
  Interface = 8,
  Module = 9,
  Property = 10,
  Unit = 11,
  Value = 12,
  Enum = 13,
  Keyword = 14,
  Snippet = 15,
  Color = 16,
  File = 17,
  Reference = 18,
  Folder = 19,
  EnumMember = 20,
  Constant = 21,
  Struct = 22,
  Event = 23,
  Operator = 24,
  TypeParameter = 25
)
# LSP InsertTextFormat codes: 1 = literal text, 2 = snippet syntax
# (supports placeholders such as "$0").
InsertTextFormat <- list(
  PlainText = 1,
  Snippet = 2
)
# sortText prefixes: lexicographically lower prefixes sort first in the
# client, so argument completions outrank scope, workspace, imported and
# global symbols, in that order.
sort_prefixes <- list(
  arg = "0-",
  scope = "1-",
  workspace = "2-",
  imported = "3-",
  global = "4-"
)
# Built-in R constants offered as completions.
constants <- c("TRUE", "FALSE", "NULL",
  "NA", "NA_integer_", "NA_real_", "NA_complex_", "NA_character_",
  "Inf", "NaN")
#' Complete language constants
#'
#' Returns a list of completion items for the built-in constants whose
#' names start with `token` (case sensitive); empty list when none match.
#' @keywords internal
constant_completion <- function(token) {
  consts <- constants[startsWith(constants, token)]
  completions <- lapply(consts, function(const) {
    list(label = const,
      kind = CompletionItemKind$Constant,
      sortText = paste0(sort_prefixes$global, const),
      data = list(type = "constant")
    )
  })
  # FIX: return the completions visibly; the original ended on the
  # assignment, which returns its value invisibly (inconsistent with
  # package_completion()).
  completions
}
#' Complete a package name
#'
#' Builds one Module completion item per installed package whose name
#' starts with `token`.
#' @keywords internal
package_completion <- function(token) {
  pkgs <- .packages(all.available = TRUE)
  matched <- pkgs[startsWith(pkgs, token)]
  lapply(matched, function(pkg) {
    list(
      label = pkg,
      kind = CompletionItemKind$Module,
      sortText = paste0(sort_prefixes$global, pkg),
      data = list(type = "package")
    )
  })
}
#' Complete a function argument
#'
#' Builds completion items for the formal arguments of `funct` that start
#' with `token`. Items insert "arg = " and use the highest-priority sort
#' prefix, so parameters appear before other completion kinds.
#' Falls through (returns NULL invisibly) when the owning namespace cannot
#' be determined or the function has no character formal names.
#' @keywords internal
arg_completion <- function(workspace, token, funct, package = NULL, exported_only = TRUE) {
if (is.null(package)) {
# NOTE(review): assumes workspace$guess_namespace() returns NULL for an
# unknown function -- confirm against the Workspace implementation.
package <- workspace$guess_namespace(funct, isf = TRUE)
}
if (!is.null(package)) {
args <- names(workspace$get_formals(funct, package, exported_only = exported_only))
# get_formals() may return something without names (e.g. for primitives),
# hence the is.character() guard.
if (is.character(args)) {
token_args <- args[startsWith(args, token)]
completions <- lapply(token_args, function(arg) {
list(label = arg,
kind = CompletionItemKind$Variable,
detail = "parameter",
sortText = paste0(sort_prefixes$arg, arg),
insertText = paste0(arg, " = "),
insertTextFormat = InsertTextFormat$PlainText,
data = list(
type = "parameter",
funct = funct,
package = package
))
})
completions
}
}
}
#' Complete functions defined in a namespace
#'
#' Returns completion items for the functions in `ns` whose names start
#' with `token`. When snippet support is on, the insert text is
#' "name($0)", placing the cursor between the parentheses.
#' @keywords internal
ns_function_completion <- function(ns, token, exported_only, snippet_support) {
    nsname <- ns$package_name
    functs <- ns$get_symbols(want_functs = TRUE, exported_only = exported_only)
    functs <- functs[startsWith(functs, token)]
    if (nsname == WORKSPACE) {
        tag <- "[workspace]"
        sort_prefix <- sort_prefixes$workspace
    } else {
        tag <- paste0("{", nsname, "}")
        sort_prefix <- sort_prefixes$global
    }
    # Build each item once and add the snippet fields conditionally; the
    # previous version duplicated the entire lapply() for the two cases.
    snippets <- isTRUE(snippet_support)
    lapply(functs, function(object) {
        item <- list(label = object,
            kind = CompletionItemKind$Function,
            detail = tag,
            sortText = paste0(sort_prefix, object))
        if (snippets) {
            item$insertText <- paste0(object, "($0)")
            item$insertTextFormat <- InsertTextFormat$Snippet
        }
        item$data <- list(
            type = "function",
            package = nsname
        )
        item
    })
}
#' Complete objects explicitly imported into the workspace
#'
#' Offers the workspace's imported function objects whose names start with
#' `token`, tagged with the namespace they come from.
#' @keywords internal
imported_object_completion <- function(workspace, token, snippet_support) {
    completions <- list()
    for (object in workspace$imported_objects$keys()) {
        if (!startsWith(object, token)) {
            next
        }
        nsname <- workspace$imported_objects$get(object)
        ns <- workspace$get_namespace(nsname)
        if (is.null(ns) || !ns$exists_funct(object)) {
            next
        }
        item <- list(label = object,
            kind = CompletionItemKind$Function,
            detail = paste0("{", nsname, "}"),
            sortText = paste0(sort_prefixes$imported, object))
        if (isTRUE(snippet_support)) {
            item$insertText <- paste0(object, "($0)")
            item$insertTextFormat <- InsertTextFormat$Snippet
        }
        item$data <- list(
            type = "function",
            package = nsname
        )
        # Index assignment grows the list in amortized O(1); the previous
        # append() call copied the whole list on every iteration (O(n^2)).
        completions[[length(completions) + 1L]] <- item
    }
    completions
}
#' Complete any object in the workspace
#'
#' Produces completion items for functions, non-function symbols and lazy
#' data sets. With `package = NULL` the workspace itself plus all loaded
#' packages are searched; otherwise only `package` is searched, including
#' its unexported symbols when `exported_only` is FALSE (i.e. after `:::`).
#' @keywords internal
workspace_completion <- function(workspace, token,
    package = NULL, exported_only = TRUE, snippet_support = NULL) {
    completions <- list()
    if (is.null(package)) {
        # WORKSPACE is the pseudo-namespace for the user's own files
        packages <- c(WORKSPACE, workspace$loaded_packages)
    } else {
        packages <- c(package)
    }
    if (is.null(package) || exported_only) {
        # search every candidate namespace for exported symbols
        for (nsname in packages) {
            ns <- workspace$get_namespace(nsname)
            if (is.null(ns)) {
                next
            }
            if (nsname == WORKSPACE) {
                tag <- "[workspace]"
                sort_prefix <- sort_prefixes$workspace
            } else {
                tag <- paste0("{", nsname, "}")
                sort_prefix <- sort_prefixes$global
            }
            functs_completions <- ns_function_completion(ns, token,
                exported_only = TRUE, snippet_support = snippet_support)
            # exported symbols that are not functions
            nonfuncts <- ns$get_symbols(want_functs = FALSE, exported_only = TRUE)
            nonfuncts <- nonfuncts[startsWith(nonfuncts, token)]
            nonfuncts_completions <- lapply(nonfuncts, function(object) {
                list(label = object,
                    kind = CompletionItemKind$Field,
                    detail = tag,
                    sortText = paste0(sort_prefix, object),
                    data = list(
                        type = "nonfunction",
                        package = nsname
                    ))
            })
            # data sets registered via LazyData
            lazydata <- ns$get_lazydata()
            lazydata <- lazydata[startsWith(lazydata, token)]
            lazydata_completions <- lapply(lazydata, function(object) {
                list(label = object,
                    kind = CompletionItemKind$Field,
                    detail = tag,
                    sortText = paste0(sort_prefix, object),
                    data = list(
                        type = "lazydata",
                        package = nsname
                    ))
            })
            completions <- c(completions,
                functs_completions,
                nonfuncts_completions,
                lazydata_completions)
        }
    } else {
        # pkg::: access: include the package's unexported symbols as well
        ns <- workspace$get_namespace(package)
        if (!is.null(ns)) {
            tag <- paste0("{", package, "}")
            functs_completions <- ns_function_completion(ns, token,
                exported_only = FALSE, snippet_support = snippet_support)
            nonfuncts <- ns$get_symbols(want_functs = FALSE, exported_only = FALSE)
            nonfuncts <- nonfuncts[startsWith(nonfuncts, token)]
            nonfuncts_completions <- lapply(nonfuncts, function(object) {
                list(label = object,
                    kind = CompletionItemKind$Field,
                    detail = tag,
                    sortText = paste0(sort_prefixes$global, object),
                    data = list(
                        type = "nonfunction",
                        package = package
                    ))
            })
            completions <- c(completions,
                functs_completions,
                nonfuncts_completions)
        }
    }
    # symbols the workspace has recorded as explicitly imported
    imported_object <- imported_object_completion(workspace, token, snippet_support)
    completions <- c(
        completions,
        imported_object)
    completions
}
# XPath (over R's parse-data XML) selecting non-function bindings visible in
# a scope: formal parameter names, for-loop variables, and the target symbol
# of <-, -> and = assignments whose assigned value is NOT a function
# definition.
scope_completion_symbols_xpath <- paste(
    "FUNCTION/following-sibling::SYMBOL_FORMALS",
    "forcond/SYMBOL",
    "expr/LEFT_ASSIGN[not(following-sibling::expr/FUNCTION)]/preceding-sibling::expr[count(*)=1]/SYMBOL",
    "expr/RIGHT_ASSIGN[not(preceding-sibling::expr/FUNCTION)]/following-sibling::expr[count(*)=1]/SYMBOL",
    "equal_assign/EQ_ASSIGN[not(following-sibling::expr/FUNCTION)]/preceding-sibling::expr[count(*)=1]/SYMBOL",
    sep = "|")
# Complementary XPath: target symbols of assignments whose assigned value IS
# a function definition.
scope_completion_functs_xpath <- paste(
    "expr/LEFT_ASSIGN[following-sibling::expr/FUNCTION]/preceding-sibling::expr[count(*)=1]/SYMBOL",
    "expr/RIGHT_ASSIGN[preceding-sibling::expr/FUNCTION]/following-sibling::expr[count(*)=1]/SYMBOL",
    "equal_assign/EQ_ASSIGN[following-sibling::expr/FUNCTION]/preceding-sibling::expr[count(*)=1]/SYMBOL",
    sep = "|")
#' Complete symbols defined in the scopes enclosing the cursor
#'
#' Uses the document's parse-data XML to find assignments and formal
#' parameters in the scopes that enclose `point`, and offers them as
#' "[scope]" completions. Returns an empty list when no parse data is
#' available for the document.
scope_completion <- function(uri, workspace, token, point, snippet_support = NULL) {
    xdoc <- workspace$get_parse_data(uri)$xml_doc
    if (is.null(xdoc)) {
        return(list())
    }
    # point is 0-based (LSP convention); the parse data is 1-based
    enclosing_scopes <- xdoc_find_enclosing_scopes(xdoc,
        point$row + 1, point$col + 1)
    # non-function bindings visible at the cursor
    scope_symbols <- unique(xml_text(xml_find_all(enclosing_scopes, scope_completion_symbols_xpath)))
    scope_symbols <- scope_symbols[startsWith(scope_symbols, token)]
    scope_symbol_completions <- lapply(scope_symbols, function(symbol) {
        list(
            label = symbol,
            kind = CompletionItemKind$Field,
            sortText = paste0(sort_prefixes$scope, symbol),
            detail = "[scope]"
        )
    })
    # bindings whose assigned value is a function definition
    scope_functs <- unique(xml_text(xml_find_all(enclosing_scopes, scope_completion_functs_xpath)))
    scope_functs <- scope_functs[startsWith(scope_functs, token)]
    if (isTRUE(snippet_support)) {
        # snippet "name($0)" leaves the cursor between the parentheses
        scope_funct_completions <- lapply(scope_functs, function(symbol) {
            list(
                label = symbol,
                kind = CompletionItemKind$Function,
                detail = "[scope]",
                sortText = paste0(sort_prefixes$scope, symbol),
                insertText = paste0(symbol, "($0)"),
                insertTextFormat = InsertTextFormat$Snippet
            )
        })
    } else {
        scope_funct_completions <- lapply(scope_functs, function(symbol) {
            list(
                label = symbol,
                kind = CompletionItemKind$Function,
                sortText = paste0(sort_prefixes$scope, symbol),
                detail = "[scope]"
            )
        })
    }
    completions <- c(scope_symbol_completions, scope_funct_completions)
    completions
}
#' The response to a textDocument/completion request
#'
#' Aggregates completion candidates from language constants, installed
#' package names, enclosing scopes, the workspace and loaded namespaces,
#' and the formal arguments of the function call under the cursor.
#' @keywords internal
completion_reply <- function(id, uri, workspace, document, point, capabilities) {
    # bail out with an empty item list when the cursor is in a position
    # where completion does not apply (see check_scope)
    if (!check_scope(uri, document, point)) {
        return(Response$new(
            id,
            result = list(
                isIncomplete = FALSE,
                items = list()
            )))
    }
    # snippets need both client capability and the user option enabled
    snippet_support <- isTRUE(capabilities$completionItem$snippetSupport) &&
        getOption("languageserver.snippet_support", TRUE)
    completions <- list()
    token_result <- document$detect_token(point, forward = FALSE)
    full_token <- token_result$full_token
    token <- token_result$token
    package <- token_result$package
    if (nzchar(full_token)) {
        if (is.null(package)) {
            # bare token (no pkg:: prefix): constants, package names and
            # scope-local symbols are candidates as well
            completions <- c(
                completions,
                constant_completion(token),
                package_completion(token),
                scope_completion(uri, workspace, token, point, snippet_support))
        }
        # "::" restricts to exported objects; ":::" includes unexported ones
        completions <- c(
            completions,
            workspace_completion(
                workspace, token, package, token_result$accessor == "::", snippet_support))
    }
    # argument completion for the enclosing function call, if any
    call_result <- document$detect_call(point)
    if (nzchar(call_result$token)) {
        completions <- c(
            completions,
            arg_completion(workspace, token,
                call_result$token, call_result$package,
                exported_only = call_result$accessor != ":::"))
    }
    logger$info("completions: ", length(completions))
    Response$new(
        id,
        result = list(
            isIncomplete = FALSE,
            items = completions
        )
    )
}
#' The response to a completionItem/resolve request
#'
#' Lazily fills in the `documentation` field of a completion item, based
#' on the `data` attached when the item was created. Responds with an
#' empty result when no documentation can be resolved.
#' @keywords internal
completion_item_resolve_reply <- function(id, workspace, params) {
    resolved <- FALSE
    # NULL$type is NULL in R, so this single check also covers a missing
    # `data` field (the original used an empty if-branch for that case).
    type <- params$data$type
    if (!is.null(type)) {
        if (type == "package") {
            if (length(find.package(params$label, quiet = TRUE))) {
                desc <- utils::packageDescription(params$label, fields = c("Title", "Description"))
                # collapse hard-wrapped DESCRIPTION text onto one line
                description <- gsub("\\s*\n\\s*", " ", desc$Description)
                params$documentation <- list(
                    kind = "markdown",
                    value = sprintf("**%s**\n\n%s", desc$Title, description)
                )
                resolved <- TRUE
            }
        } else if (type == "parameter") {
            doc <- workspace$get_documentation(params$data$funct, params$data$package, isf = TRUE)
            doc_string <- NULL
            if (is.list(doc)) {
                doc_string <- doc$arguments[[params$label]]
            }
            if (!is.null(doc_string)) {
                params$documentation <- list(kind = "markdown", value = doc_string)
                resolved <- TRUE
            }
        } else if (type %in% c("constant", "function", "nonfunction", "lazydata")) {
            doc <- workspace$get_documentation(params$label, params$data$package,
                isf = type == "function")
            doc_string <- NULL
            if (is.character(doc)) {
                doc_string <- doc
            } else if (is.list(doc)) {
                doc_string <- doc$description
            }
            if (!is.null(doc_string)) {
                params$documentation <- list(kind = "markdown", value = doc_string)
                resolved <- TRUE
            }
        }
    }
    if (resolved) {
        # the client does not need the bookkeeping data echoed back
        params$data <- NULL
        Response$new(
            id,
            result = params
        )
    } else {
        Response$new(id)
    }
}
| /R/completion.R | no_license | hongooi73/languageserver | R | false | false | 15,338 | r | # TODO: group the completions into different catagories according to
# https://github.com/wch/r-source/blob/trunk/src/library/utils/R/completion.R
CompletionItemKind <- list(
Text = 1,
Method = 2,
Function = 3,
Constructor = 4,
Field = 5,
Variable = 6,
Class = 7,
Interface = 8,
Module = 9,
Property = 10,
Unit = 11,
Value = 12,
Enum = 13,
Keyword = 14,
Snippet = 15,
Color = 16,
File = 17,
Reference = 18,
Folder = 19,
EnumMember = 20,
Constant = 21,
Struct = 22,
Event = 23,
Operator = 24,
TypeParameter = 25
)
InsertTextFormat <- list(
PlainText = 1,
Snippet = 2
)
sort_prefixes <- list(
arg = "0-",
scope = "1-",
workspace = "2-",
imported = "3-",
global = "4-"
)
constants <- c("TRUE", "FALSE", "NULL",
"NA", "NA_integer_", "NA_real_", "NA_complex_", "NA_character_",
"Inf", "NaN")
#' Complete language constants
#'
#' Builds completion items for the built-in language constants (TRUE,
#' FALSE, NULL, the NA variants, Inf and NaN) whose names start with
#' `token`.
#'
#' @param token the partial symbol typed by the user
#' @return a list of LSP completion items (possibly empty)
#' @keywords internal
constant_completion <- function(token) {
    consts <- constants[startsWith(constants, token)]
    completions <- lapply(consts, function(const) {
        list(label = const,
            kind = CompletionItemKind$Constant,
            sortText = paste0(sort_prefixes$global, const),
            data = list(type = "constant")
        )
    })
    # Return the items explicitly; previously the function ended on the
    # assignment, which returns its value invisibly and is inconsistent
    # with the sibling *_completion() functions.
    completions
}
#' Complete a package name
#' @keywords internal
package_completion <- function(token) {
installed_packages <- .packages(all.available = TRUE)
token_packages <- installed_packages[startsWith(installed_packages, token)]
completions <- lapply(token_packages, function(package) {
list(label = package,
kind = CompletionItemKind$Module,
sortText = paste0(sort_prefixes$global, package),
data = list(type = "package")
)
})
completions
}
#' Complete a function argument
#' @keywords internal
arg_completion <- function(workspace, token, funct, package = NULL, exported_only = TRUE) {
if (is.null(package)) {
package <- workspace$guess_namespace(funct, isf = TRUE)
}
if (!is.null(package)) {
args <- names(workspace$get_formals(funct, package, exported_only = exported_only))
if (is.character(args)) {
token_args <- args[startsWith(args, token)]
completions <- lapply(token_args, function(arg) {
list(label = arg,
kind = CompletionItemKind$Variable,
detail = "parameter",
sortText = paste0(sort_prefixes$arg, arg),
insertText = paste0(arg, " = "),
insertTextFormat = InsertTextFormat$PlainText,
data = list(
type = "parameter",
funct = funct,
package = package
))
})
completions
}
}
}
#' Complete functions defined in a namespace
#'
#' Returns completion items for the functions in `ns` whose names start
#' with `token`. When snippet support is on, the insert text is
#' "name($0)", placing the cursor between the parentheses.
#' @keywords internal
ns_function_completion <- function(ns, token, exported_only, snippet_support) {
    nsname <- ns$package_name
    functs <- ns$get_symbols(want_functs = TRUE, exported_only = exported_only)
    functs <- functs[startsWith(functs, token)]
    if (nsname == WORKSPACE) {
        tag <- "[workspace]"
        sort_prefix <- sort_prefixes$workspace
    } else {
        tag <- paste0("{", nsname, "}")
        sort_prefix <- sort_prefixes$global
    }
    # Build each item once and add the snippet fields conditionally; the
    # previous version duplicated the entire lapply() for the two cases.
    snippets <- isTRUE(snippet_support)
    lapply(functs, function(object) {
        item <- list(label = object,
            kind = CompletionItemKind$Function,
            detail = tag,
            sortText = paste0(sort_prefix, object))
        if (snippets) {
            item$insertText <- paste0(object, "($0)")
            item$insertTextFormat <- InsertTextFormat$Snippet
        }
        item$data <- list(
            type = "function",
            package = nsname
        )
        item
    })
}
#' Complete objects explicitly imported into the workspace
#'
#' Offers the workspace's imported function objects whose names start with
#' `token`, tagged with the namespace they come from.
#' @keywords internal
imported_object_completion <- function(workspace, token, snippet_support) {
    completions <- list()
    for (object in workspace$imported_objects$keys()) {
        if (!startsWith(object, token)) {
            next
        }
        nsname <- workspace$imported_objects$get(object)
        ns <- workspace$get_namespace(nsname)
        if (is.null(ns) || !ns$exists_funct(object)) {
            next
        }
        item <- list(label = object,
            kind = CompletionItemKind$Function,
            detail = paste0("{", nsname, "}"),
            sortText = paste0(sort_prefixes$imported, object))
        if (isTRUE(snippet_support)) {
            item$insertText <- paste0(object, "($0)")
            item$insertTextFormat <- InsertTextFormat$Snippet
        }
        item$data <- list(
            type = "function",
            package = nsname
        )
        # Index assignment grows the list in amortized O(1); the previous
        # append() call copied the whole list on every iteration (O(n^2)).
        completions[[length(completions) + 1L]] <- item
    }
    completions
}
#' Complete any object in the workspace
#' @keywords internal
workspace_completion <- function(workspace, token,
package = NULL, exported_only = TRUE, snippet_support = NULL) {
completions <- list()
if (is.null(package)) {
packages <- c(WORKSPACE, workspace$loaded_packages)
} else {
packages <- c(package)
}
if (is.null(package) || exported_only) {
for (nsname in packages) {
ns <- workspace$get_namespace(nsname)
if (is.null(ns)) {
next
}
if (nsname == WORKSPACE) {
tag <- "[workspace]"
sort_prefix <- sort_prefixes$workspace
} else {
tag <- paste0("{", nsname, "}")
sort_prefix <- sort_prefixes$global
}
functs_completions <- ns_function_completion(ns, token,
exported_only = TRUE, snippet_support = snippet_support)
nonfuncts <- ns$get_symbols(want_functs = FALSE, exported_only = TRUE)
nonfuncts <- nonfuncts[startsWith(nonfuncts, token)]
nonfuncts_completions <- lapply(nonfuncts, function(object) {
list(label = object,
kind = CompletionItemKind$Field,
detail = tag,
sortText = paste0(sort_prefix, object),
data = list(
type = "nonfunction",
package = nsname
))
})
lazydata <- ns$get_lazydata()
lazydata <- lazydata[startsWith(lazydata, token)]
lazydata_completions <- lapply(lazydata, function(object) {
list(label = object,
kind = CompletionItemKind$Field,
detail = tag,
sortText = paste0(sort_prefix, object),
data = list(
type = "lazydata",
package = nsname
))
})
completions <- c(completions,
functs_completions,
nonfuncts_completions,
lazydata_completions)
}
} else {
ns <- workspace$get_namespace(package)
if (!is.null(ns)) {
tag <- paste0("{", package, "}")
functs_completions <- ns_function_completion(ns, token,
exported_only = FALSE, snippet_support = snippet_support)
nonfuncts <- ns$get_symbols(want_functs = FALSE, exported_only = FALSE)
nonfuncts <- nonfuncts[startsWith(nonfuncts, token)]
nonfuncts_completions <- lapply(nonfuncts, function(object) {
list(label = object,
kind = CompletionItemKind$Field,
detail = tag,
sortText = paste0(sort_prefixes$global, object),
data = list(
type = "nonfunction",
package = package
))
})
completions <- c(completions,
functs_completions,
nonfuncts_completions)
}
}
imported_object <- imported_object_completion(workspace, token, snippet_support)
completions <- c(
completions,
imported_object)
completions
}
scope_completion_symbols_xpath <- paste(
"FUNCTION/following-sibling::SYMBOL_FORMALS",
"forcond/SYMBOL",
"expr/LEFT_ASSIGN[not(following-sibling::expr/FUNCTION)]/preceding-sibling::expr[count(*)=1]/SYMBOL",
"expr/RIGHT_ASSIGN[not(preceding-sibling::expr/FUNCTION)]/following-sibling::expr[count(*)=1]/SYMBOL",
"equal_assign/EQ_ASSIGN[not(following-sibling::expr/FUNCTION)]/preceding-sibling::expr[count(*)=1]/SYMBOL",
sep = "|")
scope_completion_functs_xpath <- paste(
"expr/LEFT_ASSIGN[following-sibling::expr/FUNCTION]/preceding-sibling::expr[count(*)=1]/SYMBOL",
"expr/RIGHT_ASSIGN[preceding-sibling::expr/FUNCTION]/following-sibling::expr[count(*)=1]/SYMBOL",
"equal_assign/EQ_ASSIGN[following-sibling::expr/FUNCTION]/preceding-sibling::expr[count(*)=1]/SYMBOL",
sep = "|")
scope_completion <- function(uri, workspace, token, point, snippet_support = NULL) {
xdoc <- workspace$get_parse_data(uri)$xml_doc
if (is.null(xdoc)) {
return(list())
}
enclosing_scopes <- xdoc_find_enclosing_scopes(xdoc,
point$row + 1, point$col + 1)
scope_symbols <- unique(xml_text(xml_find_all(enclosing_scopes, scope_completion_symbols_xpath)))
scope_symbols <- scope_symbols[startsWith(scope_symbols, token)]
scope_symbol_completions <- lapply(scope_symbols, function(symbol) {
list(
label = symbol,
kind = CompletionItemKind$Field,
sortText = paste0(sort_prefixes$scope, symbol),
detail = "[scope]"
)
})
scope_functs <- unique(xml_text(xml_find_all(enclosing_scopes, scope_completion_functs_xpath)))
scope_functs <- scope_functs[startsWith(scope_functs, token)]
if (isTRUE(snippet_support)) {
scope_funct_completions <- lapply(scope_functs, function(symbol) {
list(
label = symbol,
kind = CompletionItemKind$Function,
detail = "[scope]",
sortText = paste0(sort_prefixes$scope, symbol),
insertText = paste0(symbol, "($0)"),
insertTextFormat = InsertTextFormat$Snippet
)
})
} else {
scope_funct_completions <- lapply(scope_functs, function(symbol) {
list(
label = symbol,
kind = CompletionItemKind$Function,
sortText = paste0(sort_prefixes$scope, symbol),
detail = "[scope]"
)
})
}
completions <- c(scope_symbol_completions, scope_funct_completions)
completions
}
#' The response to a textDocument/completion request
#' @keywords internal
completion_reply <- function(id, uri, workspace, document, point, capabilities) {
if (!check_scope(uri, document, point)) {
return(Response$new(
id,
result = list(
isIncomplete = FALSE,
items = list()
)))
}
snippet_support <- isTRUE(capabilities$completionItem$snippetSupport) &&
getOption("languageserver.snippet_support", TRUE)
completions <- list()
token_result <- document$detect_token(point, forward = FALSE)
full_token <- token_result$full_token
token <- token_result$token
package <- token_result$package
if (nzchar(full_token)) {
if (is.null(package)) {
completions <- c(
completions,
constant_completion(token),
package_completion(token),
scope_completion(uri, workspace, token, point, snippet_support))
}
completions <- c(
completions,
workspace_completion(
workspace, token, package, token_result$accessor == "::", snippet_support))
}
call_result <- document$detect_call(point)
if (nzchar(call_result$token)) {
completions <- c(
completions,
arg_completion(workspace, token,
call_result$token, call_result$package,
exported_only = call_result$accessor != ":::"))
}
logger$info("completions: ", length(completions))
Response$new(
id,
result = list(
isIncomplete = FALSE,
items = completions
)
)
}
#' The response to a completionItem/resolve request
#'
#' Lazily fills in the `documentation` field of a completion item, based
#' on the `data` attached when the item was created. Responds with an
#' empty result when no documentation can be resolved.
#' @keywords internal
completion_item_resolve_reply <- function(id, workspace, params) {
    resolved <- FALSE
    # NULL$type is NULL in R, so this single check also covers a missing
    # `data` field (the original used an empty if-branch for that case).
    type <- params$data$type
    if (!is.null(type)) {
        if (type == "package") {
            if (length(find.package(params$label, quiet = TRUE))) {
                desc <- utils::packageDescription(params$label, fields = c("Title", "Description"))
                # collapse hard-wrapped DESCRIPTION text onto one line
                description <- gsub("\\s*\n\\s*", " ", desc$Description)
                params$documentation <- list(
                    kind = "markdown",
                    value = sprintf("**%s**\n\n%s", desc$Title, description)
                )
                resolved <- TRUE
            }
        } else if (type == "parameter") {
            doc <- workspace$get_documentation(params$data$funct, params$data$package, isf = TRUE)
            doc_string <- NULL
            if (is.list(doc)) {
                doc_string <- doc$arguments[[params$label]]
            }
            if (!is.null(doc_string)) {
                params$documentation <- list(kind = "markdown", value = doc_string)
                resolved <- TRUE
            }
        } else if (type %in% c("constant", "function", "nonfunction", "lazydata")) {
            doc <- workspace$get_documentation(params$label, params$data$package,
                isf = type == "function")
            doc_string <- NULL
            if (is.character(doc)) {
                doc_string <- doc
            } else if (is.list(doc)) {
                doc_string <- doc$description
            }
            if (!is.null(doc_string)) {
                params$documentation <- list(kind = "markdown", value = doc_string)
                resolved <- TRUE
            }
        }
    }
    if (resolved) {
        # the client does not need the bookkeeping data echoed back
        params$data <- NULL
        Response$new(
            id,
            result = params
        )
    } else {
        Response$new(id)
    }
}
|
panel.levelplot = function (x, y, z, subscripts, at = pretty(z), shrink, labels = FALSE,
    label.style = c("mixed", "flat", "align"), contour = FALSE,
    region = TRUE, col = add.line$col, lty = add.line$lty, lwd = add.line$lwd,
    ..., col.regions = regions$col, alpha.regions = regions$alpha, rez=NULL )
{
    # Copy of lattice's panel.levelplot, modified so that grid.rect() draws
    # every cell at the fixed, user-specified size `rez` = c(width, height)
    # in native units instead of a size derived from the data spacing.
    require(grid)
    if (length(subscripts) == 0)
        return()
    regions <- trellis.par.get("regions")
    label.style <- match.arg(label.style)
    x.is.factor <- is.factor(x)
    y.is.factor <- is.factor(y)
    x <- as.numeric(x)
    y <- as.numeric(y)
    z <- as.numeric(z)
    zcol <- level.colors(z, at, col.regions, colors = TRUE)
    x <- x[subscripts]
    y <- y[subscripts]
    # smallest spacing between distinct coordinate values, used as the cell
    # width/height when an axis has a single unique value
    minXwid <- if (length(unique(x)) > 1)
        min(diff(sort(unique(x))))
    else 1
    # BUG FIX: this condition previously tested unique(x) (copy-paste from
    # minXwid); with a single unique y value and several x values that made
    # minYwid = min(diff(numeric(0))) == Inf and emitted a warning.
    # Upstream lattice tests unique(y) here.
    minYwid <- if (length(unique(y)) > 1)
        min(diff(sort(unique(y))))
    else 1
    fullZrange <- range(as.numeric(z), finite = TRUE)
    z <- z[subscripts]
    zcol <- zcol[subscripts]
    # shrink handling retained from lattice for argument compatibility;
    # the fixed-size grid.rect() below does not use the result
    shrinkx <- c(1, 1)
    shrinky <- c(1, 1)
    if (!missing(shrink)) {
        if (is.numeric(shrink)) {
            shrinkx <- rep(shrink, length.out = 2)
            shrinky <- rep(shrink, length.out = 2)
        }
        else if (is.list(shrink)) {
            shrinkx <- rep(shrink[[1]], length.out = 2)
            shrinky <- rep(shrink[[1]], length.out = 2)
            if ("x" %in% names(shrink))
                shrinkx <- rep(shrink$x, length.out = 2)
            if ("y" %in% names(shrink))
                shrinky <- rep(shrink$y, length.out = 2)
        }
        else warning("Invalid 'shrink' parameter ignored")
    }
    scaleWidth <- function(z, min = 0.8, max = 0.8, zl = range(z,
        finite = TRUE)) {
        if (diff(zl) == 0)
            rep(0.5 * (min + max), length(z))
        else min + (max - min) * (z - zl[1])/diff(zl)
    }
    # compute cell centers (cx/cy) and cell extents (lx/ly) per axis
    if (x.is.factor) {
        ux <- sort(unique(x[!is.na(x)]))
        lx <- rep(1, length(ux))
        cx <- ux
    }
    else {
        ux <- sort(unique(x[!is.na(x)]))
        bx <- if (length(ux) > 1)
            c(3 * ux[1] - ux[2], ux[-length(ux)] + ux[-1], 3 *
                ux[length(ux)] - ux[length(ux) - 1])/2
        else ux + c(-0.5, 0.5) * minXwid
        lx <- diff(bx)
        cx <- (bx[-1] + bx[-length(bx)])/2
    }
    if (y.is.factor) {
        uy <- sort(unique(y[!is.na(y)]))
        ly <- rep(1, length(uy))
        cy <- uy
    }
    else {
        uy <- sort(unique(y[!is.na(y)]))
        by <- if (length(uy) > 1)
            c(3 * uy[1] - uy[2], uy[-length(uy)] + uy[-1], 3 *
                uy[length(uy)] - uy[length(uy) - 1])/2
        else uy + c(-0.5, 0.5) * minYwid
        ly <- diff(by)
        cy <- (by[-1] + by[-length(by)])/2
    }
    idx <- match(x, ux)
    idy <- match(y, uy)
    if (region) {
        # the modification: fixed cell size rez instead of data-derived lx/ly
        grid.rect(x = cx[idx], y = cy[idy], width = rez[1],
            height = rez[2], default.units = "native", gp = gpar(fill = zcol,
                lwd = 0.00001, col = "transparent", alpha = alpha.regions))
    }
    if (contour) {
        cpl <- current.panel.limits(unit = "cm")
        asp <- diff(cpl$ylim)/diff(cpl$xlim)
        if (is.logical(labels) && !labels)
            labels <- NULL
        else {
            if (is.characterOrExpression(labels))
                labels <- list(labels = labels)
            text <- trellis.par.get("add.text")
            tmp <- list(col = text$col, alpha = text$alpha, cex = text$cex,
                fontfamily = text$fontfamily, fontface = text$fontface,
                font = text$font)
            labels <- if (is.list(labels))
                updateList(tmp, labels)
            else tmp
            if (!is.characterOrExpression(labels$labels))
                labels$labels <- format(at, trim = TRUE)
        }
        add.line <- trellis.par.get("add.line")
        m <- matrix(NA_real_, nrow = length(ux), ncol = length(uy))
        m[(idy - 1) * length(ux) + idx] <- z
        clines <- contourLines(x = ux, y = uy, z = m, nlevels = length(at),
            levels = at)
        for (val in clines) {
            llines(val, col = col, lty = lty, lwd = lwd)
            # only label contour lines long enough to carry a label
            if (length(val$x) > 5) {
                if (!is.null(labels)) {
                    slopes <- diff(val$y)/diff(val$x)
                    if (label.style == "flat") {
                        textloc <- which.min(abs(slopes))
                        rotangle <- 0
                    }
                    else if (label.style == "align") {
                        rx <- range(ux)
                        ry <- range(uy)
                        depth <- pmin(pmin(val$x - rx[1], rx[2] -
                            val$x)/diff(rx), pmin(val$y - ry[1], ry[2] -
                            val$y)/diff(ry))
                        textloc <- min(which.max(depth), length(slopes))
                        rotangle <- atan(asp * slopes[textloc] *
                            diff(rx)/diff(ry)) * 180/base::pi
                    }
                    else if (label.style == "mixed") {
                        rx <- range(ux)
                        ry <- range(uy)
                        depth <- pmin(pmin(val$x - rx[1], rx[2] -
                            val$x)/diff(rx), pmin(val$y - ry[1], ry[2] -
                            val$y)/diff(ry))
                        textloc <- which.min(abs(slopes))
                        rotangle <- 0
                        if (depth[textloc] < 0.05) {
                            textloc <- min(which.max(depth), length(slopes))
                            rotangle <- atan(asp * slopes[textloc] *
                                diff(rx)/diff(ry)) * 180/base::pi
                        }
                    }
                    else stop("Invalid label.style")
                    i <- match(val$level, at)
                    ltext(labels$labels[i], adj = c(0.5, 0), srt = rotangle,
                        col = labels$col, alpha = labels$alpha, cex = labels$cex,
                        font = labels$font, fontfamily = labels$fontfamily,
                        fontface = labels$fontface, x = 0.5 * (val$x[textloc] +
                            val$x[textloc + 1]), y = 0.5 * (val$y[textloc] +
                            val$y[textloc + 1]))
                }
            }
        }
    }
}
| /R/panel.levelplot.r | permissive | PEDsnowcrab/aegis | R | false | false | 6,267 | r |
panel.levelplot = function (x, y, z, subscripts, at = pretty(z), shrink, labels = FALSE,
label.style = c("mixed", "flat", "align"), contour = FALSE,
region = TRUE, col = add.line$col, lty = add.line$lty, lwd = add.line$lwd,
..., col.regions = regions$col, alpha.regions = regions$alpha, rez=NULL )
{
# copy from lattice: panel.levelplot
# modifying the grid.rect() to be a user specified (fixed) value
require(grid)
if (length(subscripts) == 0)
return()
regions <- trellis.par.get("regions")
label.style <- match.arg(label.style)
x.is.factor <- is.factor(x)
y.is.factor <- is.factor(y)
x <- as.numeric(x)
y <- as.numeric(y)
z <- as.numeric(z)
zcol <- level.colors(z, at, col.regions, colors = TRUE)
x <- x[subscripts]
y <- y[subscripts]
minXwid <- if (length(unique(x)) > 1)
min(diff(sort(unique(x))))
else 1
minYwid <- if (length(unique(x)) > 1)
min(diff(sort(unique(y))))
else 1
fullZrange <- range(as.numeric(z), finite = TRUE)
z <- z[subscripts]
zcol <- zcol[subscripts]
shrinkx <- c(1, 1)
shrinky <- c(1, 1)
if (!missing(shrink)) {
if (is.numeric(shrink)) {
shrinkx <- rep(shrink, length.out = 2)
shrinky <- rep(shrink, length.out = 2)
}
else if (is.list(shrink)) {
shrinkx <- rep(shrink[[1]], length.out = 2)
shrinky <- rep(shrink[[1]], length.out = 2)
if ("x" %in% names(shrink))
shrinkx <- rep(shrink$x, length.out = 2)
if ("y" %in% names(shrink))
shrinky <- rep(shrink$y, length.out = 2)
}
else warning("Invalid 'shrink' parameter ignored")
}
scaleWidth <- function(z, min = 0.8, max = 0.8, zl = range(z,
finite = TRUE)) {
if (diff(zl) == 0)
rep(0.5 * (min + max), length(z))
else min + (max - min) * (z - zl[1])/diff(zl)
}
if (x.is.factor) {
ux <- sort(unique(x[!is.na(x)]))
lx <- rep(1, length(ux))
cx <- ux
}
else {
ux <- sort(unique(x[!is.na(x)]))
bx <- if (length(ux) > 1)
c(3 * ux[1] - ux[2], ux[-length(ux)] + ux[-1], 3 *
ux[length(ux)] - ux[length(ux) - 1])/2
else ux + c(-0.5, 0.5) * minXwid
lx <- diff(bx)
cx <- (bx[-1] + bx[-length(bx)])/2
}
if (y.is.factor) {
uy <- sort(unique(y[!is.na(y)]))
ly <- rep(1, length(uy))
cy <- uy
}
else {
uy <- sort(unique(y[!is.na(y)]))
by <- if (length(uy) > 1)
c(3 * uy[1] - uy[2], uy[-length(uy)] + uy[-1], 3 *
uy[length(uy)] - uy[length(uy) - 1])/2
else uy + c(-0.5, 0.5) * minYwid
ly <- diff(by)
cy <- (by[-1] + by[-length(by)])/2
}
idx <- match(x, ux)
idy <- match(y, uy)
if (region) {
grid.rect(x = cx[idx], y = cy[idy], width = rez[1],
height = rez[2], default.units = "native", gp = gpar(fill = zcol,
lwd = 0.00001, col = "transparent", alpha = alpha.regions))
}
if (contour) {
cpl <- current.panel.limits(unit = "cm")
asp <- diff(cpl$ylim)/diff(cpl$xlim)
if (is.logical(labels) && !labels)
labels <- NULL
else {
if (is.characterOrExpression(labels))
labels <- list(labels = labels)
text <- trellis.par.get("add.text")
tmp <- list(col = text$col, alpha = text$alpha, cex = text$cex,
fontfamily = text$fontfamily, fontface = text$fontface,
font = text$font)
labels <- if (is.list(labels))
updateList(tmp, labels)
else tmp
if (!is.characterOrExpression(labels$labels))
labels$labels <- format(at, trim = TRUE)
}
add.line <- trellis.par.get("add.line")
m <- matrix(NA_real_, nrow = length(ux), ncol = length(uy))
m[(idy - 1) * length(ux) + idx] <- z
clines <- contourLines(x = ux, y = uy, z = m, nlevels = length(at),
levels = at)
for (val in clines) {
llines(val, col = col, lty = lty, lwd = lwd)
if (length(val$x) > 5) {
if (!is.null(labels)) {
slopes <- diff(val$y)/diff(val$x)
if (label.style == "flat") {
textloc <- which.min(abs(slopes))
rotangle <- 0
}
else if (label.style == "align") {
rx <- range(ux)
ry <- range(uy)
depth <- pmin(pmin(val$x - rx[1], rx[2] -
val$x)/diff(rx), pmin(val$y - ry[1], ry[2] -
val$y)/diff(ry))
textloc <- min(which.max(depth), length(slopes))
rotangle <- atan(asp * slopes[textloc] *
diff(rx)/diff(ry)) * 180/base::pi
}
else if (label.style == "mixed") {
rx <- range(ux)
ry <- range(uy)
depth <- pmin(pmin(val$x - rx[1], rx[2] -
val$x)/diff(rx), pmin(val$y - ry[1], ry[2] -
val$y)/diff(ry))
textloc <- which.min(abs(slopes))
rotangle <- 0
if (depth[textloc] < 0.05) {
textloc <- min(which.max(depth), length(slopes))
rotangle <- atan(asp * slopes[textloc] *
diff(rx)/diff(ry)) * 180/base::pi
}
}
else stop("Invalid label.style")
i <- match(val$level, at)
ltext(labels$labels[i], adj = c(0.5, 0), srt = rotangle,
col = labels$col, alpha = labels$alpha, cex = labels$cex,
font = labels$font, fontfamily = labels$fontfamily,
fontface = labels$fontface, x = 0.5 * (val$x[textloc] +
val$x[textloc + 1]), y = 0.5 * (val$y[textloc] +
val$y[textloc + 1]))
}
}
}
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.r
\name{rgen_length}
\alias{rgen_length}
\title{Samples chain lengths with given observation probabilities}
\usage{
rgen_length(n, x, prob)
}
\arguments{
\item{n}{number of samples to generate}
\item{x}{observed chain lengths}
\item{prob}{probability of observation}
}
\value{
sampled lengths
}
\description{
Samples the length of a transmission chain where each individual element is
observed with binomial probability with parameters n (number of successes)
and p (success probability)
}
\author{
Sebastian Funk
}
\keyword{internal}
| /man/rgen_length.Rd | no_license | ffinger/bpmodels | R | false | true | 621 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.r
\name{rgen_length}
\alias{rgen_length}
\title{Samples chain lengths with given observation probabilities}
\usage{
rgen_length(n, x, prob)
}
\arguments{
\item{n}{number of samples to generate}
\item{x}{observed chain lengths}
\item{prob}{probability of observation}
}
\value{
sampled lengths
}
\description{
Samples the length of a transmission chain where each individual element is
observed with binomial probability with parameters n (number of successes)
and p (success probability)
}
\author{
Sebastian Funk
}
\keyword{internal}
|
# Plot 2: Global Active Power over time for 2007-02-01 and 2007-02-02.
gap <- read.csv("household_power_consumption.txt", header = TRUE, sep = ";")
gap$Date <- as.Date(gap$Date, format = "%d/%m/%Y")
# keep only the two days of interest
df <- gap[(gap$Date == "2007-02-01") | (gap$Date == "2007-02-02"), ]
# the column was read as character (the file encodes missing values as "?")
df$Global_active_power <- as.numeric(as.character(df$Global_active_power))
# Build a POSIXct timestamp. `Date` is already a Date, so paste() yields
# "YYYY-MM-DD HH:MM:SS", which as.POSIXct() parses with its default format.
# The old trailing "%d/%m/%Y %H:%M:%S" was a stray unnamed argument to
# transform() that only created a junk column; it has been removed.
df <- transform(df, timestamp = as.POSIXct(paste(Date, Time)))
plot(df$timestamp, df$Global_active_power, type = "l", xlab = "",
    ylab = "Global Active Power (kilowatts)")
dev.copy(png, file = "plot2.png", width = 480, height = 480)
dev.off() | /plot2.R | no_license | andrewsmhay/ExData_Plotting1 | R | false | false | 505 | r | gap <- read.csv("household_power_consumption.txt", header=T, sep=";")
gap$Date <- as.Date(gap$Date, format="%d/%m/%Y")
df <- gap[(gap$Date=="2007-02-01") | (gap$Date=="2007-02-02"),]
df$Global_active_power <- as.numeric(as.character(df$Global_active_power))
df <- transform(df, timestamp=as.POSIXct(paste(Date, Time)), "%d/%m/%Y %H:%M:%S")
plot(df$timestamp,df$Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.copy(png, file="plot2.png", width=480, height=480)
dev.off() |
function(input, output, session) {
data <- read.csv(Dir "data_for_Kmeans")
data <- data[,-1]
# Combine the selected variables into a new data frame
selectedData <- reactive({
data[, c(input$xcol, input$ycol)]
})
clusters <- reactive({
kmeans(selectedData(), input$clusters)
})
output$plot1 <- renderPlot({
palette(c("#E41A1C", "#377EB8", "#4DAF4A", "#984EA3",
"#FF7F00", "#FFFF33", "#A65628", "#F781BF", "#999999"))
par(mar = c(5.1, 4.1, 0, 1))
plot(selectedData(),
col = clusters()$cluster,
pch = 20, cex = 3)
points(clusters()$centers, pch = 4, cex = 4, lwd = 4)
})
}
| /analysis/server.R | permissive | alexrods/scrap_ligaMx_2020 | R | false | false | 658 | r | function(input, output, session) {
data <- read.csv(Dir "data_for_Kmeans")
data <- data[,-1]
# Combine the selected variables into a new data frame
selectedData <- reactive({
data[, c(input$xcol, input$ycol)]
})
clusters <- reactive({
kmeans(selectedData(), input$clusters)
})
output$plot1 <- renderPlot({
palette(c("#E41A1C", "#377EB8", "#4DAF4A", "#984EA3",
"#FF7F00", "#FFFF33", "#A65628", "#F781BF", "#999999"))
par(mar = c(5.1, 4.1, 0, 1))
plot(selectedData(),
col = clusters()$cluster,
pch = 20, cex = 3)
points(clusters()$centers, pch = 4, cex = 4, lwd = 4)
})
}
|
#' Estimate heritability.
#'
#' @param y phenotype vector
#' @param covar matrix with covariates
#' @param G genetic similarity matrix
#' @return numeric
#' @export
heritability <- function(y, G, covar = NULL) {
if (is.null(covar))
covar <- rep(1, nrow(G))
# to get reasonable interpretation, normalize matrix
G <- normalize.matrix(G)
# fit the mixed model
rg.fit <- regress(y~covar, ~G, pos=c(TRUE,TRUE))
# estimate heritability
h2 <- as.numeric(rg.fit$sigma[1] / sum(rg.fit$sigma))
h2
} | /R/heritability.R | no_license | harr/HPQTL2 | R | false | false | 525 | r | #' Estimate heritability.
#'
#' Fits a variance-component (mixed) model with a genetic random effect
#' whose covariance is proportional to G, and returns the share of total
#' variance attributed to the genetic component.
#'
#' @param y phenotype vector
#' @param G genetic similarity (kinship) matrix
#' @param covar matrix with covariates; when NULL, an intercept-only
#'   vector of ones is used
#' @return numeric scalar: the heritability estimate h2 (in [0, 1], since
#'   both variance components are constrained to be non-negative)
#' @export
heritability <- function(y, G, covar = NULL) {
  # default covariate: a column of ones (intercept only)
  if (is.null(covar))
    covar <- rep(1, nrow(G))
  # to get reasonable interpretation of the variance components,
  # normalize the similarity matrix
  G <- normalize.matrix(G)
  # fit the mixed model; pos = c(TRUE, TRUE) constrains both variance
  # components to be non-negative
  rg.fit <- regress(y~covar, ~G, pos=c(TRUE,TRUE))
  # h2 = genetic variance / total variance (assumes sigma[1] is the G
  # component, i.e. the first term of the random-effects formula)
  h2 <- as.numeric(rg.fit$sigma[1] / sum(rg.fit$sigma))
  h2
}
#Do not change these lines unless you know what you are doing.
args=commandArgs(trailingOnly=TRUE)
require(survival)
require(gap)
scratch=args[1]
folder=args[2]
datafile=args[3]
wd=paste0(scratch,folder)
setwd(wd)
#This Assumes your Fitting.R funciton is in $SCRATCH/GENmatic/Fitting.R
source(paste0(scratch,"GENmatic/Fitting.R"))
data=read.csv(paste0(wd,datafile))
######################################
##### START TO EDIT THE FILE HERE#####
######################################
#Name of directory in scinet with plink (use trailing /)
pd="/home/w/wxu/oespinga/software/plink/plink-1.07-x86_64/"
#Enter your calls to GENfit here. You can use some sort of apply if you want.
#Make sure you set pd=pd and wd=wd
GENfit(data[,c(1,1)],data[,c("SvRfs","Rfs")],data$PC1,data$SEX,"coxph","additive",
"thinned","gwastest",qq=T,manhattan=T,pd=pd,wd=wd,
topn=10,topprop=0.1,topcut=0.05)
GENfit(data[,c(1,1)],data$SEX,data$PC1,NULL,"logistic","additive",
"thinned","logistictest",qq=T,manhattan=T,pd=pd,wd=wd,
topn=10,topprop=0.1,topcut=0.05)
GENfit(data[,c(1,1)],data$SvRfs,data$PC1,NULL,"linear","additive",
"thinned","lineartest",qq=T,manhattan=T,pd=pd,wd=wd,
topn=10,topprop=0.1,topcut=0.05)
| /GENmatic.R | no_license | rdelbel/GENmatic | R | false | false | 1,233 | r | #Do not change these lines unless you know what you are doing.
args=commandArgs(trailingOnly=TRUE)
require(survival)
require(gap)
scratch=args[1]
folder=args[2]
datafile=args[3]
wd=paste0(scratch,folder)
setwd(wd)
#This Assumes your Fitting.R funciton is in $SCRATCH/GENmatic/Fitting.R
source(paste0(scratch,"GENmatic/Fitting.R"))
data=read.csv(paste0(wd,datafile))
######################################
##### START TO EDIT THE FILE HERE#####
######################################
#Name of directory in scinet with plink (use trailing /)
pd="/home/w/wxu/oespinga/software/plink/plink-1.07-x86_64/"
#Enter your calls to GENfit here. You can use some sort of apply if you want.
#Make sure you set pd=pd and wd=wd
GENfit(data[,c(1,1)],data[,c("SvRfs","Rfs")],data$PC1,data$SEX,"coxph","additive",
"thinned","gwastest",qq=T,manhattan=T,pd=pd,wd=wd,
topn=10,topprop=0.1,topcut=0.05)
GENfit(data[,c(1,1)],data$SEX,data$PC1,NULL,"logistic","additive",
"thinned","logistictest",qq=T,manhattan=T,pd=pd,wd=wd,
topn=10,topprop=0.1,topcut=0.05)
GENfit(data[,c(1,1)],data$SvRfs,data$PC1,NULL,"linear","additive",
"thinned","lineartest",qq=T,manhattan=T,pd=pd,wd=wd,
topn=10,topprop=0.1,topcut=0.05)
|
setwd("C:\\Users\\Sabrina\\Google Drive\\Colgate\\Senior Year\\BioThesis\\Data")
getwd()
dat.nut<-read.csv("nutrients_consolidated.csv")
###nitrogen###
n.mean<- aggregate(dat.nut$N, by=list(dat.nut$Treatment, dat.nut$Species), FUN= "mean")
colnames(n.mean)<- c("Site", "Species", "N")
n.sd<- aggregate(dat.nut$N, by=list(dat.nut$Treatment, dat.nut$Species), FUN= "sd")
colnames(n.sd)<- c("Site", "Species", "sd")
n.l<- aggregate(dat.nut$N, by=list(dat.nut$Treatment, dat.nut$Species), FUN= "length")
colnames(n.l)<- c("Site", "Species", "Length")
n.mean$SE<- n.sd$sd/sqrt(n.l$Length)
t.test(dat.nut$N[dat.nut$Species== "Betula" & dat.nut$Treatment== "H"],
dat.nut$N[dat.nut$Species== "Betula" & dat.nut$Treatment== "L"])
t.test(dat.nut$N[dat.nut$Species== "Salix" & dat.nut$Treatment== "H"],
dat.nut$N[dat.nut$Species== "Salix" & dat.nut$Treatment== "L"])
t.test(dat.nut$N[dat.nut$Species== "Larix" & dat.nut$Treatment== "H"],
dat.nut$N[dat.nut$Species== "Larix" & dat.nut$Treatment== "L"])
####carbon###
C.mean<- aggregate(dat.nut$C, by=list(dat.nut$Treatment, dat.nut$Species), FUN= "mean",
na.rm= TRUE)
colnames(C.mean)<- c("Site", "Species", "N")
C.sd<- aggregate(dat.nut$C, by=list(dat.nut$Treatment, dat.nut$Species), FUN= "sd",
na.rm= TRUE)
colnames(C.sd)<- c("Site", "Species", "sd")
C.l<- aggregate(dat.nut$C[!is.na(dat.nut$C)], by=list(dat.nut$Treatment[!is.na(dat.nut$C)],
dat.nut$Species[!is.na(dat.nut$C)]), FUN= "length")
colnames(C.l)<- c("Site", "Species", "Length")
C.mean$SE<- C.sd$sd/sqrt(C.l$Length)
t.test(dat.nut$C[dat.nut$Species== "Betula" & dat.nut$Treatment== "H"],
dat.nut$C[dat.nut$Species== "Betula" & dat.nut$Treatment== "L"])
t.test(dat.nut$C[dat.nut$Species== "Salix" & dat.nut$Treatment== "H"],
dat.nut$C[dat.nut$Species== "Salix" & dat.nut$Treatment== "L"])
t.test(dat.nut$C[dat.nut$Species== "Larix" & dat.nut$Treatment== "H"],
dat.nut$C[dat.nut$Species== "Larix" & dat.nut$Treatment== "L"])
###C:N####
cn.mean<- aggregate(dat.nut$C_N, by=list(dat.nut$Treatment, dat.nut$Species), FUN= "mean",
na.rm=TRUE)
colnames(cn.mean)<- c("Site", "Species", "CN")
cn.sd<- aggregate(dat.nut$C_N, by=list(dat.nut$Treatment, dat.nut$Species), FUN= "sd",
na.rm=TRUE)
colnames(cn.sd)<- c("Site", "Species", "sd")
cn.l<- aggregate(dat.nut$C_N[!is.na(dat.nut$C_N)],by=list(dat.nut$Treatment[!is.na(dat.nut$C_N)],
dat.nut$Species[!is.na(dat.nut$C_N)]), FUN= "length")
colnames(cn.l)<- c("Site", "Species", "Length")
cn.mean$SE<- cn.sd$sd/sqrt(cn.l$Length)
t.test(dat.nut$C_N[dat.nut$Species== "Betula" & dat.nut$Treatment== "H"],
dat.nut$C_N[dat.nut$Species== "Betula" & dat.nut$Treatment== "L"])
t.test(dat.nut$C_N[dat.nut$Species== "Salix" & dat.nut$Treatment== "H"],
dat.nut$C_N[dat.nut$Species== "Salix" & dat.nut$Treatment== "L"])
t.test(dat.nut$C_N[dat.nut$Species== "Larix" & dat.nut$Treatment== "H"],
dat.nut$C_N[dat.nut$Species== "Larix" & dat.nut$Treatment== "L"])
###Phosphorus###
p.mean<- aggregate(dat.nut$P, by=list(dat.nut$Treatment, dat.nut$Species), FUN= "mean", na.rm=TRUE)
colnames(p.mean)<- c("Site", "Species", "P")
p.sd<- aggregate(dat.nut$P, by=list(dat.nut$Treatment, dat.nut$Species), FUN= "sd", na.rm=TRUE)
colnames(p.sd)<- c("Site", "Species", "sd")
p.l<- aggregate(dat.nut$P, by=list(dat.nut$Treatment, dat.nut$Species), FUN= "length")
colnames(p.l)<- c("Site", "Species", "Length")
p.mean$SE<- p.sd$sd/sqrt(p.l$Length)
t.test(dat.nut$P[dat.nut$Species== "Betula" & dat.nut$Treatment== "H"],
dat.nut$P[dat.nut$Species== "Betula" & dat.nut$Treatment== "L"])
t.test(dat.nut$P[dat.nut$Species== "Salix" & dat.nut$Treatment== "H"],
dat.nut$P[dat.nut$Species== "Salix" & dat.nut$Treatment== "L"])
t.test(dat.nut$P[dat.nut$Species== "Larix" & dat.nut$Treatment== "H"],
dat.nut$P[dat.nut$Species== "Larix" & dat.nut$Treatment== "L"])
### N:P ###
np.mean<- aggregate(dat.nut$N_P, by=list(dat.nut$Treatment, dat.nut$Species), FUN= "mean", na.rm=TRUE)
colnames(np.mean)<- c("Site", "Species", "NP")
np.sd<- aggregate(dat.nut$N_P, by=list(dat.nut$Treatment, dat.nut$Species), FUN= "sd", na.rm=TRUE)
colnames(np.sd)<- c("Site", "Species", "sd")
np.l<- aggregate(dat.nut$N_P[!is.na(dat.nut$N_P)],by=list(dat.nut$Treatment[!is.na(dat.nut$N_P)],
dat.nut$Species[!is.na(dat.nut$N_P)]), FUN= "length")
colnames(np.l)<- c("Site", "Species", "Length")
np.mean$SE<- np.sd$sd/sqrt(np.l$Length)
t.test(dat.nut$N_P[dat.nut$Species== "Betula" & dat.nut$Treatment== "H"],
dat.nut$N_P[dat.nut$Species== "Betula" & dat.nut$Treatment== "L"])
t.test(dat.nut$N_P[dat.nut$Species== "Salix" & dat.nut$Treatment== "H"],
dat.nut$N_P[dat.nut$Species== "Salix" & dat.nut$Treatment== "L"])
t.test(dat.nut$N_P[dat.nut$Species== "Larix" & dat.nut$Treatment== "H"],
dat.nut$N_P[dat.nut$Species== "Larix" & dat.nut$Treatment== "L"])
###C:P ###
cp.mean<- aggregate(dat.nut$C_P, by=list(dat.nut$Treatment, dat.nut$Species), FUN= "mean"
, na.rm= TRUE)
colnames(cp.mean)<- c("Site", "Species", "CP")
cp.sd<- aggregate(dat.nut$C_P, by=list(dat.nut$Treatment, dat.nut$Species), FUN= "sd",
na.rm= TRUE)
colnames(cp.sd)<- c("Site", "Species", "sd")
cp.l<- aggregate(dat.nut$C_P[!is.na(dat.nut$C_P)], by=list(dat.nut$Treatment[!is.na(dat.nut$C_P)],
dat.nut$Species[!is.na(dat.nut$C_P)]), FUN= "length")
colnames(cp.l)<- c("Site", "Species", "Length")
cp.mean$SE<- cp.sd$sd/sqrt(cp.l$Length)
t.test(dat.nut$C_P[dat.nut$Species== "Betula" & dat.nut$Treatment== "H"],
dat.nut$C_P[dat.nut$Species== "Betula" & dat.nut$Treatment== "L"])
t.test(dat.nut$C_P[dat.nut$Species== "Salix" & dat.nut$Treatment== "H"],
dat.nut$C_P[dat.nut$Species== "Salix" & dat.nut$Treatment== "L"])
t.test(dat.nut$C_P[dat.nut$Species== "Larix" & dat.nut$Treatment== "H"],
dat.nut$C_P[dat.nut$Species== "Larix" & dat.nut$Treatment== "L"])
###LMA###
lma.mean<- aggregate(dat.nut$LMA, by=list(dat.nut$Treatment, dat.nut$Species), FUN= "mean"
)
colnames(lma.mean)<- c("Site", "Species", "LMA")
lma.sd<- aggregate(dat.nut$LMA, by=list(dat.nut$Treatment, dat.nut$Species), FUN= "sd"
)
colnames(lma.sd)<- c("Site", "Species", "sd")
lma.l<- aggregate(dat.nut$LMA, by=list(dat.nut$Treatment, dat.nut$Species), FUN= "length"
)
colnames(lma.l)<- c("Site", "Species", "Length")
lma.mean$SE<- lma.sd$sd/sqrt(lma.l$Length)
t.sd<- sqrt((((lma.l$Length[lma.mean$Species== "Betula" & lma.mean$Site== "H"]-1)
*(lma.sd$sd[lma.mean$Species== "Betula" & lma.mean$Site== "H"]^2))+
((lma.l$Length[lma.mean$Species== "Betula" & lma.mean$Site== "L"]-1)
*(lma.sd$sd[lma.mean$Species== "Betula" & lma.mean$Site== "L"]^2)))/
(lma.l$Length[lma.mean$Species== "Betula" & lma.mean$Site== "H"]+
lma.l$Length[lma.mean$Species== "Betula" & lma.mean$Site== "L"]-2))
t.check<- (lma.mean$LMA[lma.mean$Species== "Betula" & lma.mean$Site== "H"]-
lma.mean$LMA[lma.mean$Species== "Betula" & lma.mean$Site== "L"])/
(t.sd*sqrt((1/lma.l$Length[lma.mean$Species== "Betula" & lma.mean$Site== "H"])+
(1/lma.l$Length[lma.mean$Species== "Betula" & lma.mean$Site== "L"])))
t.check
t.test(dat.nut$LMA[dat.nut$Species== "Betula" & dat.nut$Treatment== "H"],
dat.nut$LMA[dat.nut$Species== "Betula" & dat.nut$Treatment== "L"])
t.test(dat.nut$LMA[dat.nut$Species== "Salix" & dat.nut$Treatment== "H"],
dat.nut$LMA[dat.nut$Species== "Salix" & dat.nut$Treatment== "L"])
t.test(dat.nut$LMA[dat.nut$Species== "Larix" & dat.nut$Treatment== "H"],
dat.nut$LMA[dat.nut$Species== "Larix" & dat.nut$Treatment== "L"])
#stomata density | /Nutrients.R | no_license | sfarmer35/bio_thesis | R | false | false | 7,855 | r | setwd("C:\\Users\\Sabrina\\Google Drive\\Colgate\\Senior Year\\BioThesis\\Data")
getwd()
dat.nut<-read.csv("nutrients_consolidated.csv")
###nitrogen###
n.mean<- aggregate(dat.nut$N, by=list(dat.nut$Treatment, dat.nut$Species), FUN= "mean")
colnames(n.mean)<- c("Site", "Species", "N")
n.sd<- aggregate(dat.nut$N, by=list(dat.nut$Treatment, dat.nut$Species), FUN= "sd")
colnames(n.sd)<- c("Site", "Species", "sd")
n.l<- aggregate(dat.nut$N, by=list(dat.nut$Treatment, dat.nut$Species), FUN= "length")
colnames(n.l)<- c("Site", "Species", "Length")
n.mean$SE<- n.sd$sd/sqrt(n.l$Length)
t.test(dat.nut$N[dat.nut$Species== "Betula" & dat.nut$Treatment== "H"],
dat.nut$N[dat.nut$Species== "Betula" & dat.nut$Treatment== "L"])
t.test(dat.nut$N[dat.nut$Species== "Salix" & dat.nut$Treatment== "H"],
dat.nut$N[dat.nut$Species== "Salix" & dat.nut$Treatment== "L"])
t.test(dat.nut$N[dat.nut$Species== "Larix" & dat.nut$Treatment== "H"],
dat.nut$N[dat.nut$Species== "Larix" & dat.nut$Treatment== "L"])
####carbon###
C.mean<- aggregate(dat.nut$C, by=list(dat.nut$Treatment, dat.nut$Species), FUN= "mean",
na.rm= TRUE)
colnames(C.mean)<- c("Site", "Species", "N")
C.sd<- aggregate(dat.nut$C, by=list(dat.nut$Treatment, dat.nut$Species), FUN= "sd",
na.rm= TRUE)
colnames(C.sd)<- c("Site", "Species", "sd")
C.l<- aggregate(dat.nut$C[!is.na(dat.nut$C)], by=list(dat.nut$Treatment[!is.na(dat.nut$C)],
dat.nut$Species[!is.na(dat.nut$C)]), FUN= "length")
colnames(C.l)<- c("Site", "Species", "Length")
C.mean$SE<- C.sd$sd/sqrt(C.l$Length)
t.test(dat.nut$C[dat.nut$Species== "Betula" & dat.nut$Treatment== "H"],
dat.nut$C[dat.nut$Species== "Betula" & dat.nut$Treatment== "L"])
t.test(dat.nut$C[dat.nut$Species== "Salix" & dat.nut$Treatment== "H"],
dat.nut$C[dat.nut$Species== "Salix" & dat.nut$Treatment== "L"])
t.test(dat.nut$C[dat.nut$Species== "Larix" & dat.nut$Treatment== "H"],
dat.nut$C[dat.nut$Species== "Larix" & dat.nut$Treatment== "L"])
###C:N####
cn.mean<- aggregate(dat.nut$C_N, by=list(dat.nut$Treatment, dat.nut$Species), FUN= "mean",
na.rm=TRUE)
colnames(cn.mean)<- c("Site", "Species", "CN")
cn.sd<- aggregate(dat.nut$C_N, by=list(dat.nut$Treatment, dat.nut$Species), FUN= "sd",
na.rm=TRUE)
colnames(cn.sd)<- c("Site", "Species", "sd")
cn.l<- aggregate(dat.nut$C_N[!is.na(dat.nut$C_N)],by=list(dat.nut$Treatment[!is.na(dat.nut$C_N)],
dat.nut$Species[!is.na(dat.nut$C_N)]), FUN= "length")
colnames(cn.l)<- c("Site", "Species", "Length")
cn.mean$SE<- cn.sd$sd/sqrt(cn.l$Length)
t.test(dat.nut$C_N[dat.nut$Species== "Betula" & dat.nut$Treatment== "H"],
dat.nut$C_N[dat.nut$Species== "Betula" & dat.nut$Treatment== "L"])
t.test(dat.nut$C_N[dat.nut$Species== "Salix" & dat.nut$Treatment== "H"],
dat.nut$C_N[dat.nut$Species== "Salix" & dat.nut$Treatment== "L"])
t.test(dat.nut$C_N[dat.nut$Species== "Larix" & dat.nut$Treatment== "H"],
dat.nut$C_N[dat.nut$Species== "Larix" & dat.nut$Treatment== "L"])
###Phosphorus###
p.mean<- aggregate(dat.nut$P, by=list(dat.nut$Treatment, dat.nut$Species), FUN= "mean", na.rm=TRUE)
colnames(p.mean)<- c("Site", "Species", "P")
p.sd<- aggregate(dat.nut$P, by=list(dat.nut$Treatment, dat.nut$Species), FUN= "sd", na.rm=TRUE)
colnames(p.sd)<- c("Site", "Species", "sd")
p.l<- aggregate(dat.nut$P, by=list(dat.nut$Treatment, dat.nut$Species), FUN= "length")
colnames(p.l)<- c("Site", "Species", "Length")
p.mean$SE<- p.sd$sd/sqrt(p.l$Length)
t.test(dat.nut$P[dat.nut$Species== "Betula" & dat.nut$Treatment== "H"],
dat.nut$P[dat.nut$Species== "Betula" & dat.nut$Treatment== "L"])
t.test(dat.nut$P[dat.nut$Species== "Salix" & dat.nut$Treatment== "H"],
dat.nut$P[dat.nut$Species== "Salix" & dat.nut$Treatment== "L"])
t.test(dat.nut$P[dat.nut$Species== "Larix" & dat.nut$Treatment== "H"],
dat.nut$P[dat.nut$Species== "Larix" & dat.nut$Treatment== "L"])
### N:P ###
np.mean<- aggregate(dat.nut$N_P, by=list(dat.nut$Treatment, dat.nut$Species), FUN= "mean", na.rm=TRUE)
colnames(np.mean)<- c("Site", "Species", "NP")
np.sd<- aggregate(dat.nut$N_P, by=list(dat.nut$Treatment, dat.nut$Species), FUN= "sd", na.rm=TRUE)
colnames(np.sd)<- c("Site", "Species", "sd")
np.l<- aggregate(dat.nut$N_P[!is.na(dat.nut$N_P)],by=list(dat.nut$Treatment[!is.na(dat.nut$N_P)],
dat.nut$Species[!is.na(dat.nut$N_P)]), FUN= "length")
colnames(np.l)<- c("Site", "Species", "Length")
np.mean$SE<- np.sd$sd/sqrt(np.l$Length)
t.test(dat.nut$N_P[dat.nut$Species== "Betula" & dat.nut$Treatment== "H"],
dat.nut$N_P[dat.nut$Species== "Betula" & dat.nut$Treatment== "L"])
t.test(dat.nut$N_P[dat.nut$Species== "Salix" & dat.nut$Treatment== "H"],
dat.nut$N_P[dat.nut$Species== "Salix" & dat.nut$Treatment== "L"])
t.test(dat.nut$N_P[dat.nut$Species== "Larix" & dat.nut$Treatment== "H"],
dat.nut$N_P[dat.nut$Species== "Larix" & dat.nut$Treatment== "L"])
###C:P ###
cp.mean<- aggregate(dat.nut$C_P, by=list(dat.nut$Treatment, dat.nut$Species), FUN= "mean"
, na.rm= TRUE)
colnames(cp.mean)<- c("Site", "Species", "CP")
cp.sd<- aggregate(dat.nut$C_P, by=list(dat.nut$Treatment, dat.nut$Species), FUN= "sd",
na.rm= TRUE)
colnames(cp.sd)<- c("Site", "Species", "sd")
cp.l<- aggregate(dat.nut$C_P[!is.na(dat.nut$C_P)], by=list(dat.nut$Treatment[!is.na(dat.nut$C_P)],
dat.nut$Species[!is.na(dat.nut$C_P)]), FUN= "length")
colnames(cp.l)<- c("Site", "Species", "Length")
cp.mean$SE<- cp.sd$sd/sqrt(cp.l$Length)
t.test(dat.nut$C_P[dat.nut$Species== "Betula" & dat.nut$Treatment== "H"],
dat.nut$C_P[dat.nut$Species== "Betula" & dat.nut$Treatment== "L"])
t.test(dat.nut$C_P[dat.nut$Species== "Salix" & dat.nut$Treatment== "H"],
dat.nut$C_P[dat.nut$Species== "Salix" & dat.nut$Treatment== "L"])
t.test(dat.nut$C_P[dat.nut$Species== "Larix" & dat.nut$Treatment== "H"],
dat.nut$C_P[dat.nut$Species== "Larix" & dat.nut$Treatment== "L"])
###LMA###
lma.mean<- aggregate(dat.nut$LMA, by=list(dat.nut$Treatment, dat.nut$Species), FUN= "mean"
)
colnames(lma.mean)<- c("Site", "Species", "LMA")
lma.sd<- aggregate(dat.nut$LMA, by=list(dat.nut$Treatment, dat.nut$Species), FUN= "sd"
)
colnames(lma.sd)<- c("Site", "Species", "sd")
lma.l<- aggregate(dat.nut$LMA, by=list(dat.nut$Treatment, dat.nut$Species), FUN= "length"
)
colnames(lma.l)<- c("Site", "Species", "Length")
lma.mean$SE<- lma.sd$sd/sqrt(lma.l$Length)
t.sd<- sqrt((((lma.l$Length[lma.mean$Species== "Betula" & lma.mean$Site== "H"]-1)
*(lma.sd$sd[lma.mean$Species== "Betula" & lma.mean$Site== "H"]^2))+
((lma.l$Length[lma.mean$Species== "Betula" & lma.mean$Site== "L"]-1)
*(lma.sd$sd[lma.mean$Species== "Betula" & lma.mean$Site== "L"]^2)))/
(lma.l$Length[lma.mean$Species== "Betula" & lma.mean$Site== "H"]+
lma.l$Length[lma.mean$Species== "Betula" & lma.mean$Site== "L"]-2))
t.check<- (lma.mean$LMA[lma.mean$Species== "Betula" & lma.mean$Site== "H"]-
lma.mean$LMA[lma.mean$Species== "Betula" & lma.mean$Site== "L"])/
(t.sd*sqrt((1/lma.l$Length[lma.mean$Species== "Betula" & lma.mean$Site== "H"])+
(1/lma.l$Length[lma.mean$Species== "Betula" & lma.mean$Site== "L"])))
t.check
t.test(dat.nut$LMA[dat.nut$Species== "Betula" & dat.nut$Treatment== "H"],
dat.nut$LMA[dat.nut$Species== "Betula" & dat.nut$Treatment== "L"])
t.test(dat.nut$LMA[dat.nut$Species== "Salix" & dat.nut$Treatment== "H"],
dat.nut$LMA[dat.nut$Species== "Salix" & dat.nut$Treatment== "L"])
t.test(dat.nut$LMA[dat.nut$Species== "Larix" & dat.nut$Treatment== "H"],
dat.nut$LMA[dat.nut$Species== "Larix" & dat.nut$Treatment== "L"])
#stomata density |
#setwd("C:/Users/phsrtcow/Documents/GitHub/os-sch-children-2021")
# 1-year lookback period
# load dataset
df <- read.csv("output/input_comorbidity.csv")
df <- df[, -which(colnames(df) == "patient_id")]
# set up blank matrix for results
results <- matrix(nrow = 12, ncol = 7)
# add conditions as row names
conditions <- colnames(df)[1 : nrow(results)]
conditions <- gsub("_gp", "", conditions)
conditions <- gsub("_", " ", conditions)
conditions <- gsub("mi", "Myocardial infarction", conditions)
conditions <- gsub("hf", "Heart failure ", conditions)
rownames(results) <- stringr::str_to_sentence(conditions)
# set column names
colnames(results) <- c("Number of patients with record in TPP or SUS",
"Number of patients with record in TPP only",
"% of patients with record in TPP only",
"Number of patients with record in TPP and SUS",
"% of patients with record in TPP and SUS",
"Number of patients with record in SUS only",
"% of patients with record in SUS only")
# add values to results table (rounded to nearest 10)
for (i in 1 : nrow(results)) {
results[i, 1] <- round(sum(df[, i] == 1 | df[, i + nrow(results)] == 1), -1)
results[i, 2] <- round(sum(df[, i] == 1 & df[, i + nrow(results)] == 0), -1)
results[i, 3] <- sum(df[, i] == 1 & df[, i + nrow(results)] == 0) / results[i, 1]
results[i, 4] <- round(sum(df[, i] == 1 & df[, i + nrow(results)] == 1), -1)
results[i, 5] <- sum(df[, i] == 1 & df[, i + nrow(results)] == 1) / results[i, 1]
results[i, 6] <- round(sum(df[, i] == 0 & df[, i + nrow(results)] == 1), -1)
results[i, 7] <- sum(df[, i] == 0 & df[, i + nrow(results)] == 1) / results[i, 1]
}
# format percentages
library(scales)
results[, c(3,5,7)] <- apply(results[, c(3,5,7)], 2, percent, accuracy = 0.1)
# save table
write.csv(results, file = "output/comorbidity_table.csv")
rm(list = ls())
# 5-year lookback period
# load dataset
df <- read.csv("output/input_comorbidity_5y.csv")
df <- df[, -which(colnames(df) == "patient_id")]
# set up blank matrix for results
results <- matrix(nrow = 12, ncol = 7)
# add conditions as row names
conditions <- colnames(df)[1 : nrow(results)]
conditions <- gsub("_gp", "", conditions)
conditions <- gsub("_", " ", conditions)
conditions <- gsub("mi", "Myocardial infarction", conditions)
conditions <- gsub("hf", "Heart failure ", conditions)
rownames(results) <- stringr::str_to_sentence(conditions)
# set column names
colnames(results) <- c("Number of patients with record in TPP or SUS",
"Number of patients with record in TPP only",
"% of patients with record in TPP only",
"Number of patients with record in TPP and SUS",
"% of patients with record in TPP and SUS",
"Number of patients with record in SUS only",
"% of patients with record in SUS only")
# add values to results table (rounded to nearest 10)
for (i in 1 : nrow(results)) {
results[i, 1] <- round(sum(df[, i] == 1 | df[, i + nrow(results)] == 1), -1)
results[i, 2] <- round(sum(df[, i] == 1 & df[, i + nrow(results)] == 0), -1)
results[i, 3] <- sum(df[, i] == 1 & df[, i + nrow(results)] == 0) / results[i, 1]
results[i, 4] <- round(sum(df[, i] == 1 & df[, i + nrow(results)] == 1), -1)
results[i, 5] <- sum(df[, i] == 1 & df[, i + nrow(results)] == 1) / results[i, 1]
results[i, 6] <- round(sum(df[, i] == 0 & df[, i + nrow(results)] == 1), -1)
results[i, 7] <- sum(df[, i] == 0 & df[, i + nrow(results)] == 1) / results[i, 1]
}
# format percentages
library(scales)
results[, c(3,5,7)] <- apply(results[, c(3,5,7)], 2, percent, accuracy = 0.1)
# save table
write.csv(results, file = "output/comorbidity_table_5y.csv")
| /analysis/comorbidity.R | permissive | opensafely/os-sch-children-2021 | R | false | false | 3,862 | r | #setwd("C:/Users/phsrtcow/Documents/GitHub/os-sch-children-2021")
# 1-year lookback period
# load dataset
df <- read.csv("output/input_comorbidity.csv")
df <- df[, -which(colnames(df) == "patient_id")]
# set up blank matrix for results
results <- matrix(nrow = 12, ncol = 7)
# add conditions as row names
conditions <- colnames(df)[1 : nrow(results)]
conditions <- gsub("_gp", "", conditions)
conditions <- gsub("_", " ", conditions)
conditions <- gsub("mi", "Myocardial infarction", conditions)
conditions <- gsub("hf", "Heart failure ", conditions)
rownames(results) <- stringr::str_to_sentence(conditions)
# set column names
colnames(results) <- c("Number of patients with record in TPP or SUS",
"Number of patients with record in TPP only",
"% of patients with record in TPP only",
"Number of patients with record in TPP and SUS",
"% of patients with record in TPP and SUS",
"Number of patients with record in SUS only",
"% of patients with record in SUS only")
# add values to results table (rounded to nearest 10)
for (i in 1 : nrow(results)) {
results[i, 1] <- round(sum(df[, i] == 1 | df[, i + nrow(results)] == 1), -1)
results[i, 2] <- round(sum(df[, i] == 1 & df[, i + nrow(results)] == 0), -1)
results[i, 3] <- sum(df[, i] == 1 & df[, i + nrow(results)] == 0) / results[i, 1]
results[i, 4] <- round(sum(df[, i] == 1 & df[, i + nrow(results)] == 1), -1)
results[i, 5] <- sum(df[, i] == 1 & df[, i + nrow(results)] == 1) / results[i, 1]
results[i, 6] <- round(sum(df[, i] == 0 & df[, i + nrow(results)] == 1), -1)
results[i, 7] <- sum(df[, i] == 0 & df[, i + nrow(results)] == 1) / results[i, 1]
}
# format percentages
library(scales)
results[, c(3,5,7)] <- apply(results[, c(3,5,7)], 2, percent, accuracy = 0.1)
# save table
write.csv(results, file = "output/comorbidity_table.csv")
rm(list = ls())
# 5-year lookback period
# load dataset
df <- read.csv("output/input_comorbidity_5y.csv")
df <- df[, -which(colnames(df) == "patient_id")]
# set up blank matrix for results
results <- matrix(nrow = 12, ncol = 7)
# add conditions as row names
conditions <- colnames(df)[1 : nrow(results)]
conditions <- gsub("_gp", "", conditions)
conditions <- gsub("_", " ", conditions)
conditions <- gsub("mi", "Myocardial infarction", conditions)
conditions <- gsub("hf", "Heart failure ", conditions)
rownames(results) <- stringr::str_to_sentence(conditions)
# set column names
colnames(results) <- c("Number of patients with record in TPP or SUS",
"Number of patients with record in TPP only",
"% of patients with record in TPP only",
"Number of patients with record in TPP and SUS",
"% of patients with record in TPP and SUS",
"Number of patients with record in SUS only",
"% of patients with record in SUS only")
# add values to results table (rounded to nearest 10)
for (i in 1 : nrow(results)) {
results[i, 1] <- round(sum(df[, i] == 1 | df[, i + nrow(results)] == 1), -1)
results[i, 2] <- round(sum(df[, i] == 1 & df[, i + nrow(results)] == 0), -1)
results[i, 3] <- sum(df[, i] == 1 & df[, i + nrow(results)] == 0) / results[i, 1]
results[i, 4] <- round(sum(df[, i] == 1 & df[, i + nrow(results)] == 1), -1)
results[i, 5] <- sum(df[, i] == 1 & df[, i + nrow(results)] == 1) / results[i, 1]
results[i, 6] <- round(sum(df[, i] == 0 & df[, i + nrow(results)] == 1), -1)
results[i, 7] <- sum(df[, i] == 0 & df[, i + nrow(results)] == 1) / results[i, 1]
}
# format percentages
library(scales)
results[, c(3,5,7)] <- apply(results[, c(3,5,7)], 2, percent, accuracy = 0.1)
# save table
write.csv(results, file = "output/comorbidity_table_5y.csv")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SSplotBiology.R
\name{SSplotBiology}
\alias{SSplotBiology}
\title{Plot biology related quantities.}
\usage{
SSplotBiology(replist, plot = TRUE, print = FALSE, add = FALSE,
subplots = 1:17, seas = 1, morphs = NULL, colvec = c("red", "blue",
"grey20"), ltyvec = c(1, 2), shadealpha = 0.1, imageplot_text = FALSE,
imageplot_text_round = 0, legendloc = "topleft", plotdir = "default",
labels = c("Length (cm)", "Age (yr)", "Maturity",
"Mean weight (kg) in last year", "Spawning output",
"Length (cm, beginning of the year)", "Natural mortality",
"Female weight (kg)", "Female length (cm)", "Fecundity",
"Default fecundity label", "Year", "Hermaphroditism transition rate",
"Fraction females by age at equilibrium"), pwidth = 6.5, pheight = 5,
punits = "in", res = 300, ptsize = 10, cex.main = 1,
mainTitle = TRUE, verbose = TRUE)
}
\arguments{
\item{replist}{List created by \code{SS_output}}
\item{plot}{Plot to active plot device?}
\item{print}{Print to PNG files?}
\item{add}{add to existing plot}
\item{subplots}{vector controlling which subplots to create}
\item{seas}{which season to plot (values other than 1 only work in
seasonal models but but maybe not fully implemented)}
\item{morphs}{Which morphs to plot (if more than 1 per sex)? By default this
will be replist$mainmorphs}
\item{colvec}{vector of length 3 with colors for various points/lines}
\item{ltyvec}{vector of length 2 with lty for females/males in growth plots
values can be applied to other plots in the future}
\item{shadealpha}{Transparency parameter used to make default shadecol
values (see ?rgb for more info)}
\item{imageplot_text}{Whether to add numerical text to the image plots
when using weight at age. Defaults to FALSE.}
\item{imageplot_text_round}{The number of significant digits to which
the image plot text is rounded. Defaults to 0, meaning whole numbers. If
all your values are small and there's no contrast in the text, you might
want to make this 1 or 2.}
\item{legendloc}{Location of legend (see ?legend for more info)}
\item{plotdir}{Directory where PNG files will be written. by default it will
be the directory where the model was run.}
\item{labels}{Vector of labels for plots (titles and axis labels)}
\item{pwidth}{Width of plot}
\item{pheight}{Height of plot}
\item{punits}{Units for PNG file}
\item{res}{Resolution for PNG file}
\item{ptsize}{Point size for PNG file}
\item{cex.main}{Character expansion for plot titles}
\item{mainTitle}{Logical indicating if a title should be included at the top}
\item{verbose}{Return updates of function progress to the R GUI?}
}
\description{
Plot biology related quantities from Stock Synthesis model output, including
mean weight, maturity, fecundity, and spawning output.
}
\seealso{
\code{\link{SS_plots}}, \code{\link{SS_output}}
}
\author{
Ian Stewart, Ian Taylor
}
| /man/SSplotBiology.Rd | no_license | GwladysLambert/r4ss | R | false | true | 2,932 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SSplotBiology.R
\name{SSplotBiology}
\alias{SSplotBiology}
\title{Plot biology related quantities.}
\usage{
SSplotBiology(replist, plot = TRUE, print = FALSE, add = FALSE,
subplots = 1:17, seas = 1, morphs = NULL, colvec = c("red", "blue",
"grey20"), ltyvec = c(1, 2), shadealpha = 0.1, imageplot_text = FALSE,
imageplot_text_round = 0, legendloc = "topleft", plotdir = "default",
labels = c("Length (cm)", "Age (yr)", "Maturity",
"Mean weight (kg) in last year", "Spawning output",
"Length (cm, beginning of the year)", "Natural mortality",
"Female weight (kg)", "Female length (cm)", "Fecundity",
"Default fecundity label", "Year", "Hermaphroditism transition rate",
"Fraction females by age at equilibrium"), pwidth = 6.5, pheight = 5,
punits = "in", res = 300, ptsize = 10, cex.main = 1,
mainTitle = TRUE, verbose = TRUE)
}
\arguments{
\item{replist}{List created by \code{SS_output}}
\item{plot}{Plot to active plot device?}
\item{print}{Print to PNG files?}
\item{add}{add to existing plot}
\item{subplots}{vector controlling which subplots to create}
\item{seas}{which season to plot (values other than 1 only work in
seasonal models but but maybe not fully implemented)}
\item{morphs}{Which morphs to plot (if more than 1 per sex)? By default this
will be replist$mainmorphs}
\item{colvec}{vector of length 3 with colors for various points/lines}
\item{ltyvec}{vector of length 2 with lty for females/males in growth plots
values can be applied to other plots in the future}
\item{shadealpha}{Transparency parameter used to make default shadecol
values (see ?rgb for more info)}
\item{imageplot_text}{Whether to add numerical text to the image plots
when using weight at age. Defaults to FALSE.}
\item{imageplot_text_round}{The number of significant digits to which
the image plot text is rounded. Defaults to 0, meaning whole numbers. If
all your values are small and there's no contrast in the text, you might
want to make this 1 or 2.}
\item{legendloc}{Location of legend (see ?legend for more info)}
\item{plotdir}{Directory where PNG files will be written. by default it will
be the directory where the model was run.}
\item{labels}{Vector of labels for plots (titles and axis labels)}
\item{pwidth}{Width of plot}
\item{pheight}{Height of plot}
\item{punits}{Units for PNG file}
\item{res}{Resolution for PNG file}
\item{ptsize}{Point size for PNG file}
\item{cex.main}{Character expansion for plot titles}
\item{mainTitle}{Logical indicating if a title should be included at the top}
\item{verbose}{Return updates of function progress to the R GUI?}
}
\description{
Plot biology related quantities from Stock Synthesis model output, including
mean weight, maturity, fecundity, and spawning output.
}
\seealso{
\code{\link{SS_plots}}, \code{\link{SS_output}}
}
\author{
Ian Stewart, Ian Taylor
}
|
## ----install-EML-package, results="hide", warning=FALSE------------------
# Setup: install (one-time, commented out) and load the packages used below.
# The EML package here is the development version from GitHub
# (ropensci/EML, v1 API); uncomment these lines on first run:
#library("devtools")
#install_github("ropensci/EML", build=FALSE, dependencies=c("DEPENDS", "IMPORTS"))
#devtools::install_github(c("hadley/purrr", "ropensci/EML"))
# Load packages: EML parses Ecological Metadata Language XML, purrr supplies
# functional iteration (map_df below), dplyr supplies data-frame verbs.
library("EML")
library("purrr")
library("dplyr")
# Data source: Harvard Forest meteorological dataset hf001.
# Landing page:
#   http://harvardforest.fas.harvard.edu:8080/exist/apps/datasets/showData.html?id=hf001
# Example data table (table 4, monthly means):
#   http://harvardforest.fas.harvard.edu/data/p00/hf001/hf001-04-monthly-m.csv
## ----read-eml------------------------------------------------------------
# Download and parse the EML metadata document for the Harvard Forest
# met-station dataset into an S4 object.
# NOTE(review): `eml_read()` is the legacy (v1, GitHub) EML API; the CRAN
# release renamed it `read_eml()` — confirm which package version is in use.
eml_HARV <- eml_read("http://harvardforest.fas.harvard.edu/data/eml/hf001.xml")
# How much memory does the parsed metadata object occupy?
object.size(eml_HARV)
# Confirm what kind of object eml_read() returned (an S4 EML class).
class(eml_HARV)
## ----view-eml-content----------------------------------------------------
# Query high-level metadata fields with eml_get(object, element-name).
# Who to contact about this dataset:
eml_get(eml_HARV,"contact")
# All keywords attached to the dataset (useful for discovery/search):
eml_get(eml_HARV,"keywords")
# Coverage = the dataset's spatial extent and temporal range:
eml_get(eml_HARV,"coverage")
## ----view-dataset-eml----------------------------------------------------
# Print the dataset abstract (free-text description) by drilling into the
# S4 slots directly with @.
eml_HARV@dataset@abstract
# The raw abstract prints as one long string; strwrap() splits it into
# lines of at most `width` characters, which is much easier to read.
strwrap(eml_HARV@dataset@abstract, width = 80)
## ----find-geographic-coverage--------------------------------------------
# Print the geographic coverage node: site description plus the bounding
# coordinates (west/east/north/south) used for mapping below.
eml_HARV@dataset@coverage@geographicCoverage
## ----map-location, warning=FALSE, message=FALSE--------------------------
# Plot the dataset's location on a basemap of Massachusetts using the
# bounding coordinates pulled from the EML geographic coverage.
# x coordinate = western bounding longitude
XCoord <- eml_HARV@dataset@coverage@geographicCoverage@boundingCoordinates@westBoundingCoordinate
# y coordinate = northern bounding latitude
YCoord <- eml_HARV@dataset@coverage@geographicCoverage@boundingCoordinates@northBoundingCoordinate
library(ggmap)
#map <- get_map(location='Harvard', maptype = "terrain")
map <- get_map(location='massachusetts', maptype = "toner", zoom =8)
# BUG FIX: ggmap()'s `extent` argument takes one of the strings "normal",
# "device", or "panel" (default "panel"), not a logical. `extent = TRUE`
# is invalid; "device" makes the map fill the whole plotting device.
ggmap(map, extent = "device") +
  geom_point(aes(x=XCoord,y=YCoord),
             color="darkred", size=6, pch=18)
## ----view-data-tables----------------------------------------------------
#we can view the data table name and description as follows
eml_HARV@dataset@dataTable[[1]]@entityName
eml_HARV@dataset@dataTable[[1]]@entityDescription
#view download path
eml_HARV@dataset@dataTable[[1]]@physical@distribution@online@url
## ----create-datatable-df-------------------------------------------------
#create an object that just contains dataTable level attributes
all.tables <- eml_HARV@dataset@dataTable
#use purrrr to generate a data.frame that contains the attrName and Def for each column
dataTable.desc <- purrr::map_df(all.tables,
function(x) data_frame(attribute = x@entityName,
description = x@entityDescription,
download.path = x@physical@distribution@online@url))
#view table descriptions
dataTable.desc
#view just the paths (they are too long to render in the output above)
head(dataTable.desc[3])
#how many rows (data tables) are in the data_frame?
nrow(dataTable.desc)
## ----data-table-attr-----------------------------------------------------
#create an object that contains metadata for table 8 only
EML.hr.dataTable <- eml_HARV@dataset@dataTable[[8]]
#Check out the table's name - make sure it's the right table!
EML.hr.dataTable@entityName
#what information does this data table contain?
EML.hr.dataTable@entityDescription
#how is the text file delimited?
EML.hr.dataTable@physical
#view table id
EML.hr.dataTable@id
#this is the download URL for the file.
EML.hr.dataTable@physical@distribution@online@url
## ----view-15min-attr-list------------------------------------------------
#get list of measurements for the 10th data table in the EML file
EML.hr.attr <- EML.hr.dataTable@attributeList@attribute
#the first column is the date field
EML.hr.attr[[1]]
#view the column name and description for the first column
EML.hr.attr[[1]]@attributeName
EML.hr.attr[[1]]@attributeDefinition
## ----view-monthly-attrs--------------------------------------------------
#list of all attribute description and metadata
#EML.15min.attr
# use a split-apply-combine approach to parse the attribute data
# and create a data_frame with only the attribute name and description
#dplyr approach
#do.call(rbind,
# lapply(EML.15min.attr, function(x) data.frame(column.name = x@attributeName,
# definition = x@attributeDefinition)))
#use purrrr to generate a dplyr data_frame that contains the attrName
#and Def for each column
EML.hr.attr.dt8 <- purrr::map_df(EML.hr.attr,
function(x) data_frame(attribute = x@attributeName,
description = x@attributeDefinition))
EML.hr.attr.dt8
#view first 6 rows for each column
head(EML.hr.attr.dt8$attribute)
head(EML.hr.attr.dt8$description)
## ----download-data-------------------------------------------------------
#view url
EML.hr.dataTable@physical@distribution@online@url
#Read in csv (data table 8)
month.avg.m.HARV <- read.csv(EML.hr.dataTable@physical@distribution@online@url,
stringsAsFactors = FALSE)
str(month.avg.m.HARV)
# view table structure
EML.hr.dataTable@physical
## ----EML-Structure-------------------------------------------------------
# List the on-disk CSV file paths for the data tables described in the EML.
# Fix: this chunk previously called eml_get() on an undefined object `obj`
# (the likely cause of the "WRONG OUTPUT" confusion noted here before); it
# must operate on the eml_HARV object read in at the top of the script.
#what are the names of those tables?
data.paths <- eml_get(eml_HARV, "csv_filepaths")
data.paths
#view the path for the fourth table only
data.paths[4]
| /_posts/EML/2015-12-12-Intro-to-EML.R | no_license | lstanish/NEON-Lesson-Building-Data-Skills | R | false | false | 5,604 | r | ## ----install-EML-package, results="hide", warning=FALSE------------------
# Tutorial script (duplicate copy): read Ecological Metadata Language (EML)
# for the Harvard Forest met-station dataset (hf001) and explore its
# structure with the rOpenSci EML package plus purrr/dplyr.
#install R EML tools
#library("devtools")
#install_github("ropensci/EML", build=FALSE, dependencies=c("DEPENDS", "IMPORTS"))
#devtools::install_github(c("hadley/purrr", "ropensci/EML"))
#call packages
library("EML")
library("purrr")
library("dplyr")
#data location
#http://harvardforest.fas.harvard.edu:8080/exist/apps/datasets/showData.html?id=hf001
#table 4 http://harvardforest.fas.harvard.edu/data/p00/hf001/hf001-04-monthly-m.csv
## ----read-eml------------------------------------------------------------
#import EML from Harvard Forest Met Data (downloads the XML over HTTP)
eml_HARV <- eml_read("http://harvardforest.fas.harvard.edu/data/eml/hf001.xml")
#view size of object
object.size(eml_HARV)
#view the object class
class(eml_HARV)
## ----view-eml-content----------------------------------------------------
#view the contact name listed in the file
#this works well!
eml_get(eml_HARV,"contact")
#grab all keywords in the file
eml_get(eml_HARV,"keywords")
#figure out the extent & temporal coverage of the data
eml_get(eml_HARV,"coverage")
## ----view-dataset-eml----------------------------------------------------
#view dataset abstract (description) via S4 slot access
eml_HARV@dataset@abstract
#the above might be easier to read if we force line breaks!
#we can use strwrap to do this
#write out abstract - forcing line breaks at 80 characters
strwrap(eml_HARV@dataset@abstract, width = 80)
## ----find-geographic-coverage--------------------------------------------
#view geographic coverage
eml_HARV@dataset@coverage@geographicCoverage
## ----map-location, warning=FALSE, message=FALSE--------------------------
# grab x coordinate (west bounding longitude) from the bounding box
XCoord <- eml_HARV@dataset@coverage@geographicCoverage@boundingCoordinates@westBoundingCoordinate
#grab y coordinate (north bounding latitude)
YCoord <- eml_HARV@dataset@coverage@geographicCoverage@boundingCoordinates@northBoundingCoordinate
library(ggmap)
#map <- get_map(location='Harvard', maptype = "terrain")
map <- get_map(location='massachusetts', maptype = "toner", zoom =8)
# NOTE(review): ggmap()'s `extent` argument documents the values
# "normal"/"device"/"panel", not TRUE — confirm the intended value here.
ggmap(map, extent=TRUE) +
geom_point(aes(x=XCoord,y=YCoord),
color="darkred", size=6, pch=18)
## ----view-data-tables----------------------------------------------------
#we can view the data table name and description as follows
eml_HARV@dataset@dataTable[[1]]@entityName
eml_HARV@dataset@dataTable[[1]]@entityDescription
#view download path
eml_HARV@dataset@dataTable[[1]]@physical@distribution@online@url
## ----create-datatable-df-------------------------------------------------
#create an object that just contains dataTable level attributes
all.tables <- eml_HARV@dataset@dataTable
#use purrr to generate a data.frame that contains the attrName and Def for each column
# NOTE(review): dplyr::data_frame() is deprecated; tibble::tibble() is the
# modern equivalent (behavior here is unchanged).
dataTable.desc <- purrr::map_df(all.tables,
function(x) data_frame(attribute = x@entityName,
description = x@entityDescription,
download.path = x@physical@distribution@online@url))
#view table descriptions
dataTable.desc
#view just the paths (they are too long to render in the output above)
head(dataTable.desc[3])
#how many rows (data tables) are in the data_frame?
nrow(dataTable.desc)
## ----data-table-attr-----------------------------------------------------
#create an object that contains metadata for table 8 only
EML.hr.dataTable <- eml_HARV@dataset@dataTable[[8]]
#Check out the table's name - make sure it's the right table!
EML.hr.dataTable@entityName
#what information does this data table contain?
EML.hr.dataTable@entityDescription
#how is the text file delimited?
EML.hr.dataTable@physical
#view table id
EML.hr.dataTable@id
#this is the download URL for the file.
EML.hr.dataTable@physical@distribution@online@url
## ----view-15min-attr-list------------------------------------------------
#get list of measurements for the data table selected above
# NOTE(review): comment said "10th data table" but the code selects table 8 —
# confirm which index is intended.
EML.hr.attr <- EML.hr.dataTable@attributeList@attribute
#the first column is the date field
EML.hr.attr[[1]]
#view the column name and description for the first column
EML.hr.attr[[1]]@attributeName
EML.hr.attr[[1]]@attributeDefinition
## ----view-monthly-attrs--------------------------------------------------
#list of all attribute description and metadata
#EML.15min.attr
# use a split-apply-combine approach to parse the attribute data
# and create a data_frame with only the attribute name and description
#dplyr approach
#do.call(rbind,
# lapply(EML.15min.attr, function(x) data.frame(column.name = x@attributeName,
# definition = x@attributeDefinition)))
#use purrr to generate a dplyr data_frame that contains the attrName
#and Def for each column
EML.hr.attr.dt8 <- purrr::map_df(EML.hr.attr,
function(x) data_frame(attribute = x@attributeName,
description = x@attributeDefinition))
EML.hr.attr.dt8
#view first 6 rows for each column
head(EML.hr.attr.dt8$attribute)
head(EML.hr.attr.dt8$description)
## ----download-data-------------------------------------------------------
#view url
EML.hr.dataTable@physical@distribution@online@url
#Read in csv (data table 8) directly from the URL stored in the metadata
month.avg.m.HARV <- read.csv(EML.hr.dataTable@physical@distribution@online@url,
stringsAsFactors = FALSE)
str(month.avg.m.HARV)
# view table structure
EML.hr.dataTable@physical
## ----EML-Structure-------------------------------------------------------
# List the on-disk CSV file paths for the data tables described in the EML.
# Fix: this chunk previously called eml_get() on an undefined object `obj`
# (the likely cause of the "WRONG OUTPUT" confusion noted here before); it
# must operate on the eml_HARV object read in at the top of the script.
#what are the names of those tables?
data.paths <- eml_get(eml_HARV, "csv_filepaths")
data.paths
#view the path for the fourth table only
data.paths[4]
|
context("wflow_html")

# Test wflow_html --------------------------------------------------------------

# Render a fixture that dumps knitr's chunk options to RDS, then confirm
# wflow_html's custom defaults (comment = NA, fig.align = "center",
# tidy = FALSE) were in effect during the render.
test_that("wflow_html sets custom knitr chunk options", {
  skip_on_cran()
  # The R Markdown file opts_chunk.Rmd reads the options and exports to an RDS
  # file
  tmp_dir <- tempfile()
  fs::dir_create(tmp_dir)
  tmp_dir <- workflowr:::absolute(tmp_dir)
  on.exit(unlink(tmp_dir, recursive = TRUE))
  rmd <- file.path(tmp_dir, "file.Rmd")
  fs::file_copy("files/test-wflow_html/opts_chunk.Rmd", rmd)
  html <- rmarkdown::render(rmd, quiet = TRUE)
  expect_true(fs::file_exists(html))
  observed <- readRDS(file.path(tmp_dir, "opts_chunk.rds"))
  expect_identical(observed$comment, NA)
  expect_identical(observed$fig.align, "center")
  expect_identical(observed$tidy, FALSE)
})

# The fixture writes knit_root_dir.txt into its working directory; the YAML
# header points knit_root_dir one level up, so the marker file must land in
# tmp_dir, not in the Rmd's own sub_dir.
test_that("wflow_html can set knit_root_dir in YAML header", {
  skip_on_cran()
  # The R Markdown file knit_root_dir.Rmd creates a file knit_root_dir.txt in
  # its working directory, which is one upstream from its file location.
  tmp_dir <- tempfile()
  fs::dir_create(tmp_dir)
  tmp_dir <- workflowr:::absolute(tmp_dir)
  on.exit(unlink(tmp_dir, recursive = TRUE))
  sub_dir <- file.path(tmp_dir, "sub_dir")
  fs::dir_create(sub_dir)
  rmd <- file.path(sub_dir, "file.Rmd")
  fs::file_copy("files/test-wflow_html/knit_root_dir.Rmd", rmd)
  html <- rmarkdown::render(rmd, quiet = TRUE)
  expect_true(fs::file_exists(html))
  expect_false(fs::file_exists(file.path(sub_dir, "knit_root_dir.txt")))
  expect_true(fs::file_exists(file.path(tmp_dir, "knit_root_dir.txt")))
})

# Same fixture as above, but the knit_root_dir passed directly to render()
# must win over the value in the YAML header.
test_that("knit_root_dir can be overridden by command-line render argument", {
  skip_on_cran()
  # The R Markdown file knit_root_dir.Rmd creates a file knit_root_dir.txt in
  # its working directory, which is one upstream from its file location.
  # However, this is overriden by passing the directory that contains the file
  # directly to render.
  tmp_dir <- tempfile()
  fs::dir_create(tmp_dir)
  tmp_dir <- workflowr:::absolute(tmp_dir)
  on.exit(unlink(tmp_dir, recursive = TRUE))
  sub_dir <- file.path(tmp_dir, "sub_dir")
  fs::dir_create(sub_dir)
  rmd <- file.path(sub_dir, "file.Rmd")
  fs::file_copy("files/test-wflow_html/knit_root_dir.Rmd", rmd)
  html <- rmarkdown::render(rmd, quiet = TRUE, knit_root_dir = dirname(rmd))
  expect_true(fs::file_exists(html))
  expect_true(fs::file_exists(file.path(sub_dir, "knit_root_dir.txt")))
  expect_false(fs::file_exists(file.path(tmp_dir, "knit_root_dir.txt")))
})

# The workflowr YAML option `sessioninfo` replaces the default session
# information call; the rendered HTML should mention it exactly once.
test_that("wflow_html can change the sesssioninfo from the YAML header", {
  skip_on_cran()
  tmp_dir <- tempfile()
  fs::dir_create(tmp_dir)
  tmp_dir <- workflowr:::absolute(tmp_dir)
  on.exit(unlink(tmp_dir, recursive = TRUE))
  rmd <- file.path(tmp_dir, "file.Rmd")
  lines <- c("---",
             "output: workflowr::wflow_html",
             "workflowr:",
             " sessioninfo: \"devtools::session_info()\"",
             "---",
             "",
             "`r 1 + 1`")
  writeLines(lines, rmd)
  html <- rmarkdown::render(rmd, quiet = TRUE)
  expect_true(fs::file_exists(html))
  html_lines <- readLines(html)
  expect_true(sum(stringr::str_detect(html_lines, "devtools::session_info")) == 1)
})

# Setting `seed: 1` in the YAML header must make the inline rnorm() draw
# reproduce the value obtained locally after set.seed(1).
test_that("wflow_html can change the seed from the YAML header", {
  skip_on_cran()
  tmp_dir <- tempfile()
  fs::dir_create(tmp_dir)
  tmp_dir <- workflowr:::absolute(tmp_dir)
  on.exit(unlink(tmp_dir, recursive = TRUE))
  rmd <- file.path(tmp_dir, "file.Rmd")
  lines <- c("---",
             "output: workflowr::wflow_html",
             "workflowr:",
             " seed: 1",
             "---",
             "",
             "`r round(rnorm(1), 5)`")
  writeLines(lines, rmd)
  html <- rmarkdown::render(rmd, quiet = TRUE)
  expect_true(fs::file_exists(html))
  html_lines <- readLines(html)
  set.seed(1)
  expect_true(sum(stringr::str_detect(html_lines,
                                      as.character(round(rnorm(1), 5)))) == 1)
})

# A file with no YAML header at all should still render when the output
# format is supplied programmatically.
test_that("wflow_html does not require a YAML header", {
  skip_on_cran()
  tmp_dir <- tempfile()
  fs::dir_create(tmp_dir)
  tmp_dir <- workflowr:::absolute(tmp_dir)
  on.exit(unlink(tmp_dir, recursive = TRUE))
  rmd <- file.path(tmp_dir, "file.Rmd")
  lines <- c("some text")
  writeLines(lines, rmd)
  html <- rmarkdown::render(rmd, output_format = wflow_html(), quiet = TRUE)
  expect_true(fs::file_exists(html))
  html_lines <- readLines(html)
  expect_true(sum(stringr::str_detect(html_lines, "some text")) == 1)
})
# A seed in _workflowr.yml applies to files in the same directory, and a
# seed in the file's own YAML header takes precedence over it.
test_that("wflow_html reads _workflowr.yml in the same directory, but can be overidden", {
  skip_on_cran()
  tmp_dir <- tempfile()
  fs::dir_create(tmp_dir)
  tmp_dir <- workflowr:::absolute(tmp_dir)
  on.exit(unlink(tmp_dir, recursive = TRUE))
  # Set seed of 5 in _workflowr.yml
  writeLines("seed: 5", con = file.path(tmp_dir, "_workflowr.yml"))
  rmd <- file.path(tmp_dir, "file.Rmd")
  lines <- c("---",
             "output: workflowr::wflow_html",
             "---",
             "",
             "`r round(rnorm(1), 5)`")
  writeLines(lines, rmd)
  html <- rmarkdown::render(rmd, quiet = TRUE)
  expect_true(fs::file_exists(html))
  html_lines <- readLines(html)
  set.seed(5)
  expect_true(sum(stringr::str_detect(html_lines,
                                      as.character(round(rnorm(1), 5)))) == 1)
  # Override _workflowr.yml by specifying in YAML header
  lines <- c("---",
             "output: workflowr::wflow_html",
             "workflowr:",
             " seed: 1",
             "---",
             "",
             "`r round(rnorm(1), 5)`")
  writeLines(lines, rmd)
  html <- rmarkdown::render(rmd, quiet = TRUE)
  html_lines <- readLines(html)
  set.seed(1)
  expect_true(sum(stringr::str_detect(html_lines,
                                      as.character(round(rnorm(1), 5)))) == 1)
})

# In a freshly started workflowr project, `r getwd()` inside analysis/file.Rmd
# should report the project root, i.e. the default knit_root_dir is ".".
test_that("The default knit_root_dir for a workflowr project is the root directory", {
  skip_on_cran()
  tmp_dir <- tempfile()
  tmp_start <- wflow_start(tmp_dir, change_wd = FALSE, user.name = "Test Name",
                           user.email = "test@email")
  tmp_dir <- workflowr:::absolute(tmp_dir)
  on.exit(unlink(tmp_dir, recursive = TRUE))
  rmd <- file.path(tmp_dir, "analysis", "file.Rmd")
  lines <- c("`r getwd()`")
  writeLines(lines, rmd)
  html <- rmarkdown::render_site(rmd, quiet = TRUE)
  expect_true(fs::file_exists(html))
  html_lines <- readLines(html)
  # NOTE(review): tmp_dir is used as a regular expression here; a temp path
  # containing regex metacharacters would break this match — consider
  # stringr::fixed(tmp_dir).
  expect_true(sum(stringr::str_detect(html_lines, tmp_dir)) == 1)
})

# Rewriting _workflowr.yml to knit_root_dir: "analysis" should make the
# working directory during knitting be analysis/ (which contains the Rmd).
test_that("The default knit_root_dir for a workflowr project can be analysis/", {
  skip_on_cran()
  tmp_dir <- tempfile()
  tmp_start <- wflow_start(tmp_dir, change_wd = FALSE, user.name = "Test Name",
                           user.email = "test@email")
  tmp_dir <- workflowr:::absolute(tmp_dir)
  on.exit(unlink(tmp_dir, recursive = TRUE))
  wflow_yml <- file.path(tmp_dir, "_workflowr.yml")
  wflow_yml_lines <- readLines(wflow_yml)
  wflow_yml_lines <- stringr::str_replace(wflow_yml_lines,
                                          "knit_root_dir: \".\"",
                                          "knit_root_dir: \"analysis\"")
  writeLines(wflow_yml_lines, wflow_yml)
  rmd <- file.path(tmp_dir, "analysis", "file.Rmd")
  lines <- c("`r getwd()`")
  writeLines(lines, rmd)
  html <- rmarkdown::render_site(rmd, quiet = TRUE)
  expect_true(fs::file_exists(html))
  html_lines <- readLines(html)
  expect_true(sum(stringr::str_detect(html_lines, basename(rmd))) == 1)
})

# Figures must be written under figure/<Rmd name>/ and embedded in the HTML
# whether or not the directory is a Git repository.
test_that("wflow_html can insert figures with or without Git repo present", {
  skip_on_cran()
  tmp_dir <- tempfile()
  fs::dir_create(tmp_dir)
  tmp_dir <- workflowr:::absolute(tmp_dir)
  on.exit(unlink(tmp_dir, recursive = TRUE))
  rmd <- file.path(tmp_dir, "file.Rmd")
  lines <- c("---",
             "output: workflowr::wflow_html",
             "---",
             "",
             "```{r chunkname}",
             "plot(1:10)",
             "```")
  writeLines(lines, rmd)
  # Without Git repo
  html <- rmarkdown::render(rmd, quiet = TRUE)
  expect_true(fs::file_exists(html))
  expect_true(fs::file_exists(file.path(tmp_dir, "figure", basename(rmd),
                                        "chunkname-1.png")))
  html_lines <- readLines(html)
  # Because it isn't a website, the image gets embedded as a base64 image
  expect_true(sum(stringr::str_detect(html_lines,
                                      "<img src=\"data:image/png;base64,")) == 1)
  fs::file_delete(html)
  # With Git repo
  git2r::init(tmp_dir)
  html <- rmarkdown::render(rmd, quiet = TRUE)
  html_lines <- readLines(html)
  expect_true(sum(stringr::str_detect(html_lines,
                                      "<img src=\"data:image/png;base64,")) == 1)
})

# When a `github:` URL is declared in _workflowr.yml it must be used for the
# repository links in the rendered page instead of the Git remote's URL.
test_that("github URL in _workflowr.yml overrides git remote", {
  skip_on_cran()
  tmp_dir <- tempfile()
  tmp_start <- wflow_start(tmp_dir, change_wd = FALSE, user.name = "Test Name",
                           user.email = "test@email")
  tmp_dir <- workflowr:::absolute(tmp_dir)
  on.exit(unlink(tmp_dir, recursive = TRUE))
  # Add remote
  tmp_remote <- wflow_git_remote("origin", "testuser", "testrepo",
                                 verbose = FALSE, project = tmp_dir)
  # Define GitHub URL in _workflowr.yml
  cat("github: https://github.com/upstream/testrepo\n",
      file = file.path(tmp_dir, "_workflowr.yml"), append = TRUE)
  rmd <- file.path(tmp_dir, "analysis", "index.Rmd")
  html <- rmarkdown::render_site(rmd, quiet = TRUE)
  html_lines <- readLines(html)
  expect_true(any(stringr::str_detect(html_lines,
                                      "https://github.com/upstream/testrepo")))
  expect_false(any(stringr::str_detect(html_lines,
                                       "https://github.com/testuser/testrepo")))
})
# The HTML must contain workflowr's own header and footer include snippets
# (compared with stringr::fixed so regex metacharacters are literal).
test_that("wflow_html inserts custom header and footer", {
  skip_on_cran()
  tmp_dir <- tempfile()
  fs::dir_create(tmp_dir)
  tmp_dir <- workflowr:::absolute(tmp_dir)
  on.exit(unlink(tmp_dir, recursive = TRUE))
  rmd <- file.path(tmp_dir, "file.Rmd")
  lines <- c("---",
             "output: workflowr::wflow_html",
             "---")
  writeLines(lines, rmd)
  html <- rmarkdown::render(rmd, quiet = TRUE,
                            # These are added by wflow_site(), which I am
                            # purposefully skipping for these tests. In order
                            # for the browser tab icon to be a URL and not a
                            # binary blob, have to manually set to
                            # self_contained
                            output_options = list(self_contained = FALSE,
                                                  lib_dir = "site_libs"))
  expect_true(fs::file_exists(html))
  html_lines <- readLines(html)
  html_complete <- paste(html_lines, collapse = "\n")
  expect_true(stringr::str_detect(html_complete,
                                  stringr::fixed(workflowr:::includes$header)))
  expect_true(stringr::str_detect(html_complete,
                                  stringr::fixed(workflowr:::includes$footer)))
})

# A user-supplied pandoc `in_header` include must appear in the output
# alongside workflowr's own header/footer includes.
test_that("wflow_html allows users to add additional files for pandoc includes", {
  skip_on_cran()
  tmp_dir <- tempfile()
  fs::dir_create(tmp_dir)
  tmp_dir <- workflowr:::absolute(tmp_dir)
  on.exit(unlink(tmp_dir, recursive = TRUE))
  css <- file.path(tmp_dir, "style.html")
  style <- "p {color: red}"
  writeLines(c("<style>", style, "</style>"), con = css)
  rmd <- file.path(tmp_dir, "file.Rmd")
  lines <- c("---",
             "output:",
             " workflowr::wflow_html:",
             " includes:",
             " in_header: style.html",
             "---",
             "```{r}",
             "plot(1:10)",
             "```")
  writeLines(lines, rmd)
  html <- rmarkdown::render(rmd, quiet = TRUE,
                            output_options = list(self_contained = FALSE,
                                                  lib_dir = "site_libs"))
  expect_true(fs::file_exists(html))
  html_lines <- readLines(html)
  html_complete <- paste(html_lines, collapse = "\n")
  expect_true(stringr::str_detect(html_complete,
                                  stringr::fixed(workflowr:::includes$header)))
  expect_true(stringr::str_detect(html_complete,
                                  stringr::fixed(workflowr:::includes$footer)))
  expect_true(stringr::str_detect(html_complete,
                                  stringr::fixed(style)))
})

# keep_md: TRUE in the fixture should leave a .md file next to the .html.
test_that("wflow_html respects html_document() argument keep_md", {
  skip_on_cran()
  tmp_dir <- tempfile()
  fs::dir_create(tmp_dir)
  tmp_dir <- workflowr:::absolute(tmp_dir)
  on.exit(unlink(tmp_dir, recursive = TRUE))
  rmd <- file.path(tmp_dir, "file.Rmd")
  fs::file_copy("files/test-wflow_html/keep_md.Rmd", rmd)
  html <- rmarkdown::render(rmd, quiet = TRUE)
  expect_true(fs::file_exists(html))
  md <- fs::path_ext_set(html, "md")
  expect_true(fs::file_exists(md))
})

# With collapse=TRUE, code and its output share one block, so the rendered
# HTML contains "getwd()" immediately followed by a "#"-prefixed output line.
test_that("wflow_html preserves knitr chunk option collapse", {
  skip_on_cran()
  tmp_dir <- tempfile()
  fs::dir_create(tmp_dir)
  tmp_dir <- workflowr:::absolute(tmp_dir)
  on.exit(unlink(tmp_dir, recursive = TRUE))
  rmd <- file.path(tmp_dir, "file.Rmd")
  fs::file_copy("files/test-wflow_html/collapse.Rmd", rmd)
  html <- rmarkdown::render(rmd, quiet = TRUE)
  expect_true(fs::file_exists(html))
  html_lines <- readLines(html)
  html_complete <- paste(html_lines, collapse = "\n")
  # Test collapse=TRUE
  expected_collapse <- "getwd\\(\\)\n#\\s"
  expect_true(stringr::str_detect(html_complete, expected_collapse))
})

# The indent chunk option should survive into the intermediate .md file, so
# both the code line and its output retain their leading whitespace.
test_that("wflow_html preserves knitr chunk option indent", {
  skip_on_cran()
  tmp_dir <- tempfile()
  fs::dir_create(tmp_dir)
  tmp_dir <- workflowr:::absolute(tmp_dir)
  on.exit(unlink(tmp_dir, recursive = TRUE))
  rmd <- file.path(tmp_dir, "file.Rmd")
  fs::file_copy("files/test-wflow_html/indent.Rmd", rmd)
  html <- rmarkdown::render(rmd, quiet = TRUE)
  expect_true(fs::file_exists(html))
  md <- fs::path_ext_set(html, "md")
  expect_true(fs::file_exists(md))
  md_lines <- readLines(md)
  expect_true(" 1 + 1" %in% md_lines)
  expect_true(" [1] 2" %in% md_lines)
})

# A <br> must separate the document's final paragraph from the session
# information button that workflowr appends.
test_that("wflow_html adds spacing between final text and sinfo button", {
  skip_on_cran()
  tmp_dir <- tempfile()
  fs::dir_create(tmp_dir)
  tmp_dir <- workflowr:::absolute(tmp_dir)
  on.exit(unlink(tmp_dir, recursive = TRUE))
  rmd <- file.path(tmp_dir, "file.Rmd")
  fs::file_copy("files/test-wflow_html/sessioninfo-spacing.Rmd", rmd)
  html <- rmarkdown::render(rmd, quiet = TRUE)
  expect_true(fs::file_exists(html))
  html_lines <- readLines(html)
  final_sentence <- stringr::str_which(html_lines, "final sentence")
  expect_identical(html_lines[final_sentence], "<p>final sentence</p>")
  expect_identical(html_lines[final_sentence + 1], "<br>")
  expect_identical(html_lines[final_sentence + 2], "<p>")
  expect_identical(stringr::str_sub(html_lines[final_sentence + 3], 2, 7),
                   "button")
})
# Test plot_hook ---------------------------------------------------------------

# The plot hook warns (as HTML containing <code>fig.path</code>) whenever a
# user sets fig.path, which workflowr manages itself.
test_that("wflow_html sends warning if fig.path is set by user", {
  skip_on_cran()
  tmp_dir <- tempfile()
  fs::dir_create(tmp_dir)
  tmp_dir <- workflowr:::absolute(tmp_dir)
  on.exit(unlink(tmp_dir, recursive = TRUE))
  # If set in only one chunk, only one warning should be generated
  rmd <- file.path(tmp_dir, "file.Rmd")
  fs::file_copy("files/test-wflow_html/fig-path-one-chunk.Rmd", rmd)
  html <- rmarkdown::render(rmd, quiet = TRUE)
  expect_true(fs::file_exists(html))
  html_lines <- readLines(html)
  warnings_fig.path <- stringr::str_detect(html_lines, "<code>fig.path</code>")
  expect_identical(sum(warnings_fig.path), 1L)
  # If set globally, a warning should be generated for each plot (in this case 3)
  rmd2 <- file.path(tmp_dir, "file2.Rmd")
  fs::file_copy("files/test-wflow_html/fig-path-all-chunks.Rmd", rmd2)
  html2 <- rmarkdown::render(rmd2, quiet = TRUE)
  expect_true(fs::file_exists(html2))
  html_lines2 <- readLines(html2)
  warnings_fig.path2 <- stringr::str_detect(html_lines2, "<code>fig.path</code>")
  expect_identical(sum(warnings_fig.path2), 3L)
})

# Old reticulate versions break workflowr's figure handling for Python
# chunks; the page should link to reticulate on CRAN only for versions
# older than 1.14.9000. Skipped entirely when Python/matplotlib are absent.
test_that("wflow_html sends warning for outdated version of reticulate", {
  skip_on_cran()
  test_reticulate <-
    requireNamespace("reticulate", quietly = TRUE) &&
    reticulate::py_available(initialize = TRUE) &&
    reticulate::py_module_available("matplotlib")
  if (!test_reticulate) skip("Python not configured to test reticulate")
  tmp_dir <- tempfile()
  fs::dir_create(tmp_dir)
  tmp_dir <- workflowr:::absolute(tmp_dir)
  on.exit(unlink(tmp_dir, recursive = TRUE))
  rmd <- file.path(tmp_dir, "file.Rmd")
  fs::file_copy("files/test-wflow_html/python-figure.Rmd", rmd)
  html <- rmarkdown::render(rmd, quiet = TRUE)
  expect_true(fs::file_exists(html))
  html_lines <- readLines(html)
  warnings_reticulate <- stringr::str_detect(html_lines,
                                             "<a href=\"https://cran.r-project.org/package=reticulate\">reticulate</a>")
  if (utils::packageVersion("reticulate") < "1.14.9000") {
    expect_identical(sum(warnings_reticulate), 2L)
  } else {
    expect_identical(sum(warnings_reticulate), 0L)
  }
  # fig.path warning should also still be sent
  warnings_fig.path <- stringr::str_detect(html_lines, "<code>fig.path</code>")
  expect_identical(sum(warnings_fig.path), 1L)
})

# Test cache_hook --------------------------------------------------------------

# The cache hook warns (rendered as <strong>Warning:</strong>) for every
# chunk cached without autodep=TRUE.
test_that("wflow_html sends warning if chunk caches without autodep", {
  skip_on_cran()
  tmp_dir <- tempfile()
  fs::dir_create(tmp_dir)
  tmp_dir <- workflowr:::absolute(tmp_dir)
  on.exit(unlink(tmp_dir, recursive = TRUE))
  # If set in only one chunk, only one warning should be generated
  #
  # one chunk has cache=TRUE (warning), another has cache=TRUE && autodep=TRUE
  # (no warning), and the third has no options set (no warning).
  rmd <- file.path(tmp_dir, "file.Rmd")
  fs::file_copy("files/test-wflow_html/cache-one-chunk.Rmd", rmd)
  html <- rmarkdown::render(rmd, quiet = TRUE)
  expect_true(fs::file_exists(html))
  html_lines <- readLines(html)
  expect_true(sum(stringr::str_detect(html_lines, "<strong>Warning:</strong>")) == 1)
  # If cache=TRUE is set globally, a warning should be generated for each chunk
  # that does not have autodep=TRUE.
  #
  # Expect 3 b/c 1 of 3 chunks has autodep=TRUE, plus added sessioninfo chunk
  # (3 - 1 + 1)
  rmd2 <- file.path(tmp_dir, "file2.Rmd")
  fs::file_copy("files/test-wflow_html/cache-all-chunks.Rmd", rmd2)
  html2 <- rmarkdown::render(rmd2, quiet = TRUE)
  expect_true(fs::file_exists(html2))
  html_lines2 <- readLines(html2)
  expect_true(sum(stringr::str_detect(html_lines2, "<strong>Warning:</strong>")) == 3)
})

# Test add_bibliography --------------------------------------------------------

# Unit test of the internal helper: a refs <div> is appended only when the
# document does not already contain one (double- or single-quoted id).
test_that("add_bibliography only adds bibliography when necessary", {
  # Test by directly passing text. The next test block uses actual files
  expected <- c("", "<div id=\"refs\"></div>", "", "")
  expect_identical(workflowr:::add_bibliography("", ""), expected)
  expect_identical(workflowr:::add_bibliography("", "<div id=\"refs\"></div>"),
                   "")
  expect_identical(workflowr:::add_bibliography("", "<div id=\'refs\'></div>"),
                   "")
})
# End-to-end check on rendered files: the refs <div> is (a) absent when no
# bibliography is declared, (b) inserted before the session information, and
# (c) not duplicated when the author already placed one manually.
test_that("add_bibliography adds bibliography to files", {
  skip_on_cran()
  tmp_dir <- tempfile()
  fs::dir_create(tmp_dir)
  tmp_dir <- workflowr:::absolute(tmp_dir)
  on.exit(unlink(tmp_dir, recursive = TRUE))
  # Copy test.bib
  fs::file_copy("files/test-wflow_html/test.bib", file.path(tmp_dir, "test.bib"))
  # Don't add bibliography when not specified in YAML header
  bib_none <- file.path(tmp_dir, "bib-none.Rmd")
  fs::file_copy("files/example.Rmd", bib_none)
  bib_none_html <- rmarkdown::render(bib_none, quiet = TRUE)
  expect_false(any(stringr::str_detect(readLines(bib_none_html),
                                       "<div.*id=\"refs\".*>")))
  # Add bibliography before session information
  bib_add <- file.path(tmp_dir, "bib-add.Rmd")
  fs::file_copy("files/test-wflow_html/bib-add.Rmd", bib_add)
  bib_add_html <- rmarkdown::render(bib_add, quiet = TRUE)
  bib_add_lines <- readLines(bib_add_html)
  refs_line <- stringr::str_which(bib_add_lines, "<div.*id=\"refs\".*>")
  # NOTE(review): "sessionInfo()" is used as a regex; the empty group "()"
  # makes it match the literal text "sessionInfo" — works, but
  # stringr::fixed() would state the intent.
  sinfo_line <- stringr::str_which(bib_add_lines, "sessionInfo()")
  expect_true(refs_line < sinfo_line)
  # Don't add if user already manually added (double quotes)
  bib_dont_add_1 <- file.path(tmp_dir, "bib-dont-add-1.Rmd")
  fs::file_copy("files/test-wflow_html/bib-dont-add-1.Rmd", bib_dont_add_1)
  bib_dont_add_1_html <- rmarkdown::render(bib_dont_add_1, quiet = TRUE)
  bib_dont_add_1_lines <- readLines(bib_dont_add_1_html)
  refs_line <- stringr::str_which(bib_dont_add_1_lines, "<div.*id=\"refs\".*>")
  expect_true(length(refs_line) == 1)
  sinfo_line <- stringr::str_which(bib_dont_add_1_lines, "sessionInfo()")
  expect_true(refs_line < sinfo_line)
  # Don't add if user already manually added (single quotes)
  bib_dont_add_2 <- file.path(tmp_dir, "bib-dont-add-2.Rmd")
  fs::file_copy("files/test-wflow_html/bib-dont-add-2.Rmd", bib_dont_add_2)
  bib_dont_add_2_html <- rmarkdown::render(bib_dont_add_2, quiet = TRUE)
  bib_dont_add_2_lines <- readLines(bib_dont_add_2_html)
  refs_line <- stringr::str_which(bib_dont_add_2_lines, "<div.*id=[\"\']refs[\"\'].*>")
  expect_true(length(refs_line) == 1)
  sinfo_line <- stringr::str_which(bib_dont_add_2_lines, "sessionInfo()")
  expect_true(refs_line < sinfo_line)
})

# Test add_pagetitle -----------------------------------------------------------

# pandoc2 generates a warning if a file has no title or pagetitle. This warning
# can't be captured in R with utils::capture.output() or sink(). Thus need to
# run an external R process and capture the stderr stream.
#
# Input: path to Rmd file
# Output: character vector of lines sent to stderr
#
# Usage:
# test_pandoc_warning("no-title.Rmd")
# Render `rmd` in a fresh R session (via callr) and capture everything the
# subprocess writes to stderr — this is where pandoc emits its
# missing-title warning, which cannot be intercepted in-process.
#
# rmd           - path to the R Markdown file to render.
# output_format - output format passed through to rmarkdown::render().
#
# Returns the captured stderr lines as a character vector.
test_pandoc_warning <- function(rmd, output_format = workflowr::wflow_html()) {
  render_in_subprocess <- function(...) rmarkdown::render(...)
  stderr_log <- fs::file_temp()
  on.exit(fs::file_delete(stderr_log), add = TRUE)
  rendered <- callr::r_safe(
    render_in_subprocess,
    args = list(input = rmd, quiet = TRUE, output_format = output_format),
    stderr = stderr_log
  )
  fs::file_delete(rendered)
  readLines(stderr_log)
}
# Each of the following checks that the subprocess stderr never contains the
# pandoc2 "nonempty" missing-title warning, for various title configurations.
test_that("Rmd file without title does not generate pandoc2 warning", {
  skip_on_cran()
  rmd <- fs::file_temp(ext = "Rmd")
  on.exit(fs::file_delete(rmd))
  fs::file_create(rmd)
  observed <- test_pandoc_warning(rmd)
  expect_false(any(stringr::str_detect(observed, "nonempty")))
})

test_that("Rmd file with title defined in pandoc_args does not generate pandoc2 warning", {
  skip_on_cran()
  rmd <- fs::file_temp(ext = "Rmd")
  on.exit(fs::file_delete(rmd))
  lines <- c("---",
             "output:",
             " workflowr::wflow_html:",
             " pandoc_args: ['--metadata', 'title=something']",
             "---",
             "")
  writeLines(lines, con = rmd)
  # output_format = NULL so the format (incl. pandoc_args) comes from the
  # YAML header rather than the function default
  observed <- test_pandoc_warning(rmd, output_format = NULL)
  expect_false(any(stringr::str_detect(observed, "nonempty")))
})

test_that("Rmd file with defined title does not generate pandoc2 warning", {
  skip_on_cran()
  rmd <- fs::file_temp(ext = "Rmd")
  on.exit(fs::file_delete(rmd))
  lines <- c("---", "title: something", "---", "")
  writeLines(lines, con = rmd)
  observed <- test_pandoc_warning(rmd)
  expect_false(any(stringr::str_detect(observed, "nonempty")))
})

test_that("Rmd file with defined pagetitle does not generate pandoc2 warning", {
  skip_on_cran()
  rmd <- fs::file_temp(ext = "Rmd")
  on.exit(fs::file_delete(rmd))
  lines <- c("---", "pagetitle: something", "---", "")
  writeLines(lines, con = rmd)
  observed <- test_pandoc_warning(rmd)
  expect_false(any(stringr::str_detect(observed, "nonempty")))
})

test_that("Rmd file with defined title and pagetitle does not generate pandoc2 warning", {
  skip_on_cran()
  rmd <- fs::file_temp(ext = "Rmd")
  on.exit(fs::file_delete(rmd))
  lines <- c("---", "title: something", "pagetitle: else", "---", "")
  writeLines(lines, con = rmd)
  observed <- test_pandoc_warning(rmd)
  expect_false(any(stringr::str_detect(observed, "nonempty")))
})

# Unit tests of the internal helper: it returns extra pandoc arguments that
# set a pagetitle from the input file name, unless one is already supplied.
test_that("add_pagetitle adds metadata pagetitle if missing title", {
  observed <- workflowr:::add_pagetitle(metadata = list(), input_file = "abc")
  expected <- c("--metadata", "pagetitle=abc")
  expect_identical(observed, expected)
})

test_that("add_pagetitle does not add metadata pagetitle if set in pandoc_args", {
  metadata <- list(
    output = list(
      `workflowr::wflow_html` = list(
        pandoc_args = c("--metadata", "pagetitle=custom")
      )
    )
  )
  observed <- workflowr:::add_pagetitle(metadata = metadata, input_file = "abc")
  expected <- character(0)
  expect_identical(observed, expected)
})
| /tests/testthat/test-wflow_html.R | permissive | drjiang-bio/workflowr | R | false | false | 24,970 | r | context("wflow_html")
# Test wflow_html --------------------------------------------------------------
test_that("wflow_html sets custom knitr chunk options", {
skip_on_cran()
# The R Markdown file opts_chunk.Rmd reads the options and exports to an RDS
# file
tmp_dir <- tempfile()
fs::dir_create(tmp_dir)
tmp_dir <- workflowr:::absolute(tmp_dir)
on.exit(unlink(tmp_dir, recursive = TRUE))
rmd <- file.path(tmp_dir, "file.Rmd")
fs::file_copy("files/test-wflow_html/opts_chunk.Rmd", rmd)
html <- rmarkdown::render(rmd, quiet = TRUE)
expect_true(fs::file_exists(html))
observed <- readRDS(file.path(tmp_dir, "opts_chunk.rds"))
expect_identical(observed$comment, NA)
expect_identical(observed$fig.align, "center")
expect_identical(observed$tidy, FALSE)
})
test_that("wflow_html can set knit_root_dir in YAML header", {
  skip_on_cran()
  # The R Markdown file knit_root_dir.Rmd creates a file knit_root_dir.txt in
  # its working directory, which is one upstream from its file location.
  tmp_dir <- tempfile()
  fs::dir_create(tmp_dir)
  tmp_dir <- workflowr:::absolute(tmp_dir)
  on.exit(unlink(tmp_dir, recursive = TRUE))
  sub_dir <- file.path(tmp_dir, "sub_dir")
  fs::dir_create(sub_dir)
  rmd <- file.path(sub_dir, "file.Rmd")
  fs::file_copy("files/test-wflow_html/knit_root_dir.Rmd", rmd)
  html <- rmarkdown::render(rmd, quiet = TRUE)
  expect_true(fs::file_exists(html))
  # The side-effect file must land in the parent dir, not next to the Rmd.
  expect_false(fs::file_exists(file.path(sub_dir, "knit_root_dir.txt")))
  expect_true(fs::file_exists(file.path(tmp_dir, "knit_root_dir.txt")))
})
test_that("knit_root_dir can be overridden by command-line render argument", {
  skip_on_cran()
  # The R Markdown file knit_root_dir.Rmd creates a file knit_root_dir.txt in
  # its working directory, which is one upstream from its file location.
  # However, this is overriden by passing the directory that contains the file
  # directly to render.
  tmp_dir <- tempfile()
  fs::dir_create(tmp_dir)
  tmp_dir <- workflowr:::absolute(tmp_dir)
  on.exit(unlink(tmp_dir, recursive = TRUE))
  sub_dir <- file.path(tmp_dir, "sub_dir")
  fs::dir_create(sub_dir)
  rmd <- file.path(sub_dir, "file.Rmd")
  fs::file_copy("files/test-wflow_html/knit_root_dir.Rmd", rmd)
  html <- rmarkdown::render(rmd, quiet = TRUE, knit_root_dir = dirname(rmd))
  expect_true(fs::file_exists(html))
  # With the override, the side-effect file lands next to the Rmd instead.
  expect_true(fs::file_exists(file.path(sub_dir, "knit_root_dir.txt")))
  expect_false(fs::file_exists(file.path(tmp_dir, "knit_root_dir.txt")))
})
test_that("wflow_html can change the sesssioninfo from the YAML header", {
  skip_on_cran()
  # The `workflowr: sessioninfo:` YAML field chooses the function used for the
  # session information section at the bottom of the page.
  tmp_dir <- tempfile()
  fs::dir_create(tmp_dir)
  tmp_dir <- workflowr:::absolute(tmp_dir)
  on.exit(unlink(tmp_dir, recursive = TRUE))
  rmd <- file.path(tmp_dir, "file.Rmd")
  lines <- c("---",
             "output: workflowr::wflow_html",
             "workflowr:",
             "  sessioninfo: \"devtools::session_info()\"",
             "---",
             "",
             "`r 1 + 1`")
  writeLines(lines, rmd)
  html <- rmarkdown::render(rmd, quiet = TRUE)
  expect_true(fs::file_exists(html))
  html_lines <- readLines(html)
  expect_true(sum(stringr::str_detect(html_lines, "devtools::session_info")) == 1)
})
test_that("wflow_html can change the seed from the YAML header", {
  skip_on_cran()
  # The `workflowr: seed:` YAML field sets the RNG seed before the document is
  # knit, so the inline random number must match one drawn with that seed.
  tmp_dir <- tempfile()
  fs::dir_create(tmp_dir)
  tmp_dir <- workflowr:::absolute(tmp_dir)
  on.exit(unlink(tmp_dir, recursive = TRUE))
  rmd <- file.path(tmp_dir, "file.Rmd")
  lines <- c("---",
             "output: workflowr::wflow_html",
             "workflowr:",
             "  seed: 1",
             "---",
             "",
             "`r round(rnorm(1), 5)`")
  writeLines(lines, rmd)
  html <- rmarkdown::render(rmd, quiet = TRUE)
  expect_true(fs::file_exists(html))
  html_lines <- readLines(html)
  set.seed(1)
  expect_true(sum(stringr::str_detect(html_lines,
                                      as.character(round(rnorm(1), 5)))) == 1)
})
test_that("wflow_html does not require a YAML header", {
  skip_on_cran()
  # Rendering a plain file with no YAML header at all must still succeed.
  tmp_dir <- tempfile()
  fs::dir_create(tmp_dir)
  tmp_dir <- workflowr:::absolute(tmp_dir)
  on.exit(unlink(tmp_dir, recursive = TRUE))
  rmd <- file.path(tmp_dir, "file.Rmd")
  lines <- c("some text")
  writeLines(lines, rmd)
  html <- rmarkdown::render(rmd, output_format = wflow_html(), quiet = TRUE)
  expect_true(fs::file_exists(html))
  html_lines <- readLines(html)
  expect_true(sum(stringr::str_detect(html_lines, "some text")) == 1)
})
test_that("wflow_html reads _workflowr.yml in the same directory, but can be overidden", {
  skip_on_cran()
  # Settings in _workflowr.yml apply to files in the same directory, and the
  # per-file YAML header takes precedence over them.
  tmp_dir <- tempfile()
  fs::dir_create(tmp_dir)
  tmp_dir <- workflowr:::absolute(tmp_dir)
  on.exit(unlink(tmp_dir, recursive = TRUE))
  # Set seed of 5 in _workflowr.yml
  writeLines("seed: 5", con = file.path(tmp_dir, "_workflowr.yml"))
  rmd <- file.path(tmp_dir, "file.Rmd")
  lines <- c("---",
             "output: workflowr::wflow_html",
             "---",
             "",
             "`r round(rnorm(1), 5)`")
  writeLines(lines, rmd)
  html <- rmarkdown::render(rmd, quiet = TRUE)
  expect_true(fs::file_exists(html))
  html_lines <- readLines(html)
  set.seed(5)
  expect_true(sum(stringr::str_detect(html_lines,
                                      as.character(round(rnorm(1), 5)))) == 1)
  # Override _workflowr.yml by specifying in YAML header
  lines <- c("---",
             "output: workflowr::wflow_html",
             "workflowr:",
             "  seed: 1",
             "---",
             "",
             "`r round(rnorm(1), 5)`")
  writeLines(lines, rmd)
  html <- rmarkdown::render(rmd, quiet = TRUE)
  html_lines <- readLines(html)
  set.seed(1)
  expect_true(sum(stringr::str_detect(html_lines,
                                      as.character(round(rnorm(1), 5)))) == 1)
})
test_that("The default knit_root_dir for a workflowr project is the root directory", {
  skip_on_cran()
  # In a full workflowr project, chunks evaluate in the project root by
  # default, so `getwd()` inside the Rmd should print the project directory.
  tmp_dir <- tempfile()
  tmp_start <- wflow_start(tmp_dir, change_wd = FALSE, user.name = "Test Name",
                           user.email = "test@email")
  tmp_dir <- workflowr:::absolute(tmp_dir)
  on.exit(unlink(tmp_dir, recursive = TRUE))
  rmd <- file.path(tmp_dir, "analysis", "file.Rmd")
  lines <- c("`r getwd()`")
  writeLines(lines, rmd)
  html <- rmarkdown::render_site(rmd, quiet = TRUE)
  expect_true(fs::file_exists(html))
  html_lines <- readLines(html)
  expect_true(sum(stringr::str_detect(html_lines, tmp_dir)) == 1)
})
test_that("The default knit_root_dir for a workflowr project can be analysis/", {
  skip_on_cran()
  # Editing knit_root_dir in _workflowr.yml moves evaluation into analysis/.
  tmp_dir <- tempfile()
  tmp_start <- wflow_start(tmp_dir, change_wd = FALSE, user.name = "Test Name",
                           user.email = "test@email")
  tmp_dir <- workflowr:::absolute(tmp_dir)
  on.exit(unlink(tmp_dir, recursive = TRUE))
  wflow_yml <- file.path(tmp_dir, "_workflowr.yml")
  wflow_yml_lines <- readLines(wflow_yml)
  wflow_yml_lines <- stringr::str_replace(wflow_yml_lines,
                                          "knit_root_dir: \".\"",
                                          "knit_root_dir: \"analysis\"")
  writeLines(wflow_yml_lines, wflow_yml)
  rmd <- file.path(tmp_dir, "analysis", "file.Rmd")
  lines <- c("`r getwd()`")
  writeLines(lines, rmd)
  html <- rmarkdown::render_site(rmd, quiet = TRUE)
  expect_true(fs::file_exists(html))
  html_lines <- readLines(html)
  expect_true(sum(stringr::str_detect(html_lines, basename(rmd))) == 1)
})
test_that("wflow_html can insert figures with or without Git repo present", {
  skip_on_cran()
  # Figure insertion should behave the same whether or not the containing
  # directory is a Git repository.
  tmp_dir <- tempfile()
  fs::dir_create(tmp_dir)
  tmp_dir <- workflowr:::absolute(tmp_dir)
  on.exit(unlink(tmp_dir, recursive = TRUE))
  rmd <- file.path(tmp_dir, "file.Rmd")
  lines <- c("---",
             "output: workflowr::wflow_html",
             "---",
             "",
             "```{r chunkname}",
             "plot(1:10)",
             "```")
  writeLines(lines, rmd)
  # Without Git repo
  html <- rmarkdown::render(rmd, quiet = TRUE)
  expect_true(fs::file_exists(html))
  expect_true(fs::file_exists(file.path(tmp_dir, "figure", basename(rmd),
                                        "chunkname-1.png")))
  html_lines <- readLines(html)
  # Because it isn't a website, the image gets embedded as a base64 image
  expect_true(sum(stringr::str_detect(html_lines,
                                      "<img src=\"data:image/png;base64,")) == 1)
  fs::file_delete(html)
  # With Git repo
  git2r::init(tmp_dir)
  html <- rmarkdown::render(rmd, quiet = TRUE)
  html_lines <- readLines(html)
  expect_true(sum(stringr::str_detect(html_lines,
                                      "<img src=\"data:image/png;base64,")) == 1)
})
test_that("github URL in _workflowr.yml overrides git remote", {
  skip_on_cran()
  # An explicit `github:` entry in _workflowr.yml should win over the URL
  # derived from the repository's Git remote.
  tmp_dir <- tempfile()
  tmp_start <- wflow_start(tmp_dir, change_wd = FALSE, user.name = "Test Name",
                           user.email = "test@email")
  tmp_dir <- workflowr:::absolute(tmp_dir)
  on.exit(unlink(tmp_dir, recursive = TRUE))
  # Add remote
  tmp_remote <- wflow_git_remote("origin", "testuser", "testrepo",
                                 verbose = FALSE, project = tmp_dir)
  # Define GitHub URL in _workflowr.yml
  cat("github: https://github.com/upstream/testrepo\n",
      file = file.path(tmp_dir, "_workflowr.yml"), append = TRUE)
  rmd <- file.path(tmp_dir, "analysis", "index.Rmd")
  html <- rmarkdown::render_site(rmd, quiet = TRUE)
  html_lines <- readLines(html)
  expect_true(any(stringr::str_detect(html_lines,
                                      "https://github.com/upstream/testrepo")))
  expect_false(any(stringr::str_detect(html_lines,
                                       "https://github.com/testuser/testrepo")))
})
test_that("wflow_html inserts custom header and footer", {
  skip_on_cran()
  # The workflowr header/footer HTML snippets must appear verbatim in the
  # rendered page.
  tmp_dir <- tempfile()
  fs::dir_create(tmp_dir)
  tmp_dir <- workflowr:::absolute(tmp_dir)
  on.exit(unlink(tmp_dir, recursive = TRUE))
  rmd <- file.path(tmp_dir, "file.Rmd")
  lines <- c("---",
             "output: workflowr::wflow_html",
             "---")
  writeLines(lines, rmd)
  html <- rmarkdown::render(rmd, quiet = TRUE,
                            # These are added by wflow_site(), which I am
                            # purposefully skipping for these tests. In order
                            # for the browser tab icon to be a URL and not a
                            # binary blob, have to manually set to
                            # self_contained
                            output_options = list(self_contained = FALSE,
                                                  lib_dir = "site_libs"))
  expect_true(fs::file_exists(html))
  html_lines <- readLines(html)
  html_complete <- paste(html_lines, collapse = "\n")
  expect_true(stringr::str_detect(html_complete,
                                  stringr::fixed(workflowr:::includes$header)))
  expect_true(stringr::str_detect(html_complete,
                                  stringr::fixed(workflowr:::includes$footer)))
})
test_that("wflow_html allows users to add additional files for pandoc includes", {
  skip_on_cran()
  # User-supplied `includes:` files must be merged with (not replace) the
  # built-in workflowr header and footer.
  tmp_dir <- tempfile()
  fs::dir_create(tmp_dir)
  tmp_dir <- workflowr:::absolute(tmp_dir)
  on.exit(unlink(tmp_dir, recursive = TRUE))
  css <- file.path(tmp_dir, "style.html")
  style <- "p {color: red}"
  writeLines(c("<style>", style, "</style>"), con = css)
  rmd <- file.path(tmp_dir, "file.Rmd")
  lines <- c("---",
             "output:",
             "  workflowr::wflow_html:",
             "    includes:",
             "      in_header: style.html",
             "---",
             "```{r}",
             "plot(1:10)",
             "```")
  writeLines(lines, rmd)
  html <- rmarkdown::render(rmd, quiet = TRUE,
                            output_options = list(self_contained = FALSE,
                                                  lib_dir = "site_libs"))
  expect_true(fs::file_exists(html))
  html_lines <- readLines(html)
  html_complete <- paste(html_lines, collapse = "\n")
  expect_true(stringr::str_detect(html_complete,
                                  stringr::fixed(workflowr:::includes$header)))
  expect_true(stringr::str_detect(html_complete,
                                  stringr::fixed(workflowr:::includes$footer)))
  expect_true(stringr::str_detect(html_complete,
                                  stringr::fixed(style)))
})
test_that("wflow_html respects html_document() argument keep_md", {
  skip_on_cran()
  # keep_md.Rmd sets keep_md: true, so the intermediate .md must survive.
  tmp_dir <- tempfile()
  fs::dir_create(tmp_dir)
  tmp_dir <- workflowr:::absolute(tmp_dir)
  on.exit(unlink(tmp_dir, recursive = TRUE))
  rmd <- file.path(tmp_dir, "file.Rmd")
  fs::file_copy("files/test-wflow_html/keep_md.Rmd", rmd)
  html <- rmarkdown::render(rmd, quiet = TRUE)
  expect_true(fs::file_exists(html))
  md <- fs::path_ext_set(html, "md")
  expect_true(fs::file_exists(md))
})
test_that("wflow_html preserves knitr chunk option collapse", {
  skip_on_cran()
  tmp_dir <- tempfile()
  fs::dir_create(tmp_dir)
  tmp_dir <- workflowr:::absolute(tmp_dir)
  on.exit(unlink(tmp_dir, recursive = TRUE))
  rmd <- file.path(tmp_dir, "file.Rmd")
  fs::file_copy("files/test-wflow_html/collapse.Rmd", rmd)
  html <- rmarkdown::render(rmd, quiet = TRUE)
  expect_true(fs::file_exists(html))
  html_lines <- readLines(html)
  html_complete <- paste(html_lines, collapse = "\n")
  # Test collapse=TRUE
  # With collapse=TRUE, source and output share one block, so the output
  # comment prefix follows directly after the source line.
  expected_collapse <- "getwd\\(\\)\n#\\s"
  expect_true(stringr::str_detect(html_complete, expected_collapse))
})
test_that("wflow_html preserves knitr chunk option indent", {
  skip_on_cran()
  # The indent option adds leading whitespace to source/output in the
  # intermediate Markdown file.
  tmp_dir <- tempfile()
  fs::dir_create(tmp_dir)
  tmp_dir <- workflowr:::absolute(tmp_dir)
  on.exit(unlink(tmp_dir, recursive = TRUE))
  rmd <- file.path(tmp_dir, "file.Rmd")
  fs::file_copy("files/test-wflow_html/indent.Rmd", rmd)
  html <- rmarkdown::render(rmd, quiet = TRUE)
  expect_true(fs::file_exists(html))
  md <- fs::path_ext_set(html, "md")
  expect_true(fs::file_exists(md))
  md_lines <- readLines(md)
  expect_true("    1 + 1" %in% md_lines)
  expect_true("    [1] 2" %in% md_lines)
})
test_that("wflow_html adds spacing between final text and sinfo button", {
  skip_on_cran()
  # A <br> and the session-information button should follow the document's
  # final paragraph.
  tmp_dir <- tempfile()
  fs::dir_create(tmp_dir)
  tmp_dir <- workflowr:::absolute(tmp_dir)
  on.exit(unlink(tmp_dir, recursive = TRUE))
  rmd <- file.path(tmp_dir, "file.Rmd")
  fs::file_copy("files/test-wflow_html/sessioninfo-spacing.Rmd", rmd)
  html <- rmarkdown::render(rmd, quiet = TRUE)
  expect_true(fs::file_exists(html))
  html_lines <- readLines(html)
  final_sentence <- stringr::str_which(html_lines, "final sentence")
  expect_identical(html_lines[final_sentence], "<p>final sentence</p>")
  expect_identical(html_lines[final_sentence + 1], "<br>")
  expect_identical(html_lines[final_sentence + 2], "<p>")
  expect_identical(stringr::str_sub(html_lines[final_sentence + 3], 2, 7),
                   "button")
})
# Test plot_hook ---------------------------------------------------------------
# wflow_html manages fig.path itself, so user overrides trigger a warning.
test_that("wflow_html sends warning if fig.path is set by user", {
  skip_on_cran()
  tmp_dir <- tempfile()
  fs::dir_create(tmp_dir)
  tmp_dir <- workflowr:::absolute(tmp_dir)
  on.exit(unlink(tmp_dir, recursive = TRUE))
  # If set in only only one chunk, only one warning should be generated
  rmd <- file.path(tmp_dir, "file.Rmd")
  fs::file_copy("files/test-wflow_html/fig-path-one-chunk.Rmd", rmd)
  html <- rmarkdown::render(rmd, quiet = TRUE)
  expect_true(fs::file_exists(html))
  html_lines <- readLines(html)
  warnings_fig.path <- stringr::str_detect(html_lines, "<code>fig.path</code>")
  expect_identical(sum(warnings_fig.path), 1L)
  # If set globally, a warning should be generated for each plot (in this case 3)
  rmd2 <- file.path(tmp_dir, "file2.Rmd")
  fs::file_copy("files/test-wflow_html/fig-path-all-chunks.Rmd", rmd2)
  html2 <- rmarkdown::render(rmd2, quiet = TRUE)
  expect_true(fs::file_exists(html2))
  html_lines2 <- readLines(html2)
  warnings_fig.path2 <- stringr::str_detect(html_lines2, "<code>fig.path</code>")
  expect_identical(sum(warnings_fig.path2), 3L)
})
test_that("wflow_html sends warning for outdated version of reticulate", {
  skip_on_cran()
  # Only run when Python, reticulate, and matplotlib are all available.
  test_reticulate <-
    requireNamespace("reticulate", quietly = TRUE) &&
    reticulate::py_available(initialize = TRUE) &&
    reticulate::py_module_available("matplotlib")
  if (!test_reticulate) skip("Python not configured to test reticulate")
  tmp_dir <- tempfile()
  fs::dir_create(tmp_dir)
  tmp_dir <- workflowr:::absolute(tmp_dir)
  on.exit(unlink(tmp_dir, recursive = TRUE))
  rmd <- file.path(tmp_dir, "file.Rmd")
  fs::file_copy("files/test-wflow_html/python-figure.Rmd", rmd)
  html <- rmarkdown::render(rmd, quiet = TRUE)
  expect_true(fs::file_exists(html))
  html_lines <- readLines(html)
  # Old reticulate (< 1.14.9000) cannot track Python figures, so a warning
  # with a link to the package page is inserted; newer versions emit none.
  warnings_reticulate <- stringr::str_detect(html_lines,
    "<a href=\"https://cran.r-project.org/package=reticulate\">reticulate</a>")
  if (utils::packageVersion("reticulate") < "1.14.9000") {
    expect_identical(sum(warnings_reticulate), 2L)
  } else {
    expect_identical(sum(warnings_reticulate), 0L)
  }
  # fig.path warning should also still be sent
  warnings_fig.path <- stringr::str_detect(html_lines, "<code>fig.path</code>")
  expect_identical(sum(warnings_fig.path), 1L)
})
# Test cache_hook --------------------------------------------------------------
test_that("wflow_html sends warning if chunk caches without autodep", {
  skip_on_cran()
  tmp_dir <- tempfile()
  fs::dir_create(tmp_dir)
  tmp_dir <- workflowr:::absolute(tmp_dir)
  on.exit(unlink(tmp_dir, recursive = TRUE))
  # If set in only only one chunk, only one warning should be generated
  #
  # one chunk has cache=TRUE (warning), another has cache=TRUE && autodep=TRUE
  # (no warning), and the third has no options set (no warning).
  rmd <- file.path(tmp_dir, "file.Rmd")
  fs::file_copy("files/test-wflow_html/cache-one-chunk.Rmd", rmd)
  html <- rmarkdown::render(rmd, quiet = TRUE)
  expect_true(fs::file_exists(html))
  html_lines <- readLines(html)
  expect_true(sum(stringr::str_detect(html_lines, "<strong>Warning:</strong>")) == 1)
  # If cache=TRUE is set globally, a warning should be generated for each chunk
  # that does not have autodep=TRUE.
  #
  # Expect 3 b/c 1 of 3 chunks has autodep=TRUE, plus added sessioninfo chunk
  # (3 - 1 + 1)
  rmd2 <- file.path(tmp_dir, "file2.Rmd")
  fs::file_copy("files/test-wflow_html/cache-all-chunks.Rmd", rmd2)
  html2 <- rmarkdown::render(rmd2, quiet = TRUE)
  expect_true(fs::file_exists(html2))
  html_lines2 <- readLines(html2)
  expect_true(sum(stringr::str_detect(html_lines2, "<strong>Warning:</strong>")) == 3)
})
# Test add_bibliography --------------------------------------------------------
test_that("add_bibliography only adds bibliography when necessary", {
  # Test by directly passing text. The next test block uses actual files
  # An empty document gets a refs div inserted; a document that already
  # contains one (double- or single-quoted id) is left unchanged.
  expected <- c("", "<div id=\"refs\"></div>", "", "")
  expect_identical(workflowr:::add_bibliography("", ""), expected)
  expect_identical(workflowr:::add_bibliography("", "<div id=\"refs\"></div>"),
                   "")
  expect_identical(workflowr:::add_bibliography("", "<div id=\'refs\'></div>"),
                   "")
})
test_that("add_bibliography adds bibliography to files", {
  skip_on_cran()
  tmp_dir <- tempfile()
  fs::dir_create(tmp_dir)
  tmp_dir <- workflowr:::absolute(tmp_dir)
  on.exit(unlink(tmp_dir, recursive = TRUE))
  # Copy test.bib
  fs::file_copy("files/test-wflow_html/test.bib", file.path(tmp_dir, "test.bib"))
  # Don't add bibliography when not specified in YAML header
  bib_none <- file.path(tmp_dir, "bib-none.Rmd")
  fs::file_copy("files/example.Rmd", bib_none)
  bib_none_html <- rmarkdown::render(bib_none, quiet = TRUE)
  expect_false(any(stringr::str_detect(readLines(bib_none_html),
                                       "<div.*id=\"refs\".*>")))
  # Add bibliography before session information
  bib_add <- file.path(tmp_dir, "bib-add.Rmd")
  fs::file_copy("files/test-wflow_html/bib-add.Rmd", bib_add)
  bib_add_html <- rmarkdown::render(bib_add, quiet = TRUE)
  bib_add_lines <- readLines(bib_add_html)
  refs_line <- stringr::str_which(bib_add_lines, "<div.*id=\"refs\".*>")
  sinfo_line <- stringr::str_which(bib_add_lines, "sessionInfo()")
  expect_true(refs_line < sinfo_line)
  # Don't add if user already manually added (double quotes)
  bib_dont_add_1 <- file.path(tmp_dir, "bib-dont-add-1.Rmd")
  fs::file_copy("files/test-wflow_html/bib-dont-add-1.Rmd", bib_dont_add_1)
  bib_dont_add_1_html <- rmarkdown::render(bib_dont_add_1, quiet = TRUE)
  bib_dont_add_1_lines <- readLines(bib_dont_add_1_html)
  refs_line <- stringr::str_which(bib_dont_add_1_lines, "<div.*id=\"refs\".*>")
  expect_true(length(refs_line) == 1)
  sinfo_line <- stringr::str_which(bib_dont_add_1_lines, "sessionInfo()")
  expect_true(refs_line < sinfo_line)
  # Don't add if user already manually added (single quotes)
  bib_dont_add_2 <- file.path(tmp_dir, "bib-dont-add-2.Rmd")
  fs::file_copy("files/test-wflow_html/bib-dont-add-2.Rmd", bib_dont_add_2)
  bib_dont_add_2_html <- rmarkdown::render(bib_dont_add_2, quiet = TRUE)
  bib_dont_add_2_lines <- readLines(bib_dont_add_2_html)
  refs_line <- stringr::str_which(bib_dont_add_2_lines, "<div.*id=[\"\']refs[\"\'].*>")
  expect_true(length(refs_line) == 1)
  sinfo_line <- stringr::str_which(bib_dont_add_2_lines, "sessionInfo()")
  expect_true(refs_line < sinfo_line)
})
# Test add_pagetitle -----------------------------------------------------------
# pandoc2 generates a warning if a file has no title or pagetitle. This error
# can't be captured in R with utils::capture.output() or sink(). Thus need to
# run external R process and capture the stderr stream.
#
# Input: path to Rmd file
# Output: character vector of lines sent to stderr
#
# Usage:
# test_pandoc_warning("no-title.Rmd")
# Render `rmd` in a fresh R session via callr and capture the lines the child
# process wrote to stderr. pandoc 2's "no title/pagetitle" warning goes to
# stderr and cannot be captured in-process with sink()/capture.output().
#
# rmd: path to an Rmd file
# output_format: forwarded to rmarkdown::render()
# Returns: character vector of stderr lines from the child process
test_pandoc_warning <- function(rmd, output_format = workflowr::wflow_html()) {
  wrap_render <- function(...) rmarkdown::render(...)
  file_stderr <- fs::file_temp()
  # add = TRUE so multiple cleanup handlers can coexist
  on.exit(fs::file_delete(file_stderr), add = TRUE)
  html <- callr::r_safe(wrap_render,
                        args = list(input = rmd, quiet = TRUE,
                                    output_format = output_format),
                        stderr = file_stderr)
  # Remove the rendered HTML via on.exit as well, so it is cleaned up even if
  # reading the stderr file fails.
  on.exit(fs::file_delete(html), add = TRUE)
  readLines(file_stderr)
}
test_that("Rmd file without title does not generate pandoc2 warning", {
  skip_on_cran()
  # An entirely empty Rmd file must render without the pandoc title warning.
  rmd <- fs::file_temp(ext = "Rmd")
  on.exit(fs::file_delete(rmd))
  fs::file_create(rmd)
  observed <- test_pandoc_warning(rmd)
  expect_false(any(stringr::str_detect(observed, "nonempty")))
})
test_that("Rmd file with title defined in pandoc_args does not generate pandoc2 warning", {
  skip_on_cran()
  # A title passed through pandoc_args should be detected and respected.
  rmd <- fs::file_temp(ext = "Rmd")
  on.exit(fs::file_delete(rmd))
  lines <- c("---",
             "output:",
             "  workflowr::wflow_html:",
             "    pandoc_args: ['--metadata', 'title=something']",
             "---",
             "")
  writeLines(lines, con = rmd)
  observed <- test_pandoc_warning(rmd, output_format = NULL)
  expect_false(any(stringr::str_detect(observed, "nonempty")))
})
test_that("Rmd file with defined title does not generate pandoc2 warning", {
  skip_on_cran()
  rmd <- fs::file_temp(ext = "Rmd")
  on.exit(fs::file_delete(rmd))
  lines <- c("---", "title: something", "---", "")
  writeLines(lines, con = rmd)
  observed <- test_pandoc_warning(rmd)
  expect_false(any(stringr::str_detect(observed, "nonempty")))
})
test_that("Rmd file with defined pagetitle does not generate pandoc2 warning", {
  skip_on_cran()
  rmd <- fs::file_temp(ext = "Rmd")
  on.exit(fs::file_delete(rmd))
  lines <- c("---", "pagetitle: something", "---", "")
  writeLines(lines, con = rmd)
  observed <- test_pandoc_warning(rmd)
  expect_false(any(stringr::str_detect(observed, "nonempty")))
})
test_that("Rmd file with defined title and pagetitle does not generate pandoc2 warning", {
  skip_on_cran()
  rmd <- fs::file_temp(ext = "Rmd")
  on.exit(fs::file_delete(rmd))
  lines <- c("---", "title: something", "pagetitle: else", "---", "")
  writeLines(lines, con = rmd)
  observed <- test_pandoc_warning(rmd)
  expect_false(any(stringr::str_detect(observed, "nonempty")))
})
# Unit tests for the internal add_pagetitle() helper.
test_that("add_pagetitle adds metadata pagetitle if missing title", {
  observed <- workflowr:::add_pagetitle(metadata = list(), input_file = "abc")
  expected <- c("--metadata", "pagetitle=abc")
  expect_identical(observed, expected)
})
test_that("add_pagetitle does not add metadata pagetitle if set in pandoc_args", {
  metadata <- list(
    output = list(
      `workflowr::wflow_html` = list(
        pandoc_args = c("--metadata", "pagetitle=custom")
      )
    )
  )
  observed <- workflowr:::add_pagetitle(metadata = metadata, input_file = "abc")
  expected <- character(0)
  expect_identical(observed, expected)
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ff_factors.R
\docType{data}
\name{ff_factors}
\alias{ff_factors}
\title{5 Fama-French Research Factors}
\format{
A data frame with 383 rows and 7 columns.
}
\usage{
data(ff_factors)
}
\description{
The dataset contains the monthly returns for the 5 Fama-French Research Factors (MRP, SMB, HML, RMW, CMA)
for the time period from 01.02.1986 to 31.12.2017.
}
\keyword{datasets}
| /man/ff_factors.Rd | no_license | antshi/auxPort | R | false | true | 432 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ff_factors.R
\docType{data}
\name{ff_factors}
\alias{ff_factors}
\title{5 Fama-French Research Factors}
\format{
A data frame with 383 rows and 7 columns.
}
\usage{
data(ff_factors)
}
\description{
The dataset contains the monthly returns for the 5 Fama-French Research Factors (MRP, SMB, HML, RMW, CMA)
for the time period from 01.02.1986 to 31.12.2017.
}
\keyword{datasets}
|
library(shiny)
library(shinydashboard)
library(mapdeck)
source("load_data.R")
# Maximum number of trips drawn at once; keeps the arc layer responsive.
max_number_rows <- 50000

# Two-tab UI: an arc map of individual trips with a distance filter, and a
# heatmap of trip start locations.
ui <- navbarPage("Blue Bikes", id = "nav",
  tabPanel("Trip Plotter",
    div(class = "outer",
      tags$head(
        # Include our custom CSS. (The original had a trailing comma after
        # this call, which passes an empty argument to tags$head.)
        includeCSS("styles.css")
      ),
      # Make the distance slider's bar transparent.
      tags$style(
        ".irs-bar {",
        "  border-color: transparent;",
        "  background-color: transparent;",
        "}",
        ".irs-bar-edge {",
        "  border-color: transparent;",
        "  background-color: transparent;",
        "}"
      ),
      # If not using custom CSS, set height of leafletOutput to a number instead of percent
      mapdeckOutput("map", width = "100%", height = "100%"),
      # Shiny versions prior to 0.11 should use class = "modal" instead.
      absolutePanel(id = "controls", class = "panel panel-default", fixed = TRUE,
                    draggable = TRUE, top = 60, left = "auto", right = 20, bottom = "auto",
                    width = 330, height = "auto",
                    h2("Trip Explorer"),
                    # min_distance/max_distance are created by load_data.R.
                    sliderInput("distance_slider", "Distance", min_distance, max_distance,
                                c(min_distance, min_distance + 2000))
      ),
      tags$div(id = "cite",
               'Data compiled for ', tags$em('Coming Apart: The State of White America, 1960–2010'), ' by Charles Murray (Crown Forum, 2012).'
      )
    )
  ),
  tabPanel("Station Counts",
    div(class = "outer",
        mapdeckOutput("station_counts", width = "100%", height = "100%"))
  )
)
# Shiny server: filters trips by the distance slider and drives two mapdeck
# maps (an arc map of trips and a heatmap of trip start points).
server <- function(input, output) {
  # Trips whose distance falls inside the slider range, capped at
  # max_number_rows so the arc layer stays responsive. trip_data comes from
  # load_data.R.
  trips <- reactive({
    trip_data %>%
      filter(between(distance, input$distance_slider[[1]], input$distance_slider[[2]])) %>%
      head(max_number_rows)
  })

  # NOTE(review): the original defined `trip_start_counts <- reactive({
  # trip_start_counts })`, a reactive that returned itself and shadowed any
  # same-named object from load_data.R. It was never used, so it has been
  # removed.

  # Initialise the trip arc base map.
  output$map <- renderMapdeck({
    mapdeck(token = key_data, style = mapdeck_style('dark'), pitch = 35,
            location = c(-71.11903, 42.35169), zoom = 12)
  })

  # Initialise the station-count base map.
  output$station_counts <- renderMapdeck({
    mapdeck(token = key_data, style = mapdeck_style('dark'),
            location = c(-71.11903, 42.35169), zoom = 12)
  })

  # Redraw the heatmap of trip start locations whenever the filter changes.
  observe({
    mapdeck_update(map_id = "station_counts") %>%
      add_heatmap(
        data = trips(),
        lat = "start_station_latitude",
        lon = "start_station_longitude",
        layer_id = "grid_layer",
        update_view = FALSE
      )
  })

  # Redraw the start-to-end arcs whenever the filter changes.
  observe({
    mapdeck_update(map_id = "map") %>%
      add_arc(
        data = trips(),
        origin = c("start_station_longitude", "start_station_latitude"),
        destination = c("end_station_longitude", "end_station_latitude"),
        stroke_from = "distance",
        auto_highlight = TRUE,
        layer_id = "myRoads",
        update_view = FALSE,
        legend = TRUE
      )
  })
}
shinyApp(ui, server) | /app.R | no_license | nipper/blue_bikes | R | false | false | 4,707 | r | library(shiny)
library(shinydashboard)
library(mapdeck)
source("load_data.R")
# Maximum number of trips drawn at once; keeps the arc layer responsive.
max_number_rows <- 50000

# Two-tab UI: an arc map of individual trips with a distance filter, and a
# heatmap of trip start locations.
ui <- navbarPage("Blue Bikes", id = "nav",
  tabPanel("Trip Plotter",
    div(class = "outer",
      tags$head(
        # Include our custom CSS. (The original had a trailing comma after
        # this call, which passes an empty argument to tags$head.)
        includeCSS("styles.css")
      ),
      # Make the distance slider's bar transparent.
      tags$style(
        ".irs-bar {",
        "  border-color: transparent;",
        "  background-color: transparent;",
        "}",
        ".irs-bar-edge {",
        "  border-color: transparent;",
        "  background-color: transparent;",
        "}"
      ),
      # If not using custom CSS, set height of leafletOutput to a number instead of percent
      mapdeckOutput("map", width = "100%", height = "100%"),
      # Shiny versions prior to 0.11 should use class = "modal" instead.
      absolutePanel(id = "controls", class = "panel panel-default", fixed = TRUE,
                    draggable = TRUE, top = 60, left = "auto", right = 20, bottom = "auto",
                    width = 330, height = "auto",
                    h2("Trip Explorer"),
                    # min_distance/max_distance are created by load_data.R.
                    sliderInput("distance_slider", "Distance", min_distance, max_distance,
                                c(min_distance, min_distance + 2000))
      ),
      tags$div(id = "cite",
               'Data compiled for ', tags$em('Coming Apart: The State of White America, 1960–2010'), ' by Charles Murray (Crown Forum, 2012).'
      )
    )
  ),
  tabPanel("Station Counts",
    div(class = "outer",
        mapdeckOutput("station_counts", width = "100%", height = "100%"))
  )
)
# Shiny server: filters trips by the distance slider and drives two mapdeck
# maps (an arc map of trips and a heatmap of trip start points).
server <- function(input, output) {
  # Trips whose distance falls inside the slider range, capped at
  # max_number_rows so the arc layer stays responsive. trip_data comes from
  # load_data.R.
  trips <- reactive({
    trip_data %>%
      filter(between(distance, input$distance_slider[[1]], input$distance_slider[[2]])) %>%
      head(max_number_rows)
  })

  # NOTE(review): the original defined `trip_start_counts <- reactive({
  # trip_start_counts })`, a reactive that returned itself and shadowed any
  # same-named object from load_data.R. It was never used, so it has been
  # removed.

  # Initialise the trip arc base map.
  output$map <- renderMapdeck({
    mapdeck(token = key_data, style = mapdeck_style('dark'), pitch = 35,
            location = c(-71.11903, 42.35169), zoom = 12)
  })

  # Initialise the station-count base map.
  output$station_counts <- renderMapdeck({
    mapdeck(token = key_data, style = mapdeck_style('dark'),
            location = c(-71.11903, 42.35169), zoom = 12)
  })

  # Redraw the heatmap of trip start locations whenever the filter changes.
  observe({
    mapdeck_update(map_id = "station_counts") %>%
      add_heatmap(
        data = trips(),
        lat = "start_station_latitude",
        lon = "start_station_longitude",
        layer_id = "grid_layer",
        update_view = FALSE
      )
  })

  # Redraw the start-to-end arcs whenever the filter changes.
  observe({
    mapdeck_update(map_id = "map") %>%
      add_arc(
        data = trips(),
        origin = c("start_station_longitude", "start_station_latitude"),
        destination = c("end_station_longitude", "end_station_latitude"),
        stroke_from = "distance",
        auto_highlight = TRUE,
        layer_id = "myRoads",
        update_view = FALSE,
        legend = TRUE
      )
  })
}
shinyApp(ui, server) |
# Demonstration of power.t.test for a one-sample, one-sided t-test.
# power.t.test solves for whichever of n, delta, sd, and power is left
# unspecified (NULL), given the other three.
power.t.test(n = 16, delta = 0.5, sd = 1, type = "one.sample", alt = "one.sided")
# Extract just the power.
power.t.test(n = 16, delta = 0.5, sd = 1, type = "one.sample", alt = "one.sided")$power
# Power rises with a larger effect size, falls with a smaller sample,
# and falls with a larger standard deviation.
power.t.test(n = 16, delta = 1, sd = 1, type = "one.sample", alt = "one.sided")$power
power.t.test(n = 8, delta = 0.5, sd = 1, type = "one.sample", alt = "one.sided")$power
power.t.test(n = 16, delta = 0.5, sd = 2, type = "one.sample", alt = "one.sided")$power
# Sample size needed to reach a target power.
power.t.test(power = 0.5, delta = 0.5, sd = 1, type = "one.sample", alt = "one.sided")$n
# Detectable effect size at a target power.
power.t.test(power = 0.5, n = 8, sd = 1, type = "one.sample", alt = "one.sided")$delta
# Largest standard deviation compatible with a target power.
power.t.test(power = 0.5, n = 16, delta = 0.5, sd = NULL, type = "one.sample", alt = "one.sided")$sd
| /power.t.test.R | no_license | skeydan/basic_stats_course | R | false | false | 940 | r | # Use power.t.test to determine power
# power.t.test solves for whichever of n, delta, sd, and power is left
# unspecified (NULL), given the other three (one-sample, one-sided t-test).
power.t.test(n = 16, delta = 0.5, sd = 1, type = "one.sample", alt = "one.sided")
# Extract just the power.
power.t.test(n = 16, delta = 0.5, sd = 1, type = "one.sample", alt = "one.sided")$power
# Power rises with a larger effect size, falls with a smaller sample,
# and falls with a larger standard deviation.
power.t.test(n = 16, delta = 1, sd = 1, type = "one.sample", alt = "one.sided")$power
power.t.test(n = 8, delta = 0.5, sd = 1, type = "one.sample", alt = "one.sided")$power
power.t.test(n = 16, delta = 0.5, sd = 2, type = "one.sample", alt = "one.sided")$power
# Sample size needed to reach a target power.
power.t.test(power = 0.5, delta = 0.5, sd = 1, type = "one.sample", alt = "one.sided")$n
# Detectable effect size at a target power.
power.t.test(power = 0.5, n = 8, sd = 1, type = "one.sample", alt = "one.sided")$delta
# Largest standard deviation compatible with a target power.
power.t.test(power = 0.5, n = 16, delta = 0.5, sd = NULL, type = "one.sample", alt = "one.sided")$sd
|
library(ggplot2)

# Load the FPPC filings table once per session (skip the slow CSV read when
# the object already exists). exists() is the idiomatic check, rather than
# scanning ls().
if (!exists("fppc")) {
  fppc <- read.csv('fppc.csv')
}

# Keep only the columns needed for this analysis.
fppc.slice <- fppc[c(
  'Filer_NamL',
  'Committee_Type',
  'Rpt_Date',
  'From_Date',
  'Thru_Date',
  'Rec_Type',
  'Tran_NamL',
  'Tran_NamF',
  'Tran_City',
  'Tran_State',
  'Tran_Zip4',
  'Tran_Emp',
  'Tran_Amt1',
  'Tran_Amt2'
)]

# Reduce ZIP+4 codes to their five-digit ZIP.
fppc.slice$Zip <- substr(fppc.slice$Tran_Zip4, 1, 5)

# Oakland transactions, with ZIP levels ordered by frequency so the bar
# chart comes out sorted.
oakland <- subset(fppc.slice, Tran_City == 'Oakland')
oakland$Zip <- factor(oakland$Zip, levels = names(sort(table(oakland$Zip), decreasing = TRUE)))

# Horizontal bar chart of transaction counts per ZIP.
p <- ggplot(oakland) +
  aes(x = Zip) + geom_bar() + coord_flip()

# Linear model of transaction amount (Tran_Amt2) by ZIP.
m <- lm(Tran_Amt2 ~ Zip, data = oakland)
| /example.r | no_license | tlevine/oakland-fppc | R | false | false | 629 | r | library(ggplot2)
# Load the FPPC filings table once per session (skip the slow CSV read when
# the object already exists). exists() is the idiomatic check, rather than
# scanning ls().
if (!exists("fppc")) {
  fppc <- read.csv('fppc.csv')
}

# Keep only the columns needed for this analysis.
fppc.slice <- fppc[c(
  'Filer_NamL',
  'Committee_Type',
  'Rpt_Date',
  'From_Date',
  'Thru_Date',
  'Rec_Type',
  'Tran_NamL',
  'Tran_NamF',
  'Tran_City',
  'Tran_State',
  'Tran_Zip4',
  'Tran_Emp',
  'Tran_Amt1',
  'Tran_Amt2'
)]

# Reduce ZIP+4 codes to their five-digit ZIP.
fppc.slice$Zip <- substr(fppc.slice$Tran_Zip4, 1, 5)

# Oakland transactions, with ZIP levels ordered by frequency so the bar
# chart comes out sorted.
oakland <- subset(fppc.slice, Tran_City == 'Oakland')
oakland$Zip <- factor(oakland$Zip, levels = names(sort(table(oakland$Zip), decreasing = TRUE)))

# Horizontal bar chart of transaction counts per ZIP.
p <- ggplot(oakland) +
  aes(x = Zip) + geom_bar() + coord_flip()

# Linear model of transaction amount (Tran_Amt2) by ZIP.
m <- lm(Tran_Amt2 ~ Zip, data = oakland)
|
library(dplyr)

# Movie metadata; adjust this path to wherever movie_metadata.csv lives.
mv <- read.csv("C:/Users/acer/Downloads/assignment8/movie_metadata.csv", stringsAsFactors = FALSE)

# Extract the columns with numeric data (vapply is type-safe, unlike sapply).
nums <- vapply(mv, is.numeric, logical(1))
mvModified <- mv[, nums]

# Build (director, actor, movie) triples, pairing the director (col 2) with
# each of the three credited actors (cols 7, 11, 15 — presumably actor_2,
# actor_1, actor_3); col 12 is the movie title.
DirectorActorDuos1 <- mv[, c(2, 7, 12)]
DirectorActorDuos2 <- mv[, c(2, 11, 12)]
DirectorActorDuos3 <- mv[, c(2, 15, 12)]
names(DirectorActorDuos2) <- names(DirectorActorDuos1)
names(DirectorActorDuos3) <- names(DirectorActorDuos1)
DirectorActorDuos <- unique(rbind(DirectorActorDuos1, DirectorActorDuos2, DirectorActorDuos3))

# Count how often each director/actor pair worked together, most frequent first.
dsummary <- DirectorActorDuos %>%
  group_by(director_name, actor_2_name) %>%
  summarise(n = n()) %>%
  arrange(desc(n))

# Drop rows with a missing director or actor name.
dsummary1 <- dsummary[!((dsummary$director_name == "") | (dsummary$actor_2_name == "")), ]

jaccardSim <- list()
# Jaccard similarity between the set of movies directed by `Dname` and the
# set of movies featuring `Aname`: |intersection| / |union| of the two movie
# sets. Relies on the global DirectorActorDuos table (column 3 = movie title).
# `freq` (the pair's co-occurrence count) is accepted for backward
# compatibility with existing callers but is not used in the computation.
# Returns NaN when both movie sets are empty (0/0).
jaccardSimilarity <- function(Dname, Aname, freq) {
  dirmovielist <- unique(DirectorActorDuos[DirectorActorDuos$director_name == Dname, 3])
  actormovielist <- unique(DirectorActorDuos[DirectorActorDuos$actor_2_name == Aname, 3])
  u <- length(union(dirmovielist, actormovielist))
  i <- length(intersect(dirmovielist, actormovielist))
  i / u
}
# Score every surviving director/actor pair.
for(i in 1:nrow(dsummary1)){
  jaccardSim[[i]] = jaccardSimilarity(dsummary1[[i,1]],dsummary1[[i,2]],dsummary1[[i,3]])
}
# Indices of the five highest similarities.
ndx1 = order(unlist(jaccardSim),decreasing = T)[1:5]
maximum = jaccardSim[ndx1]
# Report the top-5 pairs with their co-occurrence counts.
dsummary1[ndx1,c(1,2,3)]
| /assgn8_jaccard.R | no_license | regstrtn/assignment-8 | R | false | false | 1,614 | r | library(dplyr)
mv <- read.csv("C:/Users/acer/Downloads/assignment8/movie_metadata.csv", stringsAsFactors=FALSE)
#extracting col with numeric data
nums = sapply(mv, is.numeric)
mvModified = mv[,nums]
DirectorActorDuos1 = list()
DirectorActorDuos2 = list()
DirectorActorDuos3 = list()
DirectorActorDuos1 = mv[,c(2,7,12)]
DirectorActorDuos2 = mv[,c(2,11,12)]
DirectorActorDuos3 = mv[,c(2,15,12)]
names(DirectorActorDuos2) = names(DirectorActorDuos1)
names(DirectorActorDuos3) = names(DirectorActorDuos1)
DirectorActorDuos = unique(rbind(DirectorActorDuos1,DirectorActorDuos2,DirectorActorDuos3))
dsummary = DirectorActorDuos%>%group_by(director_name,actor_2_name)%>%summarise(n = n())%>%arrange(desc(n))
dsummary1 = dsummary[!((dsummary$director_name == "") | (dsummary$actor_2_name == "")),]
jaccardSim = list()
# Jaccard similarity between a director and an actor, computed over the
# distinct values in column 3 of the global DirectorActorDuos table
# (the movie-score column) attached to each name.
# NOTE(review): 'freq' is accepted for call-compatibility with the loop
# below but is unused inside the function.
jaccardSimilarity = function(Dname, Aname, freq){
  # Distinct column-3 values for every row featuring the director / actor.
  dirmovielist = unique(DirectorActorDuos[DirectorActorDuos$director_name == Dname, 3])
  actormovielist = unique(DirectorActorDuos[DirectorActorDuos$actor_2_name == Aname, 3])
  u = length(union(dirmovielist, actormovielist))
  i = length(intersect(dirmovielist, actormovielist))
  # Jaccard index |A intersect B| / |A union B|; NaN when both sets are empty (0/0).
  return(i/u)
}
for(i in 1:nrow(dsummary1)){
jaccardSim[[i]] = jaccardSimilarity(dsummary1[[i,1]],dsummary1[[i,2]],dsummary1[[i,3]])
}
ndx1 = order(unlist(jaccardSim),decreasing = T)[1:5]
maximum = jaccardSim[ndx1]
dsummary1[ndx1,c(1,2,3)]
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getMAF.R
\name{getMAF}
\alias{getMAF}
\title{Calculate MAF}
\usage{
getMAF(z = NULL, noCall = 9, flip = TRUE, dosageMax = 2)
}
\arguments{
\item{z}{matrix object, rows are samples, columns are SNPs, values range 0-2.}
\item{noCall}{missing value for genotype, defaults to 9.}
\item{flip}{default TRUE. If maf is more than 0.5, then flip 1-maf.}
\item{dosageMax}{Default is 2; for chromosome 23 (chrX), use 1.}
}
\value{
a \code{matrix} object. First column is MAF (range 0-0.5), second column is 1 if the MAF is flipped, else 0.
}
\description{
This function calculates MAF for imputed SNP data in dosage format.
}
\examples{
# dummy SNP data, 25 samples, 4 SNPs
set.seed(123)
geno <- matrix(sample(c(0, 1, 2), 100, replace = TRUE), ncol = 4)
# calculate MAF, returns 2 column matrix
getMAF(geno)
}
\author{
Tokhir Dadaev
}
\keyword{dosage}
\keyword{maf}
\keyword{snp}
| /man/getMAF.Rd | permissive | oncogenetics/oncofunco | R | false | true | 935 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getMAF.R
\name{getMAF}
\alias{getMAF}
\title{Calculate MAF}
\usage{
getMAF(z = NULL, noCall = 9, flip = TRUE, dosageMax = 2)
}
\arguments{
\item{z}{matrix object, rows are samples, columns are SNPs, values range 0-2.}
\item{noCall}{missing value for genotype, defaults to 9.}
\item{flip}{default TRUE. If maf is more than 0.5, then flip 1-maf.}
\item{dosageMax}{Default is 2; for chromosome 23 (chrX), use 1.}
}
\value{
a \code{matrix} object. First column is MAF (range 0-0.5), second column is 1 if the MAF is flipped, else 0.
}
\description{
This function calculates MAF for imputed SNP data in dosage format.
}
\examples{
# dummy SNP data, 25 samples, 4 SNPs
set.seed(123)
geno <- matrix(sample(c(0, 1, 2), 100, replace = TRUE), ncol = 4)
# calculate MAF, returns 2 column matrix
getMAF(geno)
}
\author{
Tokhir Dadaev
}
\keyword{dosage}
\keyword{maf}
\keyword{snp}
|
# SNPP preprocessing
# Raw subnational population projection data downloaded from:
# https://www.ons.gov.uk/file?uri=/peoplepopulationandcommunity/populationandmigration/populationprojections/datasets/localauthoritiesinenglandz1/2014based/snppz1population.zip
# Unzip the files (2014 SNPP Population females.csv and 2014 SNPP Population males.csv) to the cache directory
# Run this script
# Make SNPP ages categories the same as census
# Recode SNPP age groups so they line up with the census categories:
# drop the "All ages" total row, shift ages by one (0 -> 1, ..., 90+ -> 91)
# and collapse everything above 85 into a single 85+ band coded as 86.
adjustSnppAge = function(df) {
  # The census data uses AGE; the raw SNPP download uses AGE_GROUP.
  names(df)[names(df) == "AGE_GROUP"] = "AGE"

  # Drop the pre-computed total row and make the open-ended band numeric.
  df = df[df$AGE != "All ages", ]
  df$AGE[df$AGE == "90 and over"] = "90"

  # Remember a few yearly totals so we can verify nothing was lost.
  check.totals = c(sum(df$X2014), sum(df$X2019), sum(df$X2029), sum(df$X2039))

  # Shift to the 1-based census coding.
  df$AGE = as.numeric(df$AGE) + 1

  # Fold ages 87..91 into the 86 rows, year by year. The addition is kept
  # elementwise so that multiple areas (one row per area per age) stay aligned.
  for (year in 2014:2039) {
    col = paste0("X", year)
    older = lapply(87:91, function(a) df[df$AGE == a, col])
    df[df$AGE == 86, col] = Reduce(`+`, older, df[df$AGE == 86, col])
  }
  df = df[df$AGE < 87, ]

  # Spot-check that the collapsed table preserves the original totals.
  stopifnot(sum(df$X2014) == check.totals[1])
  stopifnot(sum(df$X2019) == check.totals[2])
  stopifnot(sum(df$X2029) == check.totals[3])
  stopifnot(sum(df$X2039) == check.totals[4])

  return(df)
}
# TODO Wales/Scotland data?
# NOTE(review): setwd() plus a hard-coded home path makes this script
# machine-specific; consider parameterising the project root.
setwd("~/dev/nismod/microsimulation/")
cache_dir = "./cache/"

# Raw male/female projections, downloaded and unzipped beforehand (see header).
snpp14m = read.csv(paste0(cache_dir, "2014 SNPP Population males.csv"), stringsAsFactors = F)
snpp14f = read.csv(paste0(cache_dir, "2014 SNPP Population females.csv"), stringsAsFactors = F)

# Drop columns not required downstream.
snpp14m$AREA_NAME=NULL
snpp14f$AREA_NAME=NULL
snpp14m$COMPONENT=NULL
snpp14f$COMPONENT=NULL

# Use the census sex enumeration (1 = male, 2 = female).
# BUG FIX: the female column was previously sized with nrow(snpp14m); if the
# two files ever had different row counts the recycling would silently
# corrupt the data.
snpp14m$SEX=rep(1, nrow(snpp14m))
snpp14f$SEX=rep(2, nrow(snpp14f))
snpp14 = rbind(snpp14m, snpp14f)

# AGE 0-90+ -> 0(1)-85+(86) (to match census)
snpp14 = adjustSnppAge(snpp14)

# Make column names consistent with MYE/census.
colnames(snpp14)[colnames(snpp14) == "AREA_CODE"] = "GEOGRAPHY_CODE"
colnames(snpp14)[colnames(snpp14) == "SEX"] = "GENDER"

write.csv(snpp14, paste0(cache_dir, "snpp2014.csv"), row.names=F)
| /scripts/preprocess_snpp.R | permissive | LLD2018/microsimulation-1 | R | false | false | 2,204 | r |
# SNPP preprocessing
# Raw subnational population projection data downloaded from:
# https://www.ons.gov.uk/file?uri=/peoplepopulationandcommunity/populationandmigration/populationprojections/datasets/localauthoritiesinenglandz1/2014based/snppz1population.zip
# Unzip the files (2014 SNPP Population females.csv and 2014 SNPP Population males.csv) to the cache directory
# Run this script
# Make SNPP ages categories the same as census
# Recode SNPP age bands to match the census categories: drop the "All ages"
# total row, shift ages by one, and collapse 85+ into a single band coded 86.
adjustSnppAge = function(df) {
  # AGE_GROUP -> AGE (census naming)
  colnames(df)[colnames(df) == "AGE_GROUP"] = "AGE"
  # remove non-numeric rows and make the open-ended band numeric
  df = df[df$AGE != "All ages",]
  df$AGE[df$AGE == "90 and over"] = "90"
  # spot-check we preserve correct totals (captured before the merge)
  total14 = sum(df$X2014)
  total19 = sum(df$X2019)
  total29 = sum(df$X2029)
  total39 = sum(df$X2039)
  # Shift to the 1-based census coding (0 -> 1, ..., 90+ -> 91).
  df$AGE = as.numeric(df$AGE) + 1
  # merge ages 85+ (codes 86..91) into the row(s) coded 86, year by year;
  # the addition is elementwise, so one row per area per age stays aligned
  years = 2014:2039
  for (y in years)
  {
    col = paste0("X",y)
    df[df$AGE==86, col] = df[df$AGE==86, col] + df[df$AGE==87, col] + df[df$AGE==88, col] +
      df[df$AGE==89, col] + df[df$AGE==90, col] + df[df$AGE==91, col]
  }
  df = df[df$AGE<87,]
  # check total is preserved
  stopifnot(sum(df$X2014) == total14)
  stopifnot(sum(df$X2019) == total19)
  stopifnot(sum(df$X2029) == total29)
  stopifnot(sum(df$X2039) == total39)
  return(df)
}
# TODO Wales/Scotland data?
# NOTE(review): setwd() plus a hard-coded home path makes this script
# machine-specific; consider parameterising the project root.
setwd("~/dev/nismod/microsimulation/")
cache_dir = "./cache/"

# Raw male/female projections, downloaded and unzipped beforehand (see header).
snpp14m = read.csv(paste0(cache_dir, "2014 SNPP Population males.csv"), stringsAsFactors = F)
snpp14f = read.csv(paste0(cache_dir, "2014 SNPP Population females.csv"), stringsAsFactors = F)

# Drop columns not required downstream.
snpp14m$AREA_NAME=NULL
snpp14f$AREA_NAME=NULL
snpp14m$COMPONENT=NULL
snpp14f$COMPONENT=NULL

# Use the census sex enumeration (1 = male, 2 = female).
# BUG FIX: the female column was previously sized with nrow(snpp14m); if the
# two files ever had different row counts the recycling would silently
# corrupt the data.
snpp14m$SEX=rep(1, nrow(snpp14m))
snpp14f$SEX=rep(2, nrow(snpp14f))
snpp14 = rbind(snpp14m, snpp14f)

# AGE 0-90+ -> 0(1)-85+(86) (to match census)
snpp14 = adjustSnppAge(snpp14)

# Make column names consistent with MYE/census.
colnames(snpp14)[colnames(snpp14) == "AREA_CODE"] = "GEOGRAPHY_CODE"
colnames(snpp14)[colnames(snpp14) == "SEX"] = "GENDER"

write.csv(snpp14, paste0(cache_dir, "snpp2014.csv"), row.names=F)
|
# Download the household power consumption archive once, if not cached locally.
filename = "exdata_plotting1.zip"
if (!file.exists(filename)) {
  retval = download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",
                         destfile = filename,
                         method = "curl")
}
## Reading the data from the contents of the zipped file
## ("?" encodes a missing value in this dataset).
df.power = read.csv(unz(filename, "household_power_consumption.txt"), header=T,
                    sep=";", stringsAsFactors=F, na.strings="?",
                    colClasses=c("character", "character", "numeric",
                                 "numeric", "numeric", "numeric",
                                 "numeric", "numeric", "numeric"))
## Formatting the date and time and subseting the data only on 2007-02-01 and 2007-02-02
df.power$timestamp = strptime(paste(df.power$Date, df.power$Time),
                              format="%d/%m/%Y %H:%M:%S", tz="UTC")
startDate = strptime("01/02/2007 00:00:00", format="%d/%m/%Y %H:%M:%S", tz="UTC")
endDate = strptime("02/02/2007 23:59:59", format="%d/%m/%Y %H:%M:%S", tz="UTC")
df.power = df.power[df.power$timestamp >= startDate & df.power$timestamp <= endDate, ]
## Creating the plot: global active power as a time series line plot,
## written to a 480x480 PNG (closed by dev.off() below).
png(filename="plot2.png", width=480, height=480)
plot(df.power$timestamp, df.power$Global_active_power, type="l", xlab="",
     ylab="Global Active Power (kilowatts)")
dev.off() | /plot2.R | no_license | SebRosengren/ExData_Plotting1 | R | false | false | 1,356 | r | filename = "exdata_plotting1.zip"
if (!file.exists(filename)) {
retval = download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",
destfile = filename,
method = "curl")
}
## Reading the data from the contents of the zipped file
df.power = read.csv(unz(filename, "household_power_consumption.txt"), header=T,
sep=";", stringsAsFactors=F, na.strings="?",
colClasses=c("character", "character", "numeric",
"numeric", "numeric", "numeric",
"numeric", "numeric", "numeric"))
## Formatting the date and time and subseting the data only on 2007-02-01 and 2007-02-02
df.power$timestamp = strptime(paste(df.power$Date, df.power$Time),
format="%d/%m/%Y %H:%M:%S", tz="UTC")
startDate = strptime("01/02/2007 00:00:00", format="%d/%m/%Y %H:%M:%S", tz="UTC")
endDate = strptime("02/02/2007 23:59:59", format="%d/%m/%Y %H:%M:%S", tz="UTC")
df.power = df.power[df.power$timestamp >= startDate & df.power$timestamp <= endDate, ]
## Creating the plot
png(filename="plot2.png", width=480, height=480)
plot(df.power$timestamp, df.power$Global_active_power, type="l", xlab="",
ylab="Global Active Power (kilowatts)")
dev.off() |
# Load the four sheets of the MD Anderson HGSC workbook into separate
# data frames (requires the xlsx package, which needs Java).
library(xlsx)
# Clinical and NGS concordance data.
mdacc_clinical <- read.xlsx("data/HGSC_DB.xlsx",
                            sheetName = 'CLINICAL_NGS_CONC',
                            stringsAsFactors=FALSE)
# Somatic mutation calls.
mdacc_somatic <- read.xlsx("data/HGSC_DB.xlsx",
                           sheetName = 'SOMATIC MUTATIONS',
                           stringsAsFactors=FALSE)
# Germline mutation calls.
mdacc_germline <- read.xlsx("data/HGSC_DB.xlsx",
                            sheetName = 'GERMLINE MUTATIONS',
                            stringsAsFactors=FALSE)
# MYC copy-number calls (final version of the sheet).
mdacc_myc <- read.xlsx("data/HGSC_DB.xlsx",
                       sheetName = 'MYC COPY NUM_FINAL',
                       stringsAsFactors=FALSE)
| /get_data_mdacc.R | no_license | pathology-sandbox/myc_brca_study | R | false | false | 595 | r | library(xlsx)
mdacc_clinical <- read.xlsx("data/HGSC_DB.xlsx",
sheetName = 'CLINICAL_NGS_CONC',
stringsAsFactors=FALSE)
mdacc_somatic <- read.xlsx("data/HGSC_DB.xlsx",
sheetName = 'SOMATIC MUTATIONS',
stringsAsFactors=FALSE)
mdacc_germline <- read.xlsx("data/HGSC_DB.xlsx",
sheetName = 'GERMLINE MUTATIONS',
stringsAsFactors=FALSE)
mdacc_myc <- read.xlsx("data/HGSC_DB.xlsx",
sheetName = 'MYC COPY NUM_FINAL',
stringsAsFactors=FALSE)
|
# This is our initial data; one entry per "sample"
# (in this toy example, a "sample" is just a sentence, but
# it could be an entire document).
library(keras)
samples <- c("The cat sat on the mat.", "The dog ate my homework.")
# First, build an index of all tokens in the data.
token_index <- list()
for (sample in samples)
  # Tokenizes the samples via the strsplit function. In real life, you'd also
  # strip punctuation and special characters from the samples.
  for (word in strsplit(sample, " ")[[1]])
    if (!word %in% names(token_index))
      # Assigns a unique index to each unique word. Note that you don't
      # attribute index 1 to anything: the first word gets index 2, so
      # index 1 stays reserved (e.g. for padding/unknown tokens).
      token_index[[word]] <- length(token_index) + 2
# Vectorizes the samples. You'll only consider the first max_length
# words in each sample.
max_length <- 10
# This is where you store the results: a (sample, position, token) tensor.
results <- array(0, dim = c(length(samples),
                            max_length,
                            max(as.integer(token_index))))
for (i in 1:length(samples)) {
  sample <- samples[[i]]
  words <- head(strsplit(sample, " ")[[1]], n = max_length)
  for (j in 1:length(words)) {
    index <- token_index[[words[[j]]]]
    results[[i, j, index]] <- 1
  }
}
# Character level one-hot encoding (toy example):
samples <- c("The cat sat on the mat.", "The dog ate my homework.")
# Alphabet: the empty string plus all printable ASCII characters (32-126).
ascii_tokens <- c("", sapply(as.raw(c(32:126)), rawToChar))
token_index <- c(1:(length(ascii_tokens)))
names(token_index) <- ascii_tokens
max_length <- 50
# One-hot tensor: (sample, character position, token id).
# NOTE(review): the loop below indexes positions 1..nchar(sample), so each
# sample is assumed to have at most max_length characters -- confirm.
results <- array(0, dim = c(length(samples), max_length, length(token_index)))
for (i in 1:length(samples)) {
  sample <- samples[[i]]
  characters <- strsplit(sample, "")[[1]]
  for (j in 1:length(characters)) {
    character <- characters[[j]]
    results[i, j, token_index[[character]]] <- 1
  }
}
# Using Keras for word-level one-hot encoding:
library(keras)
samples <- c("The cat sat on the mat.", "The dog ate my homework.")
# Creates a tokenizer, configured to only take into account the 1,000
# most common words, then builds the word index.
tokenizer <- text_tokenizer(num_words = 1000) %>%
  fit_text_tokenizer(samples)
# Turns strings into lists of integer indices
sequences <- texts_to_sequences(tokenizer, samples)
# You could also directly get the one-hot binary representations. Vectorization
# modes other than one-hot encoding are supported by this tokenizer.
one_hot_results <- texts_to_matrix(tokenizer, samples, mode = "binary")
# How you can recover the word index that was computed by fit_text_tokenizer
word_index <- tokenizer$word_index
cat("Found", length(word_index), "unique tokens.\n")
# Word-level one-hot encoding with hashing trick (toy example):
library(hashFunction)

samples <- c("The cat sat on the mat.", "The dog ate my homework.")

# We will store our words as vectors of size 1000.
# Note that if you have close to 1000 words (or more)
# you will start seeing many hash collisions, which
# will decrease the accuracy of this encoding method.
dimensionality <- 1000
max_length <- 10

results <- array(0, dim = c(length(samples), max_length, dimensionality))
for (i in 1:length(samples)) {
  sample <- samples[[i]]
  words <- head(strsplit(sample, " ")[[1]], n = max_length)
  for (j in 1:length(words)) {
    # BUG FIX: hash the j-th word of the sample. The original hashed
    # words[[i]] (indexed by the sample counter), so every word within a
    # sample collapsed onto the same index.
    # NOTE(review): the modulo can yield 0, which is out of range for R's
    # 1-based arrays -- confirm whether (%% dimensionality) + 1 is intended.
    index <- abs(spooky.32(words[[j]])) %% dimensionality
    results[[i, j, index]] <- 1
  }
}
| /DeepLearningR/Ch06_one_hot_encoding_words.R | no_license | PyRPy/Keras_R | R | false | false | 3,505 | r | # This is our initial data; one entry per "sample"
# (in this toy example, a "sample" is just a sentence, but
# it could be an entire document).
library(keras)
samples <- c("The cat sat on the mat.", "The dog ate my homework.")
# First, build an index of all tokens in the data.
token_index <- list()
for (sample in samples)
# Tokenizes the samples via the strsplit function. In real life, you'd also
# strip punctuation and special characters from the samples.
for (word in strsplit(sample, " ")[[1]])
if (!word %in% names(token_index))
# Assigns a unique index to each unique word. Note that you don't
# attribute index 1 to anything.
token_index[[word]] <- length(token_index) + 2
# Vectorizes the samples. You'll only consider the first max_length
# words in each sample.
max_length <- 10
# This is where you store the results.
results <- array(0, dim = c(length(samples),
max_length,
max(as.integer(token_index))))
for (i in 1:length(samples)) {
sample <- samples[[i]]
words <- head(strsplit(sample, " ")[[1]], n = max_length)
for (j in 1:length(words)) {
index <- token_index[[words[[j]]]]
results[[i, j, index]] <- 1
}
}
# Character level one-hot encoding (toy example):
samples <- c("The cat sat on the mat.", "The dog ate my homework.")
ascii_tokens <- c("", sapply(as.raw(c(32:126)), rawToChar))
token_index <- c(1:(length(ascii_tokens)))
names(token_index) <- ascii_tokens
max_length <- 50
results <- array(0, dim = c(length(samples), max_length, length(token_index)))
for (i in 1:length(samples)) {
sample <- samples[[i]]
characters <- strsplit(sample, "")[[1]]
for (j in 1:length(characters)) {
character <- characters[[j]]
results[i, j, token_index[[character]]] <- 1
}
}
# Using Keras for word-level one-hot encoding:
library(keras)
samples <- c("The cat sat on the mat.", "The dog ate my homework.")
# Creates a tokenizer, configured to only take into account the 1,000
# most common words, then builds the word index.
tokenizer <- text_tokenizer(num_words = 1000) %>%
fit_text_tokenizer(samples)
# Turns strings into lists of integer indices
sequences <- texts_to_sequences(tokenizer, samples)
# You could also directly get the one-hot binary representations. Vectorization
# modes other than one-hot encoding are supported by this tokenizer.
one_hot_results <- texts_to_matrix(tokenizer, samples, mode = "binary")
# How you can recover the word index that was computed
word_index <- tokenizer$word_index
cat("Found", length(word_index), "unique tokens.\n")
# Word-level one-hot encoding with hashing trick (toy example):
library(hashFunction)

samples <- c("The cat sat on the mat.", "The dog ate my homework.")

# We will store our words as vectors of size 1000.
# Note that if you have close to 1000 words (or more)
# you will start seeing many hash collisions, which
# will decrease the accuracy of this encoding method.
dimensionality <- 1000
max_length <- 10

results <- array(0, dim = c(length(samples), max_length, dimensionality))
for (i in 1:length(samples)) {
  sample <- samples[[i]]
  words <- head(strsplit(sample, " ")[[1]], n = max_length)
  for (j in 1:length(words)) {
    # BUG FIX: hash the j-th word of the sample. The original hashed
    # words[[i]] (indexed by the sample counter), so every word within a
    # sample collapsed onto the same index.
    # NOTE(review): the modulo can yield 0, which is out of range for R's
    # 1-based arrays -- confirm whether (%% dimensionality) + 1 is intended.
    index <- abs(spooky.32(words[[j]])) %% dimensionality
    results[[i, j, index]] <- 1
  }
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BETS.grnn.train.R
\name{BETS.grnn.train}
\alias{BETS.grnn.train}
\title{Train a General Regression Neural Network}
\usage{
BETS.grnn.train(train.set, sigma, step = 0.1, select = TRUE, names = NA)
}
\arguments{
\item{train.set}{A \code{ts list} (a list of \code{ts} objects). The first element must be the dependent variable. The other elements, the regressors.}
\item{sigma}{A \code{numeric} or a \code{numeric vector}. The sigma parameter, that is, the standard deviation of the activation functions (gaussians) of the pattern layer. Can be either a fixed value or a range (a vector containing the minimum and the maximum values).}
\item{step}{A \code{numeric} value. If \code{sigma} is a range, the user must provide a step value to vary sigma. The function is going to select the best sigma based on MAPE.}
\item{select}{A \code{boolean}. Must be set to \code{FALSE} if the regressors should not be chosen. The default is \code{TRUE}.}
\item{names}{A \code{character vector}. Optional. The names of the regressors. If not provided, indexes will be used and reported.}
}
\value{
A \code{list} of result objects, each representing a network. These objects are ordered by MAPE (the 20 best MAPEs) and its fields are:
\itemize{
\item{\code{accuracy}: A \code{numeric} value. Accuracy measure between the fitted and the actual series values. By default, the MAPE. In future versions, it will be possible to change it.}
\item{\code{fitted}: The fitted values, that is, one step ahead predicitions calculated by the trained net.}
\item{\code{net}: An object returned by the \link[grnn]{grnn} function. Represents a trained net. }
\item{\code{sigma}: A \code{numeric}. The sigma that was chosen, either by the user or by the function itself (in case \code{select} was set to \code{TRUE})}
\item{\code{regressors}: A \code{character vector}. Regressors that were chosen, either by the user or by the function itself (in case \code{select} was set to \code{TRUE})}
\item{\code{sigma.accuracy}: A \code{data.frame}. Sigma versus accuracy value of the corresponding trained network. Those networks were trained using the best set of regressors.}
\item{\code{residuals}: A \code{numeric vector}. Fitted values subtracted from the actual values.}
}
BETS.grnn.train also returns a diagnostic of training rounds and a \code{sigma} versus \code{accuracy} plot.
}
\description{
Creates a set of probabilistic neural networks as proposed by \href{http://www.inf.ufrgs.br/~engel/data/media/file/cmp121/GRNN.pdf}{Specht [1991]}. The user provides a set of regressors and the function chooses which subset is the best, based on an accuracy measure (by default, the MAPE) between fitted and actual values. These networks have only one parameter, the \code{sigma}, which is the standard deviation of each activation function (gaussian) of the pattern layer. Sigma can also be automatically chosen. This function builds on \link[grnn]{grnn-package}.
}
\author{
Talitha Speranza \email{talitha.speranza@fgv.br}
}
| /man/BETS.grnn.train.Rd | no_license | analisemacro/BETS | R | false | true | 3,073 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BETS.grnn.train.R
\name{BETS.grnn.train}
\alias{BETS.grnn.train}
\title{Train a General Regression Neural Network}
\usage{
BETS.grnn.train(train.set, sigma, step = 0.1, select = TRUE, names = NA)
}
\arguments{
\item{train.set}{A \code{ts list} (a list of \code{ts} objects). The first element must be the dependent variable. The other elements, the regressors.}
\item{sigma}{A \code{numeric} or a \code{numeric vector}. The sigma parameter, that is, the standard deviation of the activation functions (gaussians) of the pattern layer. Can be either a fixed value or a range (a vector containing the minimum and the maximum values).}
\item{step}{A \code{numeric} value. If \code{sigma} is a range, the user must provide a step value to vary sigma. The function is going to select the best sigma based on MAPE.}
\item{select}{A \code{boolean}. Must be set to \code{FALSE} if the regressors should not be chosen. The default is \code{TRUE}.}
\item{names}{A \code{character vector}. Optional. The names of the regressors. If not provided, indexes will be used and reported.}
}
\value{
A \code{list} of result objects, each representing a network. These objects are ordered by MAPE (the 20 best MAPEs) and its fields are:
\itemize{
\item{\code{accuracy}: A \code{numeric} value. Accuracy measure between the fitted and the actual series values. By default, the MAPE. In future versions, it will be possible to change it.}
\item{\code{fitted}: The fitted values, that is, one step ahead predicitions calculated by the trained net.}
\item{\code{net}: An object returned by the \link[grnn]{grnn} function. Represents a trained net. }
\item{\code{sigma}: A \code{numeric}. The sigma that was chosen, either by the user or by the function itself (in case \code{select} was set to \code{TRUE})}
\item{\code{regressors}: A \code{character vector}. Regressors that were chosen, either by the user or by the function itself (in case \code{select} was set to \code{TRUE})}
\item{\code{sigma.accuracy}: A \code{data.frame}. Sigma versus accuracy value of the corresponding trained network. Those networks were trained using the best set of regressors.}
\item{\code{residuals}: A \code{numeric vector}. Fitted values subtracted from the actual values.}
}
BETS.grnn.train also returns a diagnostic of training rounds and a \code{sigma} versus \code{accuracy} plot.
}
\description{
Creates a set of probabilistic neural networks as proposed by \href{http://www.inf.ufrgs.br/~engel/data/media/file/cmp121/GRNN.pdf}{Specht [1991]}. The user provides a set of regressors and the function chooses which subset is the best, based on an accuracy measure (by default, the MAPE) between fitted and actual values. These networks have only one parameter, the \code{sigma}, which is the standard deviation of each activation function (gaussian) of the pattern layer. Sigma can also be automatically chosen. This function builds on \link[grnn]{grnn-package}.
}
\author{
Talitha Speranza \email{talitha.speranza@fgv.br}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/index.R
\name{IndexDiagonal}
\alias{IndexDiagonal}
\title{\code{IndexDiagonal}}
\usage{
IndexDiagonal(x, off = FALSE, by.row = TRUE)
}
\arguments{
\item{x}{The data to be indexed.}
\item{off}{TRUE if indexing by the off diagonal.}
\item{by.row}{Indexes by row (versus by column); entries beyond the diagonal
(those to the right of it) are set to NA.}
}
\description{
Indexes relative to the diagonal.
}
| /man/IndexDiagonal.Rd | no_license | Displayr/flipStartup | R | false | true | 448 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/index.R
\name{IndexDiagonal}
\alias{IndexDiagonal}
\title{\code{IndexDiagonal}}
\usage{
IndexDiagonal(x, off = FALSE, by.row = TRUE)
}
\arguments{
\item{x}{The data to be indexed.}
\item{off}{TRUE if indexing by the off diagonal.}
\item{by.row}{Indexes by row (versus by column).
those to the right of them, to NA.}
}
\description{
Indexes relative to the diagonal.
}
|
#' @title Show Method
#' @name Show-Methods
#' @docType methods
#'
#' @aliases show,Antitrust-method
#' @param object An instance of the Antitrust class.
#' @description Displays the percentage change in prices due to the merger.
#'
#' @include SummaryMethods.R
#' @keywords methods
NULL

## print method
#'@rdname Show-Methods
#'@export
setMethod(
  f= "show",
  signature= "Antitrust",
  definition=function(object){
    # summary() is called for its printed market-level output; its return
    # value is deliberately discarded and the method itself returns NULL.
    res <- summary(object,market=TRUE)
    return(NULL)
  }
)

#'@rdname Show-Methods
#'@export
setMethod(
  f= "show",
  signature= "VertBargBertLogit",
  definition=function(object){
    # Delegates to summary() for its printed output and returns NULL.
    res <- summary(object,market=TRUE)
    return(NULL)
  }
)
| /R/ShowMethods.R | no_license | luciu5/antitrust | R | false | false | 714 | r | #' @title Show Method
#' @name Show-Methods
#' @docType methods
#'
#' @aliases show,Antitrust-method
#' @param object An instance of the Antitrust class.
#' @description Displays the percentage change in prices due to the merger.
#'
#' @include SummaryMethods.R
#' @keywords methods
NULL
## print method
#'@rdname Show-Methods
#'@export
setMethod(
f= "show",
signature= "Antitrust",
definition=function(object){
res <- summary(object,market=TRUE)
return(NULL)
}
)
#'@rdname Show-Methods
#'@export
setMethod(
f= "show",
signature= "VertBargBertLogit",
definition=function(object){
res <- summary(object,market=TRUE)
return(NULL)
}
)
|
#Helper function to return: The best performing hospital in a state, based on a certain outcome criteria
# Return, for every state, the hospital occupying a given rank ("best",
# "worst", or a numeric rank) for a 30-day mortality outcome.
#
# outcome   - one of "heart attack", "heart failure", "pneumonia";
#             anything else raises "invalid outcome".
# rankValue - "best", "worst", or a numeric rank; ranks beyond a state's
#             hospital count yield NA for that state.
#
# Returns (invisibly) a list, one entry per state: a one-row data frame
# (Hospital.Name, State, rate columns, Rank) or NA.
# Relies on globals defined elsewhere in the project: directoryDataPath
# and loadCSVfile.
rankall <- function(outcome, rankValue = "best")
{
  ## Read the outcome data.
  ## NOTE(review): setwd() changes global state; kept for compatibility
  ## with the rest of the scripts in this project.
  setwd(directoryDataPath)
  dataframeofinterest <- loadCSVfile(directoryDataPath, "outcome-of-care-measures.csv")

  # Map the user-facing outcome name onto the short column label used
  # below; anything else is rejected.
  short.col <- switch(outcome,
                      "heart attack"  = "HA30",
                      "heart failure" = "HF30",
                      "pneumonia"     = "PN30",
                      stop("invalid outcome"))

  ## Clean up the raw columns: hospital names as character, "Not Available"
  ## mortality rates as NA, and the rates themselves as numeric.
  dataframeofinterest$Hospital.Name <- as.character(dataframeofinterest$Hospital.Name)
  rate.cols <- c("Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack",
                 "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure",
                 "Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia")
  for (rc in rate.cols) {
    vals <- dataframeofinterest[[rc]]
    vals[vals == "Not Available"] <- NA
    dataframeofinterest[[rc]] <- as.numeric(as.character(vals))
  }

  # Keep only the columns we need, under short names for readability.
  newdataframe <- dataframeofinterest[, c("Hospital.Name", "State", rate.cols)]
  colnames(newdataframe)[3:5] <- c("HA30", "HF30", "PN30")

  # For one state's slice: order by (rate, hospital name), attach a rank,
  # drop NA rates, and pick the requested row. Replaces the three
  # copy-pasted per-outcome branches of the original.
  pick.ranked <- function(x)
  {
    vecresult <- x[order(x[[short.col]], x$Hospital.Name), ]
    vecresult$Rank <- rank(vecresult[[short.col]], ties.method = "first")
    vecresult <- vecresult[!is.na(vecresult[[short.col]]), ]
    if (rankValue == "best")
    {
      head(vecresult, 1)
    }
    else if (rankValue == "worst")
    {
      tail(vecresult, 1)
    }
    else if (rankValue <= nrow(vecresult))
    {
      vecresult[rankValue, ]
    }
    else
    {
      return(NA)
    }
  }

  # One result (or NA) per state; returned invisibly, matching the
  # original's trailing assignment.
  orderedLList <- lapply(split(newdataframe, newdataframe$State), pick.ranked)
  invisible(orderedLList)
} | /Scripts/rankall.R | no_license | Whitchurch/datasciencecoursera | R | false | false | 6,070 | r | #Helper function to return: The best performing hospital in a state, based on a certain outcome criteria
rankall <- function(outcome,rankValue = "best")
{
## Read the outcome data
print("Read data section")
setwd(directoryDataPath)
dataframeofinterest <- loadCSVfile(directoryDataPath,"outcome-of-care-measures.csv")
# similar logic is applied for outcome
if(outcome == "heart attack" | outcome == "heart failure" | outcome == "pneumonia")
{
## Do nothing since vaid outcome is supplied
}
else
{
stop("invalid outcome")
}
## REturn hospital name in that state with lowest 30 day death rate
#Change Data formats
dataframeofinterest$Hospital.Name <- as.character(dataframeofinterest$Hospital.Name)
maskheatattackNAS <- dataframeofinterest$Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack == "Not Available"
maskheartfailureNAS <- dataframeofinterest$Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure == "Not Available"
maskpneumoniaNAS <- dataframeofinterest$Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia == "Not Available"
#Assign NAS to the Not availables
dataframeofinterest[maskheatattackNAS,"Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack"] <- NA
dataframeofinterest[maskheartfailureNAS,"Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure"] <- NA
dataframeofinterest[maskpneumoniaNAS,"Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia"] <- NA
# str(dataframeofinterest$Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack)
# str(dataframeofinterest$Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure)
# str(dataframeofinterest$Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia)
# str(dataframeofinterest$State)
# str(dataframeofinterest$Hospital.Name)
#Make changes to data format, cleanup factor to nums etc etc.
dataframeofinterest$Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack <- as.numeric(as.character(dataframeofinterest$Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack))
dataframeofinterest$Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure <- as.numeric(as.character(dataframeofinterest$Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure))
dataframeofinterest$Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia <- as.numeric(as.character(dataframeofinterest$Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia))
str(dataframeofinterest$Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack)
str(dataframeofinterest$Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure)
str(dataframeofinterest$dataframeofinterest$Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia)
str(dataframeofinterest$State)
str(dataframeofinterest$Hospital.Name)
#filter out based on state and make a new dataframe reference
newdataframe <- dataframeofinterest[,c("Hospital.Name","State","Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack","Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure","Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia")]
# cleanup the names to shorten them for better visualization as well as ease of typing in the future
colnames(newdataframe)[3] <- "HA30"
colnames(newdataframe)[4] <- "HF30"
colnames(newdataframe)[5] <- "PN30"
# verify if name changes are success full
#str(newdataframe)
#print out to see how stuff looks now
#head(newdataframe, n = 2)
#Start the real analysis, since our data is already filtered by state , we just need to find the min value of mortality rate
if(outcome == "heart attack")
{
#Split the data based on the states
newSplitData <- split(newdataframe,newdataframe$State)
#Verify if the split worked
orderedLList <- lapply(newSplitData, function(x)
{
vecresult <- x[order(x$HA30,x$Hospital.Name),]
vecRank <- rank(vecresult$HA30,ties.method = "first")
vecresult$Rank <- vecRank
vecresultMask <- !is.na(vecresult$HA30)
vecresult <- vecresult[vecresultMask,]
if(rankValue == "best")
{
head(vecresult,1)
}
else if(rankValue == "worst")
{
tail(vecresult,1)
}
else if(rankValue <= nrow(vecresult))
{
vecresult[rankValue,]
}
else
{
return(NA)
}
})
}
else if(outcome == "heart failure")
{
#Split the data based on the states
newSplitData <- split(newdataframe,newdataframe$State)
#Verify if the split worked
orderedLList <- lapply(newSplitData, function(x)
{
vecresult <- x[order(x$HF30,x$Hospital.Name),]
vecRank <- rank(vecresult$HF30,ties.method = "first")
vecresult$Rank <- vecRank
vecresultMask <- !is.na(vecresult$HF30)
vecresult <- vecresult[vecresultMask,]
if(rankValue == "best")
{
head(vecresult,1)
}
else if(rankValue == "worst")
{
tail(vecresult,1)
}
else if(rankValue <= nrow(vecresult))
{
vecresult[rankValue,]
}
else
{
return(NA)
}
})
}
else
{
#Split the data based on the states
newSplitData <- split(newdataframe,newdataframe$State)
#Verify if the split worked
orderedLList <- lapply(newSplitData, function(x)
{
vecresult <- x[order(x$PN30,x$Hospital.Name),]
vecRank <- rank(vecresult$PN30,ties.method = "first")
vecresult$Rank <- vecRank
vecresultMask <- !is.na(vecresult$PN30)
vecresult <- vecresult[vecresultMask,]
if(rankValue == "best")
{
head(vecresult,1)
}
else if(rankValue == "worst")
{
tail(vecresult,1)
}
else if(rankValue <= nrow(vecresult))
{
vecresult[rankValue,]
}
else
{
return(NA)
}
})
}
#
# #Sort by alphabetical order and send the top most result
# head(result)
} |
# Process Picarro data for the Beaver Creek experiment
# This script reads all available Picarro outputs in `data/picarro/`,
# concatenating and writing to an `outputs/rawdata.csv.gz` file.
# Ben Bond-Lamberty and Aditi Sengupta June 2018
source("0-functions.R")
SCRIPTNAME <- "2-data.R"
PROBLEM <- FALSE
PICARRO_DATA_DIR <- file.path(DATA_DIR, "picarro/")
# -----------------------------------------------------------------------------
# read a single output file, returning data frame
read_outputfile <- function(fqfn) {
printlog("Reading", fqfn)
stopifnot(file.exists(fqfn))
f <- fqfn
if(grepl(".gz$", fqfn)) {
f <- gzfile(fqfn)
} else if(grepl(".zip$", fqfn)) {
f <- unz(fqfn)
}
d <- read.table(f, header = TRUE)
print_dims(d)
# Add ancillary data
d$file <- basename(fqfn)
# d$dir <- dirname(fqfn)
return(d)
} # read_outputfile
# -----------------------------------------------------------------------------
# scan a directory and process all files in it, returning tempfile names
process_directory <- function(input_path) {
filelist <- list.files(path = input_path,
pattern = "dat$|dat.gz$|dat.zip$",
recursive = TRUE,
full.names = TRUE)
filedata <- list()
printlog("Found", length(filedata), "files")
for(f in filelist) {
printlog("Reading", f)
tibble::as_tibble(read.table(f, header = TRUE, stringsAsFactors = FALSE)) %>%
# select only the columns we need, and discard any fractional valve numbers
select(DATE, TIME, ALARM_STATUS, MPVPosition, CH4_dry, CO2_dry, h2o_reported) %>%
filter(MPVPosition == floor(MPVPosition)) ->
filedata[[basename(f)]]
}
filedata %>%
bind_rows(.id = "filename")
}
# ==============================================================================
# Main
openlog(file.path(outputdir(), paste0(SCRIPTNAME, ".log.txt")), sink = TRUE)
printlog("Welcome to", SCRIPTNAME)
printlog("Data directory is", PICARRO_DATA_DIR)
rawdata <- process_directory(PICARRO_DATA_DIR)
printlog("Writing output file...")
save_data(rawdata, fn = RAWDATA_FILE, scriptfolder = FALSE)
printlog("All done with", SCRIPTNAME)
closelog()
if(PROBLEM) warning("There was a problem - see log")
| /2-data.R | no_license | AditiGit/BeaverCreek_Test | R | false | false | 2,292 | r | # Process Picarro data for the Beaver Creek experiment
# This script reads all available Picarro outputs in `data/picarro/`,
# concatenating and writing to an `outputs/rawdata.csv.gz` file.
# Ben Bond-Lamberty and Aditi Sengupta June 2018
source("0-functions.R")
SCRIPTNAME <- "2-data.R"
PROBLEM <- FALSE
PICARRO_DATA_DIR <- file.path(DATA_DIR, "picarro/")
# -----------------------------------------------------------------------------
# read a single output file, returning data frame
read_outputfile <- function(fqfn) {
printlog("Reading", fqfn)
stopifnot(file.exists(fqfn))
f <- fqfn
if(grepl(".gz$", fqfn)) {
f <- gzfile(fqfn)
} else if(grepl(".zip$", fqfn)) {
f <- unz(fqfn)
}
d <- read.table(f, header = TRUE)
print_dims(d)
# Add ancillary data
d$file <- basename(fqfn)
# d$dir <- dirname(fqfn)
return(d)
} # read_outputfile
# -----------------------------------------------------------------------------
# scan a directory and process all files in it, returning tempfile names
process_directory <- function(input_path) {
filelist <- list.files(path = input_path,
pattern = "dat$|dat.gz$|dat.zip$",
recursive = TRUE,
full.names = TRUE)
filedata <- list()
printlog("Found", length(filedata), "files")
for(f in filelist) {
printlog("Reading", f)
tibble::as_tibble(read.table(f, header = TRUE, stringsAsFactors = FALSE)) %>%
# select only the columns we need, and discard any fractional valve numbers
select(DATE, TIME, ALARM_STATUS, MPVPosition, CH4_dry, CO2_dry, h2o_reported) %>%
filter(MPVPosition == floor(MPVPosition)) ->
filedata[[basename(f)]]
}
filedata %>%
bind_rows(.id = "filename")
}
# ==============================================================================
# Main
openlog(file.path(outputdir(), paste0(SCRIPTNAME, ".log.txt")), sink = TRUE)
printlog("Welcome to", SCRIPTNAME)
printlog("Data directory is", PICARRO_DATA_DIR)
rawdata <- process_directory(PICARRO_DATA_DIR)
printlog("Writing output file...")
save_data(rawdata, fn = RAWDATA_FILE, scriptfolder = FALSE)
printlog("All done with", SCRIPTNAME)
closelog()
if(PROBLEM) warning("There was a problem - see log")
|
#
#
#
# hex_for <- readRDS("../data/hex_municipio/hex_for.rds")
#
# cnes <- read_csv("../data-raw/hospitais/cnesnone_2018.csv") %>%
# st_as_sf(coords = c("long", "lat"), crs = 4326)
#
# escolas <- read_csv("../data/censo_escolar/censo_escolar_2015.csv") %>%
# filter(!is.na(lon)) %>%
# filter(municipio == "Fortaleza") %>%
# st_as_sf(coords = c("lon", "lat"), crs = 4326)
#
# pop <- read_rds("../data/grade_municipio/grade_for.rds") %>%
# select(id_grade, POP) %>%
# st_centroid()
#
#
# hex_for_temp <- hex_for %>%
# st_join(pop) %>%
# group_by(id_hex) %>%
# summarise(pop_total = sum(POP)) %>%
# ungroup() %>%
# st_join(cnes) %>%
# group_by(id_hex, pop_total) %>%
# summarise(saude_total = n()) %>%
# ungroup() %>%
# st_join(escolas) %>%
# group_by(id_hex, pop_total, saude_total) %>%
# summarise(escolas_total = n())
#
#
# mapview(hex_for_temp, zcol = "pop_total")
# mapview(hex_for_temp, zcol = "saude_total")
# mapview(hex_for_temp, zcol = "escolas_total")
# FUNCAO!!!!!!!!!!!!!!!! --------------------------------------------------
# munis <- "for"
agrupar_variaveis <- function(munis) {
# ABRIR ARQUIVOS COM AS OPORTUNIDADES -------------------------------------
# saude
cnes <- read_csv("../data-raw/hospitais/cnesnone_2018.csv") %>%
st_as_sf(coords = c("long", "lat"), crs = 4326)
# educacao
escolas <- read_csv("../data/censo_escolar/censo_escolar_2015.csv") %>%
dplyr::filter(!is.na(lat)) %>%
# mutate(municipio == tolower(municipio)) %>%
# filter(municipio == muni) %>%
st_as_sf(coords = c("lon", "lat"), crs = 4326)
# empregos, por enquanto para 2015
# deu problemas no fread, entao tentando com o readr
# empregos <- fread("../data-raw/rais/rais_2015_rafa_franco.csv", fill = TRUE) %>%
empregos <- read_rds("../data/rais/rais_2015.rds")
# Criar tabela de lookup
cidades_lookup <- tibble(municipio = c("for", "rec", "bel", "rio", "por", "cur", "ter"),
cidade_uf = c("fortaleza, ce", "recife, pe", "belo horizonte, mg", "rio de janeiro, rj",
"porto alegre, rs", "curitiba, pr", "teresina, pi"))
# FUNCAO PARA REALIZAR EM CADA MUNICIPIO ----------------------------------
por_municipio <- function(munis) {
dir <- dir("../data/hex_municipio/", pattern = munis)
res <- str_extract(dir, "\\d+")
dir_muni <- paste0("../data/hex_municipio/hex_", munis, "_", res, ".rds")
dir_grade <- paste0("../data/grade_municipio_com_renda/grade_renda_", munis, ".rds")
pop <- read_rds(dir_grade) %>%
dplyr::select(id_grade, pop_total, renda) %>%
mutate(renda = as.numeric(renda)) %>%
st_centroid()
# Extrair o nome da cidade de acordo com a base da RAIS
# cidade_ufs <- filter(cidades_lookup, municipio == munis) %>% .$cidade_uf
# setDT(empregos)
#
# empregos_v1 <- empregos[cidade_uf == cidade_ufs]
# muni_res <- dir_muni[1]
# FUNCAO PARA REALIZAR PARA TODAS AS RESOLUCOES ------------------------------
seila <- function(muni_res, cidade_uf) {
dir_muni <- muni_res
res <- str_extract(dir_muni, "\\d+")
hex_muni <- readRDS(dir_muni)
hex_muni_fim <- hex_muni %>%
# Agrupar populacao e renda
st_join(pop) %>%
group_by(id_hex) %>%
summarise(pop_total = sum(pop_total), renda_total = sum(renda)) %>%
ungroup() %>%
# Agrupar empregos (agora somando a quantidade de vinculos!)
st_join(empregos) %>%
# mutate(indice = ifelse(is.na(id_estab), 0, 1)) %>%
group_by(id_hex, pop_total, renda_total) %>%
summarise(empregos_total = sum(qt_vinc_ativos, na.rm = TRUE)) %>%
ungroup() %>%
mutate(empregos_total = ifelse(is.na(empregos_total), 0, empregos_total)) %>%
# agrupar saude
st_join(cnes) %>%
mutate(indice = ifelse(is.na(co_cnes), 0, 1)) %>%
group_by(id_hex, pop_total, renda_total, empregos_total) %>%
summarise(saude_total = sum(indice)) %>%
ungroup() %>%
# agrupar educacao
st_join(escolas) %>%
mutate(indice = ifelse(is.na(cod_escola), 0, 1)) %>%
group_by(id_hex, pop_total, renda_total, empregos_total, saude_total) %>%
summarise(escolas_total = sum(indice)) %>%
ungroup()
dir_output <- sprintf("../data/hex_agregados/hex_agregado_%s_%s.rds", munis, res)
write_rds(hex_muni_fim, dir_output)
}
# aplicar para cada resolucao
walk(dir_muni, seila)
}
# aplicar para cada municipio
map(munis, por_municipio)
}
# agrupar_variaveis(c("for", "rec", "bel", "rio", "por", "cur", "ter"))
# agrupar_variaveis("sao")
# agrupar_variaveis("cur")
# agrupar_variaveis("por")
| /R/temp/2-agrupar_variaveis.R | no_license | Multiplicidademobilidade/acesso_oport | R | false | false | 4,900 | r | #
#
#
# hex_for <- readRDS("../data/hex_municipio/hex_for.rds")
#
# cnes <- read_csv("../data-raw/hospitais/cnesnone_2018.csv") %>%
# st_as_sf(coords = c("long", "lat"), crs = 4326)
#
# escolas <- read_csv("../data/censo_escolar/censo_escolar_2015.csv") %>%
# filter(!is.na(lon)) %>%
# filter(municipio == "Fortaleza") %>%
# st_as_sf(coords = c("lon", "lat"), crs = 4326)
#
# pop <- read_rds("../data/grade_municipio/grade_for.rds") %>%
# select(id_grade, POP) %>%
# st_centroid()
#
#
# hex_for_temp <- hex_for %>%
# st_join(pop) %>%
# group_by(id_hex) %>%
# summarise(pop_total = sum(POP)) %>%
# ungroup() %>%
# st_join(cnes) %>%
# group_by(id_hex, pop_total) %>%
# summarise(saude_total = n()) %>%
# ungroup() %>%
# st_join(escolas) %>%
# group_by(id_hex, pop_total, saude_total) %>%
# summarise(escolas_total = n())
#
#
# mapview(hex_for_temp, zcol = "pop_total")
# mapview(hex_for_temp, zcol = "saude_total")
# mapview(hex_for_temp, zcol = "escolas_total")
# FUNCAO!!!!!!!!!!!!!!!! --------------------------------------------------
# munis <- "for"
agrupar_variaveis <- function(munis) {
# ABRIR ARQUIVOS COM AS OPORTUNIDADES -------------------------------------
# saude
cnes <- read_csv("../data-raw/hospitais/cnesnone_2018.csv") %>%
st_as_sf(coords = c("long", "lat"), crs = 4326)
# educacao
escolas <- read_csv("../data/censo_escolar/censo_escolar_2015.csv") %>%
dplyr::filter(!is.na(lat)) %>%
# mutate(municipio == tolower(municipio)) %>%
# filter(municipio == muni) %>%
st_as_sf(coords = c("lon", "lat"), crs = 4326)
# empregos, por enquanto para 2015
# deu problemas no fread, entao tentando com o readr
# empregos <- fread("../data-raw/rais/rais_2015_rafa_franco.csv", fill = TRUE) %>%
empregos <- read_rds("../data/rais/rais_2015.rds")
# Criar tabela de lookup
cidades_lookup <- tibble(municipio = c("for", "rec", "bel", "rio", "por", "cur", "ter"),
cidade_uf = c("fortaleza, ce", "recife, pe", "belo horizonte, mg", "rio de janeiro, rj",
"porto alegre, rs", "curitiba, pr", "teresina, pi"))
# FUNCAO PARA REALIZAR EM CADA MUNICIPIO ----------------------------------
por_municipio <- function(munis) {
dir <- dir("../data/hex_municipio/", pattern = munis)
res <- str_extract(dir, "\\d+")
dir_muni <- paste0("../data/hex_municipio/hex_", munis, "_", res, ".rds")
dir_grade <- paste0("../data/grade_municipio_com_renda/grade_renda_", munis, ".rds")
pop <- read_rds(dir_grade) %>%
dplyr::select(id_grade, pop_total, renda) %>%
mutate(renda = as.numeric(renda)) %>%
st_centroid()
# Extrair o nome da cidade de acordo com a base da RAIS
# cidade_ufs <- filter(cidades_lookup, municipio == munis) %>% .$cidade_uf
# setDT(empregos)
#
# empregos_v1 <- empregos[cidade_uf == cidade_ufs]
# muni_res <- dir_muni[1]
# FUNCAO PARA REALIZAR PARA TODAS AS RESOLUCOES ------------------------------
seila <- function(muni_res, cidade_uf) {
dir_muni <- muni_res
res <- str_extract(dir_muni, "\\d+")
hex_muni <- readRDS(dir_muni)
hex_muni_fim <- hex_muni %>%
# Agrupar populacao e renda
st_join(pop) %>%
group_by(id_hex) %>%
summarise(pop_total = sum(pop_total), renda_total = sum(renda)) %>%
ungroup() %>%
# Agrupar empregos (agora somando a quantidade de vinculos!)
st_join(empregos) %>%
# mutate(indice = ifelse(is.na(id_estab), 0, 1)) %>%
group_by(id_hex, pop_total, renda_total) %>%
summarise(empregos_total = sum(qt_vinc_ativos, na.rm = TRUE)) %>%
ungroup() %>%
mutate(empregos_total = ifelse(is.na(empregos_total), 0, empregos_total)) %>%
# agrupar saude
st_join(cnes) %>%
mutate(indice = ifelse(is.na(co_cnes), 0, 1)) %>%
group_by(id_hex, pop_total, renda_total, empregos_total) %>%
summarise(saude_total = sum(indice)) %>%
ungroup() %>%
# agrupar educacao
st_join(escolas) %>%
mutate(indice = ifelse(is.na(cod_escola), 0, 1)) %>%
group_by(id_hex, pop_total, renda_total, empregos_total, saude_total) %>%
summarise(escolas_total = sum(indice)) %>%
ungroup()
dir_output <- sprintf("../data/hex_agregados/hex_agregado_%s_%s.rds", munis, res)
write_rds(hex_muni_fim, dir_output)
}
# aplicar para cada resolucao
walk(dir_muni, seila)
}
# aplicar para cada municipio
map(munis, por_municipio)
}
# agrupar_variaveis(c("for", "rec", "bel", "rio", "por", "cur", "ter"))
# agrupar_variaveis("sao")
# agrupar_variaveis("cur")
# agrupar_variaveis("por")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataproc_objects.R
\name{SoftwareConfig.properties}
\alias{SoftwareConfig.properties}
\title{SoftwareConfig.properties Object}
\usage{
SoftwareConfig.properties()
}
\value{
SoftwareConfig.properties object
}
\description{
SoftwareConfig.properties Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
[Optional] The properties to set on daemon config files. Property keys are specified in `prefix:property` format, such as `core:fs.defaultFS`. The following are supported prefixes and their mappings: * core: `core-site.xml` * hdfs: `hdfs-site.xml` * mapred: `mapred-site.xml` * yarn: `yarn-site.xml` * hive: `hive-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf`
}
\seealso{
Other SoftwareConfig functions: \code{\link{SoftwareConfig}}
}
| /googledataprocv1.auto/man/SoftwareConfig.properties.Rd | permissive | Phippsy/autoGoogleAPI | R | false | true | 868 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataproc_objects.R
\name{SoftwareConfig.properties}
\alias{SoftwareConfig.properties}
\title{SoftwareConfig.properties Object}
\usage{
SoftwareConfig.properties()
}
\value{
SoftwareConfig.properties object
}
\description{
SoftwareConfig.properties Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
[Optional] The properties to set on daemon config files. Property keys are specified in `prefix:property` format, such as `core:fs.defaultFS`. The following are supported prefixes and their mappings: * core: `core-site.xml` * hdfs: `hdfs-site.xml` * mapred: `mapred-site.xml` * yarn: `yarn-site.xml` * hive: `hive-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf`
}
\seealso{
Other SoftwareConfig functions: \code{\link{SoftwareConfig}}
}
|
# NOTE(review): setwd() with a user-specific path makes the script
# non-portable; prefer running from the project directory instead.
setwd("~/LearningR/Bundesliga/")
library(pscl)    # hurdle() count-data models
library(amen)    # ame() additive-and-multiplicative-effects network models
library(Matrix)  # spMatrix() sparse matrices
# Download and prepare Bundesliga match data from football-data.co.uk.
#
# Returns a list with:
#   games       - one row per match: teams, full-/half-time goals and result,
#                 season, and matchday number (spieltag).
#   teamresults - two rows per match (one per team's perspective) with
#                 full-time goals capped at 4 and a recency weight.
#
# Side effects: downloads BL2016.csv / BL2015.csv into the working directory.
loadData <- function() {
  download.file("http://www.football-data.co.uk/mmz4281/1617/D1.csv", "BL2016.csv")
  download.file("http://www.football-data.co.uk/mmz4281/1516/D1.csv", "BL2015.csv")
  data <- read.csv("BL2016.csv")
  data$season <- 2016

  # Keep only the team/goal/result columns we actually use downstream.
  results <- data[, c("HomeTeam", "AwayTeam", "FTHG", "FTAG", "FTR",
                      "HTHG", "HTAG", "HTR", "season")]
  # Matchday number: 9 games per Spieltag, rows are in chronological order.
  results$spieltag <- floor((9:(nrow(results) + 8)) / 9)

  # One row per team per game (home and away perspective), full-time goals.
  # (A half-time variant previously built here was dead code - it was
  # immediately overwritten by this full-time version - and was removed.)
  teamresults <- data.frame(team = results$HomeTeam, otherTeam = results$AwayTeam,
                            goals = results$FTHG, otherGoals = results$FTAG,
                            where = "Home", spieltag = results$spieltag,
                            season = results$season)
  teamresults <- rbind(data.frame(team = results$AwayTeam, otherTeam = results$HomeTeam,
                                  goals = results$FTAG, otherGoals = results$FTHG,
                                  where = "Away", spieltag = results$spieltag,
                                  season = results$season),
                       teamresults)
  # Cap goal counts at 4: everything above is treated as "4+".
  teamresults$goals <- pmin(teamresults$goals, 4)
  teamresults$otherGoals <- pmin(teamresults$otherGoals, 4)
  # Recency weighting: later seasons/matchdays count more.
  teamresults$weights <- (teamresults$season - 2014) * 1.02^teamresults$spieltag

  list(games = results, teamresults = teamresults)
}
# Build the betting-score masks: for every possible actual result
# "home away" (each 0..4 goals) a 5x5 matrix of points awarded to each
# predicted score line (rows = predicted home goals, cols = predicted away).
# Scoring scheme:
#   home win : 2 (tendency), 3 (right goal difference), 4 (exact score)
#   draw     : 2 (tendency), 6 (exact score)
#   away win : 4 (tendency), 5 (right goal difference), 7 (exact score)
#   wrong tendency: 0
buildMask <- function() {
  actuals <- expand.grid(0:4, 0:4)
  keys <- paste(actuals$Var1, actuals$Var2)
  mask <- list()
  for (idx in seq_along(keys)) {
    a <- actuals$Var1[idx]  # actual home goals
    b <- actuals$Var2[idx]  # actual away goals
    points <- matrix(0, nrow = 5, ncol = 5, dimnames = list(0:4, 0:4))
    for (p in 0:4) {
      for (q in 0:4) {
        if (a > b && p > q) {
          points[p + 1, q + 1] <- if (a - b == p - q) 3 else 2
        } else if (a == b && p == q) {
          points[p + 1, q + 1] <- 2
        } else if (a < b && p < q) {
          points[p + 1, q + 1] <- if (a - b == p - q) 5 else 4
        }
      }
    }
    # The exact result overrides the tendency/difference score.
    points[a + 1, b + 1] <- if (a == b) 6 else if (a > b) 4 else 7
    mask[[keys[idx]]] <- points
  }
  mask
}
# --- Assemble response matrix Y and dyadic covariate array Xd for amen::ame ---
ld<-loadData()
mask<-buildMask()
# 2016 season only; cap all goal counts at 4 ("4+" bucket).
games<-ld$games[ld$games$season==2016,]
games$FTHG<-sapply(games$FTHG, min, 4)
games$FTAG<-sapply(games$FTAG, min, 4)
games$HTHG<-sapply(games$HTHG, min, 4)
games$HTAG<-sapply(games$HTAG, min, 4)
games$FTDIFF<-(games$FTHG-games$FTAG)
games$HTDIFF<-(games$HTHG-games$HTAG)
teams<-unique(games$AwayTeam)
# NOTE(review): Y1 / as.matrix(Y1) below are dead code -- Y is immediately
# re-initialised to an all-NA matrix and filled in the loop further down.
Y1<-spMatrix(nrow=length(teams),
             ncol=length(teams),
             i = as.integer(games$HomeTeam),
             j=as.integer(games$AwayTeam),
             x = games$FTHG) # - games$FTAG)
Y<-as.matrix(Y1)
# Y[home, away] = full-time home goals (NA where the pairing has no game).
Y<-matrix(nrow=length(teams),
          ncol=length(teams),
          data=NA)
rownames(Y)<-levels(teams)
colnames(Y)<-levels(teams)
# Dyadic covariates: half-time home goals, half-time away goals, HT difference.
Xd<-array(dim=c(length(teams),length(teams),3),
          data=NA, dimnames = list(home=levels(teams), away=levels(teams),
                                   info=c('HTHG', 'HTAG', 'HTDIFF')))
# NOTE(review): duplicated -- row/column names were already assigned above.
rownames(Y)<-levels(teams)
colnames(Y)<-levels(teams)
# Fill Y and Xd one match at a time, indexing by factor level of the teams.
for (i in 1:nrow(games))
{
  g <- games[i,]
  Y[as.integer(g$HomeTeam), as.integer(g$AwayTeam)]<-g$FTHG # -g$FTAG
  Xd[as.integer(g$HomeTeam), as.integer(g$AwayTeam),1]<-g$HTHG
  Xd[as.integer(g$HomeTeam), as.integer(g$AwayTeam),2]<-g$HTAG
  Xd[as.integer(g$HomeTeam), as.integer(g$AwayTeam),3]<-g$HTHG-g$HTAG
}
# --- AME network models on the home-goal matrix (interactive transcript) ---
# Fit with the half-time dyadic covariates (HTHG and HTDIFF slices of Xd).
fit_SRM<-ame(Y, Xd=Xd[,,c(1,3)], nscan=5000, plot=TRUE, print=TRUE)
summary(fit_SRM)
fit_SRM$YPM  # posterior-mean predicted values per pairing (amen's YPM)
# Baseline fit without covariates for comparison.
fit_SRM_bas<-ame(Y, nscan=5000, odens=10, plot=TRUE, print=TRUE)
str(fit_SRM_bas)
summary(fit_SRM_bas)
plot(fit_SRM_bas)
plot(fit_SRM)
mean(fit_SRM_bas$BETA)  # posterior mean of the intercept draws
fit_SRM_bas$YPM
# Spot-check: observed vs. predicted for two directed pairings.
Y[2,1]
fit_SRM_bas$YPM[2,1]
Y[1,2]
fit_SRM_bas$YPM[1,2]
# Additive decomposition: row effect + column effect + intercept should
# reproduce the model's expected value EZ.
fit_SRM_bas$APM[2]+fit_SRM_bas$BPM[1]+mean(fit_SRM_bas$BETA)
fit_SRM_bas$EZ[2,1]
fit_SRM_bas$APM[1]+fit_SRM_bas$BPM[2]+mean(fit_SRM_bas$BETA)
fit_SRM_bas$EZ[1,2]
fit_SRM_bas$YPM[1,2]
plot(sort(fit_SRM_bas$BETA))
# Goodness-of-fit statistics for the observed and predicted matrices.
gofstats(Y)
gofstats(fit_SRM$YPM)
c(Y)
str(fit_SRM)
# Classical two-way ANOVA decomposition of the score matrix
# ("country" naming kept from the amen package example this follows).
Rowcountry<-matrix(rownames(Y),nrow(Y),ncol(Y))
Colcountry<-t(Rowcountry)
anova(lm( c(Y) ~ c(Rowcountry) + c(Colcountry) ) )
# Manual additive effects: row/column means relative to the grand mean.
rmean<-rowMeans(Y,na.rm=TRUE) ; cmean<-colMeans(Y,na.rm=TRUE)
muhat<-mean(Y,na.rm=TRUE)
ahat<-rmean-muhat
bhat<-cmean-muhat
sd(ahat)
# additive "exporter" effects
head( sort(ahat,decreasing=TRUE) )
cov( cbind(ahat,bhat) )
cor( ahat, bhat)
# Residual matrix after removing additive effects; correlation between
# R[i,j] and R[j,i] measures dyadic reciprocity.
R <- Y - ( muhat + outer(ahat,bhat,"+") )
cov( cbind( c(R),c(t(R)) ), use="complete")
cor( c(R),c(t(R)), use="complete")
# Predicted-vs-observed diagnostics for the covariate model.
plot( c(fit_SRM$YPM), c(Y))
plot( c(Y), c(fit_SRM$YPM)-c(Y))
plot(c(fit_SRM$YPM), c(fit_SRM$YPM-Y))
cor(c(fit_SRM$YPM-Y), c(fit_SRM$YPM), use = "na.or.complete")
cor(c(Y), c(fit_SRM$YPM), use = "na.or.complete")
summary(c(fit_SRM$YPM))
str(Y)
summary(fit_SRM$BETA)
fit_SRM$U  # multiplicative row (home) factors
fit_SRM$V  # multiplicative column (away) factors
summary(fit_SRM$GOF)
# Hierarchically cluster teams on the first multiplicative row factor.
# Fix: hclust() lives in the 'stats' package (attached by default); there is
# no CRAN package named "hclust", so the original library(hclust) call failed.
cl <- stats::hclust(d = dist(fit_SRM$U[,1]))
plot(cl)
apply(fit_SRM$GOF,2,mean)
gofstats(Y)
# Refit with R=4 multiplicative latent factors under the normal model.
fit_SRM<-ame(Y, Xd=Xd[,,c(1,3)], nscan=5000, R=4, model="nrm", plot=TRUE, print=TRUE)
summary(fit_SRM)
muhat
cov( cbind(ahat,bhat))
apply(fit_SRM$BETA,2,mean)
fit_SRM$BETA
str(fit_SRM)
# NOTE(review): gofstats() takes a relational matrix; passing the whole fit
# object here probably errors -- likely meant gofstats(fit_SRM$YPM).
gofstats(fit_SRM)
# Check that UVPM equals the sum of the rank-1 products U[,r] %*% t(V[,r]).
fit_SRM$U[,1] %*% t(fit_SRM$V[,1]) == fit_SRM$UVPM
((fit_SRM$U[,1] %*% t(fit_SRM$V[,1]))
  +(fit_SRM$U[,2] %*% t(fit_SRM$V[,2]))
  +(fit_SRM$U[,3] %*% t(fit_SRM$V[,3]))
  +(fit_SRM$U[,4] %*% t(fit_SRM$V[,4]))
)[1:6,1:6]
Y[1:6,1:6]
fit_SRM$UVPM[1:6,1:6]
fit_SRM$YPM
# Upcoming fixtures, home team first in each pair.
# NOTE(review): this first vector is immediately overwritten below and has
# no effect; kept only as a record of an earlier matchday.
nextmatches<-c(
  "Augsburg", "RB Leipzig",
  "Werder Bremen", "Darmstadt",
  "Dortmund", "Leverkusen",
  "Mainz", "Wolfsburg",
  "FC Koln", "Bayern Munich",
  "Hoffenheim", "Ingolstadt",
  "M'gladbach", "Schalke 04",
  "Ein Frankfurt", "Freiburg",
  "Hamburg", "Hertha"
)
nextmatches<-c(
  "Leverkusen", "Werder Bremen",
  "Darmstadt", "Mainz",
  "RB Leipzig", "Wolfsburg",
  "Hertha", "Dortmund",
  "Freiburg", "Hoffenheim",
  "Bayern Munich", "Ein Frankfurt",
  "Ingolstadt", "FC Koln",
  "Schalke 04", "Augsburg",
  "Hamburg", "M'gladbach"
)
# One row per fixture: column 1 = home team, column 2 = away team.
nm<-matrix(data = nextmatches, ncol=2, byrow = T)
# Posterior-mean prediction for each fixture, indexed by team name.
sapply(1:9, function(i) paste(nm[i,1], "-", nm[i,2], ": ", fit_SRM$YPM[nm[i,1], nm[i,2]]))
# Plain-regression variant: no row/column variances, no dyadic correlation.
fit_rm<-ame(Y,Xd=Xd[,,3],rvar=FALSE,cvar=FALSE,dcor=FALSE, nscan=5000, plot=TRUE, print=TRUE)
summary(fit_rm)
# Fit a hurdle count model (negative binomial) of goals scored as a function
# of the two teams involved and home/away, weighted by the recency weights in
# `teamresults`. Earlier glm(poisson/quasipoisson) variants were dropped in
# favour of hurdle() to handle the excess of zero-goal halves.
# Side effects: draws a fitted-vs-actual scatter plot and prints the model
# summary plus a summary of dpois(actual, fitted) densities.
# Returns the fitted model object.
buildModel <- function(teamresults) {
  fit <- hurdle(
    formula = goals ~ (team + otherTeam) * where,
    data = teamresults,
    dist = "negbin",
    weights = weights
  )
  plot(teamresults$goals, fitted(fit))
  print(summary(fit))
  # How plausible are the observed goal counts under the fitted rates?
  print(summary(dpois(teamresults$goals, fitted(fit))))
  fit
}
# Predict expected goal rates and score-line probability grids for the given
# matches (`newmatches` columns: team = home side, otherTeam = away side).
# Returns a list with:
#   newmatches   - input table augmented with lh/la (expected home/away goals)
#   predoutcomes - matrix of joint score-line probabilities, one row per
#                  match, columns named "hg ag" for 0..4 goals each side
#   predgoals    - most likely individual goal count per side
predictMatches <- function(model, newmatches) {
  newmatches$lh <- predict(
    object = model, type = "response",
    newdata = data.frame(team = newmatches$team,
                         otherTeam = newmatches$otherTeam,
                         where = "Home")
  )
  newmatches$la <- predict(
    object = model, type = "response",
    newdata = data.frame(team = newmatches$otherTeam,
                         otherTeam = newmatches$team,
                         where = "Away")
  )
  # Poisson probabilities for 0..4 goals: home columns first, then away.
  score_probs <- cbind(
    sapply(0:4, function(k) dpois(k, newmatches$lh)),
    sapply(0:4, function(k) dpois(k, newmatches$la))
  )
  colnames(score_probs) <- c(paste0("LH", 0:4), paste0("LA", 0:4))
  # Outer product per match: joint probability of each exact score line.
  outcome_probs <- t(apply(score_probs, 1, function(p) p[1:5] %o% p[6:10]))
  colnames(outcome_probs) <- do.call(paste, expand.grid(0:4, 0:4))
  # Most likely goal count per side (0-based).
  best_home <- apply(score_probs[, 1:5], 1, which.max) - 1
  best_away <- apply(score_probs[, 6:10], 1, which.max) - 1
  list(newmatches = newmatches,
       predoutcomes = outcome_probs,
       predgoals = data.frame(hg = best_home, ag = best_away))
}
# Summarise each match's score-line probabilities into home-win / draw /
# away-win masses plus the single most likely exact score.
# `prediction` is the list returned by predictMatches(); the result is the
# match table augmented with homewinprob/drawprob/awaywinprob and `pred`,
# the most probable score line as an "hg ag" string.
recommend <- function(prediction) {
  summary_rows <- apply(prediction$predoutcomes, 1, function(probs) {
    # Reshape the 25 probabilities: rows = home goals, cols = away goals.
    grid <- matrix(probs, nrow = 5, ncol = 5)
    c(homewinprob = sum(grid[lower.tri(grid)]),
      drawprob = sum(diag(grid)),
      awaywinprob = sum(grid[upper.tri(grid)]),
      prediction = which.max(probs))
  })
  summary_rows <- t(summary_rows)
  cbind(prediction$newmatches,
        summary_rows[, 1:3],
        pred = colnames(prediction$predoutcomes)[summary_rows[, 4]])
}
# For every match (row of `predoutcomes`, one column per score line in the
# same order as the global `mask` list), compute the expected betting points
# of each possible bet and return the three bets with the highest expectation
# (best/exp, best2/exp2, best3/exp3).
#
# Fixes vs. original: sapply(..., simplify = "array") produced an n x 1 x 25
# array, so the subsequent colnames()<- assignment failed; the hard-coded
# 1:25 is also generalised to the actual length of `mask`.
maxExpectation <- function(predoutcomes) {
  # Expected points = probability-weighted sum of each bet's scoring mask.
  # Built explicitly as a matrix so a single-match input (one row) works too.
  expectedValues <- matrix(
    unlist(lapply(mask, function(m) predoutcomes %*% unlist(m))),
    nrow = nrow(predoutcomes),
    dimnames = list(NULL, names(mask))
  )
  # Per match, the column indices of the three largest expected values.
  ordering <- t(apply(-expectedValues, 1, order)[1:3, ])
  data.frame(
    best = colnames(expectedValues)[ordering[, 1]],
    exp = apply(expectedValues, 1, max),
    best2 = colnames(expectedValues)[ordering[, 2]],
    exp2 = apply(expectedValues, 1, function(x) x[order(-x)[2]]),
    best3 = colnames(expectedValues)[ordering[, 3]],
    exp3 = apply(expectedValues, 1, function(x) x[order(-x)[3]])
  )
}
# --- Full pipeline: load data, fit hurdle model, predict, recommend bets ---
ld<-loadData()
mask<-buildMask()
model<-buildModel(ld$teamresults)
# NOTE(review): the filter references a bare `teamresults`, which is not
# defined at the top level here -- presumably meant ld$teamresults$where.
newmatches<-ld$teamresults[teamresults$where=='Home',c('team', 'otherTeam')]
prediction <- predictMatches(model, newmatches)
# Predicted vs. observed goal distributions (half-time columns compared).
table(prediction$predgoals$hg, prediction$predgoals$ag)
table(ld$games$HTHG, ld$games$HTAG)
table(prediction$predgoals$hg, ld$games$HTHG)
table(prediction$predgoals$ag, ld$games$HTAG)
qqplot(prediction$predgoals$hg, ld$games$HTHG)
qqplot(prediction$predgoals$ag, ld$games$HTAG)
plot(ld$games$HTHG, prediction$newmatches$lh)
plot(ld$games$HTHG - ld$games$HTAG, prediction$newmatches$lh - prediction$newmatches$la)
cor(ld$games$HTHG - ld$games$HTAG, prediction$newmatches$lh - prediction$newmatches$la)
cor(prediction$newmatches$lh, ld$games$HTHG)
plot(ld$games$HTHG, x=prediction$newmatches$lh)
plot(ld$games$HTAG, prediction$newmatches$la)
recommend(prediction)
# Upcoming fixtures (home team first in each pair).
nextmatches<-c(
  "Wolfsburg", "Werder Bremen",
  "Bayern Munich", "Hamburg",
  "Leverkusen", "Mainz",
  "Darmstadt", "Augsburg",
  "Freiburg", "Dortmund",
  "RB Leipzig", "FC Koln",
  "Hertha", "Ein Frankfurt",
  "Ingolstadt", "M'gladbach",
  "Schalke 04", "Hoffenheim"
)
nextmatches<-as.data.frame(matrix(nextmatches,ncol=2,byrow=TRUE))
colnames(nextmatches)<-c('team', 'otherTeam')
prediction <- predictMatches(model, nextmatches)
recommend(prediction)
# Side-by-side: tendency recommendation and expected-points-optimal bets.
cbind(recommend(prediction), maxExpectation(prediction$predoutcomes))
cbind(prediction$newmatches, ld$games)
# Total expected points when always betting the best / 2nd / 3rd choice.
sum(maxExpectation(prediction$predoutcomes)$exp)
sum(maxExpectation(prediction$predoutcomes)$exp2)
sum(maxExpectation(prediction$predoutcomes)$exp3)
# Plot one match's score-line probabilities in decreasing order, label each
# point with its score line, and return the expected-points table from
# maxExpectation() (which uses the global `mask`).
plotGamePred <- function(pred) {
  ranked <- pred[order(pred, decreasing = TRUE)]
  plot(ranked)
  text(ranked, names(ranked))
  maxExpectation(pred)
}
# Inspect the probability distribution over score lines for the first match.
sort(prediction$predoutcomes[1,], decreasing = T)
plot(sort(prediction$predoutcomes[1,], decreasing = T))
text(sort(prediction$predoutcomes[1,], decreasing = T), names(sort(prediction$predoutcomes[1,], decreasing = T)))
plotGamePred(prediction$predoutcomes[1,])
pred<-prediction$predoutcomes[1,]
# NOTE(review): no object `labels` is defined in this script; this line
# just prints base::labels.
labels
# NOTE(review): `expectedValues` is local to maxExpectation(); these lines
# only work if it was computed interactively in the global workspace.
apply(expectedValues, 1, max)
expectedValues[9,order(-expectedValues[9,])]
matrix(expectedValues[8,], nrow=5, ncol=5, dimnames = list(0:4, 0:4))
matrix(prediction$predoutcomes[8,], nrow=5, ncol=5, dimnames = list(0:4, 0:4))
prediction$predoutcomes[1,]
# Expected points under selected bet masks (see buildMask() for ordering).
sum(prediction$predoutcomes %*% unlist(mask[1]))
sum(prediction$predoutcomes[1,] * unlist(mask[1]))
sum(prediction$predoutcomes[1,] * unlist(mask[20]))
cbind(unlist(mask[2]), names(mask), prediction$predoutcomes[1,], names(prediction$predoutcomes[1,]))
rowSums(prediction$predoutcomes * unlist(mask[2]))
prediction$predoutcomes[1,]
# NOTE(review): bare `teamresults` again -- presumably ld$teamresults.
ld$teamresults[teamresults$where=='Home',c('team', 'otherTeam')]
teams
# --- Interactive evaluation of the fitted hurdle model (transcript-style) ---
# NOTE(review): `teamresults` and `m.team` are not defined at the top level
# in this script (m.team is local to buildModel); these lines assume an
# interactive session where both exist in the workspace.
fr <- teamresults[teamresults$where=='Home',]
# Predicted goal rates for each fixture, from both teams' perspectives.
fr$lh <- predict(m.team, type = "response", newdata=data.frame(team=fr$team, otherTeam=fr$otherTeam, where="Home"))
fr$la <- predict(m.team, type = "response", newdata=data.frame(team=fr$otherTeam, otherTeam=fr$team, where="Away"))
# Predicted vs. actual goal difference.
plot(lh-la ~ I(goals-otherGoals), data=fr )
abline(lm(lh-la ~ I(goals-otherGoals), data=fr ))
summary(lm(lh-la ~ I(goals-otherGoals), data=fr ))
cor(fr$lh-fr$la, fr$goals-fr$otherGoals)
# Score-line probability grid per fixture (same construction as predictMatches).
lambdas<-cbind(sapply(0:4, function(x) dpois(x, fr$lh)), sapply(0:4, function(x) dpois(x, fr$la)))
str(lambdas)
colnames(lambdas)<-c(paste0('LH', 0:4), paste0('LA', 0:4))
predoutcomes<-apply(lambdas, 1, function(x) {x[1:5]%o%x[6:10]})
predoutcomes<-t(predoutcomes)
cn<-expand.grid(0:4, 0:4)
colnames(predoutcomes)<-paste(cn$Var1, cn$Var2)
# Aggregate score-line probabilities into home-win / draw / away-win masses.
tend<-apply(predoutcomes, 1, function(x) {
  rm<-matrix(5,5,data=x);
  c(
    homewinprob = sum(lower.tri(rm)*rm),
    drawprob=sum(diag(rm)),
    awaywinprob = sum(upper.tri(rm)*rm))
})
tend<-t(tend)
summary(tend)
# Confusion of predicted tendency (1=home, 2=draw, 3=away) vs. actual sign.
table(apply(tend, 1, which.max))
table(sign(fr$goals-fr$otherGoals))
table(apply(tend, 1, which.max), sign(fr$goals-fr$otherGoals))
# Simpler alternative: linear model directly on the goal difference.
m.diff<-lm(formula = goals-otherGoals ~ (team+otherTeam)*where, data=teamresults, weights = weights)
teamresults$diffpred <-fitted(m.diff)
summary(m.diff)
plot(m.diff)
plot(diffpred ~ I(goals-otherGoals), data=teamresults )
abline(lm(diffpred ~ I(goals-otherGoals), data=teamresults ))
summary(lm(diffpred ~ I(goals-otherGoals), data=teamresults ))
# Most likely goal count per observation under the fitted rates.
allpred<-sapply(0:6, function(x) dpois(x, fitted(m.team)))
bestpred<-apply(allpred, 1, which.max)-1
table(data.frame(pred=bestpred, act=teamresults$goals)) #, diff=bestpred - teamresults$goals) )
summary(data.frame(pred=bestpred, act=teamresults$goals))
# Predict a single match between home side t1 and away side t2 using the
# globally fitted goal model `m.team` (must exist in the calling environment).
# Returns a list with:
#   tendency - one-row data.frame: home-win/draw/away-win probabilities
#              (in percent) and the most likely goal count per side
#   pred     - 7x7 matrix of exact-score probabilities in percent
#              (rows = home goals 0..6, cols = away goals 0..6)
predictMatch <- function(t1, t2) {
team <- t1
otherTeam <- t2
# Expected goals (model means) for each side; the away prediction swaps the
# team roles and sets where="Away".
hg<-predict(m.team, type = "response", newdata=data.frame(team=team, otherTeam=otherTeam, where="Home"))
ag<-predict(m.team, type = "response", newdata=data.frame(team=otherTeam, otherTeam=team, where="Away"))
# Poisson probabilities of scoring 0..6 goals per side.
hgdist<-sapply(0:6, function(x) dpois(x, hg))
agdist<-sapply(0:6, function(x) dpois(x, ag))
# Joint probability of each exact score (independence assumed), rounded and
# scaled to percent.
predoutcomes<-round(sapply(0:6, function(x) dpois(x, hg))%o%sapply(0:6, function(x) dpois(x, ag)), 4)*100
colnames(predoutcomes)<-0:6
rownames(predoutcomes)<-0:6
# Tendencies: diagonal = draw, below diagonal (home > away) = home win,
# above diagonal = away win.
drawprob<-sum(diag(predoutcomes))
homewinprob<-sum(lower.tri(predoutcomes)*predoutcomes)
awaywinprob<-sum(upper.tri(predoutcomes)*predoutcomes)
return (list(tendency = data.frame(team=t1, otherTeam=t2, homewinprob, drawprob, awaywinprob,
hg=which.max(hgdist)-1, ag=which.max(agdist)-1), pred=predoutcomes)
)
}
str(tend)
matrix(7,7,data = predoutcomes[1,])
lambdas[1,]
str((predoutcomes))
table(sign(fr$lh-fr$la), sign(fr$goals-fr$otherGoals))
ppois(0, 1)+dpois(1,1)
dpois(0,1)
ppois(0,1)
ppois(2, 1, lower.tail = F)
ppois(0, 1, lower.tail = T)
ppois(0, 1, lower.tail = F)
densityplot(lh-la ~ I(goals-otherGoals), data=fr)
fittedresults$goals - fittedresults$otherGoals, )
hg<-predict(m.team, type = "response", newdata=data.frame(team=team, otherTeam=otherTeam, where="Home"))
ag<-predict(m.team, type = "response", newdata=data.frame(team=otherTeam, otherTeam=team, where="Away"))
allgamespred<-apply(results, 1, function(x) {predictMatch(x[['HomeTeam']], x[['AwayTeam']])})
allgames_tenpred<-(sapply(allgamespred, function(x) x$tendency[, c('homewinprob', 'drawprob', 'awaywinprob')]))
allgames_tenpred<-t(allgames_tenpred)
allgames_tenpred[,c('homewinprob', 'drawprob', 'awaywinprob')]
str(as.matrix(allgames_tenpred))
actualtend<-cbind(ifelse(results$FTR=='H', 1, 0), ifelse(results$FTR=='D', 1, 0), ifelse(results$FTR=='A', 1, 0))
str(actualtend)
as.matrix(allgames_tenpred)*cbind(ifelse(results$FTR=='H', 1, 0), ifelse(results$FTR=='D', 1, 0), ifelse(results$FTR=='A', 1, 0))
summary(unlist(ifelse(results$FTR=='H', allgames_tenpred[,1], ifelse(results$FTR=='D', allgames_tenpred[,2], allgames_tenpred[,3]))))
table(apply(allgames_tenpred[,c('homewinprob', 'drawprob', 'awaywinprob')], 1, function(x) which.max(x)))
allgames_tenpred[1:2,]
str(results)
results$HomeTeam
results$AwayTeam
teams
predictMatch(teams[11],teams[17])
predictMatch(teams[15],teams[3])
predictMatch(teams[4],teams[9])
predictMatch(teams[6],teams[7])
predictMatch(teams[10],teams[1])
predictMatch(teams[13],teams[19])
predictMatch(teams[18],teams[20])
predictMatch(teams[12],teams[16])
t1<-teams[18]
t2<-teams[20]
table(results$FTHG, results$FTAG)
var(results$FTHG)
mean(results$FTHG)
41.89+29.6+28.47
var(results$FTAG)
mean(results$FTAG)
var(teamresults$goals)
mean(teamresults$goals)
var(teamresults$otherGoals)
mean(teamresults$otherGoals)
predictMatch(teams[11],teams[17])
predictMatch(teams[11],teams[17])
teams[3]
t1<-1
t2<-12
colnames(teams[5])
str(teams)
predict(m.team, type = "response", newdata=data.frame(team="Augsburg", otherTeam="Leverkusen", where="Home"))
library(pscl)
m.team<-hurdle(formula = goals ~ team*where+otherTeam, data=teamresults, dist = "poisson")
m.team<-hurdle(formula = goals ~ (team+otherTeam)*where, data=teamresults, dist = "geometric")
m.team<-hurdle(formula = goals ~ (team+otherTeam)*where, data=teamresults, dist = "negbin")
summary(m.team)
fittedgoals<-round(dpois(0:6, (fitted(m.team)[0]))*nrow(teamresults))
names(fittedgoals)<-0:6
rbind(fittedgoals, actual=table(teamresults$goals))
rbind(fittedstatic, actual=table(teamresults$goals))
plot(teamresults$goals, fitted(m.team))
boxplot(fitted(m.team) ~ teamresults$goals)
summary(dpois(teamresults$goals, fitted(m.team)-0.14))
plot(dpois(teamresults$goals, fitted(m.team)))
summary(dpois(teamresults$goals+1, fitted(m.team)))
summary(dpois(teamresults$goals+2, fitted(m.team)))
summary(dpois(teamresults$goals+3, fitted(m.team)))
summary(dpois(teamresults$goals-1, fitted(m.team)))
summary(dpois(0, fitted(m.team)))
summary(dpois(1, fitted(m.team)))
summary(dpois(2, fitted(m.team)))
summary(dpois(teamresults$goals, fitted(staticlambda)))
summary(m.team)
summary(fitted(m.team))
# , teamresults$team, teamresults$otherTeam, teamresults$otherGoals, teamresults$where)
which.max(allpred)
names(fittedstatic)<-0:6
summary(fitted(m.team))
m.diff<-glm(formula = goals-otherGoals ~ (team+otherTeam)*where, data=teamresults, family = poisson)
homedefense<-glm(formula = FTHG ~ AwayTeam, data=results, family = poisson)
poisson.test(x=teamresults$goals, r = 0.3472)
poisson.test(137, 24.19893)
0.3472
reshape(results, timevar = "HomeTeam", direction = "wide", idvar = "spieltag")
recast(results, spieltag~HomeTeam~FTHG, id.var=c("HomeTeam", "spieltag", "FTHG"))
library(dplyr)
results %>% summarize(results)
aggregate(FTHG ~ HomeTeam, results, mean)
aggregate(FTAG ~ AwayTeam, results, mean)
homeattack<-glm(formula = FTHG ~ HomeTeam, data=results, family = poisson)
homedefense<-glm(formula = FTHG ~ AwayTeam, data=results, family = poisson)
awayattack<-glm(formula = FTAG ~ AwayTeam, data=results, family = poisson)
awaydefense<-glm(formula = FTAG ~ HomeTeam, data=results, family = poisson)
homegoals_x<-glm(formula = FTHG ~ HomeTeam*AwayTeam, data=results, family = poisson)
homegoals<-glm(formula = FTHG ~ HomeTeam+AwayTeam, data=results, family = poisson)
awaygoals_x<-glm(formula = FTAG ~ HomeTeam*AwayTeam, data=results, family = poisson)
awaygoals<-glm(formula = FTAG ~ HomeTeam+AwayTeam, data=results, family = poisson)
summary(homegoals)
predict(homegoals, newdata = data)
predict(homegoals)
summary(residuals(homegoals))
summary(residuals(awaygoals))
summary(residuals(homegoals_x))
summary(predict(homegoals, type = "response"))
summary(predict(awaygoals, type = "response"))
summary(predict(homegoals_x, type = "response"))
summary(predict(awaygoals_x, type = "response"))
cbind(results, H=predict(homegoals, type = "response"), A=predict(awaygoals, type = "response"))
cbind(results,
H=round(predict(homegoals_x, type = "response"), 2),
A=round(predict(awaygoals_x, type = "response"), 2))
plot(residuals(homegoals, type = "response") ~ FTHG, data=results)
plot(results$FTAG, residuals(awaygoals, type = "response"))
plot(predict(homegoals, type = "response") ~ FTHG, data=results)
plot(predict(awaygoals, type = "response") ~ FTAG, data=results)
predict(homegoals, type = "response", newdata=data.frame(HomeTeam="Dortmund", AwayTeam="Ingolstadt"))
predict(awaygoals, type = "response", newdata=data.frame(HomeTeam="Dortmund", AwayTeam="Ingolstadt"))
predict(homegoals, newdata=data.frame(HomeTeam="Dortmund", AwayTeam="Bayern Munich"))
lambda<-predict(homegoals, type = "response", newdata=data.frame(HomeTeam="Dortmund", AwayTeam="Bayern Munich"))
lambda2<-predict(awaygoals, type = "response", newdata=data.frame(HomeTeam="Dortmund", AwayTeam="Bayern Munich"))
plot(dpois(0:10, lambda))
plot(dpois(0:10, lambda2))
dpois(0:5, lambda) %o% dpois(0:5, lambda2)
exp(-lambda)*lambda^4/factorial(4)
exp(0.99373-0.02707-0.95141)
0.99373-0.02707-0.03221
dpois(0, fitted(homegoals))
dpois(1, fitted(homegoals))
dpois(2, fitted(homegoals))
dpois(3, fitted(homegoals))
dpois(0:10, fitted(homegoals))
allmodel
table(results, HomeTeam~FTHG)
results
results2 <- data[,c('HomeTeam', 'AwayTeam', 'FTHG', 'FTAG', 'FTR', 'HTHG', 'HTAG', 'HTR')]
summary(results2)
table(results2$HTR, results2$FTR) / nrow(results2) * 100
table(results2$HTR, results2$HTR)
table(results2$HTHG)
table(results2$FTHG-results2$HTHG)
table(results2$HTAG)
table(results2$FTAG-results2$HTAG)
library(MNP) # loads the MNP package
example(mnp) # runs the example script
detergent
m.probit<-mnp(formula = sign(goals-otherGoals)~I(as.integer(team)%%10), data=teamresults, verbose=T)
summary(m.probit)
m.probitdiff<-mnp(formula = (goals-otherGoals)~(team+otherTeam)*where, data=teamresults, verbose=T)
summary(m.probitdiff)
predict(m.probit, newdata = teamresults[1:10,])
residuals(m.probit)
as.integer(teamresults$team)
| /R/Predict_3.R | no_license | martinstorch/football | R | false | false | 21,912 | r | setwd("~/LearningR/Bundesliga/")
library(pscl)
library(amen)
library(Matrix)
loadData <- function() {
  # Download the current Bundesliga season from football-data.co.uk and build
  # two tables:
  #   games       - one row per match (full-time/half-time goals and results)
  #   teamresults - one row per team and match (home and away perspective),
  #                 full-time goals capped at 4, with recency weights
  # The 2015/16 file is downloaded as well so an older season can be appended
  # easily if needed.
  download.file("http://www.football-data.co.uk/mmz4281/1617/D1.csv", "BL2016.csv")
  download.file("http://www.football-data.co.uk/mmz4281/1516/D1.csv", "BL2015.csv")
  data <- read.csv("BL2016.csv")
  data$season <- 2016
  results <- data[, c('HomeTeam', 'AwayTeam', 'FTHG', 'FTAG', 'FTR', 'HTHG', 'HTAG', 'HTR', 'season')]
  # 9 matches per matchday ("Spieltag").
  results$spieltag <- floor((9:(nrow(results) + 8)) / 9)
  # Long format: every match appears twice, once from each team's perspective.
  # (The original code first built this from half-time goals and immediately
  # overwrote it with the full-time version; the dead block is removed.)
  teamresults <- data.frame(team = results$HomeTeam, otherTeam = results$AwayTeam,
                            goals = results$FTHG, otherGoals = results$FTAG, where = "Home",
                            spieltag = results$spieltag, season = results$season)
  teamresults <- rbind(data.frame(team = results$AwayTeam, otherTeam = results$HomeTeam,
                                  goals = results$FTAG, otherGoals = results$FTHG, where = "Away",
                                  spieltag = results$spieltag, season = results$season),
                       teamresults)
  # Cap goal counts at 4 so the downstream outcome grid stays 5x5 (0..4).
  teamresults$goals <- pmin(teamresults$goals, 4)
  teamresults$otherGoals <- pmin(teamresults$otherGoals, 4)
  # Weight recent matchdays (and later seasons) more heavily when fitting.
  teamresults$weights <- (teamresults$season - 2014) * 1.02^teamresults$spieltag
  return(list(games = results, teamresults = teamresults))
}
buildMask <- function() {
  # Betting-score payout masks: for each possible actual final score "h a"
  # (0..4 goals per side), a 5x5 matrix of points awarded to every possible
  # predicted score (rows = predicted home goals, cols = predicted away goals).
  # Points: correct tendency 2 (home win / draw) or 4 (away win), correct goal
  # difference 3 / 5, exact score 4 (home win) / 7 (away win) / 6 (draw).
  score_grid <- expand.grid(0:4, 0:4)
  keys <- paste(score_grid$Var1, score_grid$Var2)
  mask <- list()
  for (key in keys) {
    mask[[key]] <- matrix(0, nrow = 5, ncol = 5, dimnames = list(0:4, 0:4))
  }
  # Draws: any predicted draw scores 2, the exact draw scores 6.
  for (g in 0:4) {
    for (p in 0:4) {
      mask[[paste(g, g)]][p + 1, p + 1] <- 2
    }
    mask[[paste(g, g)]][g + 1, g + 1] <- 6
  }
  # Home wins: predicted home win 2, correct difference 3, exact score 4.
  for (hg in 1:4) {
    for (ag in 0:(hg - 1)) {
      for (ph in 1:4) {
        for (pa in 0:(ph - 1)) {
          mask[[paste(hg, ag)]][ph + 1, pa + 1] <- if (hg - ag == ph - pa) 3 else 2
        }
      }
      mask[[paste(hg, ag)]][hg + 1, ag + 1] <- 4
    }
  }
  # Away wins: predicted away win 4, correct difference 5, exact score 7.
  for (hg in 0:3) {
    for (ag in (hg + 1):4) {
      for (ph in 0:3) {
        for (pa in (ph + 1):4) {
          mask[[paste(hg, ag)]][ph + 1, pa + 1] <- if (hg - ag == ph - pa) 5 else 4
        }
      }
      mask[[paste(hg, ag)]][hg + 1, ag + 1] <- 7
    }
  }
  return(mask)
}
ld<-loadData()
mask<-buildMask()
games<-ld$games[ld$games$season==2016,]
games$FTHG<-sapply(games$FTHG, min, 4)
games$FTAG<-sapply(games$FTAG, min, 4)
games$HTHG<-sapply(games$HTHG, min, 4)
games$HTAG<-sapply(games$HTAG, min, 4)
games$FTDIFF<-(games$FTHG-games$FTAG)
games$HTDIFF<-(games$HTHG-games$HTAG)
teams<-unique(games$AwayTeam)
Y1<-spMatrix(nrow=length(teams),
ncol=length(teams),
i = as.integer(games$HomeTeam),
j=as.integer(games$AwayTeam),
x = games$FTHG) # - games$FTAG)
Y<-as.matrix(Y1)
Y<-matrix(nrow=length(teams),
ncol=length(teams),
data=NA)
rownames(Y)<-levels(teams)
colnames(Y)<-levels(teams)
Xd<-array(dim=c(length(teams),length(teams),3),
data=NA, dimnames = list(home=levels(teams), away=levels(teams),
info=c('HTHG', 'HTAG', 'HTDIFF')))
rownames(Y)<-levels(teams)
colnames(Y)<-levels(teams)
for (i in 1:nrow(games))
{
g <- games[i,]
Y[as.integer(g$HomeTeam), as.integer(g$AwayTeam)]<-g$FTHG # -g$FTAG
Xd[as.integer(g$HomeTeam), as.integer(g$AwayTeam),1]<-g$HTHG
Xd[as.integer(g$HomeTeam), as.integer(g$AwayTeam),2]<-g$HTAG
Xd[as.integer(g$HomeTeam), as.integer(g$AwayTeam),3]<-g$HTHG-g$HTAG
}
fit_SRM<-ame(Y, Xd=Xd[,,c(1,3)], nscan=5000, plot=TRUE, print=TRUE)
summary(fit_SRM)
fit_SRM$YPM
fit_SRM_bas<-ame(Y, nscan=5000, odens=10, plot=TRUE, print=TRUE)
str(fit_SRM_bas)
summary(fit_SRM_bas)
plot(fit_SRM_bas)
plot(fit_SRM)
mean(fit_SRM_bas$BETA)
fit_SRM_bas$YPM
Y[2,1]
fit_SRM_bas$YPM[2,1]
Y[1,2]
fit_SRM_bas$YPM[1,2]
fit_SRM_bas$APM[2]+fit_SRM_bas$BPM[1]+mean(fit_SRM_bas$BETA)
fit_SRM_bas$EZ[2,1]
fit_SRM_bas$APM[1]+fit_SRM_bas$BPM[2]+mean(fit_SRM_bas$BETA)
fit_SRM_bas$EZ[1,2]
fit_SRM_bas$YPM[1,2]
plot(sort(fit_SRM_bas$BETA))
gofstats(Y)
gofstats(fit_SRM$YPM)
c(Y)
str(fit_SRM)
Rowcountry<-matrix(rownames(Y),nrow(Y),ncol(Y))
Colcountry<-t(Rowcountry)
anova(lm( c(Y) ~ c(Rowcountry) + c(Colcountry) ) )
rmean<-rowMeans(Y,na.rm=TRUE) ; cmean<-colMeans(Y,na.rm=TRUE)
muhat<-mean(Y,na.rm=TRUE)
ahat<-rmean-muhat
bhat<-cmean-muhat
sd(ahat)
# additive "exporter" effects
head( sort(ahat,decreasing=TRUE) )
cov( cbind(ahat,bhat) )
cor( ahat, bhat)
R <- Y - ( muhat + outer(ahat,bhat,"+") )
cov( cbind( c(R),c(t(R)) ), use="complete")
cor( c(R),c(t(R)), use="complete")
plot( c(fit_SRM$YPM), c(Y))
plot( c(Y), c(fit_SRM$YPM)-c(Y))
plot(c(fit_SRM$YPM), c(fit_SRM$YPM-Y))
cor(c(fit_SRM$YPM-Y), c(fit_SRM$YPM), use = "na.or.complete")
cor(c(Y), c(fit_SRM$YPM), use = "na.or.complete")
summary(c(fit_SRM$YPM))
str(Y)
summary(fit_SRM$BETA)
fit_SRM$U
fit_SRM$V
summary(fit_SRM$GOF)
library(hclust)
cl<-hclust(d = dist(fit_SRM$U[,1]))
plot(cl)
apply(fit_SRM$GOF,2,mean)
gofstats(Y)
fit_SRM<-ame(Y, Xd=Xd[,,c(1,3)], nscan=5000, R=4, model="nrm", plot=TRUE, print=TRUE)
summary(fit_SRM)
muhat
cov( cbind(ahat,bhat))
apply(fit_SRM$BETA,2,mean)
fit_SRM$BETA
str(fit_SRM)
gofstats(fit_SRM)
fit_SRM$U[,1] %*% t(fit_SRM$V[,1]) == fit_SRM$UVPM
((fit_SRM$U[,1] %*% t(fit_SRM$V[,1]))
+(fit_SRM$U[,2] %*% t(fit_SRM$V[,2]))
+(fit_SRM$U[,3] %*% t(fit_SRM$V[,3]))
+(fit_SRM$U[,4] %*% t(fit_SRM$V[,4]))
)[1:6,1:6]
Y[1:6,1:6]
fit_SRM$UVPM[1:6,1:6]
fit_SRM$YPM
nextmatches<-c(
"Augsburg", "RB Leipzig",
"Werder Bremen", "Darmstadt",
"Dortmund", "Leverkusen",
"Mainz", "Wolfsburg",
"FC Koln", "Bayern Munich",
"Hoffenheim", "Ingolstadt",
"M'gladbach", "Schalke 04",
"Ein Frankfurt", "Freiburg",
"Hamburg", "Hertha"
)
nextmatches<-c(
"Leverkusen", "Werder Bremen",
"Darmstadt", "Mainz",
"RB Leipzig", "Wolfsburg",
"Hertha", "Dortmund",
"Freiburg", "Hoffenheim",
"Bayern Munich", "Ein Frankfurt",
"Ingolstadt", "FC Koln",
"Schalke 04", "Augsburg",
"Hamburg", "M'gladbach"
)
nm<-matrix(data = nextmatches, ncol=2, byrow = T)
sapply(1:9, function(i) paste(nm[i,1], "-", nm[i,2], ": ", fit_SRM$YPM[nm[i,1], nm[i,2]]))
fit_rm<-ame(Y,Xd=Xd[,,3],rvar=FALSE,cvar=FALSE,dcor=FALSE, nscan=5000, plot=TRUE, print=TRUE)
summary(fit_rm)
# Fit the per-team goal-scoring model on the long-format `teamresults` table
# (needs columns: goals, team, otherTeam, where, weights).
# Uses a weighted negative-binomial hurdle model (pscl::hurdle); the commented
# glm calls are earlier (quasi-)Poisson variants kept for reference.
# Side effects: plots fitted vs. actual goals and prints model diagnostics.
# Returns the fitted model object.
buildModel <- function(teamresults) {
# m.team<-glm(formula = goals ~ (team+otherTeam)*where, data=teamresults, family = poisson, weights = teamresults$weights)
# m.team<-glm(formula = goals ~ (team+otherTeam)*where, data=teamresults, family = quasipoisson, weights = weights)
m.team<-hurdle(formula = goals ~ (team+otherTeam)*where, data=teamresults, dist = "negbin", weights = weights)
plot(teamresults$goals, fitted(m.team))
print(summary(m.team))
# Probability the model assigns to the actually observed goal counts
# (higher is better).
print(summary(dpois(teamresults$goals, fitted(m.team))))
# summary(dpois(teamresults$goals, 0))
# summary(dpois(teamresults$goals, 1))
# summary(dpois(teamresults$goals, 2))
return(m.team)
}
predictMatches <- function(model, newmatches) {
  # Predict a set of matches with the fitted goal model.
  # `newmatches` needs columns team (home side) and otherTeam (away side).
  # Returns a list:
  #   newmatches   - input plus expected goals lh (home) and la (away)
  #   predoutcomes - one row per match, 25 exact-score probabilities with
  #                  columns "<home goals> <away goals>" (home cycling fastest)
  #   predgoals    - most likely goal count per side
  newmatches$lh <- predict(object = model, type = "response",
                           newdata = data.frame(team = newmatches$team,
                                                otherTeam = newmatches$otherTeam, where = "Home"))
  newmatches$la <- predict(object = model, type = "response",
                           newdata = data.frame(team = newmatches$otherTeam,
                                                otherTeam = newmatches$team, where = "Away"))
  # Poisson probabilities for 0..4 goals per side, columns LH0..LH4, LA0..LA4.
  # NOTE(review): goals are truncated at 4, so each row sums to slightly < 1.
  lambdas <- cbind(sapply(0:4, function(g) dpois(g, newmatches$lh)),
                   sapply(0:4, function(g) dpois(g, newmatches$la)))
  colnames(lambdas) <- c(paste0('LH', 0:4), paste0('LA', 0:4))
  # Per match: outer product of the two marginals -> joint score probabilities.
  predoutcomes <- t(apply(lambdas, 1, function(p) p[1:5] %o% p[6:10]))
  score_grid <- expand.grid(0:4, 0:4)
  colnames(predoutcomes) <- paste(score_grid$Var1, score_grid$Var2)
  # Mode of each (truncated) Poisson marginal.
  predhg <- apply(lambdas[, 1:5], 1, which.max) - 1
  predag <- apply(lambdas[, 6:10], 1, which.max) - 1
  list(newmatches = newmatches,
       predoutcomes = predoutcomes,
       predgoals = data.frame(hg = predhg, ag = predag))
}
recommend <- function(prediction) {
  # Summarise a predictMatches() result: per match, the probability of each
  # tendency (home win / draw / away win) and the single most likely exact
  # score, bound to the match fixtures as one data.frame.
  summarise_row <- function(p) {
    # Reshape the 25 score probabilities back into a 5x5 grid,
    # rows = home goals, cols = away goals.
    outcome <- matrix(p, nrow = 5, ncol = 5)
    c(homewinprob = sum(lower.tri(outcome) * outcome),
      drawprob = sum(diag(outcome)),
      awaywinprob = sum(upper.tri(outcome) * outcome),
      prediction = which.max(p))
  }
  tend <- t(apply(prediction$predoutcomes, 1, summarise_row))
  cbind(prediction$newmatches, tend[, 1:3],
        pred = colnames(prediction$predoutcomes)[tend[, "prediction"]])
}
# For each match (row of `predoutcomes`: 25 exact-score probabilities in
# expand.grid(0:4, 0:4) order), compute the expected betting points of every
# possible predicted score by weighting the payout matrices in the global
# `mask` list with the outcome probabilities, and return the three bets with
# the highest expected value (best/exp, best2/exp2, best3/exp3).
# NOTE(review): relies on unlist(mask[[i]]) (column-major, predicted home
# goals cycling fastest) being aligned with the column order of
# `predoutcomes` -- true for masks built by buildMask().
maxExpectation <- function(predoutcomes) {
# n x 25 matrix: expected points when predicting score j, per match.
expectedValues<-sapply(1:25, function(i) predoutcomes %*% unlist(mask[i]), simplify = "array")
colnames(expectedValues)<-names(mask)
# Column indices of the three highest expected values per match.
ordering<-t(apply(-expectedValues, 1, order)[1:3,])
data.frame(
best=colnames(expectedValues)[ordering[,1]],
exp=apply(expectedValues, 1, max),
best2=colnames(expectedValues)[ordering[,2]],
exp2=apply(expectedValues, 1, function(x) {x[order(-x)[2]]}),
best3=colnames(expectedValues)[ordering[,3]],
exp3=apply(expectedValues, 1, function(x) {x[order(-x)[3]]})
)
}
ld<-loadData()
mask<-buildMask()
model<-buildModel(ld$teamresults)
newmatches<-ld$teamresults[teamresults$where=='Home',c('team', 'otherTeam')]
prediction <- predictMatches(model, newmatches)
table(prediction$predgoals$hg, prediction$predgoals$ag)
table(ld$games$HTHG, ld$games$HTAG)
table(prediction$predgoals$hg, ld$games$HTHG)
table(prediction$predgoals$ag, ld$games$HTAG)
qqplot(prediction$predgoals$hg, ld$games$HTHG)
qqplot(prediction$predgoals$ag, ld$games$HTAG)
plot(ld$games$HTHG, prediction$newmatches$lh)
plot(ld$games$HTHG - ld$games$HTAG, prediction$newmatches$lh - prediction$newmatches$la)
cor(ld$games$HTHG - ld$games$HTAG, prediction$newmatches$lh - prediction$newmatches$la)
cor(prediction$newmatches$lh, ld$games$HTHG)
plot(ld$games$HTHG, x=prediction$newmatches$lh)
plot(ld$games$HTAG, prediction$newmatches$la)
recommend(prediction)
nextmatches<-c(
"Wolfsburg", "Werder Bremen",
"Bayern Munich", "Hamburg",
"Leverkusen", "Mainz",
"Darmstadt", "Augsburg",
"Freiburg", "Dortmund",
"RB Leipzig", "FC Koln",
"Hertha", "Ein Frankfurt",
"Ingolstadt", "M'gladbach",
"Schalke 04", "Hoffenheim"
)
nextmatches<-as.data.frame(matrix(nextmatches,ncol=2,byrow=TRUE))
colnames(nextmatches)<-c('team', 'otherTeam')
prediction <- predictMatches(model, nextmatches)
recommend(prediction)
cbind(recommend(prediction), maxExpectation(prediction$predoutcomes))
cbind(prediction$newmatches, ld$games)
sum(maxExpectation(prediction$predoutcomes)$exp)
sum(maxExpectation(prediction$predoutcomes)$exp2)
sum(maxExpectation(prediction$predoutcomes)$exp3)
# Plot the 25 exact-score probabilities of one match in decreasing order,
# labelled with the score name, and return the expected-value ranking from
# maxExpectation(). NOTE(review): `T` should be spelled TRUE.
plotGamePred<-function(pred) {
ord<-order(pred, decreasing = T)
plot(pred[ord])
text(pred[ord], names(pred[ord]))
maxExpectation(pred)
}
sort(prediction$predoutcomes[1,], decreasing = T)
plot(sort(prediction$predoutcomes[1,], decreasing = T))
text(sort(prediction$predoutcomes[1,], decreasing = T), names(sort(prediction$predoutcomes[1,], decreasing = T)))
plotGamePred(prediction$predoutcomes[1,])
pred<-prediction$predoutcomes[1,]
labels
apply(expectedValues, 1, max)
expectedValues[9,order(-expectedValues[9,])]
matrix(expectedValues[8,], nrow=5, ncol=5, dimnames = list(0:4, 0:4))
matrix(prediction$predoutcomes[8,], nrow=5, ncol=5, dimnames = list(0:4, 0:4))
prediction$predoutcomes[1,]
sum(prediction$predoutcomes %*% unlist(mask[1]))
sum(prediction$predoutcomes[1,] * unlist(mask[1]))
sum(prediction$predoutcomes[1,] * unlist(mask[20]))
cbind(unlist(mask[2]), names(mask), prediction$predoutcomes[1,], names(prediction$predoutcomes[1,]))
rowSums(prediction$predoutcomes * unlist(mask[2]))
prediction$predoutcomes[1,]
ld$teamresults[teamresults$where=='Home',c('team', 'otherTeam')]
teams
fr <- teamresults[teamresults$where=='Home',]
fr$lh <- predict(m.team, type = "response", newdata=data.frame(team=fr$team, otherTeam=fr$otherTeam, where="Home"))
fr$la <- predict(m.team, type = "response", newdata=data.frame(team=fr$otherTeam, otherTeam=fr$team, where="Away"))
plot(lh-la ~ I(goals-otherGoals), data=fr )
abline(lm(lh-la ~ I(goals-otherGoals), data=fr ))
summary(lm(lh-la ~ I(goals-otherGoals), data=fr ))
cor(fr$lh-fr$la, fr$goals-fr$otherGoals)
lambdas<-cbind(sapply(0:4, function(x) dpois(x, fr$lh)), sapply(0:4, function(x) dpois(x, fr$la)))
str(lambdas)
colnames(lambdas)<-c(paste0('LH', 0:4), paste0('LA', 0:4))
predoutcomes<-apply(lambdas, 1, function(x) {x[1:5]%o%x[6:10]})
predoutcomes<-t(predoutcomes)
cn<-expand.grid(0:4, 0:4)
colnames(predoutcomes)<-paste(cn$Var1, cn$Var2)
tend<-apply(predoutcomes, 1, function(x) {
rm<-matrix(5,5,data=x);
c(
homewinprob = sum(lower.tri(rm)*rm),
drawprob=sum(diag(rm)),
awaywinprob = sum(upper.tri(rm)*rm))
})
tend<-t(tend)
summary(tend)
table(apply(tend, 1, which.max))
table(sign(fr$goals-fr$otherGoals))
table(apply(tend, 1, which.max), sign(fr$goals-fr$otherGoals))
m.diff<-lm(formula = goals-otherGoals ~ (team+otherTeam)*where, data=teamresults, weights = weights)
teamresults$diffpred <-fitted(m.diff)
summary(m.diff)
plot(m.diff)
plot(diffpred ~ I(goals-otherGoals), data=teamresults )
abline(lm(diffpred ~ I(goals-otherGoals), data=teamresults ))
allpred<-sapply(0:6, function(x) dpois(x, fitted(m.team)))
bestpred<-apply(allpred, 1, which.max)-1
table(data.frame(pred=bestpred, act=teamresults$goals)) #, diff=bestpred - teamresults$goals) )
summary(data.frame(pred=bestpred, act=teamresults$goals))
predictMatch <- function(t1, t2) {
  # Predict a single match between home side t1 and away side t2 using the
  # globally fitted goal model `m.team`.
  # Returns a list:
  #   tendency - one-row data.frame with win/draw probabilities (in percent)
  #              and the most likely goal count per side
  #   pred     - 7x7 matrix of exact-score probabilities in percent
  #              (rows = home goals 0..6, cols = away goals 0..6)
  hg <- predict(m.team, type = "response",
                newdata = data.frame(team = t1, otherTeam = t2, where = "Home"))
  ag <- predict(m.team, type = "response",
                newdata = data.frame(team = t2, otherTeam = t1, where = "Away"))
  # Poisson probabilities of scoring 0..6 goals per side.
  hgdist <- sapply(0:6, function(g) dpois(g, hg))
  agdist <- sapply(0:6, function(g) dpois(g, ag))
  # Joint score probabilities (independence assumed), rounded, in percent.
  predoutcomes <- round(hgdist %o% agdist, 4) * 100
  dimnames(predoutcomes) <- list(0:6, 0:6)
  # Diagonal = draw, below = home win, above = away win.
  drawprob <- sum(diag(predoutcomes))
  homewinprob <- sum(lower.tri(predoutcomes) * predoutcomes)
  awaywinprob <- sum(upper.tri(predoutcomes) * predoutcomes)
  list(tendency = data.frame(team = t1, otherTeam = t2,
                             homewinprob, drawprob, awaywinprob,
                             hg = which.max(hgdist) - 1,
                             ag = which.max(agdist) - 1),
       pred = predoutcomes)
}
str(tend)
matrix(7,7,data = predoutcomes[1,])
lambdas[1,]
str((predoutcomes))
table(sign(fr$lh-fr$la), sign(fr$goals-fr$otherGoals))
ppois(0, 1)+dpois(1,1)
dpois(0,1)
ppois(0,1)
ppois(2, 1, lower.tail = F)
ppois(0, 1, lower.tail = T)
ppois(0, 1, lower.tail = F)
densityplot(lh-la ~ I(goals-otherGoals), data=fr)
fittedresults$goals - fittedresults$otherGoals, )
hg<-predict(m.team, type = "response", newdata=data.frame(team=team, otherTeam=otherTeam, where="Home"))
ag<-predict(m.team, type = "response", newdata=data.frame(team=otherTeam, otherTeam=team, where="Away"))
allgamespred<-apply(results, 1, function(x) {predictMatch(x[['HomeTeam']], x[['AwayTeam']])})
allgames_tenpred<-(sapply(allgamespred, function(x) x$tendency[, c('homewinprob', 'drawprob', 'awaywinprob')]))
allgames_tenpred<-t(allgames_tenpred)
allgames_tenpred[,c('homewinprob', 'drawprob', 'awaywinprob')]
str(as.matrix(allgames_tenpred))
actualtend<-cbind(ifelse(results$FTR=='H', 1, 0), ifelse(results$FTR=='D', 1, 0), ifelse(results$FTR=='A', 1, 0))
str(actualtend)
as.matrix(allgames_tenpred)*cbind(ifelse(results$FTR=='H', 1, 0), ifelse(results$FTR=='D', 1, 0), ifelse(results$FTR=='A', 1, 0))
summary(unlist(ifelse(results$FTR=='H', allgames_tenpred[,1], ifelse(results$FTR=='D', allgames_tenpred[,2], allgames_tenpred[,3]))))
table(apply(allgames_tenpred[,c('homewinprob', 'drawprob', 'awaywinprob')], 1, function(x) which.max(x)))
allgames_tenpred[1:2,]
str(results)
results$HomeTeam
results$AwayTeam
teams
predictMatch(teams[11],teams[17])
predictMatch(teams[15],teams[3])
predictMatch(teams[4],teams[9])
predictMatch(teams[6],teams[7])
predictMatch(teams[10],teams[1])
predictMatch(teams[13],teams[19])
predictMatch(teams[18],teams[20])
predictMatch(teams[12],teams[16])
t1<-teams[18]
t2<-teams[20]
table(results$FTHG, results$FTAG)
var(results$FTHG)
mean(results$FTHG)
41.89+29.6+28.47
var(results$FTAG)
mean(results$FTAG)
var(teamresults$goals)
mean(teamresults$goals)
var(teamresults$otherGoals)
mean(teamresults$otherGoals)
predictMatch(teams[11],teams[17])
predictMatch(teams[11],teams[17])
teams[3]
t1<-1
t2<-12
colnames(teams[5])
str(teams)
predict(m.team, type = "response", newdata=data.frame(team="Augsburg", otherTeam="Leverkusen", where="Home"))
library(pscl)
m.team<-hurdle(formula = goals ~ team*where+otherTeam, data=teamresults, dist = "poisson")
m.team<-hurdle(formula = goals ~ (team+otherTeam)*where, data=teamresults, dist = "geometric")
m.team<-hurdle(formula = goals ~ (team+otherTeam)*where, data=teamresults, dist = "negbin")
summary(m.team)
fittedgoals<-round(dpois(0:6, (fitted(m.team)[0]))*nrow(teamresults))
names(fittedgoals)<-0:6
rbind(fittedgoals, actual=table(teamresults$goals))
rbind(fittedstatic, actual=table(teamresults$goals))
plot(teamresults$goals, fitted(m.team))
boxplot(fitted(m.team) ~ teamresults$goals)
summary(dpois(teamresults$goals, fitted(m.team)-0.14))
plot(dpois(teamresults$goals, fitted(m.team)))
summary(dpois(teamresults$goals+1, fitted(m.team)))
summary(dpois(teamresults$goals+2, fitted(m.team)))
summary(dpois(teamresults$goals+3, fitted(m.team)))
summary(dpois(teamresults$goals-1, fitted(m.team)))
summary(dpois(0, fitted(m.team)))
summary(dpois(1, fitted(m.team)))
summary(dpois(2, fitted(m.team)))
summary(dpois(teamresults$goals, fitted(staticlambda)))
summary(m.team)
summary(fitted(m.team))
# , teamresults$team, teamresults$otherTeam, teamresults$otherGoals, teamresults$where)
which.max(allpred)
names(fittedstatic)<-0:6
summary(fitted(m.team))
m.diff<-glm(formula = goals-otherGoals ~ (team+otherTeam)*where, data=teamresults, family = poisson)
homedefense<-glm(formula = FTHG ~ AwayTeam, data=results, family = poisson)
poisson.test(x=teamresults$goals, r = 0.3472)
poisson.test(137, 24.19893)
0.3472
reshape(results, timevar = "HomeTeam", direction = "wide", idvar = "spieltag")
recast(results, spieltag~HomeTeam~FTHG, id.var=c("HomeTeam", "spieltag", "FTHG"))
library(dplyr)
results %>% summarize(results)
aggregate(FTHG ~ HomeTeam, results, mean)
aggregate(FTAG ~ AwayTeam, results, mean)
homeattack<-glm(formula = FTHG ~ HomeTeam, data=results, family = poisson)
homedefense<-glm(formula = FTHG ~ AwayTeam, data=results, family = poisson)
awayattack<-glm(formula = FTAG ~ AwayTeam, data=results, family = poisson)
awaydefense<-glm(formula = FTAG ~ HomeTeam, data=results, family = poisson)
homegoals_x<-glm(formula = FTHG ~ HomeTeam*AwayTeam, data=results, family = poisson)
homegoals<-glm(formula = FTHG ~ HomeTeam+AwayTeam, data=results, family = poisson)
awaygoals_x<-glm(formula = FTAG ~ HomeTeam*AwayTeam, data=results, family = poisson)
awaygoals<-glm(formula = FTAG ~ HomeTeam+AwayTeam, data=results, family = poisson)
summary(homegoals)
predict(homegoals, newdata = data)
predict(homegoals)
summary(residuals(homegoals))
summary(residuals(awaygoals))
summary(residuals(homegoals_x))
summary(predict(homegoals, type = "response"))
summary(predict(awaygoals, type = "response"))
summary(predict(homegoals_x, type = "response"))
summary(predict(awaygoals_x, type = "response"))
cbind(results, H=predict(homegoals, type = "response"), A=predict(awaygoals, type = "response"))
cbind(results,
H=round(predict(homegoals_x, type = "response"), 2),
A=round(predict(awaygoals_x, type = "response"), 2))
plot(residuals(homegoals, type = "response") ~ FTHG, data=results)
plot(results$FTAG, residuals(awaygoals, type = "response"))
plot(predict(homegoals, type = "response") ~ FTHG, data=results)
plot(predict(awaygoals, type = "response") ~ FTAG, data=results)
predict(homegoals, type = "response", newdata=data.frame(HomeTeam="Dortmund", AwayTeam="Ingolstadt"))
predict(awaygoals, type = "response", newdata=data.frame(HomeTeam="Dortmund", AwayTeam="Ingolstadt"))
predict(homegoals, newdata=data.frame(HomeTeam="Dortmund", AwayTeam="Bayern Munich"))
lambda<-predict(homegoals, type = "response", newdata=data.frame(HomeTeam="Dortmund", AwayTeam="Bayern Munich"))
lambda2<-predict(awaygoals, type = "response", newdata=data.frame(HomeTeam="Dortmund", AwayTeam="Bayern Munich"))
plot(dpois(0:10, lambda))
plot(dpois(0:10, lambda2))
dpois(0:5, lambda) %o% dpois(0:5, lambda2)
exp(-lambda)*lambda^4/factorial(4)
exp(0.99373-0.02707-0.95141)
0.99373-0.02707-0.03221
dpois(0, fitted(homegoals))
dpois(1, fitted(homegoals))
dpois(2, fitted(homegoals))
dpois(3, fitted(homegoals))
dpois(0:10, fitted(homegoals))
allmodel
table(results, HomeTeam~FTHG)
results
results2 <- data[,c('HomeTeam', 'AwayTeam', 'FTHG', 'FTAG', 'FTR', 'HTHG', 'HTAG', 'HTR')]
summary(results2)
table(results2$HTR, results2$FTR) / nrow(results2) * 100
table(results2$HTR, results2$HTR)
table(results2$HTHG)
table(results2$FTHG-results2$HTHG)
table(results2$HTAG)
table(results2$FTAG-results2$HTAG)
library(MNP) # loads the MNP package
example(mnp) # runs the example script
detergent
m.probit<-mnp(formula = sign(goals-otherGoals)~I(as.integer(team)%%10), data=teamresults, verbose=T)
summary(m.probit)
m.probitdiff<-mnp(formula = (goals-otherGoals)~(team+otherTeam)*where, data=teamresults, verbose=T)
summary(m.probitdiff)
predict(m.probit, newdata = teamresults[1:10,])
residuals(m.probit)
as.integer(teamresults$team)
|
# --- Estimator convergence-speed simulation ---
library(ggplot2)
# Population: 10 million standard-normal draws to sample from.
values <- rnorm(10000000)
# Result accumulators, one entry per (sample size, replicate, estimator).
# NOTE(review): grown element-wise in the loop; preallocating would be faster.
ns <- c()
is <- c()
meanEstimate <- c()
sdEstimate <- c()
estimator <- c()
k <- 1
# Sample sizes 3^1 .. 3^10, 1000 replicates each.
for (n in 3^(1:10)) {
for (i in 1:1000) {
# Each sample is contaminated with 10 fixed outliers of value 10.
# NOTE(review): `T` should be spelled TRUE.
sampled <- c(sample(values, n, replace = T), rep(10, 10))
# Classical estimators (outlier-sensitive): sample mean and sd.
ns[k] <- n
is[k] <- i
meanEstimate[k] <- mean(sampled)
sdEstimate[k] <- sd(sampled)
estimator[k] <- "Normal"
k <- k + 1
# Robust estimators: median, and half the quantile range between pnorm(-1)
# and pnorm(1) (equals one sd under normality).
ns[k] <- n
is[k] <- i
meanEstimate[k] <- median(sampled)
sdEstimate[k] <- (quantile(sampled, probs = pnorm(1)) - quantile(sampled, probs = pnorm(-1)))/2
estimator[k] <- "Robust"
k <- k + 1
}
}
# Long-format results table; n and i as factors for plotting.
result <- data.frame(
n = as.factor(ns),
i = as.factor(is),
mean = meanEstimate,
sd = sdEstimate,
estimator = estimator,
stringsAsFactors = F
)
# Violin plots of the location estimates per sample size and estimator.
meanPlot <- ggplot() + theme_bw() +
geom_violin(data = result, mapping = aes(x = n, y = mean, col = estimator, fill = estimator), alpha = 0.8)
meanPlot
# Violin plots of the scale estimates per sample size and estimator.
sdPlot <- ggplot() + theme_bw() +
geom_violin(data = result, mapping = aes(x = n, y = sd, col = estimator, fill = estimator), alpha = 0.8)
sdPlot
| /src/estimators/convergence_speed.R | no_license | mvaudel/utils | R | false | false | 1,096 | r |
library(ggplot2)

# Compare the convergence of the classical (mean/sd) and robust
# (median/quantile-based) location and scale estimators when every sample is
# contaminated with 10 extreme outliers of value 10.
# Population: 10 million standard-normal draws.
values <- rnorm(10000000)

sample_sizes <- 3^(1:10)
n_reps <- 1000
total <- 2 * length(sample_sizes) * n_reps

# Preallocate result vectors instead of growing them inside the loop
# (the original grew them element-wise, which copies on every append).
ns <- integer(total)
is <- integer(total)
meanEstimate <- numeric(total)
sdEstimate <- numeric(total)
estimator <- character(total)

k <- 1
for (n in sample_sizes) {
for (i in seq_len(n_reps)) {
# Each sample gets 10 fixed outliers appended.
sampled <- c(sample(values, n, replace = TRUE), rep(10, 10))

# Classical estimators: sensitive to the outliers.
ns[k] <- n
is[k] <- i
meanEstimate[k] <- mean(sampled)
sdEstimate[k] <- sd(sampled)
estimator[k] <- "Normal"
k <- k + 1

# Robust estimators: median, and half the quantile range between pnorm(-1)
# and pnorm(1), which equals one sd under normality.
ns[k] <- n
is[k] <- i
meanEstimate[k] <- median(sampled)
sdEstimate[k] <- (quantile(sampled, probs = pnorm(1)) - quantile(sampled, probs = pnorm(-1)))/2
estimator[k] <- "Robust"
k <- k + 1
}
}

# Long-format results table; n and i as factors for plotting.
result <- data.frame(
  n = as.factor(ns),
  i = as.factor(is),
  mean = meanEstimate,
  sd = sdEstimate,
  estimator = estimator,
  stringsAsFactors = FALSE
)

# Distribution of the location estimates per sample size and estimator.
meanPlot <- ggplot() + theme_bw() +
  geom_violin(data = result, mapping = aes(x = n, y = mean, col = estimator, fill = estimator), alpha = 0.8)
meanPlot

# Distribution of the scale estimates per sample size and estimator.
sdPlot <- ggplot() + theme_bw() +
  geom_violin(data = result, mapping = aes(x = n, y = sd, col = estimator, fill = estimator), alpha = 0.8)
sdPlot
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/predict.tsfm.r
\name{predict.tsfm}
\alias{predict.tsfm}
\title{Predicts asset returns based on a fitted time series factor model}
\usage{
\method{predict}{tsfm}(object, newdata = NULL, ...)
}
\arguments{
\item{object}{an object of class \code{tsfm} produced by \code{fitTsfm}.}
\item{newdata}{a vector, matrix, data.frame, xts, timeSeries or zoo object
containing the variables with which to predict.}
\item{...}{optional arguments passed to \code{predict.lm} or
\code{\link[robust]{predict.lmRob}}, such as \code{se.fit}, or, to
\code{\link[lars]{predict.lars}} such as \code{mode}.}
}
\value{
\code{predict.tsfm} produces a matrix of return predictions, if all assets
have equal history. If not, a list of predicted return vectors of unequal
length is produced.
}
\description{
S3 \code{predict} method for object of class \code{tsfm}. It
calls the \code{predict} method for fitted objects of class \code{lm},
\code{lmRob} or \code{lars} as appropriate.
}
\examples{
# load the example data set shipped with the package
data(managers)
# fit the factor model with LS
fit <- fitTsfm(asset.names=colnames(managers[,(1:6)]),
factor.names=c("EDHEC.LS.EQ","SP500.TR"), data=managers)
pred.fit <- predict(fit)
newdata <- data.frame("EDHEC.LS.EQ"=rnorm(n=120), "SP500.TR"=rnorm(n=120))
rownames(newdata) <- rownames(fit$data)
pred.fit2 <- predict(fit, newdata, interval="confidence")
}
\seealso{
\code{\link{fitTsfm}}, \code{\link{summary.tsfm}}
}
\author{
Yi-An Chen and Sangeetha Srinivasan
}
| /man/predict.tsfm.Rd | no_license | AvinashAcharya/factorAnalytics | R | false | true | 1,569 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/predict.tsfm.r
\name{predict.tsfm}
\alias{predict.tsfm}
\title{Predicts asset returns based on a fitted time series factor model}
\usage{
\method{predict}{tsfm}(object, newdata = NULL, ...)
}
\arguments{
\item{object}{an object of class \code{tsfm} produced by \code{fitTsfm}.}
\item{newdata}{a vector, matrix, data.frame, xts, timeSeries or zoo object
containing the variables with which to predict.}
\item{...}{optional arguments passed to \code{predict.lm} or
\code{\link[robust]{predict.lmRob}}, such as \code{se.fit}, or, to
\code{\link[lars]{predict.lars}} such as \code{mode}.}
}
\value{
\code{predict.tsfm} produces a matrix of return predictions, if all assets
have equal history. If not, a list of predicted return vectors of unequal
length is produced.
}
\description{
S3 \code{predict} method for object of class \code{tsfm}. It
calls the \code{predict} method for fitted objects of class \code{lm},
\code{lmRob} or \code{lars} as appropriate.
}
\examples{
# load the example data set shipped with the package
data(managers)
# fit the factor model with LS
fit <- fitTsfm(asset.names=colnames(managers[,(1:6)]),
factor.names=c("EDHEC.LS.EQ","SP500.TR"), data=managers)
pred.fit <- predict(fit)
newdata <- data.frame("EDHEC.LS.EQ"=rnorm(n=120), "SP500.TR"=rnorm(n=120))
rownames(newdata) <- rownames(fit$data)
pred.fit2 <- predict(fit, newdata, interval="confidence")
}
\seealso{
\code{\link{fitTsfm}}, \code{\link{summary.tsfm}}
}
\author{
Yi-An Chen and Sangeetha Srinivasan
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{listnames_to_column}
\alias{listnames_to_column}
\title{Add ID column (list element names)}
\usage{
listnames_to_column(ls, colname = "sample")
}
\arguments{
\item{ls}{A named list of dataframes}
\item{colname}{The name of the column to add IDs to}
}
\description{
In a list of dataframes, add an ID column with the list name. Neat, if you want a dataframe instead of list object.
}
| /man/listnames_to_column.Rd | no_license | lmuenter/coulteR | R | false | true | 476 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{listnames_to_column}
\alias{listnames_to_column}
\title{Add ID column (list element names)}
\usage{
listnames_to_column(ls, colname = "sample")
}
\arguments{
\item{ls}{A named list of dataframes}
\item{colname}{The name of the column to add IDs to}
}
\description{
In a list of dataframes, add an ID column with the list name. Neat, if you want a dataframe instead of list object.
}
|
# Extract the TCGA barcode for a file from the UUID embedded in its name.
# The UUID is expected in the third dot-delimited field of the file name;
# a well-formed UUID has five hyphen-separated groups.
#
# @param filepath Character path to an exon quantification file.
# @return The TCGA barcode (entity_id) resolved via TCGAtranslateID.
.parseFileName <- function(filepath) {
    fileName <- basename(filepath)
    # FIX: the original computed strsplit(fileName, "\\.") twice and left the
    # first result in an unused local (splitFileName); split once.
    fileuuid <- sapply(strsplit(fileName, "\\."), "[", 3L)
    if (length(strsplit(fileuuid, "-")[[1]]) != 5L)
        stop("Inconsistent UUID in file name")
    TCGAtranslateID(fileuuid, type = "entity_id")
}
#' Read Exon level files and create a GRangesList
#'
#' This function serves to read exon-level expression data. It works for exon
#' quantification (raw counts and RPKM) and junction quantification
#' (raw counts) files paths and represent such data as a
#' \linkS4class{GRangesList}. The data can be downloaded
#' via the TCGA Legacy Archive. File name and structure requirements are as
#' follows: The third position delimited by dots (".") in the file name should
#' be the universally unique identifier (UUID). The column containing the
#' ranged information is labeled "exon."
#'
#' @param filepaths A vector of valid exon data file paths
#' @param sampleNames A vector of TCGA barcodes to be applied if not present in
#' the data
#' @param rangeCol The name of the column in the input files containing the
#' ranged information (default "exon")
#' @return A \linkS4class{GRangesList} object
#'
#' @importFrom GenomicRanges GRanges GRangesList
#'
#' @author Marcel Ramos
#'
#' @examples
#'
#' pkgDir <- system.file("extdata", package = "TCGAutils", mustWork = TRUE)
#' exonFile <- list.files(pkgDir, pattern = "cation.txt$", full.names = TRUE)
#' makeGRangesListFromExonFiles(exonFile)
#'
#' @export makeGRangesListFromExonFiles
makeGRangesListFromExonFiles <-
    function(filepaths, sampleNames = NULL, rangeCol = "exon") {
    # read each tab-delimited file into a data frame
    btData <- lapply(filepaths, function(file) {
        read_delim(file, delim = "\t")
    })
    if (!is.null(sampleNames)) {
        # FIX: the original message claimed names were "obtained from file
        # names" even in this branch where the user supplied them.
        if (length(filepaths) != length(sampleNames))
            stop("Length of 'sampleNames' does not match length of 'filepaths'")
    } else {
        # derive barcodes from the UUID embedded in each file name
        sampleNames <- unlist(lapply(filepaths, .parseFileName))
        if (!length(sampleNames))
            sampleNames <- NULL
    }
    names(btData) <- sampleNames
    # convert each table to GRanges, keeping non-range columns as metadata
    GRangesList(
        lapply(btData, function(range) {
            newGRanges <- GRanges(as.character(range[[rangeCol]]))
            mcols(newGRanges) <- range[, names(range) != rangeCol]
            newGRanges
        })
    )
}
| /R/makeGRangesListFromExonFiles.R | no_license | mutual-ai/TCGAutils | R | false | false | 2,219 | r | .parseFileName <- function(filepath) {
fileName <- basename(filepath)
splitFileName <- strsplit(fileName, "\\.")
fileuuid <- sapply(strsplit(fileName, "\\."), "[", 3L)
if (length(strsplit(fileuuid, "-")[[1]]) != 5L)
stop("Inconsistent UUID in file name")
TCGAtranslateID(fileuuid, type = "entity_id")
}
#' Read Exon level files and create a GRangesList
#'
#' This function serves to read exon-level expression data. It works for exon
#' quantification (raw counts and RPKM) and junction quantification
#' (raw counts) files paths and represent such data as a
#' \linkS4class{GRangesList}. The data can be downloaded
#' via the TCGA Legacy Archive. File name and structure requirements are as
#' follows: The third position delimited by dots (".") in the file name should
#' be the universally unique identifier (UUID). The column containing the
#' ranged information is labeled "exon."
#'
#' @param filepaths A vector of valid exon data file paths
#' @param sampleNames A vector of TCGA barcodes to be applied if not present in
#' the data
#' @param rangeCol The name of the column in the input files containing the
#' ranged information (default "exon")
#' @return A \linkS4class{GRangesList} object
#'
#' @importFrom GenomicRanges GRanges GRangesList
#'
#' @author Marcel Ramos
#'
#' @examples
#'
#' pkgDir <- system.file("extdata", package = "TCGAutils", mustWork = TRUE)
#' exonFile <- list.files(pkgDir, pattern = "cation.txt$", full.names = TRUE)
#' makeGRangesListFromExonFiles(exonFile)
#'
#' @export makeGRangesListFromExonFiles
makeGRangesListFromExonFiles <-
    function(filepaths, sampleNames = NULL, rangeCol = "exon") {
    # read each tab-delimited file into a data frame
    btData <- lapply(filepaths, function(file) {
        read_delim(file, delim = "\t")
    })
    if (!is.null(sampleNames)) {
        # FIX: the original message claimed names were "obtained from file
        # names" even in this branch where the user supplied them.
        if (length(filepaths) != length(sampleNames))
            stop("Length of 'sampleNames' does not match length of 'filepaths'")
    } else {
        # derive barcodes from the UUID embedded in each file name
        sampleNames <- unlist(lapply(filepaths, .parseFileName))
        if (!length(sampleNames))
            sampleNames <- NULL
    }
    names(btData) <- sampleNames
    # convert each table to GRanges, keeping non-range columns as metadata
    GRangesList(
        lapply(btData, function(range) {
            newGRanges <- GRanges(as.character(range[[rangeCol]]))
            mcols(newGRanges) <- range[, names(range) != rangeCol]
            newGRanges
        })
    )
}
|
#' Estimate the one-inflated positive Poisson mixture model (OIPPMM)
#'
#' @param y A vector of positive integers.
#' @param l lambda, a vector of starting values for the positive Poisson
#' components. If \code{NULL}, starting values will be found via grid search,
#' and mixture models with successively more components will be estimated
#' until the non-parametric MLE is found, or \code{maxk} is reached.
#' @param p pi, a vector of starting values for the mixture weights.
#' \code{l} and \code{p} must be initialized together, or not at all. If
#' \code{NULL}, grid search and estimation for successive numbers of mixture
#' components will commence until the non-parametric MLE is found, or \code{maxk}
#' is reached.
#' @param K the number of components to be estimated in the OIPPMM. If \code{NULL},
#' mixture models with successively more components will be estimated until the
#' non-parametric MLE is found, or \code{maxk} is reached.
#' @param tol Tolerance of the EM algorithm. The EM algorithm proceeds until the
#' proportional difference between all successive parameter estimates for
#' lambda and pi are less than \code{tol}. Default is 0.001\%.
#' @param maxLikmethod Maximization method passed to \pkg{maxLik}. Default is
#' Newton-Raphson.
#' @param maxiters Maximum number of EM iterations.
#' @param minlam The minimum value that a mixture component can have before it
#' is considered to be a redundant one-inflating component. If any value in
#' lambda is less than \code{minlam}, the algorithm stops and the
#' non-parametric MLE is declared to be found. Only relevant if \code{l} and
#' \code{p} are \code{NULL}, so that \code{inflmix} is searching for the
#' non-parametric MLE.
#' @param reduntol After the EM algorithm converges, the estimation process will
#' begin again (including a grid search for new starting values), unless any
#' two components in lambda are within \code{reduntol} of each other.
#' The non-parametric MLE is then declared to be found. Only relevant if
#' \code{l} and \code{p} are \code{NULL}.
#' @param maxk The maximum number of positive Poisson components to be attempted
#' in the search for the non-parametric MLE.
#' @return If \code{inflmix} is called with starting values for \code{l} and
#' \code{p}, returns a list containing:
#' \tabular{ll}{
#' \code{termreas} \tab the reason that the EM algorithm terminated (either
#' convergence or iteration limit) \cr
#' \code{iterations} \tab the number of iterations until convergence \cr
#' \code{lambda} \tab the estimated values for the positive Poisson parameters \cr
#' \code{pi} \tab the estimated values for the component weights \cr
#' \code{logl} \tab the value of the log-likelihood function evaluated at the
#' parameter estimates for lambda and pi \cr
#' \code{n} \tab the sample size, the length of the vector \code{y} \cr
#' \code{predicted} \tab the predicted counts obtained by evaluting the
#' probability mass function of the OIPPMM model at the parameter estimates for
#' lambda and pi, and for \eqn{y = 1,\dots,max(y)} \cr
#' \code{chisq} \tab the Pearson chi-square distance statistic obtained by
#' comparing the actual and predicted counts \cr
#' \code{HTn0} \tab the Horvitz-Thompson estimator for the number of missing
#' zeros \cr
#' }
#' If \code{inflmix} is called without starting values for \code{l} and
#' \code{p} (\code{l=NULL} and \code{p=NULL}), then \code{inflmix} returns an
#' object of class 'inflmixNPMLE', a list containing each of the above objects,
#' for each estimated OIPPMM model with successively more mixture components,
#' in the search for the non-parametric MLE. An additional object is also provided:
#' \code{termreasNPMLE} which documents the reason for the termination of the search
#' for the NPMLE (either NPMLE found, or \code{maxk} reached).
#' @seealso \code{\link{rinflmix}} and \code{\link{rinflmixN}} for the generation of
#' random numbers from the OIPPMM.
#' @examples
#' # Estimate several OIPPMMs with increasing number of components, until adding an
#' # additional component yields no improvement in the log-likelihood.
#' zz <- inflmix(1:20)
#' # The custom print method displays results in table
#' zz
#' # Provide starting values instead of searching for the NPMLE
#' inflmix(1:20, l=c(1, 4), p=c(.4, .4))
#' # Fix the number of components, without providing starting values
#' inflmix(1:20, K = 2)
#' @import stats
#' @import utils
#' @export
inflmix <- function(y, l=NULL, p=NULL, K=NULL, tol=.00001, maxLikmethod="nr",
                    maxiters=1e4, minlam=0.01, reduntol=0.05, maxk = 4) {

  ## ---- input validation ------------------------------------------------
  if(!is.integer(y) && !is.numeric(y)) {
    stop("y must be of type integer or numeric")}
  # BUG FIX: the original `is.numeric(y) && !floor(y)==y` fed a logical
  # *vector* (`!(floor(y) == y)`) to the scalar `&&`, which is an error in
  # R >= 4.3 and silently tested only the first element before that.
  if(is.numeric(y) && any(floor(y) != y)) {stop("y must contain only integers")}
  if(any(y < 1)) {stop("y must be positive")}
  if(is.matrix(y) && ncol(y) > 1) {stop("y must be one-dimensional")}
  if(length(l) != length(p)) {stop("l and p must have the same dimension")}
  if(!is.null(l) && is.null(p) || is.null(l) && !is.null(p)) {
    stop("l and p must be initialized together, or not at all")}

  y <- as.integer(y)

  # Full OIPPMM log-likelihood at (l, p); the one-inflation component gets
  # weight 1 - sum(p) and contributes only where y == 1.
  oippmmlogl <- function(y, l, p) {
    pmfpp <- function(y,lk) {
      dpois(y, lk) / (1 - dpois(0, lk))
    }
    bigk <- length(l)
    sum(log(rowSums(sapply(1:bigk, function(j) p[j] * pmfpp(y, l[j]))) + (y == 1) * (1 - sum(p))))
  }

  # Grid search over lambda combinations and weight grids for starting
  # values that maximize the OIPPMM log-likelihood.
  inflmgrid <- function(y, bigk, nlam = 10, npi = 3) {
    lam <- seq(0.1, (max(y) - 2), length.out = nlam)
    lams <- t(combn(lam, bigk))
    pis <- expand.grid(replicate(bigk + 1, 1:npi, simplify = FALSE))
    pis <- pis / rowSums(pis)
    pis <- as.matrix(pis[1:(nrow(pis) - 1), 1:bigk])
    loglmat <- sapply(1:nrow(lams), function(q) sapply(1:nrow(pis), function(r) {
      oippmmlogl(y, lams[q, ], pis[r, ])}))
    coords <- which(loglmat == max(loglmat), arr.ind = TRUE)
    list(l = lams[coords[1, 2],], p = pis[coords[1, 1],])
  }

  # PMF of the positive (zero-truncated) Poisson distribution
  pmfpp <- function(y,lk) {
    dpois(y, lk) / (1 - dpois(0, lk))
  }

  # EM algorithm for a fixed number of components K = length(l), started at
  # (l, p). Returns estimates, fit statistics and predicted counts.
  estimate <- function(l, p) {
    # Partial log-likelihood in lambda only; terms constant w.r.t. lambda
    # are omitted since they do not affect the M-step optimum.
    logl <- function(l) {
      sum(sapply(1:bigk, function(j) {sum(w[, j] * (y * log(l[j]) - log(exp(l[j]) - 1)))}))
    }
    # E-step: posterior membership weights given current (p, l)
    getweights <- function(p, l) {
      denom <- rowSums(sapply(1:bigk, function(j) {p[j] * pmfpp(y, l[j])})) + (1 - sum(p)) * (y == 1)
      sapply(1:bigk, function(j) {p[j] * pmfpp(y, l[j]) / denom})
    }
    z <- list()
    iters <- 0L
    repeat {
      iters <- iters + 1L
      if(iters > maxiters) {
        z$termreas <- "Iteration limit reached"
        z$iterations <- iters - 1L  # FIX: record completed iterations here too
        break
      }
      w <- getweights(p, l)
      # M-step: maximize the partial log-likelihood over the lambdas
      lhat <- maxLik::maxLik(logl, method=maxLikmethod, start=l)$estimate
      # Update the mixture weights from the refreshed posteriors
      phat <- colMeans(getweights(p, lhat))
      # Convergence: all relative parameter changes below tolerance
      if(all(abs(c((phat - p) / p, (lhat - l)/ l)) < tol)) {
        z$termreas <- "Convergence reached, within tolerance"
        z$iterations <- iters
        break
      }
      # Update all parameter estimates and continue
      l <- lhat
      p <- phat
    }
    z$lambda <- lhat
    z$pi <- phat
    z$logl <- oippmmlogl(y, lhat, phat)
    z$n <- length(y)
    # Expected counts at y = 1, ..., max(y) under the fitted model
    z$predicted <- z$n * sapply(1:max(y), function(i) {
      sum(sapply(1:bigk, function(j) {p[j] * pmfpp(i, l[j])})) + (1 - sum(p)) * (i == 1)})
    z$chisq <- sum(((tabulate(y) - z$predicted) ^ 2) / z$predicted)
    # Horvitz-Thompson estimate of the number of unobserved zeros
    z$HTn0 <- sum(sapply(1:bigk, function(j) {
      (p[j] / sum(p)) * (z$n / (1 - exp(-l[j])) - z$n)}))
    z
  }

  if(is.null(l) && is.null(K)) {
    # Search for the non-parametric MLE: fit models with 1, 2, ... components
    # until a redundant or degenerate component appears, or maxk is reached.
    bigk <- 1
    zz <- list()
    class(zz) <- "inflmixNPMLE"
    repeat {
      start <- inflmgrid(y, bigk)
      zz[[bigk]] <- estimate(start$l, start$p)
      names(zz)[bigk] <- paste("K =", bigk)
      # Two near-identical lambdas (within reduntol) or a lambda below minlam
      # signal a redundant component: the previous model is the NPMLE.
      if(bigk > 1 && any(abs(combn(zz[[bigk]]$lambda, 2)[1, ] - combn(zz[[bigk]]$lambda, 2)[2, ]) < reduntol) || any(zz[[bigk]]$lambda < minlam)) {
        zz$termreasNPMLE <- paste("NPMLE found: K =", (bigk - 1))
        zz$KNPMLE <- bigk - 1
        return(zz)
      }
      bigk <- bigk + 1
      if(bigk > maxk) {
        zz$termreasNPMLE <- "max K reached"
        zz$KNPMLE <- NULL
        return(zz)
      }
    }
  } else if(!is.null(K)) {
    # Fixed K, starting values found by grid search
    bigk <- K
    start <- inflmgrid(y, bigk)
    return(estimate(start$l, start$p))
  } else {
    # User-supplied starting values determine K
    bigk <- length(l)
    return(estimate(l, p))
  }
}
| /R/inflmix.R | no_license | rtgodwin/inflmix | R | false | false | 8,554 | r | #' Estimate the one-inflated positive Poisson mixture model (OIPPMM)
#'
#' @param y A vector of positive integers.
#' @param l lambda, a vector of starting values for the positive Poisson
#' components. If \code{NULL}, starting values will be found via grid search,
#' and mixture models with successively more components will be estimated
#' until the non-parametric MLE is found, or \code{maxk} is reached.
#' @param p pi, a vector of starting values for the mixture weights.
#' \code{l} and \code{p} must be initialized together, or not at all. If
#' \code{NULL}, grid search and estimation for successive numbers of mixture
#' components will commence until the non-parametric MLE is found, or \code{maxk}
#' is reached.
#' @param K the number of components to be estimated in the OIPPMM. If \code{NULL},
#' mixture models with successively more components will be estimated until the
#' non-parametric MLE is found, or \code{maxk} is reached.
#' @param tol Tolerance of the EM algorithm. The EM algorithm proceeds until the
#' proportional difference between all successive parameter estimates for
#' lambda and pi are less than \code{tol}. Default is 0.001\%.
#' @param maxLikmethod Maximization method passed to \pkg{maxLik}. Default is
#' Newton-Raphson.
#' @param maxiters Maximum number of EM iterations.
#' @param minlam The minimum value that a mixture component can have before it
#' is considered to be a redundant one-inflating component. If any value in
#' lambda is less than \code{minlam}, the algorithm stops and the
#' non-parametric MLE is declared to be found. Only relevant if \code{l} and
#' \code{p} are \code{NULL}, so that \code{inflmix} is searching for the
#' non-parametric MLE.
#' @param reduntol After the EM algorithm converges, the estimation process will
#' begin again (including a grid search for new starting values), unless any
#' two components in lambda are within \code{reduntol} of each other.
#' The non-parametric MLE is then declared to be found. Only relevant if
#' \code{l} and \code{p} are \code{NULL}.
#' @param maxk The maximum number of positive Poisson components to be attempted
#' in the search for the non-parametric MLE.
#' @return If \code{inflmix} is called with starting values for \code{l} and
#' \code{p}, returns a list containing:
#' \tabular{ll}{
#' \code{termreas} \tab the reason that the EM algorithm terminated (either
#' convergence or iteration limit) \cr
#' \code{iterations} \tab the number of iterations until convergence \cr
#' \code{lambda} \tab the estimated values for the positive Poisson parameters \cr
#' \code{pi} \tab the estimated values for the component weights \cr
#' \code{logl} \tab the value of the log-likelihood function evaluated at the
#' parameter estimates for lambda and pi \cr
#' \code{n} \tab the sample size, the length of the vector \code{y} \cr
#' \code{predicted} \tab the predicted counts obtained by evaluting the
#' probability mass function of the OIPPMM model at the parameter estimates for
#' lambda and pi, and for \eqn{y = 1,\dots,max(y)} \cr
#' \code{chisq} \tab the Pearson chi-square distance statistic obtained by
#' comparing the actual and predicted counts \cr
#' \code{HTn0} \tab the Horvitz-Thompson estimator for the number of missing
#' zeros \cr
#' }
#' If \code{inflmix} is called without starting values for \code{l} and
#' \code{p} (\code{l=NULL} and \code{p=NULL}), then \code{inflmix} returns an
#' object of class 'inflmixNPMLE', a list containing each of the above objects,
#' for each estimated OIPPMM model with successively more mixture components,
#' in the search for the non-parametric MLE. An additional object is also provided:
#' \code{termreasNPMLE} which documents the reason for the termination of the search
#' for the NPMLE (either NPMLE found, or \code{maxk} reached).
#' @seealso \code{\link{rinflmix}} and \code{\link{rinflmixN}} for the generation of
#' random numbers from the OIPPMM.
#' @examples
#' # Estimate several OIPPMMs with increasing number of components, until adding an
#' # additional component yields no improvement in the log-likelihood.
#' zz <- inflmix(1:20)
#' # The custom print method displays results in table
#' zz
#' # Provide starting values instead of searching for the NPMLE
#' inflmix(1:20, l=c(1, 4), p=c(.4, .4))
#' # Fix the number of components, without providing starting values
#' inflmix(1:20, K = 2)
#' @import stats
#' @import utils
#' @export
inflmix <- function(y, l=NULL, p=NULL, K=NULL, tol=.00001, maxLikmethod="nr",
                    maxiters=1e4, minlam=0.01, reduntol=0.05, maxk = 4) {

  ## ---- input validation ------------------------------------------------
  if(!is.integer(y) && !is.numeric(y)) {
    stop("y must be of type integer or numeric")}
  # BUG FIX: the original `is.numeric(y) && !floor(y)==y` fed a logical
  # *vector* (`!(floor(y) == y)`) to the scalar `&&`, which is an error in
  # R >= 4.3 and silently tested only the first element before that.
  if(is.numeric(y) && any(floor(y) != y)) {stop("y must contain only integers")}
  if(any(y < 1)) {stop("y must be positive")}
  if(is.matrix(y) && ncol(y) > 1) {stop("y must be one-dimensional")}
  if(length(l) != length(p)) {stop("l and p must have the same dimension")}
  if(!is.null(l) && is.null(p) || is.null(l) && !is.null(p)) {
    stop("l and p must be initialized together, or not at all")}

  y <- as.integer(y)

  # Full OIPPMM log-likelihood at (l, p); the one-inflation component gets
  # weight 1 - sum(p) and contributes only where y == 1.
  oippmmlogl <- function(y, l, p) {
    pmfpp <- function(y,lk) {
      dpois(y, lk) / (1 - dpois(0, lk))
    }
    bigk <- length(l)
    sum(log(rowSums(sapply(1:bigk, function(j) p[j] * pmfpp(y, l[j]))) + (y == 1) * (1 - sum(p))))
  }

  # Grid search over lambda combinations and weight grids for starting
  # values that maximize the OIPPMM log-likelihood.
  inflmgrid <- function(y, bigk, nlam = 10, npi = 3) {
    lam <- seq(0.1, (max(y) - 2), length.out = nlam)
    lams <- t(combn(lam, bigk))
    pis <- expand.grid(replicate(bigk + 1, 1:npi, simplify = FALSE))
    pis <- pis / rowSums(pis)
    pis <- as.matrix(pis[1:(nrow(pis) - 1), 1:bigk])
    loglmat <- sapply(1:nrow(lams), function(q) sapply(1:nrow(pis), function(r) {
      oippmmlogl(y, lams[q, ], pis[r, ])}))
    coords <- which(loglmat == max(loglmat), arr.ind = TRUE)
    list(l = lams[coords[1, 2],], p = pis[coords[1, 1],])
  }

  # PMF of the positive (zero-truncated) Poisson distribution
  pmfpp <- function(y,lk) {
    dpois(y, lk) / (1 - dpois(0, lk))
  }

  # EM algorithm for a fixed number of components K = length(l), started at
  # (l, p). Returns estimates, fit statistics and predicted counts.
  estimate <- function(l, p) {
    # Partial log-likelihood in lambda only; terms constant w.r.t. lambda
    # are omitted since they do not affect the M-step optimum.
    logl <- function(l) {
      sum(sapply(1:bigk, function(j) {sum(w[, j] * (y * log(l[j]) - log(exp(l[j]) - 1)))}))
    }
    # E-step: posterior membership weights given current (p, l)
    getweights <- function(p, l) {
      denom <- rowSums(sapply(1:bigk, function(j) {p[j] * pmfpp(y, l[j])})) + (1 - sum(p)) * (y == 1)
      sapply(1:bigk, function(j) {p[j] * pmfpp(y, l[j]) / denom})
    }
    z <- list()
    iters <- 0L
    repeat {
      iters <- iters + 1L
      if(iters > maxiters) {
        z$termreas <- "Iteration limit reached"
        z$iterations <- iters - 1L  # FIX: record completed iterations here too
        break
      }
      w <- getweights(p, l)
      # M-step: maximize the partial log-likelihood over the lambdas
      lhat <- maxLik::maxLik(logl, method=maxLikmethod, start=l)$estimate
      # Update the mixture weights from the refreshed posteriors
      phat <- colMeans(getweights(p, lhat))
      # Convergence: all relative parameter changes below tolerance
      if(all(abs(c((phat - p) / p, (lhat - l)/ l)) < tol)) {
        z$termreas <- "Convergence reached, within tolerance"
        z$iterations <- iters
        break
      }
      # Update all parameter estimates and continue
      l <- lhat
      p <- phat
    }
    z$lambda <- lhat
    z$pi <- phat
    z$logl <- oippmmlogl(y, lhat, phat)
    z$n <- length(y)
    # Expected counts at y = 1, ..., max(y) under the fitted model
    z$predicted <- z$n * sapply(1:max(y), function(i) {
      sum(sapply(1:bigk, function(j) {p[j] * pmfpp(i, l[j])})) + (1 - sum(p)) * (i == 1)})
    z$chisq <- sum(((tabulate(y) - z$predicted) ^ 2) / z$predicted)
    # Horvitz-Thompson estimate of the number of unobserved zeros
    z$HTn0 <- sum(sapply(1:bigk, function(j) {
      (p[j] / sum(p)) * (z$n / (1 - exp(-l[j])) - z$n)}))
    z
  }

  if(is.null(l) && is.null(K)) {
    # Search for the non-parametric MLE: fit models with 1, 2, ... components
    # until a redundant or degenerate component appears, or maxk is reached.
    bigk <- 1
    zz <- list()
    class(zz) <- "inflmixNPMLE"
    repeat {
      start <- inflmgrid(y, bigk)
      zz[[bigk]] <- estimate(start$l, start$p)
      names(zz)[bigk] <- paste("K =", bigk)
      # Two near-identical lambdas (within reduntol) or a lambda below minlam
      # signal a redundant component: the previous model is the NPMLE.
      if(bigk > 1 && any(abs(combn(zz[[bigk]]$lambda, 2)[1, ] - combn(zz[[bigk]]$lambda, 2)[2, ]) < reduntol) || any(zz[[bigk]]$lambda < minlam)) {
        zz$termreasNPMLE <- paste("NPMLE found: K =", (bigk - 1))
        zz$KNPMLE <- bigk - 1
        return(zz)
      }
      bigk <- bigk + 1
      if(bigk > maxk) {
        zz$termreasNPMLE <- "max K reached"
        zz$KNPMLE <- NULL
        return(zz)
      }
    }
  } else if(!is.null(K)) {
    # Fixed K, starting values found by grid search
    bigk <- K
    start <- inflmgrid(y, bigk)
    return(estimate(start$l, start$p))
  } else {
    # User-supplied starting values determine K
    bigk <- length(l)
    return(estimate(l, p))
  }
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tibble.R
\docType{package}
\name{tibble-package}
\alias{tibble-package}
\title{Simple Data Frames}
\description{
Provides a 'tbl_df' class that offers better checking and
printing capabilities than traditional data frames.
}
\section{Getting started}{
See \code{\link{tbl_df}} for an introduction,
\code{\link{data_frame}} and \code{\link{frame_data}} for construction,
\code{\link{as_data_frame}} for coercion,
and \code{\link{print.tbl_df}} and \code{\link{glimpse}} for display.
}
\section{Package options}{
Display options for \code{tbl_df}, used by \code{\link{trunc_mat}} and
(indirectly) by \code{\link{print.tbl_df}}.
\describe{
\item{\code{tibble.print_max}}{Row number threshold: Maximum number of rows
printed. Set to \code{Inf} to always print all rows. Default: 20.}
\item{\code{tibble.print_min}}{Number of rows printed if row number
threshold is exceeded. Default: 10.}
\item{\code{tibble.width}}{Output width. Default: \code{NULL} (use
\code{width} option).}
}
}
| /man/tibble-package.Rd | no_license | bhive01/tibble | R | false | true | 1,068 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tibble.R
\docType{package}
\name{tibble-package}
\alias{tibble-package}
\title{Simple Data Frames}
\description{
Provides a 'tbl_df' class that offers better checking and
printing capabilities than traditional data frames.
}
\section{Getting started}{
See \code{\link{tbl_df}} for an introduction,
\code{\link{data_frame}} and \code{\link{frame_data}} for construction,
\code{\link{as_data_frame}} for coercion,
and \code{\link{print.tbl_df}} and \code{\link{glimpse}} for display.
}
\section{Package options}{
Display options for \code{tbl_df}, used by \code{\link{trunc_mat}} and
(indirectly) by \code{\link{print.tbl_df}}.
\describe{
\item{\code{tibble.print_max}}{Row number threshold: Maximum number of rows
printed. Set to \code{Inf} to always print all rows. Default: 20.}
\item{\code{tibble.print_min}}{Number of rows printed if row number
threshold is exceeded. Default: 10.}
\item{\code{tibble.width}}{Output width. Default: \code{NULL} (use
\code{width} option).}
}
}
|
# Render a single-select picker of the tissues available in the chosen dataset.
choose_TSA_tissue <- function(
  input
) {
  renderUI({
    tissue_names <- names(DATASETS_light[[input$TSA_DATASET_CHOICE]])
    pickerInput(
      inputId = "TSA_TISSUE_CHOICE",
      label = "Tissue",
      choices = sort(tissue_names),
      options = list(`actions-box` = TRUE),
      multiple = FALSE
    )
  })
}
# Render a multi-select picker listing every emitter cell type present in
# the filtered CCI table of the selected dataset/tissue; all pre-selected.
choose_TSA_emitter <- function(
  input
) {
  renderUI({
    req(input$TSA_DATASET_CHOICE, input$TSA_TISSUE_CHOICE)
    dataset <- DATASETS_light[[input$TSA_DATASET_CHOICE]][[input$TSA_TISSUE_CHOICE]]
    req(dataset)
    cell_types <- scDiffCom:::get_cci_table_filtered(dataset)[["Emitter Cell Type"]]
    cell_types <- sort(unique(cell_types))
    pickerInput(
      inputId = "TSA_EMITTER_CHOICE",
      label = "Emitter Cell Type",
      choices = cell_types,
      selected = cell_types,
      options = list(`actions-box` = TRUE),
      multiple = TRUE
    )
  })
}
# Render a multi-select picker listing every receiver cell type present in
# the filtered CCI table of the selected dataset/tissue; all pre-selected.
choose_TSA_receiver <- function(
  input
) {
  renderUI({
    req(input$TSA_DATASET_CHOICE, input$TSA_TISSUE_CHOICE)
    dataset <- DATASETS_light[[input$TSA_DATASET_CHOICE]][[input$TSA_TISSUE_CHOICE]]
    req(dataset)
    cell_types <- scDiffCom:::get_cci_table_filtered(dataset)[["Receiver Cell Type"]]
    cell_types <- sort(unique(cell_types))
    pickerInput(
      inputId = "TSA_RECEIVER_CHOICE",
      label = "Receiver Cell Type",
      choices = cell_types,
      selected = cell_types,
      options = list(`actions-box` = TRUE),
      multiple = TRUE
    )
  })
}
# Render the LOG2FC threshold slider, bounded above by the largest LOG2FC
# in the filtered CCI table (rounded up to an integer).
get_TSA_slider_log2fc <- function(
  input
) {
  renderUI({
    req(input$TSA_DATASET_CHOICE, input$TSA_TISSUE_CHOICE)
    dataset <- DATASETS_light[[input$TSA_DATASET_CHOICE]][[input$TSA_TISSUE_CHOICE]]
    req(dataset)
    upper_bound <- ceiling(max(scDiffCom:::get_cci_table_filtered(dataset)[["LOG2FC"]]))
    sliderInput(
      inputId = "TSA_SLIDER_LOG2FC",
      label = "LOG2FC Threshold",
      min = 0,
      max = upper_bound,
      value = 0,
      step = 0.01
    )
  })
}
# Render a single-select picker of the ORA categories available for the
# selected dataset/tissue.
choose_TSA_ORA_category <- function(
  input
) {
  renderUI({
    req(input$TSA_DATASET_CHOICE, input$TSA_TISSUE_CHOICE)
    dataset <- DATASETS_light[[input$TSA_DATASET_CHOICE]][[input$TSA_TISSUE_CHOICE]]
    req(dataset)
    category_names <- names(scDiffCom:::get_ora_tables(dataset))
    pickerInput(
      inputId = "TSA_ORA_CATEGORY_CHOICE",
      label = "Category",
      choices = category_names,
      options = list(`actions-box` = TRUE),
      multiple = FALSE
    )
  })
}
# Render the odds-ratio threshold slider for the ORA view; the upper bound
# is the largest odds ratio for the selected direction, rounded up.
get_TSA_ORA_slider_or <- function(
  input
) {
  renderUI({
    req(input$TSA_DATASET_CHOICE, input$TSA_TISSUE_CHOICE, input$TSA_ORA_CATEGORY_CHOICE, input$TSA_ORA_TYPE_CHOICE)
    dataset <- DATASETS_light[[input$TSA_DATASET_CHOICE]][[input$TSA_TISSUE_CHOICE]]
    req(dataset)
    ora_table <- scDiffCom:::get_ora_tables(dataset)[[input$TSA_ORA_CATEGORY_CHOICE]]
    req(ora_table)
    # dispatch on the regulation direction to the matching odds-ratio column
    or_column <- switch(
      input$TSA_ORA_TYPE_CHOICE,
      "Up" = "Odds Ratio Up",
      "Down" = "Odds Ratio Down",
      "Stable" = "Odds Ratio Stable"
    )
    sliderInput(
      inputId = "TSA_ORA_SLIDER_OR",
      label = "Odds Ratio Threshold",
      min = 1,
      max = ceiling(max(ora_table[[or_column]])),
      value = 1,
      step = 0.01
    )
  })
}
# Render the filtered cell-cell interaction (CCI) table for the selected
# dataset, tissue, emitter/receiver cell types and significance thresholds.
# NOTE(review): relies on scDiffCom *internal* accessors (`:::`); these can
# break on package updates -- confirm no exported equivalent exists.
get_TSA_interaction_table <- function(
  input
) {
  DT::renderDataTable({
    # wait until every relevant UI control has a value
    req(input$TSA_DATASET_CHOICE, input$TSA_TISSUE_CHOICE, input$TSA_EMITTER_CHOICE, input$TSA_RECEIVER_CHOICE,
        input$TSA_SLIDER_PVALUE, input$TSA_SLIDER_LOG2FC)
    obj <- DATASETS_light[[input$TSA_DATASET_CHOICE]][[input$TSA_TISSUE_CHOICE]]
    req(obj)
    dt <- scDiffCom:::get_cci_table_filtered(obj)
    req(dt)
    # data.table filter: keep CCIs matching the selected cell types and
    # passing both the adjusted p-value and |log2 fold-change| cutoffs
    dt <- dt[
      `Emitter Cell Type` %in% input$TSA_EMITTER_CHOICE &
        `Receiver Cell Type` %in% input$TSA_RECEIVER_CHOICE &
        `Adj. P-Value` <= input$TSA_SLIDER_PVALUE &
        abs(LOG2FC) >= input$TSA_SLIDER_LOG2FC
    ]
    # strongest up-regulated interactions first, ties broken by significance
    setorder(
      dt,
      -LOG2FC,
      `Adj. P-Value`
    )
    show_DT(
      dt,
      cols_to_show_DATA,
      cols_numeric_DATA
    )
  })
}
# Render a volcano plot of the cell-cell interactions that pass the current
# UI filters (same filtering logic as get_TSA_interaction_table).
plot_TSA_VOLCANO <- function(
  input
) {
  renderPlot({
    # wait until every relevant UI control has a value
    req(input$TSA_DATASET_CHOICE, input$TSA_TISSUE_CHOICE, input$TSA_EMITTER_CHOICE, input$TSA_RECEIVER_CHOICE,
        input$TSA_SLIDER_PVALUE, input$TSA_SLIDER_LOG2FC)
    obj <- DATASETS_light[[input$TSA_DATASET_CHOICE]][[input$TSA_TISSUE_CHOICE]]
    req(obj)
    # NOTE(review): scDiffCom:::get_cci_table_filtered is a non-exported
    # accessor -- may break across package versions.
    dt <- scDiffCom:::get_cci_table_filtered(obj)
    req(dt)
    # data.table filter on cell types and significance thresholds
    dt <- dt[
      `Emitter Cell Type` %in% input$TSA_EMITTER_CHOICE &
        `Receiver Cell Type` %in% input$TSA_RECEIVER_CHOICE &
        `Adj. P-Value` <= input$TSA_SLIDER_PVALUE &
        abs(LOG2FC) >= input$TSA_SLIDER_LOG2FC
    ]
    show_volcano(dt)
  })
}
# Render the ORA results table for the chosen category and regulation
# direction, keeping only over-represented terms (odds ratio >= 1) that pass
# the UI p-value and odds-ratio cutoffs, sorted by adjusted p-value.
get_TSA_ORA_table <- function(
  input
) {
  DT::renderDataTable({
    req(input$TSA_DATASET_CHOICE, input$TSA_TISSUE_CHOICE, input$TSA_ORA_CATEGORY_CHOICE, input$TSA_ORA_TYPE_CHOICE)
    obj <- DATASETS_light[[input$TSA_DATASET_CHOICE]][[input$TSA_TISSUE_CHOICE]]
    req(obj)
    # NOTE(review): scDiffCom:::get_ora_tables is a non-exported accessor
    dt <- scDiffCom:::get_ora_tables(obj)[[input$TSA_ORA_CATEGORY_CHOICE]]
    req(dt)
    # each branch selects the direction-specific columns, applies both
    # cutoffs and sorts most-significant-first
    if(input$TSA_ORA_TYPE_CHOICE == "Up") {
      dt <- dt[`Odds Ratio Up` >= 1, c("Value", "Odds Ratio Up", "Adj. P-Value Up")]
      dt <- dt[`Adj. P-Value Up` <= input$TSA_ORA_SLIDER_PVALUE &
                 `Odds Ratio Up` >= input$TSA_ORA_SLIDER_OR]
      setorder(dt, `Adj. P-Value Up`)
      cols_numeric <- c("Odds Ratio Up", "Adj. P-Value Up")
    } else if(input$TSA_ORA_TYPE_CHOICE == "Down") {
      dt <- dt[`Odds Ratio Down` >= 1, c("Value", "Odds Ratio Down", "Adj. P-Value Down")]
      dt <- dt[`Adj. P-Value Down` <= input$TSA_ORA_SLIDER_PVALUE &
                 `Odds Ratio Down` >= input$TSA_ORA_SLIDER_OR]
      setorder(dt, `Adj. P-Value Down`)
      cols_numeric <- c("Odds Ratio Down", "Adj. P-Value Down")
    } else if(input$TSA_ORA_TYPE_CHOICE == "Stable") {
      dt <- dt[`Odds Ratio Stable` >= 1, c("Value", "Odds Ratio Stable", "Adj. P-Value Stable")]
      dt <- dt[`Adj. P-Value Stable` <= input$TSA_ORA_SLIDER_PVALUE &
                 `Odds Ratio Stable` >= input$TSA_ORA_SLIDER_OR]
      setorder(dt, `Adj. P-Value Stable`)
      cols_numeric <- c("Odds Ratio Stable", "Adj. P-Value Stable")
    }
    show_DT(
      dt,
      cols_to_show = colnames(dt),
      cols_numeric = cols_numeric
    )
  })
}
# Render the tissue-specific ORA score plot.
#
# Maps the selected regulation direction to the matching odds-ratio,
# adjusted p-value and ORA-score column names, then delegates the drawing
# to scDiffCom:::plot_ORA() with the current slider cutoffs.
#
# Improvement over the previous version: the if/else chain is replaced by a
# single switch() lookup, and an unexpected direction value now aborts the
# reactive via req() instead of erroring on undefined variables.
plot_TSA_ORA <- function(
  input
) {
  renderPlot({
    req(input$TSA_DATASET_CHOICE, input$TSA_TISSUE_CHOICE,
        input$TSA_ORA_CATEGORY_CHOICE, input$TSA_ORA_TYPE_CHOICE)
    obj <- DATASETS_light[[input$TSA_DATASET_CHOICE]][[input$TSA_TISSUE_CHOICE]]
    req(obj)
    # c(OR column, adjusted p-value column, ORA score column) per direction.
    cols <- switch(
      input$TSA_ORA_TYPE_CHOICE,
      "Up" = c("Odds Ratio Up", "Adj. P-Value Up", "ORA_score_UP"),
      "Down" = c("Odds Ratio Down", "Adj. P-Value Down", "ORA_score_DOWN"),
      "Stable" = c("Odds Ratio Stable", "Adj. P-Value Stable", "ORA_score_FLAT")
    )
    req(cols)
    # The three column names are passed positionally, as before, to match
    # plot_ORA's signature.
    scDiffCom:::plot_ORA(
      object = obj,
      category = input$TSA_ORA_CATEGORY_CHOICE,
      cols[[1]],
      cols[[2]],
      cols[[3]],
      max_value = 20,
      OR_cutoff = input$TSA_ORA_SLIDER_OR,
      pval_cutoff = input$TSA_ORA_SLIDER_PVALUE
    )
  })
}
# No call to analyzeGraph, called directly from tab_tissue_specific.R
# build_TSA_INTERACTIVE_BIPARTITE_NET <- function(
# input
# ) {
# renderPlot({
# stop('Not implemented')
# req(input$TSA_DATASET_CHOICE, input$TSA_TISSUE_CHOICE, input$TSA_ORA_CATEGORY_CHOICE, input$TSA_ORA_TYPE_CHOICE)
# obj <- DATASETS_light[[input$TSA_DATASET_CHOICE]][[input$TSA_TISSUE_CHOICE]]
# req(obj)
# # TO ADD
# # scDiffCom::build_interactive_network
# })
# }
#
# build_TSA_INTERACTIVE_CELLCOMMUNITY_NET <- function(
# input
# ) {
# renderPlot({
# stop('Not implemented')
# req(input$TSA_DATASET_CHOICE, input$TSA_TISSUE_CHOICE, input$TSA_ORA_CATEGORY_CHOICE, input$TSA_ORA_TYPE_CHOICE)
# obj <- DATASETS_light[[input$TSA_DATASET_CHOICE]][[input$TSA_TISSUE_CHOICE]]
# req(obj)
# # TO ADD
# # scDiffCom::build_interactive_network
# })
# } | /shinyApp/utils_tissue_specific.R | no_license | robi-tacutu/scAgeCom | R | false | false | 8,149 | r | choose_TSA_tissue <- function(
input
) {
renderUI({
choices <- sort(names(DATASETS_light[[input$TSA_DATASET_CHOICE]]))
pickerInput(
inputId = "TSA_TISSUE_CHOICE",
label = "Tissue",
choices = choices,
options = list(`actions-box` = TRUE),
multiple = FALSE
)
})
}
# Build the emitter cell-type picker for the tissue-specific tab.
#
# The choices are the distinct emitter cell types present in the filtered
# CCI table of the selected dataset/tissue; all of them start selected.
choose_TSA_emitter <- function(
  input
) {
  renderUI({
    req(input$TSA_DATASET_CHOICE, input$TSA_TISSUE_CHOICE)
    tissue_obj <- DATASETS_light[[input$TSA_DATASET_CHOICE]][[input$TSA_TISSUE_CHOICE]]
    req(tissue_obj)
    cci_dt <- scDiffCom:::get_cci_table_filtered(tissue_obj)
    emitter_choices <- sort(unique(cci_dt[["Emitter Cell Type"]]))
    pickerInput(
      inputId = "TSA_EMITTER_CHOICE",
      label = "Emitter Cell Type",
      choices = emitter_choices,
      selected = emitter_choices,  # pre-select every cell type
      options = list(`actions-box` = TRUE),
      multiple = TRUE
    )
  })
}
# Build the receiver cell-type picker for the tissue-specific tab.
#
# The choices are the distinct receiver cell types present in the filtered
# CCI table of the selected dataset/tissue; all of them start selected.
choose_TSA_receiver <- function(
  input
) {
  renderUI({
    req(input$TSA_DATASET_CHOICE, input$TSA_TISSUE_CHOICE)
    tissue_obj <- DATASETS_light[[input$TSA_DATASET_CHOICE]][[input$TSA_TISSUE_CHOICE]]
    req(tissue_obj)
    cci_dt <- scDiffCom:::get_cci_table_filtered(tissue_obj)
    receiver_choices <- sort(unique(cci_dt[["Receiver Cell Type"]]))
    pickerInput(
      inputId = "TSA_RECEIVER_CHOICE",
      label = "Receiver Cell Type",
      choices = receiver_choices,
      selected = receiver_choices,  # pre-select every cell type
      options = list(`actions-box` = TRUE),
      multiple = TRUE
    )
  })
}
# Build the LOG2FC threshold slider for the tissue-specific tab.
#
# The slider's upper bound is the rounded-up largest LOG2FC observed in the
# filtered CCI table of the selected dataset/tissue.
get_TSA_slider_log2fc <- function(
  input
) {
  renderUI({
    req(input$TSA_DATASET_CHOICE, input$TSA_TISSUE_CHOICE)
    tissue_obj <- DATASETS_light[[input$TSA_DATASET_CHOICE]][[input$TSA_TISSUE_CHOICE]]
    req(tissue_obj)
    log2fc_values <- scDiffCom:::get_cci_table_filtered(tissue_obj)[["LOG2FC"]]
    upper_bound <- ceiling(max(log2fc_values))
    sliderInput(
      inputId = "TSA_SLIDER_LOG2FC",
      label = "LOG2FC Threshold",
      min = 0,
      max = upper_bound,
      value = 0,
      step = 0.01
    )
  })
}
# Build the ORA category picker for the tissue-specific tab.
#
# Offers one entry per ORA table available on the selected dataset/tissue.
choose_TSA_ORA_category <- function(
  input
) {
  renderUI({
    req(input$TSA_DATASET_CHOICE, input$TSA_TISSUE_CHOICE)
    tissue_obj <- DATASETS_light[[input$TSA_DATASET_CHOICE]][[input$TSA_TISSUE_CHOICE]]
    req(tissue_obj)
    category_choices <- names(scDiffCom:::get_ora_tables(tissue_obj))
    pickerInput(
      inputId = "TSA_ORA_CATEGORY_CHOICE",
      label = "Category",
      choices = category_choices,
      options = list(`actions-box` = TRUE),
      multiple = FALSE
    )
  })
}
# Build the odds-ratio threshold slider for the ORA view.
#
# The slider's upper bound is the rounded-up largest odds ratio of the
# selected regulation direction within the chosen ORA category table.
#
# Improvement over the previous version: the duplicated if/else chain is
# replaced by a switch() lookup, and an unexpected direction value now
# aborts the reactive via req() instead of erroring on undefined max_val.
get_TSA_ORA_slider_or <- function(
  input
) {
  renderUI({
    req(input$TSA_DATASET_CHOICE, input$TSA_TISSUE_CHOICE,
        input$TSA_ORA_CATEGORY_CHOICE, input$TSA_ORA_TYPE_CHOICE)
    obj <- DATASETS_light[[input$TSA_DATASET_CHOICE]][[input$TSA_TISSUE_CHOICE]]
    req(obj)
    ora_table <- scDiffCom:::get_ora_tables(obj)[[input$TSA_ORA_CATEGORY_CHOICE]]
    req(ora_table)
    # Resolve the odds-ratio column from the direction choice; switch()
    # yields NULL on an unexpected value and req() then stops cleanly.
    or_col <- switch(
      input$TSA_ORA_TYPE_CHOICE,
      "Up" = "Odds Ratio Up",
      "Down" = "Odds Ratio Down",
      "Stable" = "Odds Ratio Stable"
    )
    req(or_col)
    max_val <- ceiling(max(ora_table[[or_col]]))
    sliderInput(
      inputId = "TSA_ORA_SLIDER_OR",
      label = "Odds Ratio Threshold",
      min = 1,
      max = max_val,
      value = 1,
      step = 0.01
    )
  })
}
# Render the tissue-specific cell-cell interaction table.
#
# Filters the CCI table of the selected dataset/tissue by emitter/receiver
# cell types and the p-value / LOG2FC sliders, orders rows by decreasing
# LOG2FC (ties broken by increasing adjusted p-value), and displays the
# result with show_DT().
get_TSA_interaction_table <- function(
  input
) {
  DT::renderDataTable({
    req(input$TSA_DATASET_CHOICE, input$TSA_TISSUE_CHOICE,
        input$TSA_EMITTER_CHOICE, input$TSA_RECEIVER_CHOICE,
        input$TSA_SLIDER_PVALUE, input$TSA_SLIDER_LOG2FC)
    tissue_obj <- DATASETS_light[[input$TSA_DATASET_CHOICE]][[input$TSA_TISSUE_CHOICE]]
    req(tissue_obj)
    cci_dt <- scDiffCom:::get_cci_table_filtered(tissue_obj)
    req(cci_dt)
    # Plain logical mask, equivalent to the original data.table NSE filter.
    keep <- cci_dt[["Emitter Cell Type"]] %in% input$TSA_EMITTER_CHOICE &
      cci_dt[["Receiver Cell Type"]] %in% input$TSA_RECEIVER_CHOICE &
      cci_dt[["Adj. P-Value"]] <= input$TSA_SLIDER_PVALUE &
      abs(cci_dt[["LOG2FC"]]) >= input$TSA_SLIDER_LOG2FC
    cci_dt <- cci_dt[keep]
    # order = c(-1L, 1L): LOG2FC descending, then adjusted p-value ascending.
    setorderv(cci_dt, c("LOG2FC", "Adj. P-Value"), order = c(-1L, 1L))
    show_DT(
      cci_dt,
      cols_to_show_DATA,
      cols_numeric_DATA
    )
  })
}
# Render the tissue-specific volcano plot.
#
# Reactively pulls the filtered CCI table for the selected dataset/tissue,
# restricts it to the chosen emitter/receiver cell types and the current
# adjusted p-value / LOG2FC thresholds, then hands the result to
# show_volcano().
plot_TSA_VOLCANO <- function(
  input
) {
  renderPlot({
    req(input$TSA_DATASET_CHOICE, input$TSA_TISSUE_CHOICE,
        input$TSA_EMITTER_CHOICE, input$TSA_RECEIVER_CHOICE,
        input$TSA_SLIDER_PVALUE, input$TSA_SLIDER_LOG2FC)
    tissue_obj <- DATASETS_light[[input$TSA_DATASET_CHOICE]][[input$TSA_TISSUE_CHOICE]]
    req(tissue_obj)
    cci_dt <- scDiffCom:::get_cci_table_filtered(tissue_obj)
    req(cci_dt)
    # Build the row filter as a plain logical vector (equivalent to the
    # original data.table non-standard-evaluation filter).
    keep <- cci_dt[["Emitter Cell Type"]] %in% input$TSA_EMITTER_CHOICE &
      cci_dt[["Receiver Cell Type"]] %in% input$TSA_RECEIVER_CHOICE &
      cci_dt[["Adj. P-Value"]] <= input$TSA_SLIDER_PVALUE &
      abs(cci_dt[["LOG2FC"]]) >= input$TSA_SLIDER_LOG2FC
    show_volcano(cci_dt[keep])
  })
}
# Render the tissue-specific over-representation (ORA) table.
#
# Looks up the ORA table of the selected dataset/tissue/category, keeps the
# rows of the chosen regulation direction ("Up", "Down" or "Stable") that
# pass the odds-ratio and adjusted p-value sliders, and displays them
# ordered by increasing adjusted p-value.
#
# Improvement over the previous version: the three near-identical branches
# are collapsed into a single column-suffix lookup, and an unexpected
# direction value now aborts the reactive cleanly via req() instead of
# erroring on an undefined `dt` / `cols_numeric`.
get_TSA_ORA_table <- function(
  input
) {
  DT::renderDataTable({
    req(input$TSA_DATASET_CHOICE, input$TSA_TISSUE_CHOICE,
        input$TSA_ORA_CATEGORY_CHOICE, input$TSA_ORA_TYPE_CHOICE)
    obj <- DATASETS_light[[input$TSA_DATASET_CHOICE]][[input$TSA_TISSUE_CHOICE]]
    req(obj)
    dt <- scDiffCom:::get_ora_tables(obj)[[input$TSA_ORA_CATEGORY_CHOICE]]
    req(dt)
    # Map the UI choice to the column-name suffix; switch() returns NULL
    # (and req() stops silently) on an unexpected value.
    suffix <- switch(
      input$TSA_ORA_TYPE_CHOICE,
      "Up" = "Up",
      "Down" = "Down",
      "Stable" = "Stable"
    )
    req(suffix)
    or_col <- paste("Odds Ratio", suffix)
    pval_col <- paste("Adj. P-Value", suffix)
    # Keep over-represented rows (OR >= 1) and the three display columns.
    dt <- dt[dt[[or_col]] >= 1, c("Value", or_col, pval_col), with = FALSE]
    # Apply the user-controlled slider cutoffs.
    dt <- dt[dt[[pval_col]] <= input$TSA_ORA_SLIDER_PVALUE &
               dt[[or_col]] >= input$TSA_ORA_SLIDER_OR]
    setorderv(dt, pval_col)
    cols_numeric <- c(or_col, pval_col)
    show_DT(
      dt,
      cols_to_show = colnames(dt),
      cols_numeric = cols_numeric
    )
  })
}
# Render the tissue-specific ORA score plot.
#
# Maps the selected regulation direction to the matching odds-ratio,
# adjusted p-value and ORA-score column names, then delegates the drawing
# to scDiffCom:::plot_ORA() with the current slider cutoffs.
#
# Improvement over the previous version: the if/else chain is replaced by a
# single switch() lookup, and an unexpected direction value now aborts the
# reactive via req() instead of erroring on undefined variables.
plot_TSA_ORA <- function(
  input
) {
  renderPlot({
    req(input$TSA_DATASET_CHOICE, input$TSA_TISSUE_CHOICE,
        input$TSA_ORA_CATEGORY_CHOICE, input$TSA_ORA_TYPE_CHOICE)
    obj <- DATASETS_light[[input$TSA_DATASET_CHOICE]][[input$TSA_TISSUE_CHOICE]]
    req(obj)
    # c(OR column, adjusted p-value column, ORA score column) per direction.
    cols <- switch(
      input$TSA_ORA_TYPE_CHOICE,
      "Up" = c("Odds Ratio Up", "Adj. P-Value Up", "ORA_score_UP"),
      "Down" = c("Odds Ratio Down", "Adj. P-Value Down", "ORA_score_DOWN"),
      "Stable" = c("Odds Ratio Stable", "Adj. P-Value Stable", "ORA_score_FLAT")
    )
    req(cols)
    # The three column names are passed positionally, as before, to match
    # plot_ORA's signature.
    scDiffCom:::plot_ORA(
      object = obj,
      category = input$TSA_ORA_CATEGORY_CHOICE,
      cols[[1]],
      cols[[2]],
      cols[[3]],
      max_value = 20,
      OR_cutoff = input$TSA_ORA_SLIDER_OR,
      pval_cutoff = input$TSA_ORA_SLIDER_PVALUE
    )
  })
}
# No call to analyzeGraph, called directly from tab_tissue_specific.R
# build_TSA_INTERACTIVE_BIPARTITE_NET <- function(
# input
# ) {
# renderPlot({
# stop('Not implemented')
# req(input$TSA_DATASET_CHOICE, input$TSA_TISSUE_CHOICE, input$TSA_ORA_CATEGORY_CHOICE, input$TSA_ORA_TYPE_CHOICE)
# obj <- DATASETS_light[[input$TSA_DATASET_CHOICE]][[input$TSA_TISSUE_CHOICE]]
# req(obj)
# # TO ADD
# # scDiffCom::build_interactive_network
# })
# }
#
# build_TSA_INTERACTIVE_CELLCOMMUNITY_NET <- function(
# input
# ) {
# renderPlot({
# stop('Not implemented')
# req(input$TSA_DATASET_CHOICE, input$TSA_TISSUE_CHOICE, input$TSA_ORA_CATEGORY_CHOICE, input$TSA_ORA_TYPE_CHOICE)
# obj <- DATASETS_light[[input$TSA_DATASET_CHOICE]][[input$TSA_TISSUE_CHOICE]]
# req(obj)
# # TO ADD
# # scDiffCom::build_interactive_network
# })
# } |
# R language basics: control flow, functions and strings (demo script).
# Comments translated from Chinese; three defects fixed (see BUG FIX notes).

# Conditional statements (decision making in R)
x <- 30L
# BUG FIX: was `is.interger` (typo) -- not an existing function.
if (is.integer(x)) {
  print("X is an integer")
}

x <- c("what", "is", "truth")
if ("Truth" %in% x) {
  print("Truth is found the first time")
} else if ("truth" %in% x) {
  print("truth is found the second time")
} else {
  print("No truth found")
}

# switch statement: selects the 3rd alternative ("third")
x <- switch(
  3,
  "first",
  "second",
  "third",
  "fourth"
)
print(x)

# repeat loop (runs until an explicit break)
v <- c("Hello", "loop")
cnt <- 2
repeat {
  print(v)
  cnt <- cnt + 1
  if (cnt > 5) {
    break
  }
}

# while loop
v <- c("Hello", "while loop")
cnt <- 2
while (cnt < 7) {
  print(v)
  cnt <- cnt + 1
}

# for loop
v <- LETTERS[1:4]  # yields A, B, C, D
for (i in v) {
  print(i)
}

# next statement (equivalent of `continue` in other languages)
v <- LETTERS[1:6]
for (i in v) {
  if (i == "D") {
    next
  }
  print(i)
}

# R functions
# Built-in function examples
print(seq(1, 32))   # sequence from 1 to 32
print(mean(25:83))  # mean of the sequence 25..83
print(sum(41:68))   # sum of the sequence 41..68

# User-defined function: prints i^2 for i in 1..arg1, skipping i == arg2.
# BUG FIX: the original assigned to the reserved word `function`, which is
# a syntax error in R; renamed to print_squares().
print_squares <- function(arg1, arg2 = 6) {
  for (i in seq_len(arg1)) {
    b <- i^2
    if (i == arg2) {
      next
    }
    print(b)
  }
}
print_squares(6)

# String concatenation
a <- "Hello"
b <- "How"
c <- "are you? "
print(paste(a, b, c))
# BUG FIX: the original passed `seq =` instead of `sep =`; the misspelled
# argument was silently absorbed into `...` and pasted as an extra element.
print(paste(a, b, c, sep = "-"))
print(paste(a, b, c, sep = "", collapse = ""))

# Substring extraction
result <- substring("Extract", 5, 7)
print(result)
| /R-Program/RcommonState.R | permissive | Johnwei386/Warehouse | R | false | false | 1,380 | r | # R语言 基础语句
# Conditional statements (decision making in R).
# Comments translated from Chinese; three defects fixed (see BUG FIX notes).
x <- 30L
# BUG FIX: was `is.interger` (typo) -- not an existing function.
if (is.integer(x)) {
  print("X is an integer")
}

x <- c("what", "is", "truth")
if ("Truth" %in% x) {
  print("Truth is found the first time")
} else if ("truth" %in% x) {
  print("truth is found the second time")
} else {
  print("No truth found")
}

# switch statement: selects the 3rd alternative ("third")
x <- switch(
  3,
  "first",
  "second",
  "third",
  "fourth"
)
print(x)

# repeat loop (runs until an explicit break)
v <- c("Hello", "loop")
cnt <- 2
repeat {
  print(v)
  cnt <- cnt + 1
  if (cnt > 5) {
    break
  }
}

# while loop
v <- c("Hello", "while loop")
cnt <- 2
while (cnt < 7) {
  print(v)
  cnt <- cnt + 1
}

# for loop
v <- LETTERS[1:4]  # yields A, B, C, D
for (i in v) {
  print(i)
}

# next statement (equivalent of `continue` in other languages)
v <- LETTERS[1:6]
for (i in v) {
  if (i == "D") {
    next
  }
  print(i)
}

# R functions
# Built-in function examples
print(seq(1, 32))   # sequence from 1 to 32
print(mean(25:83))  # mean of the sequence 25..83
print(sum(41:68))   # sum of the sequence 41..68

# User-defined function: prints i^2 for i in 1..arg1, skipping i == arg2.
# BUG FIX: the original assigned to the reserved word `function`, which is
# a syntax error in R; renamed to print_squares().
print_squares <- function(arg1, arg2 = 6) {
  for (i in seq_len(arg1)) {
    b <- i^2
    if (i == arg2) {
      next
    }
    print(b)
  }
}
print_squares(6)

# String concatenation
a <- "Hello"
b <- "How"
c <- "are you? "
print(paste(a, b, c))
# BUG FIX: the original passed `seq =` instead of `sep =`; the misspelled
# argument was silently absorbed into `...` and pasted as an extra element.
print(paste(a, b, c, sep = "-"))
print(paste(a, b, c, sep = "", collapse = ""))

# Substring extraction
result <- substring("Extract", 5, 7)
print(result)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get.R
\name{get_session_info}
\alias{get_session_info}
\title{Returns Session Info}
\usage{
get_session_info()
}
\value{
Formatted Session Info
}
\description{
Returns Session Info
}
\examples{
get_session_info()
}
| /man/get_session_info.Rd | permissive | bms63/timber | R | false | true | 294 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get.R
\name{get_session_info}
\alias{get_session_info}
\title{Returns Session Info}
\usage{
get_session_info()
}
\value{
Formatted Session Info
}
\description{
Returns Session Info
}
\examples{
get_session_info()
}
|
##################################
######### Age Length Key #########
##################################
# Ogle (2016); pages 87-105
# Builds an age-length key (ALK) from aged White Sturgeon captures, applies
# it to unaged fish, and writes length-at-age summaries to CSV.
# NOTE(review): every read/write path below is an absolute Windows path
# (including the apparent typo "Quuist Grad") -- confirm they are valid on
# the machine running this script.
# load packages
library(FSA)
library(magrittr)
library(dplyr)
library(nnet)
library(nlstools)
library(readxl)
# Capture data; downstream code uses lencap (fork length, cm) and agecap
# (assigned age, NA for unaged fish).
cdfw <- read_excel("S:/CNR/Labs-Quist/Blackburn/Projects/WST/California/R code/Age Analysis/cdfw_alk.xlsx")
headtail(cdfw)
str(cdfw)
table(cdfw$agecap) # N = 1003
# apply 5 cm wide length interval
cdfw %<>% mutate(lcat5 = lencat(lencap, w = 5))
headtail(cdfw) # make sure lcat5 appears as new column
table(cdfw$lcat5) # only 1 fish in the 200 lcat5 category
# make sure data.frame includes unaged fish(3 false and 3 true)
# NOTE(review): this line checks a column named "age", but the rest of the
# script uses "agecap" -- confirm which column name is intended.
is.na(headtail(cdfw)$age)
# extract unaged fish
cdfw.unaged <- filter(cdfw, is.na(agecap))
table(cdfw.unaged$lencap)
table(cdfw.unaged$lcat5) # look at # of unaged fish in each lenght bin
cdfw.aged <-filter(cdfw, !is.na(agecap))
summary(cdfw.aged$agecap)
mean(cdfw.aged$agecap)
# check to see if extraction worked
all(is.na(cdfw.unaged$agecap)) # better be TRUE
headtail(cdfw.unaged)
table(cdfw.unaged$lcat5)
any(is.na(cdfw.aged$agecap)) # better be FALSE
headtail(cdfw.aged) # n = 372
table(cdfw.aged$lcat5)
# construct data.frame with the frequency of fish in each length catergory and age combo
( alk.freq <- xtabs(~lcat5 + agecap, data = cdfw.aged) )
write.csv(alk.freq,"C:/Users/Quuist Grad/Desktop/cdfw_alkfreq.csv" )
# computes the sum of rows in the frequency table
rowSums(alk.freq) # large amount of small fish in sample
# row proportions
( alk <- prop.table(alk.freq, margin = 1) )
round(alk, 3) # rounded for display purposes only
# write.csv(alk,"C:/Users/Quuist Grad/Desktop/POOPY.csv" )
## Visualize ALK ##
# Area plot represenation of the observed age-length key for WST in the SSJ
alkPlot(alk, type = "area", pal = "gray", showLegend = TRUE, leg.cex = 0.7, xlab = "Fork length (cm)")
alkplot.sm <-alkPlot(alk, type = "bubble", xlab = "Fork length (cm)")
## Applying an ALK ##
## unadjusted data ##
# Age distribution
( len.n <- xtabs(~lcat5, data = cdfw) )
# Multiply each ALK row (proportion at age given length) by the number of
# fish in that length bin.
( tmp <- sweep(alk, MARGIN = 1, FUN = "*", STATS = len.n) )
# number of fish allocated to each age
( ad1 <- colSums(tmp) )
# Proportion of fish at each age
round(prop.table(ad1), 3)
age_dist <- alkAgeDist(alk, lenA.n = rowSums(alk.freq), len.n = len.n)
age_dist
write.csv(age_dist,"C:/Users/Quuist Grad/Desktop/FART.csv")
( age_len2 <- alkMeanVar(alk, lencap~lcat5 + agecap, data = cdfw.aged, len.n = len.n) )
write.csv(age_len2,"C:/Users/Quuist Grad/Desktop/agelen5_cdfw.csv")
cdfw.unaged.mod <- alkIndivAge(alk, agecap~lencap, data = cdfw.unaged) #apply ALK
headtail(cdfw.unaged.mod)
cdfw.fnl <-rbind(cdfw.aged, cdfw.unaged.mod)
headtail(cdfw.fnl)
write.csv(cdfw.fnl,"C:/Users/Quuist Grad/Desktop/cdfwfnl5_cdfw.csv")
# Mean length at age (with sd and se) for aged + ALK-assigned fish combined.
cdfw.sumlen <- cdfw.fnl %>% group_by(agecap) %>%
  summarize(n = validn(lencap), mn = mean(lencap, na.rm = TRUE),
            sd =sd(lencap, na.rm = TRUE), se = se (lencap, na.rm = TRUE)) %>%
  as.data.frame()
write.csv(cdfw.sumlen,"C:/Users/Quuist Grad/Desktop/sumlen5_cdfw.csv")
plot(lencap~agecap, data = cdfw.fnl, pch = 19, col = rgb(0,0,0,1/10),
     xlab = "Age", ylab = "Fork Length (cm)", ylim = c(0,300))
lines(mn~agecap, data = cdfw.sumlen, lwd = 2, lty = 2)
cdfw.fnl
write.csv(cdfw.fnl,"C:/Users/Quuist Grad/Desktop/cdfw_fnl.csv")
# NOTE(review): describe() is not exported by any package attached above
# (the type argument matches psych::describe) -- confirm the intended
# package is loaded before running these two lines.
describe(cdfw.fnl$agecap, type = 2)
describe(cdfw.aged$agecap, type = 2)
write.csv(cdfw.aged,"C:/Users/Quuist Grad/Desktop/cdfw_aged.csv")
SumWST <- cdfw.aged %>% group_by(agecap) %>%
  summarize(n=validn(lencap), mnlen = mean(lencap, na.rm = T),
            selen=se(lencap, na.rm = T)) %>%
  as.data.frame()
write.csv(SumWST,"C:/Users/Quuist Grad/Desktop/SumWST.csv")
###################
## Adjusted data ##
###################
# import data
cdfw_adjusted <- read_excel("S:/CNR/Labs-Quist/Blackburn/Projects/WST/California/R code/Age Analysis/cdfw_adjusted.xlsx")
cdfw_adjusted
# Age distribution
# NOTE(review): len.n now comes from the adjusted data, but the alk,
# alk.freq and cdfw.aged/cdfw.unaged objects from the unadjusted run are
# reused below -- confirm this mixing is intentional.
( len.n <- xtabs(~lcat5, data = cdfw_adjusted) )
( tmp <- sweep(alk, MARGIN = 1, FUN = "*", STATS = len.n) )
# number of fish allocated to each age
( ad1 <- colSums(tmp) )
# Proportion of fish at each age
round(prop.table(ad1), 3)
age_dist <- alkAgeDist(alk, lenA.n = rowSums(alk.freq), len.n = len.n)
age_dist
write.csv(age_dist,"C:/Users/Quuist Grad/Desktop/adjusted.csv")
( age_len2 <- alkMeanVar(alk, lencap~lcat5 + agecap, data = cdfw.aged, len.n = len.n) )
write.csv(age_len2,"C:/Users/Quuist Grad/Desktop/agelen5_cdfw_adjusted.csv")
cdfw.unaged.mod <- alkIndivAge(alk, agecap~lencap, data = cdfw.unaged) #apply ALK
headtail(cdfw.unaged.mod)
cdfw.fnl <-rbind(cdfw.aged, cdfw.unaged.mod)
headtail(cdfw.fnl)
write.csv(cdfw.fnl,"C:/Users/Quuist Grad/Desktop/cdfwfnl5_cdfw_adjusted.csv")
cdfw.sumlen <- cdfw.fnl %>% group_by(agecap) %>%
  summarize(n = validn(lencap), mn = mean(lencap, na.rm = TRUE),
            sd =sd(lencap, na.rm = TRUE), se = se (lencap, na.rm = TRUE)) %>%
  as.data.frame()
write.csv(cdfw.sumlen,"C:/Users/Quuist Grad/Desktop/sumlen5_cdfw_adjusted.csv")
plot(lencap~agecap, data = cdfw.fnl, pch = 19, col = rgb(0,0,0,1/10),
     xlab = "Age", ylab = "Fork Length (cm)", ylim = c(0,300))
lines(mn~agecap, data = cdfw.sumlen, lwd = 2, lty = 2)
cdfw.fnl
| /blackburn_code/LengthAge/ALK_cdfw.R | no_license | szjhobbs/sturgeon_pop_model | R | false | false | 5,251 | r | ##################################
######### Age Length Key #########
##################################
# Ogle (2016); pages 87-105
# Builds an age-length key (ALK) from aged White Sturgeon captures, applies
# it to unaged fish, and writes length-at-age summaries to CSV.
# NOTE(review): every read/write path below is an absolute Windows path
# (including the apparent typo "Quuist Grad") -- confirm they are valid on
# the machine running this script.
# load packages
library(FSA)
library(magrittr)
library(dplyr)
library(nnet)
library(nlstools)
library(readxl)
# Capture data; downstream code uses lencap (fork length, cm) and agecap
# (assigned age, NA for unaged fish).
cdfw <- read_excel("S:/CNR/Labs-Quist/Blackburn/Projects/WST/California/R code/Age Analysis/cdfw_alk.xlsx")
headtail(cdfw)
str(cdfw)
table(cdfw$agecap) # N = 1003
# apply 5 cm wide length interval
cdfw %<>% mutate(lcat5 = lencat(lencap, w = 5))
headtail(cdfw) # make sure lcat5 appears as new column
table(cdfw$lcat5) # only 1 fish in the 200 lcat5 category
# make sure data.frame includes unaged fish(3 false and 3 true)
# NOTE(review): this line checks a column named "age", but the rest of the
# script uses "agecap" -- confirm which column name is intended.
is.na(headtail(cdfw)$age)
# extract unaged fish
cdfw.unaged <- filter(cdfw, is.na(agecap))
table(cdfw.unaged$lencap)
table(cdfw.unaged$lcat5) # look at # of unaged fish in each lenght bin
cdfw.aged <-filter(cdfw, !is.na(agecap))
summary(cdfw.aged$agecap)
mean(cdfw.aged$agecap)
# check to see if extraction worked
all(is.na(cdfw.unaged$agecap)) # better be TRUE
headtail(cdfw.unaged)
table(cdfw.unaged$lcat5)
any(is.na(cdfw.aged$agecap)) # better be FALSE
headtail(cdfw.aged) # n = 372
table(cdfw.aged$lcat5)
# construct data.frame with the frequency of fish in each length catergory and age combo
( alk.freq <- xtabs(~lcat5 + agecap, data = cdfw.aged) )
write.csv(alk.freq,"C:/Users/Quuist Grad/Desktop/cdfw_alkfreq.csv" )
# computes the sum of rows in the frequency table
rowSums(alk.freq) # large amount of small fish in sample
# row proportions
( alk <- prop.table(alk.freq, margin = 1) )
round(alk, 3) # rounded for display purposes only
# write.csv(alk,"C:/Users/Quuist Grad/Desktop/POOPY.csv" )
## Visualize ALK ##
# Area plot represenation of the observed age-length key for WST in the SSJ
alkPlot(alk, type = "area", pal = "gray", showLegend = TRUE, leg.cex = 0.7, xlab = "Fork length (cm)")
alkplot.sm <-alkPlot(alk, type = "bubble", xlab = "Fork length (cm)")
## Applying an ALK ##
## unadjusted data ##
# Age distribution
( len.n <- xtabs(~lcat5, data = cdfw) )
# Multiply each ALK row (proportion at age given length) by the number of
# fish in that length bin.
( tmp <- sweep(alk, MARGIN = 1, FUN = "*", STATS = len.n) )
# number of fish allocated to each age
( ad1 <- colSums(tmp) )
# Proportion of fish at each age
round(prop.table(ad1), 3)
age_dist <- alkAgeDist(alk, lenA.n = rowSums(alk.freq), len.n = len.n)
age_dist
write.csv(age_dist,"C:/Users/Quuist Grad/Desktop/FART.csv")
( age_len2 <- alkMeanVar(alk, lencap~lcat5 + agecap, data = cdfw.aged, len.n = len.n) )
write.csv(age_len2,"C:/Users/Quuist Grad/Desktop/agelen5_cdfw.csv")
cdfw.unaged.mod <- alkIndivAge(alk, agecap~lencap, data = cdfw.unaged) #apply ALK
headtail(cdfw.unaged.mod)
cdfw.fnl <-rbind(cdfw.aged, cdfw.unaged.mod)
headtail(cdfw.fnl)
write.csv(cdfw.fnl,"C:/Users/Quuist Grad/Desktop/cdfwfnl5_cdfw.csv")
# Mean length at age (with sd and se) for aged + ALK-assigned fish combined.
cdfw.sumlen <- cdfw.fnl %>% group_by(agecap) %>%
  summarize(n = validn(lencap), mn = mean(lencap, na.rm = TRUE),
            sd =sd(lencap, na.rm = TRUE), se = se (lencap, na.rm = TRUE)) %>%
  as.data.frame()
write.csv(cdfw.sumlen,"C:/Users/Quuist Grad/Desktop/sumlen5_cdfw.csv")
plot(lencap~agecap, data = cdfw.fnl, pch = 19, col = rgb(0,0,0,1/10),
     xlab = "Age", ylab = "Fork Length (cm)", ylim = c(0,300))
lines(mn~agecap, data = cdfw.sumlen, lwd = 2, lty = 2)
cdfw.fnl
write.csv(cdfw.fnl,"C:/Users/Quuist Grad/Desktop/cdfw_fnl.csv")
# NOTE(review): describe() is not exported by any package attached above
# (the type argument matches psych::describe) -- confirm the intended
# package is loaded before running these two lines.
describe(cdfw.fnl$agecap, type = 2)
describe(cdfw.aged$agecap, type = 2)
write.csv(cdfw.aged,"C:/Users/Quuist Grad/Desktop/cdfw_aged.csv")
SumWST <- cdfw.aged %>% group_by(agecap) %>%
  summarize(n=validn(lencap), mnlen = mean(lencap, na.rm = T),
            selen=se(lencap, na.rm = T)) %>%
  as.data.frame()
write.csv(SumWST,"C:/Users/Quuist Grad/Desktop/SumWST.csv")
###################
## Adjusted data ##
###################
# import data
cdfw_adjusted <- read_excel("S:/CNR/Labs-Quist/Blackburn/Projects/WST/California/R code/Age Analysis/cdfw_adjusted.xlsx")
cdfw_adjusted
# Age distribution
# NOTE(review): len.n now comes from the adjusted data, but the alk,
# alk.freq and cdfw.aged/cdfw.unaged objects from the unadjusted run are
# reused below -- confirm this mixing is intentional.
( len.n <- xtabs(~lcat5, data = cdfw_adjusted) )
( tmp <- sweep(alk, MARGIN = 1, FUN = "*", STATS = len.n) )
# number of fish allocated to each age
( ad1 <- colSums(tmp) )
# Proportion of fish at each age
round(prop.table(ad1), 3)
age_dist <- alkAgeDist(alk, lenA.n = rowSums(alk.freq), len.n = len.n)
age_dist
write.csv(age_dist,"C:/Users/Quuist Grad/Desktop/adjusted.csv")
( age_len2 <- alkMeanVar(alk, lencap~lcat5 + agecap, data = cdfw.aged, len.n = len.n) )
write.csv(age_len2,"C:/Users/Quuist Grad/Desktop/agelen5_cdfw_adjusted.csv")
cdfw.unaged.mod <- alkIndivAge(alk, agecap~lencap, data = cdfw.unaged) #apply ALK
headtail(cdfw.unaged.mod)
cdfw.fnl <-rbind(cdfw.aged, cdfw.unaged.mod)
headtail(cdfw.fnl)
write.csv(cdfw.fnl,"C:/Users/Quuist Grad/Desktop/cdfwfnl5_cdfw_adjusted.csv")
cdfw.sumlen <- cdfw.fnl %>% group_by(agecap) %>%
  summarize(n = validn(lencap), mn = mean(lencap, na.rm = TRUE),
            sd =sd(lencap, na.rm = TRUE), se = se (lencap, na.rm = TRUE)) %>%
  as.data.frame()
write.csv(cdfw.sumlen,"C:/Users/Quuist Grad/Desktop/sumlen5_cdfw_adjusted.csv")
plot(lencap~agecap, data = cdfw.fnl, pch = 19, col = rgb(0,0,0,1/10),
     xlab = "Age", ylab = "Fork Length (cm)", ylim = c(0,300))
lines(mn~agecap, data = cdfw.sumlen, lwd = 2, lty = 2)
cdfw.fnl
|
# Abort sourcing this test file if any required package is unavailable.
stopifnot(require("testthat"),
          require("glmmTMB"),
          require("lme4"))
data(sleepstudy, cbpp,
     package = "lme4")
context("variance structures")
## two equivalent diagonal constructions
# fm1 uses glmmTMB's diag() covariance structure; fm2 spells out the same
# model as two independent random terms; fm2L is the lme4 equivalent,
# fitted by ML (REML=FALSE) so log-likelihoods are comparable.
fm1 <- glmmTMB(Reaction ~ Days + diag(Days| Subject), sleepstudy)
fm2 <- glmmTMB(Reaction ~ Days + ( 1 | Subject) + (0+Days | Subject),
               sleepstudy)
fm2L <- lmer(Reaction ~ Days + ( 1 | Subject) + (0+Days | Subject),
             sleepstudy, REML=FALSE)
# fm3/fm3L: unstructured (general) covariance; fm4: compound symmetry cs().
fm3 <- glmmTMB(Reaction ~ Days + (Days| Subject), sleepstudy)
fm4 <- glmmTMB(Reaction ~ Days + cs(Days| Subject), sleepstudy)
fm3L <- lmer(Reaction ~ Days + ( Days | Subject),
             sleepstudy, REML=FALSE)
test_that("diag", {
  ## two formulations of diag and lme4 all give same log-lik
  expect_equal(logLik(fm1),logLik(fm2L))
  expect_equal(logLik(fm1),logLik(fm2))
})
test_that("cs_us", {
  ## for a two-level factor, compound symmetry and unstructured
  ## give same result
  expect_equal(logLik(fm3),logLik(fm4))
  expect_equal(logLik(fm3),logLik(fm3L))
})
| /glmmTMB/tests/testthat/test-varstruc.R | no_license | ogaoue/glmmTMB | R | false | false | 1,052 | r | stopifnot(require("testthat"),
require("glmmTMB"),
require("lme4"))
data(sleepstudy, cbpp,
     package = "lme4")
context("variance structures")
## two equivalent diagonal constructions
# fm1 uses glmmTMB's diag() covariance structure; fm2 spells out the same
# model as two independent random terms; fm2L is the lme4 equivalent,
# fitted by ML (REML=FALSE) so log-likelihoods are comparable.
fm1 <- glmmTMB(Reaction ~ Days + diag(Days| Subject), sleepstudy)
fm2 <- glmmTMB(Reaction ~ Days + ( 1 | Subject) + (0+Days | Subject),
               sleepstudy)
fm2L <- lmer(Reaction ~ Days + ( 1 | Subject) + (0+Days | Subject),
             sleepstudy, REML=FALSE)
# fm3/fm3L: unstructured (general) covariance; fm4: compound symmetry cs().
fm3 <- glmmTMB(Reaction ~ Days + (Days| Subject), sleepstudy)
fm4 <- glmmTMB(Reaction ~ Days + cs(Days| Subject), sleepstudy)
fm3L <- lmer(Reaction ~ Days + ( Days | Subject),
             sleepstudy, REML=FALSE)
test_that("diag", {
  ## two formulations of diag and lme4 all give same log-lik
  expect_equal(logLik(fm1),logLik(fm2L))
  expect_equal(logLik(fm1),logLik(fm2))
})
test_that("cs_us", {
  ## for a two-level factor, compound symmetry and unstructured
  ## give same result
  expect_equal(logLik(fm3),logLik(fm4))
  expect_equal(logLik(fm3),logLik(fm3L))
})
|
# Fixture trees on eight leaves (a..h) used throughout these tests.
# Labels in different order to confound as.Splits
treeSym8 <- ape::read.tree(text='((e, (f, (g, h))), (((a, b), c), d));')
treeBal8 <- ape::read.tree(text='(((e, f), (g, h)), ((a, b), (c, d)));')
treeOpp8 <- ape::read.tree(text='(((a, f), (c, h)), ((g, b), (e, d)));')
# Hand-built multiPhylo collections (lists of the trees above).
treesSBO8 <- structure(list(treeSym8, treeBal8, treeOpp8),
                       class = 'multiPhylo')
treesSSBB8 <- structure(list(treeSym8, treeSym8, treeBal8, treeBal8),
                        class = 'multiPhylo')
treeCat8 <- ape::read.tree(text='((((h, g), f), e), (d, (c, (b, a))));')
treeTac8 <- ape::read.tree(text='((((e, c), g), a), (h, (b, (d, f))));')
treeStar8 <- ape::read.tree(text='(e, c, g, h, b, a, d, f);')
treeAb.Cdefgh <- ape::read.tree(text='((a, b), (c, d, e, f, g, h));')
treeAbc.Defgh <- ape::read.tree(text='((a, b, c), (d, e, f, g, h));')
treeAcd.Befgh <- ape::read.tree(text='((a, c, d), (b, e, f, g, h));')
treeAbcd.Efgh <- ape::read.tree(text='((a, b, c, d), (e, f, g, h));')
treeTwoSplits <- ape::read.tree(text="(((a, b), c, d), (e, f, g, h));")
testTrees <- c(treesSBO8, treeCat8, treeTac8, treeStar8, treeAb.Cdefgh,
               treeAbc.Defgh, treeAbcd.Efgh, treeAcd.Befgh, treeTwoSplits)
# Splits are given as logical tip-membership vectors.  Per the assertions:
# identical splits, a split and its complement, and nested splits are
# compatible; the final crossing pair is not.
test_that("Split compatibility is correctly established", {
  expect_true(SplitsCompatible(as.logical(c(0,0,1,1,0)),
                               as.logical(c(0,0,1,1,0))))
  expect_true(SplitsCompatible( as.logical(c(0,0,1,1,0)),
                               !as.logical(c(0,0,1,1,0))))
  expect_true(SplitsCompatible(as.logical(c(0,0,1,1,0)),
                               as.logical(c(1,0,1,1,0))))
  expect_true(SplitsCompatible(!as.logical(c(0,0,1,1,0)),
                               as.logical(c(1,0,1,1,0))))
  expect_false(SplitsCompatible(as.logical(c(0,0,1,1,0)),
                                as.logical(c(1,1,0,1,0))))
})
# Every tree-comparison entry point exercised by the shared tests below;
# each is expected to accept the same (tree1, tree2) calling convention.
methodsToTest <- list(
  SharedPhylogeneticInfo,
  DifferentPhylogeneticInfo,
  MatchingSplitInfo,
  MatchingSplitInfoDistance,
  MutualClusteringInfo,
  ClusteringInfoDistance,
  NyeSimilarity,
  JaccardRobinsonFoulds,
  MatchingSplitDistance,
  RobinsonFoulds,
  InfoRobinsonFoulds,
  KendallColijn # List last: requires rooted trees.
)
# Helper: with normalize = TRUE, entries 1 and 6 of the pairwise result on
# treesSSBB8 (tree compared with an identical copy of itself) must score 1.
# NOTE(review): not called within this portion of the file -- confirm it is
# used further down.
NormalizationTest <- function (FUNC, ...) {
  expect_equal(c(1L, 1L),
               FUNC(treesSSBB8, normalize = TRUE, ...)[c(1, 6)],
               tolerance = 1e-7)
}
# A tip label absent from the other tree ('D' vs 'd') must raise an error
# in every listed method.
test_that('Bad labels cause error', {
  treeBadLabel8 <- ape::read.tree(text='((a, b, c, D), (e, f, g, h));')
  lapply(methodsToTest, function(Func)
    expect_error(Func(treeSym8, treeBadLabel8)))
})
# Comparing trees (or Splits objects) with different leaf counts must raise
# an error in either argument order, including via the low-level cpp_*
# entry points.
test_that('Size mismatch causes error', {
  treeSym7 <- ape::read.tree(text='((e, (f, g)), (((a, b), c), d));')
  splits7 <- as.Splits(treeSym7)
  splits8 <- as.Splits(treeSym8)
  lapply(methodsToTest, function(Func)
    expect_error(Func(treeSym8, treeSym7)))
  lapply(methodsToTest, function(Func)
    expect_error(Func(treeSym7, treeSym8)))
  expect_error(MeilaVariationOfInformation(splits7, splits8))
  # 8-leaf splits against 9-leaf splits must be rejected by each C++ routine.
  Test <- function (Func) {
    expect_error(Func(splits8, as.Splits(BalancedTree(9)), 8))
  }
  Test(cpp_robinson_foulds_distance)
  Test(cpp_robinson_foulds_info)
  Test(cpp_matching_split_distance)
  Test(cpp_jaccard_similarity)
  Test(cpp_msi_distance)
  Test(cpp_mutual_clustering)
  Test(cpp_shared_phylo)
})
# Comparing a resolved tree against a fully unresolved (star) tree should
# yield zero for these four methods, per the expectations below.
test_that('Metrics handle polytomies', {
  polytomy8 <- ape::read.tree(text='(a, b, c, d, e, f, g, h);')
  lapply(list(SharedPhylogeneticInfo, MutualClusteringInfo,
              MatchingSplitDistance, NyeSimilarity),
         function (Func) expect_equal(0, Func(treeSym8, polytomy8)))
})
#Func <- ClusteringInfoDistance # FUNC =
test_that('Output dimensions are correct', {
  # Every method must return consistently-shaped results whether given
  # tree-vs-tree, tree-vs-list, list-vs-tree or list-vs-list input.
  list1 <- list(sym = treeSym8, bal = treeBal8)
  list2 <- list(sym = treeSym8, abc = treeAbc.Defgh, abcd = treeAbcd.Efgh)
  dimNames <- list(c('sym', 'bal'), c('sym', 'abc', 'abcd'))
  Test <- function (Func) {
    # Reference matrix from single-tree calls; filled column-by-column
    # (default matrix filling): rows = list1 entries, columns = list2 entries.
    allPhylo <- matrix(
      c(Func(treeSym8, treeSym8), Func(treeBal8, treeSym8),
        Func(treeSym8, treeAbc.Defgh), Func(treeBal8, treeAbc.Defgh),
        Func(treeSym8, treeAbcd.Efgh), Func(treeBal8, treeAbcd.Efgh)),
      2L, 3L, dimnames = dimNames)
    # tree-vs-list: each call contributes one row, hence byrow = TRUE.
    phylo1 <- matrix(c(Func(treeSym8, list2), Func(treeBal8, list2)),
                     byrow = TRUE, 2L, 3L, dimnames = dimNames)
    # list-vs-tree: each call contributes one column.
    phylo2 <- matrix(c(Func(list1, treeSym8), Func(list1, treeAbc.Defgh),
                       Func(list1, treeAbcd.Efgh)), 2L, 3L, dimnames = dimNames)
    noPhylo <- Func(list1, list2)
    expect_equal(allPhylo, phylo1)
    expect_equal(allPhylo, phylo2)
    expect_equal(allPhylo, noPhylo)
  }
  lapply(methodsToTest, Test)
})
test_that('Robinson Foulds Distance is correctly calculated', {
  # Cross-check RobinsonFoulds() against phangorn's independent
  # implementation, and check RobinsonFouldsMatching() agrees with the
  # reportMatching output (a pair score of zero <=> a matched split).
  RFTest <- function (t1, t2) {
    expect_equal(suppressMessages(phangorn::RF.dist(t1, t2)),
                 RobinsonFoulds(t1, t2))
    expected <- RobinsonFoulds(t1, t2, reportMatching = TRUE, similarity = TRUE)
    attr(expected, 'pairScores') <- attr(expected, 'pairScores') == 0L
    expect_equal(expected, RobinsonFouldsMatching(t1, t2))
  }
  RFTest(treeSym8, treeSym8)
  RFTest(treeSym8, treeStar8)
  RFTest(treeStar8, treeStar8)
  RFTest(treeAb.Cdefgh, treeAbc.Defgh)
  RFTest(treeAb.Cdefgh, treeAbcd.Efgh)
  # at 2020-10, RF uses Day algorithm if tree2 = null; old algo if tree2 = tree1.
  expect_equal(RobinsonFoulds(testTrees, testTrees),
               as.matrix(RobinsonFoulds(testTrees)),
               ignore_attr = TRUE)
  # Invariant to tree description order
  sq_pectinate <- ape::read.tree(text='((((((1, 2), 3), 4), 5), 6), (7, (8, (9, (10, 11)))));')
  shuffle1 <- ape::read.tree(text='(((((1, 5), 2), 6), (3, 4)), ((8, (7, 9)), (10, 11)));')
  shuffle2 <- ape::read.tree(text='(((8, (7, 9)), (10, 11)), ((((1, 5), 2), 6), (3, 4)));')
  RFTest(shuffle1, sq_pectinate)
  RFTest(sq_pectinate, shuffle1)
  RFTest(shuffle1, shuffle2)
  # (A verbatim duplicate RFTest(shuffle1, sq_pectinate) was removed here.)
  RFTest(shuffle2, sq_pectinate)
})
test_that('Shared Phylogenetic Info is correctly calculated', {
  # Low-level split-pair scores from the C++ routine; expected values are
  # phylogenetic information content in bits.
  expect_equal(5.529821, tolerance = 1e-7,
               cpp_shared_phylo(
                 as.Splits(as.logical(c(1, 1, 1, 1, 0, 0, 0, 0))),
                 as.Splits(as.logical(c(1, 1, 1, 1, 0, 0, 0, 0))),
                 8L)$score)
  expect_equal(0.2895066, tolerance = 1e-7,
               cpp_shared_phylo(
                 as.Splits(as.logical(c(1, 1, 0, 0, 0, 0, 0, 0))),
                 as.Splits(as.logical(c(0, 0, 1, 1, 0, 0, 0, 0))),
                 8L)$score)
  expect_equal(1.137504, tolerance = 1e-6,
               cpp_shared_phylo(
                 as.Splits(as.logical(c(1, 1, 0, 0, 0, 0, 0, 0))),
                 as.Splits(as.logical(c(1, 1, 1, 1, 0, 0, 0, 0))),
                 8L)$score)
  expect_equal(3.45943, tolerance = 1e-6,
               cpp_shared_phylo(
                 as.Splits(as.logical(c(1, 1, 0, 0, 0, 0, 0, 0))),
                 as.Splits(as.logical(c(1, 1, 0, 0, 0, 0, 0, 0))),
                 8L)$score)
  # Whole-tree comparisons.
  expect_equal(22.53747, tolerance = 1e-05,
               SharedPhylogeneticInfo(treeSym8, treeSym8, normalize = FALSE))
  expect_equal(1, tolerance = 1e-05,
               SharedPhylogeneticInfo(treeSym8, treeSym8, normalize = TRUE))
  expect_equal(0,
               SharedPhylogeneticInfo(treeSym8, treeStar8, normalize = TRUE))
  expect_equal(0,
               SharedPhylogeneticInfo(treeStar8, treeStar8, normalize = FALSE))
  expect_equal(NaN, # Division by zero
               SharedPhylogeneticInfo(treeStar8, treeStar8, normalize = TRUE))
  expect_equal(13.75284, SharedPhylogeneticInfo(treeSym8, treeBal8), tolerance=1e-05)
  expect_equal(DifferentPhylogeneticInfo(treeSym8, treeAcd.Befgh),
               DifferentPhylogeneticInfo(treeAcd.Befgh, treeSym8), tolerance=1e-05)
  expect_equal(0, DifferentPhylogeneticInfo(treeSym8, treeSym8, normalize = TRUE))
  # Identity: 2 * shared + different == total splitwise information.
  infoSymBal <- SplitwiseInfo(treeSym8) + SplitwiseInfo(treeBal8)
  expect_equal(infoSymBal - 13.75284 - 13.75284, tolerance = 1e-05,
               DifferentPhylogeneticInfo(treeSym8, treeBal8, normalize = TRUE) * infoSymBal)
  expect_equal(22.53747 + SharedPhylogeneticInfo(treeAcd.Befgh, treeAcd.Befgh) -
                 (2 * SharedPhylogeneticInfo(treeSym8, treeAcd.Befgh)),
               DifferentPhylogeneticInfo(treeSym8, treeAcd.Befgh),
               tolerance=1e-06)
  expect_equal(-log2(945/10395),
               SharedPhylogeneticInfo(treeSym8, treeAb.Cdefgh),
               tolerance = 1e-06)
  expect_equal(22.53747 + SharedPhylogeneticInfo(treeBal8, treeBal8) - 13.75284 - 13.75284,
               DifferentPhylogeneticInfo(treeSym8, treeBal8), tolerance=1e-05)
  # (A verbatim duplicate of the treeAb.Cdefgh expectation above was removed.)
  expect_equal(-log2(315/10395),
               SharedPhylogeneticInfo(treeSym8, treeAbc.Defgh),
               tolerance = 1e-06)
  expect_equal(0, DifferentPhylogeneticInfo(treeSym8, treeSym8))
  # treeAcd.Befgh on the left but treeAbc.Defgh on the right: presumably
  # intentional, as both are single 3|5 splits -- TODO confirm.
  expect_equal(SplitwiseInfo(treeSym8) - SplitwiseInfo(treeAcd.Befgh),
               DifferentPhylogeneticInfo(treeSym8, treeAbc.Defgh),
               tolerance = 1e-06)
  # Test symmetry of small vs large splits
  expect_equal(SharedPhylogeneticInfo(treeSym8, treeAbc.Defgh),
               SharedPhylogeneticInfo(treeAbc.Defgh, treeSym8))
  expect_equal(-log2(225/10395), SharedPhylogeneticInfo(treeSym8, treeAbcd.Efgh))
  expect_equal(-log2(225/10395) - log2(945/10395),
               SharedPhylogeneticInfo(treeSym8, treeTwoSplits),
               tolerance = 1e-7)
  expect_equal(SplitSharedInformation(8, 4, 3),
               SharedPhylogeneticInfo(treeTwoSplits, treeAbc.Defgh),
               tolerance = 1e-7)
  expect_equal(SplitInformation(4, 4) + SplitInformation(3, 5) -
                 (2 * SplitSharedInformation(8, 4, 3)),
               SplitDifferentInformation(8, 4, 3),
               tolerance=1e-07)
  expect_equal(SharedPhylogeneticInfo(treeSym8, list(treeSym8, treeBal8)),
               SharedPhylogeneticInfo(list(treeSym8, treeBal8), treeSym8),
               tolerance = 1e-7)
  # Test tree too large to cache
  set.seed(101)
  t1 <- ape::rtree(101)
  t2 <- ape::rtree(101, rooted = FALSE)
  expect_equal(SharedPhylogeneticInfo(t1, t2),
               SharedPhylogeneticInfo(t2, t1))
})
test_that('MatchingSplitInfo() is correctly calculated', {
  # (Removed unused local helper BinaryToSplit, which was never called.)
  # Split-pair scores, then whole-tree values and symmetry checks.
  expect_equal(log2(3),
               MatchingSplitInfoSplits(
                 as.Splits(c(rep(TRUE, 2), rep(FALSE, 6))),
                 as.Splits(c(FALSE, FALSE, rep(TRUE, 2), rep(FALSE, 4)))),
               tolerance = 1e-7)
  # Complementary representation of the same split gives the same score.
  expect_equal(log2(3),
               MatchingSplitInfoSplits(
                 as.Splits(c(rep(FALSE, 6), rep(TRUE, 2))),
                 as.Splits(c(FALSE, FALSE, rep(TRUE, 2), rep(FALSE, 4)))),
               tolerance = 1e-7)
  expect_equal(log2(3), cpp_msi_distance(
    as.Splits(c(rep(TRUE, 2), rep(FALSE, 6))),
    as.Splits(c(FALSE, FALSE, rep(TRUE, 2), rep(FALSE, 4))),
    8L)$score, tolerance = 1e-7)
  expect_equal(log2(3), cpp_msi_distance(
    as.Splits(rep(c(FALSE, TRUE), each = 4L)),
    as.Splits(rep(c(FALSE, TRUE), 4L)),
    8L)$score, tolerance = 1e-7)
  expect_equal(SharedPhylogeneticInfo(treeSym8, treeSym8),
               MatchingSplitInfo(treeSym8, treeSym8), tolerance = 1e-05)
  expect_equal(0, MatchingSplitInfo(treeSym8, treeStar8))
  expect_equal(0, MatchingSplitInfo(treeStar8, treeStar8))
  expect_equal(MatchingSplitInfo(treeAb.Cdefgh, treeAbc.Defgh),
               MatchingSplitInfo(treeAbc.Defgh, treeAb.Cdefgh))
  expect_equal(MatchingSplitInfo(treeAbcd.Efgh, treeAb.Cdefgh),
               MatchingSplitInfo(treeAb.Cdefgh, treeAbcd.Efgh))
  expect_equal(-(TreeTools::Log2TreesMatchingSplit(2, 5) - Log2Unrooted.int(7)),
               MatchingSplitInfo(treeAb.Cdefgh, treeAbc.Defgh),
               tolerance = 1e-7)
  # More similar topologies should score higher.
  expect_true(MatchingSplitInfo(treeSym8, treeBal8) >
                MatchingSplitInfo(treeSym8, treeOpp8))
  expect_equal(0, MatchingSplitInfoDistance(treeSym8, treeSym8))
  NormalizationTest(MatchingSplitInfo)
})
test_that("Shared Phylogenetic Information is correctly estimated", {
  # Monte Carlo estimate with 1000 samples; accept deviations of up to two
  # standard errors from reference values.
  exp <- ExpectedVariation(treeSym8, treeAbc.Defgh, samples = 1000L)
  tol <- exp[, 'Std. Err.'] * 2
  # Expected values calculated with 100k samples
  expect_equal(1.175422, exp['SharedPhylogeneticInfo', 'Estimate'],
               tolerance = tol[1])
  expect_equal(3.099776, exp['MatchingSplitInfo', 'Estimate'],
               tolerance = tol[2])
  expect_equal(25.231023, exp['DifferentPhylogeneticInfo', 'Estimate'],
               tolerance = tol[3])
  expect_equal(21.382314, exp['MatchingSplitInfoDistance', 'Estimate'],
               tolerance = tol[4])
  # Internal consistency: sd == standard error * sqrt(n).
  expect_equal(exp[, 'sd'], exp[, 'Std. Err.'] * sqrt(exp[, 'n']))
})
test_that('Clustering information is correctly calculated', {
  # Low-level checks: mutual clustering information of a split pair,
  # computed from first principles as H(A) + H(B) - H(A, B).
  expect_equal(Entropy(c(3, 5) / 8) * 2 - Entropy(c(0, 0, 3, 5) / 8),
               cpp_mutual_clustering(
                 as.Splits(as.logical(c(1, 1, 1, 0, 0, 0, 0, 0))),
                 as.Splits(as.logical(c(1, 1, 1, 0, 0, 0, 0, 0))),
                 8L)$score,
               tolerance = 1e-7)
  expect_equal(Entropy(c(2, 6) / 8) * 2 - Entropy(c(0, 2, 2, 4) / 8),
               cpp_mutual_clustering(
                 as.Splits(as.logical(c(1, 1, 0, 0, 0, 0, 0, 0))),
                 as.Splits(as.logical(c(0, 0, 1, 1, 0, 0, 0, 0))),
                 8L)$score, tolerance = 1e-7)
  expect_equal(Entropy(c(5, 4) / 9) + Entropy(c(3, 6) / 9) -
                 Entropy(c(3, 2, 0, 4) / 9),
               cpp_mutual_clustering(
                 as.Splits(as.logical(c(1, 1, 1, 1, 1, 0, 0, 0, 0))),
                 as.Splits(as.logical(c(0, 0, 1, 1, 1, 0, 0, 0, 0))),
                 9L)$score,
               tolerance = 1e-7)
  # Orthogonal splits share no information beyond the joint-table term.
  expect_equal(Entropy(c(4, 4) / 8) * 2 - Entropy(c(2, 2, 2, 2) / 8),
               cpp_mutual_clustering(
                 as.Splits(as.logical(c(1, 1, 1, 1, 0, 0, 0, 0))),
                 as.Splits(as.logical(c(1, 0, 1, 0, 1, 0, 1, 0))),
                 8L)$score,
               tolerance = 1e-7)
  expect_equal(Entropy(c(4, 4) / 8) * 2 - Entropy(c(0, 0, 4, 4) / 8),
               cpp_mutual_clustering(
                 as.Splits(as.logical(c(1, 1, 1, 1, 0, 0, 0, 0))),
                 as.Splits(as.logical(c(1, 1, 1, 1, 0, 0, 0, 0))),
                 8L)$score,
               tolerance = 1e-7)
  # Self-comparison recovers a tree's own clustering entropy.
  expect_equal(ClusteringEntropy(treeSym8),
               MutualClusteringInfo(treeSym8, treeSym8),
               tolerance = 1e-05)
  expect_equal(8 * ClusteringEntropy(treeSym8), ClusteringInfo(treeSym8))
  expect_equal(0, MutualClusteringInfo(treeSym8, treeStar8))
  expect_equal(0, MutualClusteringInfo(treeStar8, treeStar8))
  # TreeDistance() presumably defaults to normalized clustering info
  # distance -- this expectation pins that equivalence.
  expect_equal(TreeDistance(treeSym8, treeBal8),
               ClusteringInfoDistance(treeSym8, treeBal8, normalize = TRUE))
  expect_equal(1, MutualClusteringInfo(treeSym8, treeSym8, normalize = TRUE),
               tolerance = 1e-7)
  expect_true(MutualClusteringInfo(treeSym8, treeBal8, normalize = pmin) >
                MutualClusteringInfo(treeSym8, treeBal8, normalize = pmax))
  # Identity: distance == total entropy - 2 * mutual information.
  expect_equal(ClusteringEntropy(treeSym8) + ClusteringEntropy(treeBal8) -
                 (2 * MutualClusteringInfo(treeBal8, treeSym8)),
               ClusteringInfoDistance(treeSym8, treeBal8), tolerance = 1e-05)
  expect_equal(MutualClusteringInfo(treeAb.Cdefgh, treeAbc.Defgh),
               MutualClusteringInfo(treeAbc.Defgh, treeAb.Cdefgh),
               tolerance = 1e-05)
  # Different resolution
  randomBif20 <- structure(list(
    edge = structure(c(21L, 21L, 22L, 23L, 24L, 25L, 26L, 27L, 28L, 29L, 30L,
                       31L, 32L, 32L, 31L, 30L, 29L, 33L, 34L, 34L, 33L, 28L,
                       35L, 36L, 36L, 35L, 27L, 26L, 37L, 37L, 25L, 38L, 38L,
                       39L, 39L, 24L, 23L, 22L, 1L, 22L, 23L, 24L, 25L, 26L,
                       27L, 28L, 29L, 30L, 31L, 32L, 2L, 14L, 7L, 10L, 33L, 34L,
                       4L, 6L, 8L, 35L, 36L, 13L, 16L, 18L, 17L, 37L, 5L, 15L,
                       38L, 11L, 39L, 12L, 19L, 9L, 3L, 20L),
                     .Dim = c(38L, 2L)), Nnode = 19L,
    tip.label = c("t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9", "t10",
                  "t11", "t12", "t13", "t14", "t15", "t16", "t17", "t18", "t19",
                  "t20"), br = NULL), class = "phylo")
  threeAwayPoly <- structure(
    list(edge = structure(c(21L, 22L, 23L, 24L, 25L, 26L, 27L, 28L, 29L, 29L,
                            28L, 27L, 26L, 30L, 30L, 30L, 26L, 31L, 31L, 25L,
                            32L, 33L, 33L, 32L, 25L, 25L, 24L, 34L, 34L, 34L,
                            23L, 22L, 21L, 22L, 23L, 24L, 25L, 26L, 27L, 28L,
                            29L, 2L, 8L, 14L, 10L, 30L, 13L, 16L, 18L, 31L, 4L,
                            6L, 32L, 33L, 15L, 20L, 5L, 7L, 17L, 34L, 11L, 12L,
                            19L, 9L, 3L, 1L), .Dim = c(33L, 2L)),
         tip.label = c("t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9",
                       "t10", "t11", "t12", "t13", "t14", "t15", "t16", "t17",
                       "t18", "t19", "t20"),
         Nnode = 14L), class = "phylo")
  expect_equal(
    MutualClusteringInfo(threeAwayPoly, randomBif20),
    MutualClusteringInfo(randomBif20, threeAwayPoly))
  match <- MutualClusteringInfo(randomBif20, threeAwayPoly, reportMatching = TRUE)
  expect_equal(c(NA, NA, 1, 2, NA, 3, 7, 11, 10, 4, 6, 9, 8, NA, 5, 12, NA),
               attr(match, 'matching'))
  # Multiple bins, calculated expectation
  library('TreeTools', quietly = TRUE, warn.conflicts = FALSE)
  b65m <- lapply(c(1, 2, 70), AddTip, tree = BalancedTree(64))
  self <- ClusteringEntropy(b65m)
  # Per-split entropy of the split labelled "72" -- presumably the split
  # perturbed by AddTip; TODO confirm node numbering against TreeTools.
  diff <- ClusteringEntropy(b65m[[1]], sum = FALSE)["72"]
  # Copied from C:
  # NOTE(review): this helper deliberately mirrors the C implementation
  # (hence `=` assignment and trailing semicolons); keep in sync with the
  # C source rather than restyling.
  ic_element <- function (nkK, nk, nK, n) {
    if (nkK && nk && nK) {
      if (nkK == nk && nkK == nK && nkK + nkK == n) return (nkK);
      numerator = nkK * n
      denominator = nk * nK
      if (numerator == denominator) return (0);
      nkK * (log2(numerator) - log2(denominator));
    } else 0;
  }
  expect_equal(diff,
               (ic_element(63, 63, 63, 65) +
                  ic_element(00, 63, 02, 65) +
                  ic_element(00, 02, 63, 65) +
                  ic_element(02, 02, 02, 65)) / 65,
               ignore_attr = TRUE)
  new <- (ic_element(65-3, 63, 63, 65) +
            ic_element(1, 63, 02, 65) +
            ic_element(1, 02, 63, 65) +
            ic_element(1, 02, 02, 65)) / 65
  other <- self[1] - diff[1] + new # Calc'd = 20.45412
  expect_equal(other, MutualClusteringInfo(b65m[[1]], b65m[[2]]),
               ignore_attr = TRUE)
  expectation <- matrix(other, 3, 3)
  diag(expectation) <- self
  expect_equal(expectation, MutualClusteringInfo(b65m), ignore_attr = TRUE)
  # Larger trees: self-comparison equals entropy; pectinate vs balanced is
  # strictly smaller.
  expect_equal(ClusteringEntropy(BalancedTree(64)),
               MutualClusteringInfo(BalancedTree(64), BalancedTree(64)))
  expect_equal(ClusteringEntropy(BalancedTree(644)),
               MutualClusteringInfo(BalancedTree(644), BalancedTree(644)))
  expect_gt(ClusteringEntropy(BalancedTree(64)),
            MutualClusteringInfo(BalancedTree(64), PectinateTree(64)))
  expect_gt(ClusteringEntropy(BalancedTree(644)),
            MutualClusteringInfo(BalancedTree(644), PectinateTree(644)))
  NormalizationTest(MutualClusteringInfo)
})
test_that("Matchings are correct", {
  # Different resolution: used to cause memory leak
  # NOTE(review): randomBif20 / threeAwayPoly are intentionally re-defined
  # here rather than shared with the previous test, keeping each test
  # self-contained.
  randomBif20 <- structure(list(
    edge = structure(c(21L, 21L, 22L, 23L, 24L, 25L, 26L, 27L, 28L, 29L, 30L,
                       31L, 32L, 32L, 31L, 30L, 29L, 33L, 34L, 34L, 33L, 28L,
                       35L, 36L, 36L, 35L, 27L, 26L, 37L, 37L, 25L, 38L, 38L,
                       39L, 39L, 24L, 23L, 22L, 1L, 22L, 23L, 24L, 25L, 26L,
                       27L, 28L, 29L, 30L, 31L, 32L, 2L, 14L, 7L, 10L, 33L, 34L,
                       4L, 6L, 8L, 35L, 36L, 13L, 16L, 18L, 17L, 37L, 5L, 15L,
                       38L, 11L, 39L, 12L, 19L, 9L, 3L, 20L),
                     .Dim = c(38L, 2L)), Nnode = 19L,
    tip.label = c("t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9", "t10",
                  "t11", "t12", "t13", "t14", "t15", "t16", "t17", "t18", "t19",
                  "t20"), br = NULL), class = "phylo")
  threeAwayPoly <- structure(
    list(edge = structure(c(21L, 22L, 23L, 24L, 25L, 26L, 27L, 28L, 29L, 29L,
                            28L, 27L, 26L, 30L, 30L, 30L, 26L, 31L, 31L, 25L,
                            32L, 33L, 33L, 32L, 25L, 25L, 24L, 34L, 34L, 34L,
                            23L, 22L, 21L, 22L, 23L, 24L, 25L, 26L, 27L, 28L,
                            29L, 2L, 8L, 14L, 10L, 30L, 13L, 16L, 18L, 31L, 4L,
                            6L, 32L, 33L, 15L, 20L, 5L, 7L, 17L, 34L, 11L, 12L,
                            19L, 9L, 3L, 1L), .Dim = c(33L, 2L)),
         tip.label = c("t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9",
                       "t10", "t11", "t12", "t13", "t14", "t15", "t16", "t17",
                       "t18", "t19", "t20"),
         Nnode = 14L), class = "phylo")
  expect_equal(
    MutualClusteringInfo(threeAwayPoly, randomBif20),
    MutualClusteringInfo(randomBif20, threeAwayPoly))
  # Three 11-leaf trees of differing resolution, with splits numbered
  # against t1's leaf order.
  t1 <- PectinateTree(letters[1:11])
  t2 <- ape::read.tree(text = '(a, (c, (b, (d, e, ((g, h, f), (k, (j, i)))))));')
  t3 <- CollapseNode(PectinateTree(c(letters[11], letters[1:10])), 16:19)
  s1 <- as.Splits(t1)
  s2 <- as.Splits(t2, t1)
  s3 <- as.Splits(t3, t1)
  n1 <- dim(s1)[1]
  n2 <- dim(s2)[1]
  n3 <- dim(s3)[1]
  n <- NTip(s1)
  # Plot
  # par(mfrow = 2:1, cex = 0.9, mar = rep(0,4))
  # JRF2T <- function(...) JaccardRobinsonFoulds(..., k = 2)
  # JRF2F <- function(...) JaccardRobinsonFoulds(..., k = 2, allowConflict = FALSE)
  # VisualizeMatching(MatchingSplitDistance, t1, t2, setPar=F)
  # LabelSplits(t2, setNames(1:6, names(s2)), adj = 2)
  # VisualizeMatching(MatchingSplitDistance, t2, t1, setPar=F)
  # LabelSplits(t1, setNames(1:8, names(s1)), adj = 2)
  # x12 / x21 list, for each split of s1 / s2, the acceptable match(es) in
  # the other tree (NA = unmatched).  Also checks symmetry of scores and
  # uniqueness of matched splits.
  Test <- function (CppFn, x12, x21, ...) {
    r12 <- CppFn(s1, s2, n, ...)
    r21 <- CppFn(s2, s1, n, ...)
    r13 <- CppFn(s1, s3, n, ...)
    r31 <- CppFn(s3, s1, n, ...)
    # Scores must not depend on argument order.
    expect_equal(r12$score, r21$score)
    expect_equal(r13$score, r31$score)
    m12 <- r12$matching
    m21 <- r21$matching
    expect_equal(n1, length(m12))
    # No split may be matched twice.
    expect_equal(length(m12[!is.na(m12)]), length(unique(m12[!is.na(m12)])))
    expect_equal(n2, length(m21))
    expect_equal(length(m21[!is.na(m21)]), length(unique(m21[!is.na(m21)])))
    expect_lte(dim(s1)[1] - dim(s2)[1], sum(is.na(m12)))
    m13 <- r13$matching
    m31 <- r31$matching
    expect_equal(n1, length(m13))
    expect_equal(length(m13[!is.na(m13)]), length(unique(m13[!is.na(m13)])))
    expect_equal(n3, length(m31))
    expect_equal(length(m31[!is.na(m31)]), length(unique(m31[!is.na(m31)])))
    expect_lte(dim(s1)[1] - dim(s3)[1], sum(is.na(m13)))
    for (i in seq_along(m12)) expect_true(m12[i] %in% x12[[i]])
    for (i in seq_along(m21)) expect_true(m21[i] %in% x21[[i]])
  }
  Test(TreeDist:::cpp_robinson_foulds_distance,
       list(NA, 2, NA, 3, NA, NA, 5, NA),
       list(NA, 2, 4, NA, 7, NA)
  )
  Test(TreeDist:::cpp_robinson_foulds_info,
       list(NA, 2, NA, 3, NA, NA, 5, NA),
       list(NA, 2, 4, NA, 7, NA)
  )
  Test(TreeDist:::cpp_matching_split_distance,
       list(1, 2, 4, 3, NA, NA, 5, 6),
       list(1, 2, 5, 4, 7, 6)
  )
  Test(TreeDist:::cpp_jaccard_similarity,
       list(NA, 2, 1, 3, 4, 6, 5, NA),
       list(3, 2, 4, 5, 7, 6),
       k = 2,
       allowConflict = TRUE)
  Test(TreeDist:::cpp_jaccard_similarity,
       list(NA, 2, 1, 3, NA, 6, 5, 4),
       list(3, 2, 4, 1, 7, 6),
       k = 2,
       allowConflict = FALSE)
  Test(TreeDist:::cpp_msi_distance,
       list(NA, 2, 1, 4, 3, 6, 5, NA),
       list(3, 2, c(4, 5), c(4, 5), c(6, 7), c(7, 6))
  )
  Test(TreeDist:::cpp_shared_phylo,
       list(NA, 2, 4, 3, 1, 6, 5, NA),
       list(5, 2, 4, 3, 7, 6)
  )
  Test(TreeDist:::cpp_mutual_clustering,
       list(4, 2, NA, 3, 6, NA, 5, 1),
       list(8, 2, 4, 5, 7, 1)
  )
})
test_that('Matching Split Distance is correctly calculated', {
  expect_equal(0L, MatchingSplitDistance(treeSym8, treeSym8))
  expect_equal(0L, MatchingSplitDistance(treeStar8, treeSym8))
  expect_equal(0L, MatchingSplitDistance(treeStar8, treeStar8))
  # A star tree has no splits: score 0 and empty matching attributes.
  match0 <- MatchingSplitDistance(treeStar8, treeStar8, reportMatching = TRUE)
  expect_equal(rep(0L, 4), c(match0, vapply(attributes(match0), length, 0)),
               ignore_attr = TRUE)
  expect_equal(1L, MatchingSplitDistance(treeAb.Cdefgh, treeAbc.Defgh))
  expect_equal(2L, MatchingSplitDistance(treeAb.Cdefgh, treeAbcd.Efgh))
  # Single splits on nine leaves, named after the tips on their TRUE side.
  splitAB <- as.Splits(c(rep(TRUE, 2), rep(FALSE, 7)))
  splitABC <- as.Splits(c(rep(TRUE, 3), rep(FALSE, 6)))
  splitAEF <- as.Splits(c(TRUE, rep(FALSE, 3), TRUE, TRUE, rep(FALSE, 3)))
  splitABCD <- as.Splits(c(rep(TRUE, 4), rep(FALSE, 5)))
  splitABCDE <- as.Splits(c(rep(TRUE, 5), rep(FALSE, 4)))
  splitAI <- as.Splits(c(TRUE, rep(FALSE, 7), TRUE))
  expect_equal(2L, MatchingSplitDistanceSplits(splitAB, splitAI))
  expect_equal(2L, MatchingSplitDistanceSplits(splitAB, splitABCD))
  expect_equal(3L, MatchingSplitDistanceSplits(splitAB, splitABCDE))
  expect_equal(4L, MatchingSplitDistanceSplits(splitABC, splitAEF))
  # Symmetry of the split-level distance.
  expect_equal(MatchingSplitDistanceSplits(splitABC, splitAEF),
               MatchingSplitDistanceSplits(splitAEF, splitABC))
  # Invariant to tree description order
  sq_pectinate <- ape::read.tree(text='((((((1, 2), 3), 4), 5), 6), (7, (8, (9, (10, 11)))));')
  shuffle1 <- ape::read.tree(text='(((((1, 5), 2), 6), (3, 4)), ((8, (7, 9)), (10, 11)));')
  shuffle2 <- ape::read.tree(text='(((8, (7, 9)), (10, 11)), ((((1, 5), 2), 6), (3, 4)));')
  expect_equal(MatchingSplitDistance(shuffle1, sq_pectinate),
               MatchingSplitDistance(sq_pectinate, shuffle1))
  # shuffle1 and shuffle2 describe the same topology in different orders.
  expect_equal(0L, MatchingSplitDistance(shuffle1, shuffle2))
  expect_equal(MatchingSplitDistance(shuffle1, sq_pectinate),
               MatchingSplitDistance(shuffle2, sq_pectinate))
})
test_that('NyeSimilarity is correctly calculated, and matches JRF', {
  listBalSym <- list(treeBal8, treeSym8)
  # Nye et al. similarity corresponds to the k = 1, conflict-permitting
  # case of the generalized Jaccard-Robinson-Foulds metric.
  JRF <- function (..., sim = TRUE)
    JaccardRobinsonFoulds(..., k = 1, similarity = sim, allowConflict = TRUE)
  expect_equal(5L, NyeSimilarity(as.Splits(treeSym8), treeSym8))
  expect_equal(1, NyeSimilarity(treeSym8, treeSym8, normalize = TRUE))
  expect_equal(1, JRF(treeSym8, treeSym8, normalize = TRUE))
  expect_equal(0, NyeSimilarity(treeSym8, treeStar8, normalize = FALSE))
  expect_equal(0, NyeSimilarity(treeSym8, treeStar8, normalize = TRUE))
  expect_equal(0, JRF(treeSym8, treeStar8, normalize = TRUE))
  expect_equal(0, NyeSimilarity(treeStar8, treeStar8, normalize = FALSE))
  # Star vs star: normalizing by zero splits yields NaN.
  expect_equal(NaN, NyeSimilarity(treeStar8, treeStar8, normalize = TRUE,
                                  normalizeMax = FALSE))
  expect_equal(c(3.8, 5), NyeSimilarity(treeSym8, listBalSym))
  expect_equal(2 / 3, NyeSimilarity(treeAb.Cdefgh, treeAbc.Defgh),
               tolerance = 1e-7)
  expect_equal(2 * (1 / 3), tolerance = 1e-7,
               NyeSimilarity(treeAb.Cdefgh, treeAbc.Defgh, similarity = FALSE))
  # normalize may be FALSE, a numeric divisor, or TRUE (mean split count).
  expect_equal(1L, NyeSimilarity(treeSym8, treeAbcd.Efgh, normalize = FALSE))
  expect_equal(1L / 5L, NyeSimilarity(treeSym8, treeAbcd.Efgh, normalize = 5L))
  expect_equal(0.2, JRF(treeSym8, treeAbcd.Efgh, normalize = 5L * 2L))
  expect_equal(1/3, NyeSimilarity(treeSym8, treeAbcd.Efgh, normalize = TRUE))
  expect_equal(1/3, JRF(treeSym8, treeAbcd.Efgh, normalize = TRUE))
  expect_equal(2/3, NyeSimilarity(treeSym8, treeAbcd.Efgh, similarity = FALSE,
                                  normalize = TRUE))
  expect_equal(2/3, JRF(treeSym8, treeAbcd.Efgh, sim = FALSE, normalize = TRUE))
  # normalize = TRUE divides by the mean number of splits in the two trees.
  expect_equal(1L / ((5L + 1L) / 2L),
               NyeSimilarity(treeSym8, treeAbcd.Efgh, normalize = TRUE))
  expect_true(NyeSimilarity(treeSym8, treeBal8) >
                NyeSimilarity(treeSym8, treeOpp8))
  NormalizationTest(NyeSimilarity)
})
test_that('Jaccard RF extremes tend to equivalent functions', {
  pair <- list(treeBal8, treeSym8)
  # k = 1 with conflicts allowed reduces to twice the Nye et al. similarity.
  expect_equal(JaccardRobinsonFoulds(treeSym8, pair,
                                     similarity = TRUE, k = 1L,
                                     allowConflict = TRUE),
               NyeSimilarity(treeSym8, pair) * 2L)
  # As k grows without bound, the dissimilarity converges on Robinson-Foulds.
  expect_equal(JaccardRobinsonFoulds(treeSym8, pair,
                                     similarity = FALSE, k = Inf),
               RobinsonFoulds(treeSym8, pair))
  expect_equal(JaccardRobinsonFoulds(treeSym8, pair,
                                     similarity = FALSE, k = 999999),
               RobinsonFoulds(treeSym8, pair))
})
test_that('Jaccard RF is correctly calculated', {
  expect_equal(5L * 2L, JaccardRobinsonFoulds(treeSym8, treeSym8,
                                              k = 2, similarity = TRUE))
  expect_equal(c(3.32, 5) * 2L,
               JaccardRobinsonFoulds(treeSym8, list(treeBal8, treeSym8),
                                     similarity = TRUE, k = 2))
  expect_equal(2 * 2, 3 * JaccardRobinsonFoulds(treeAb.Cdefgh, treeAbc.Defgh,
                                                similarity = TRUE),
               tolerance = 1e-7)
  expect_equal(1, JaccardRobinsonFoulds(treeSym8, treeSym8,
                                        similarity = TRUE, normalize = TRUE))
  expect_equal(0, JaccardRobinsonFoulds(treeSym8, treeSym8,
                                        similarity = FALSE, normalize = TRUE))
  expect_equal(1L * 2L,
               JaccardRobinsonFoulds(treeSym8, treeAbcd.Efgh, similarity = TRUE,
                                     normalize = FALSE, k = 2))
  expect_equal(1L * 2L / 6L,
               JaccardRobinsonFoulds(treeSym8, treeAbcd.Efgh, similarity = TRUE,
                                     normalize = TRUE, k = 2))
  # More similar trees score a smaller distance.
  expect_lt(JaccardRobinsonFoulds(treeSym8, treeBal8, k = 2),
            JaccardRobinsonFoulds(treeSym8, treeOpp8, k = 2))
  # Larger k penalizes partial overlap more heavily.
  expect_lt(JaccardRobinsonFoulds(treeSym8, treeBal8, k = 3L),
            JaccardRobinsonFoulds(treeSym8, treeBal8, k = 4L))
  # Forbidding conflicting matches can only increase the distance.
  expect_lt(JaccardRobinsonFoulds(treeCat8, treeTac8, allowConflict = TRUE),
            JaccardRobinsonFoulds(treeCat8, treeTac8, allowConflict = FALSE))
  # Larger trees: identical trees score zero; different trees do not.
  expect_equal(0, JaccardRobinsonFoulds(BalancedTree(64), BalancedTree(64)))
  expect_lt(0, JaccardRobinsonFoulds(BalancedTree(64), PectinateTree(64)))
  expect_equal(0, JaccardRobinsonFoulds(BalancedTree(264), BalancedTree(264)))
  expect_lt(0, JaccardRobinsonFoulds(BalancedTree(264), PectinateTree(264)))
})
test_that('RobinsonFoulds() is correctly calculated', {
  # Cross-check against phangorn's independent RF implementation.
  RF <- function (tree1, tree2) {
    suppressMessages(phangorn::RF.dist(tree1, tree2))
  }
  RFTest <- function (tree1, tree2) {
    expect_equal(RF(tree1, tree2), RobinsonFoulds(tree1, tree2))
  }
  RFTest(treeSym8, treeSym8)
  RFTest(treeBal8, treeSym8)
  expect_equal(c(4, 0), RobinsonFoulds(treeSym8, list(treeBal8, treeSym8)))
  RFTest(treeAb.Cdefgh, treeAbc.Defgh)
  expect_equal(0, RobinsonFoulds(treeSym8, treeSym8, normalize = TRUE))
  expect_equal(4L / 6L,
               RobinsonFoulds(treeSym8, treeAbcd.Efgh, normalize = TRUE))
  RFTest(treeSym8, treeOpp8)
  # Exercise leaf counts straddling powers of two -- presumably boundaries
  # of the underlying bit-mask word representation; TODO confirm.
  RFNtipTest <- function (nTip) {
    backLeaves <- paste0('t', rev(seq_len(nTip)))
    RFTest(TreeTools::PectinateTree(backLeaves),
           TreeTools::BalancedTree(nTip))
  }
  RFNtipTest(10)
  RFNtipTest(32)
  RFNtipTest(50)
  RFNtipTest(64)
  RFNtipTest(67)
  RFNtipTest(128)
  RFNtipTest(1024)
  RFNtipTest(1027)
  NormalizationTest(RobinsonFoulds, similarity = TRUE)
  #TODO we may wish to revise this test once we implement diag = TRUE to
  #allow similarities to be calculated on the diagonal.
  expect_equal(numeric(0), RobinsonFoulds(treeSym8, normalize = TRUE))
})
test_that('Robinson Foulds Info is correctly calculated', {
  # Information-weighted RF; similarity + distance should sum to total
  # splitwise information.
  expect_equal(22.53747 * 2L, tolerance = 1e-05,
               InfoRobinsonFoulds(treeSym8, treeSym8, similarity = TRUE,
                                  normalize = FALSE))
  expect_equal(0, tolerance = 1e-05,
               InfoRobinsonFoulds(treeSym8, treeSym8, normalize = TRUE))
  expect_equal(1, tolerance = 1e-05,
               InfoRobinsonFoulds(treeSym8, treeSym8, similarity = TRUE,
                                  normalize = TRUE))
  expect_equal(24.9, tolerance = 0.01,
               InfoRobinsonFoulds(treeSym8, treeBal8, similarity = TRUE))
  expect_equal(SplitwiseInfo(treeSym8) + SplitwiseInfo(treeBal8) -
                 InfoRobinsonFoulds(treeSym8, treeBal8, similarity = FALSE),
               InfoRobinsonFoulds(treeSym8, treeBal8, similarity = TRUE))
  expect_equal(-log2(945/10395) * 2,
               InfoRobinsonFoulds(treeSym8, treeAb.Cdefgh, similarity = TRUE))
  # (A verbatim duplicate of the preceding expectation was removed.)
  expect_equal(-log2(315/10395) * 2,
               InfoRobinsonFoulds(treeSym8, treeAbc.Defgh, similarity = TRUE))
  # Test symmetry of small vs large splits
  expect_equal(InfoRobinsonFoulds(treeSym8, treeAbc.Defgh),
               InfoRobinsonFoulds(treeAbc.Defgh, treeSym8))
  expect_equal(-log2(225/10395) * 2,
               InfoRobinsonFoulds(treeSym8, treeAbcd.Efgh, similarity = TRUE))
  expect_equal((-log2(225/10395) - log2(945/10395)) * 2,
               InfoRobinsonFoulds(treeSym8, treeTwoSplits, similarity = TRUE))
  # RobinsonFouldsInfo is the aliased spelling of InfoRobinsonFoulds.
  expect_equal(InfoRobinsonFoulds(treeSym8, list(treeSym8, treeBal8)),
               RobinsonFouldsInfo(list(treeSym8, treeBal8), treeSym8))
  # Check that large trees work
  expect_equal(0, InfoRobinsonFoulds(BalancedTree(64), BalancedTree(64)))
  expect_lt(0, InfoRobinsonFoulds(BalancedTree(64), PectinateTree(64)))
  expect_equal(0, InfoRobinsonFoulds(BalancedTree(129), BalancedTree(129)))
  expect_lt(0, InfoRobinsonFoulds(BalancedTree(129), PectinateTree(129)))
})
test_that('Kendall-Colijn distance is correctly calculated', {
  # Expected values calculated using treespace::treeDist(treeSym8, treeBal8)
  expect_equal(2.828427, KendallColijn(treeSym8, treeBal8), tolerance=1e-06)
  expect_equal(2.828427, KendallColijn(treeCat8, treeBal8), tolerance=1e-06)
  expect_equal(7.211103, KendallColijn(treeSym8, treeOpp8), tolerance=1e-06)
  expect_equal(matrix(c(0L, 8L), nrow=2, ncol=2, byrow=TRUE),
               KendallColijn(list(treeSym8, treeCat8), list(treeCat8, treeTac8)), tolerance=1e-06)
  expect_equal(8L, KendallColijn(treeCat8, treeTac8), tolerance=1e-06)
  # treeCat8 has the same rooted shape as treeSym8, so distance is zero.
  expect_equal(0L, KendallColijn(treeSym8, treeCat8), tolerance=1e-06)
  expect_equal(8L, KendallColijn(treeSym8, treeTac8), tolerance=1e-06)
  # (A verbatim duplicate of the treeCat8 vs treeTac8 expectation was
  # removed here.)
  expect_equal(5.291503, KendallColijn(treeSym8, treeAb.Cdefgh), tolerance=1e-06)
  expect_equal(4.358899, KendallColijn(treeSym8, treeAbc.Defgh), tolerance=1e-06)
  expect_equal(5L, KendallColijn(treeSym8, treeAcd.Befgh), tolerance=1e-06)
  expect_equal(3.464102, KendallColijn(treeSym8, treeAbcd.Efgh), tolerance=1e-06)
  expect_equal(3L, KendallColijn(treeSym8, treeTwoSplits), tolerance=1e-06)
  expect_equal(2.828427, KendallColijn(treeAbc.Defgh, treeTwoSplits), tolerance=1e-06)
})
test_that('Multiple comparisons are correctly ordered', {
  # Pairwise distances over a multiPhylo must match phangorn's ordering.
  nTrees <- 6L
  nTip <- 16L
  set.seed(0)
  trees <- lapply(rep(nTip, nTrees), ape::rtree, br=NULL)
  trees[[1]] <- TreeTools::BalancedTree(nTip)
  trees[[nTrees - 1L]] <- TreeTools::PectinateTree(nTip)
  class(trees) <- 'multiPhylo'
  expect_equal(phangorn::RF.dist(trees), RobinsonFoulds(trees),
               ignore_attr = TRUE)
  # Test CompareAll
  expect_equal(as.matrix(phangorn::RF.dist(trees)),
               as.matrix(CompareAll(trees, phangorn::RF.dist, 0L)),
               ignore_attr = TRUE)
  # CompareAll with a scalar-returning wrapper must match the corresponding
  # component of the vector-returning NNIDist.
  NNILoose <- function (x, y) NNIDist(x, y)['loose_upper']
  expect_equal(CompareAll(trees, NNILoose),
               CompareAll(trees, NNIDist)$loose_upper,
               ignore_attr = TRUE)
})
test_that('Normalization occurs as documented', {
  # For each metric family, normalize = TRUE should divide by the combined
  # information content of the two trees, and 2 * shared + distance should
  # equal that total.
  library('TreeTools')
  tree1 <- BalancedTree(8)
  tree2 <- CollapseNode(PectinateTree(8), 12:13)
  info1 <- SplitwiseInfo(tree1) # 19.367
  info2 <- SplitwiseInfo(tree2) # 11.963
  ent1 <- ClusteringEntropy(tree1) # 4.245
  ent2 <- ClusteringEntropy(tree2) # 2.577
  # Phylogenetic information
  spi <- SharedPhylogeneticInfo(tree1, tree2, normalize = FALSE) # 9.64
  dpi <- DifferentPhylogeneticInfo(tree1, tree2, normalize = FALSE) # 12.04
  expect_equal(spi + spi + dpi, info1 + info2)
  expect_equal(SharedPhylogeneticInfo(tree1, tree2, normalize = TRUE),
               (spi + spi) / (info1 + info2))
  expect_equal(PhylogeneticInfoDistance(tree1, tree2, normalize = TRUE),
               dpi / (info1 + info2))
  # Matching split information
  mmsi <- MatchingSplitInfo(tree1, tree2, normalize = FALSE)
  msid <- MatchingSplitInfoDistance(tree1, tree2, normalize = FALSE)
  expect_equal(mmsi + mmsi + msid, info1 + info2)
  expect_equal(MatchingSplitInfo(tree1, tree2, normalize = TRUE),
               (mmsi + mmsi) / (info1 + info2))
  expect_equal(MatchingSplitInfoDistance(tree1, tree2, normalize = TRUE),
               msid / (info1 + info2))
  # Clustering information
  mci <- MutualClusteringInfo(tree1, tree2, normalize = FALSE)
  cid <- ClusteringInfoDistance(tree1, tree2, normalize = FALSE)
  expect_equal(mci + mci + cid, ent1 + ent2)
  expect_equal(MutualClusteringInfo(tree1, tree2, normalize = TRUE),
               (mci + mci) / (ent1 + ent2))
  expect_equal(ClusteringInfoDistance(tree1, tree2, normalize = TRUE),
               cid / (ent1 + ent2))
})
test_that("Independent of root position", {
  # Unrooted metrics must give identical results wherever the tree is
  # rooted.  trees 1/2 and 3/4 are the same unrooted topologies.
  library('TreeTools')
  bal8 <- BalancedTree(8)
  pec8 <- PectinateTree(8)
  trees <- lapply(list(bal8, RootTree(bal8, 't4'),
                       pec8, RootTree(pec8, 't4')), UnrootTree)
  # Exclude KendallColijn (last in methodsToTest), which requires rooted trees.
  lapply(methodsToTest[-length(methodsToTest)], function (Method) {
    dists <- as.matrix(Method(trees))
    expect_equal(dists[1, 1], dists[1, 2])
    expect_equal(dists[1, 3], dists[1, 4])
    expect_equal(dists[1, 3], dists[2, 4])
    expect_equal(dists[2, 3], dists[2, 4])
    expect_equal(dists[3, 3], dists[3, 4])
  })
  # Self- and equivalent-tree comparisons should give the identity score.
  Test <- function(Method, score = 0L, ...) {
    expect_equal(score, Method(trees[[1]], trees[[1]], ...))
    expect_equal(score, Method(trees[[1]], trees[[2]], ...))
    expect_equal(score, Method(trees[[3]], trees[[3]], ...))
  }
  Test(MASTSize, 8L, rooted = FALSE)
  # Tested further for NNIDist in test-tree_distance_nni.R
  Test(NNIDist, c(lower = 0, best_lower = 0, tight_upper = 0, best_upper = 0,
                  loose_upper = 0, fack_upper = 0, li_upper = 0))
  Test(SPRDist, c(spr = 0))
})
| /tests/testthat/test-tree_distance.R | no_license | pyspider/TreeDist | R | false | false | 39,023 | r | # Labels in different order to confound as.Splits
treeSym8 <- ape::read.tree(text='((e, (f, (g, h))), (((a, b), c), d));')
treeBal8 <- ape::read.tree(text='(((e, f), (g, h)), ((a, b), (c, d)));')
treeOpp8 <- ape::read.tree(text='(((a, f), (c, h)), ((g, b), (e, d)));')
treesSBO8 <- structure(list(treeSym8, treeBal8, treeOpp8),
class = 'multiPhylo')
treesSSBB8 <- structure(list(treeSym8, treeSym8, treeBal8, treeBal8),
class = 'multiPhylo')
treeCat8 <- ape::read.tree(text='((((h, g), f), e), (d, (c, (b, a))));')
treeTac8 <- ape::read.tree(text='((((e, c), g), a), (h, (b, (d, f))));')
treeStar8 <- ape::read.tree(text='(e, c, g, h, b, a, d, f);')
treeAb.Cdefgh <- ape::read.tree(text='((a, b), (c, d, e, f, g, h));')
treeAbc.Defgh <- ape::read.tree(text='((a, b, c), (d, e, f, g, h));')
treeAcd.Befgh <- ape::read.tree(text='((a, c, d), (b, e, f, g, h));')
treeAbcd.Efgh <- ape::read.tree(text='((a, b, c, d), (e, f, g, h));')
treeTwoSplits <- ape::read.tree(text="(((a, b), c, d), (e, f, g, h));")
testTrees <- c(treesSBO8, treeCat8, treeTac8, treeStar8, treeAb.Cdefgh,
treeAbc.Defgh, treeAbcd.Efgh, treeAcd.Befgh, treeTwoSplits)
test_that("Split compatibility is correctly established", {
  # Two splits are compatible when one side of one nests within a side of
  # the other; compatibility is invariant to complementing either split.
  split34 <- as.logical(c(0, 0, 1, 1, 0))
  expect_true(SplitsCompatible(split34, split34))
  expect_true(SplitsCompatible(split34, !split34))
  expect_true(SplitsCompatible(split34, as.logical(c(1, 0, 1, 1, 0))))
  expect_true(SplitsCompatible(!split34, as.logical(c(1, 0, 1, 1, 0))))
  # {3, 4} and {1, 2, 4} overlap without nesting: incompatible.
  expect_false(SplitsCompatible(split34, as.logical(c(1, 1, 0, 1, 0))))
})
# Tree distance / similarity methods sharing the signature Method(tree1,
# tree2, ...); the generic tests below iterate over this list.  Order
# matters: KendallColijn must remain last so it can be excluded by position.
methodsToTest <- list(
  SharedPhylogeneticInfo,
  DifferentPhylogeneticInfo,
  MatchingSplitInfo,
  MatchingSplitInfoDistance,
  MutualClusteringInfo,
  ClusteringInfoDistance,
  NyeSimilarity,
  JaccardRobinsonFoulds,
  MatchingSplitDistance,
  RobinsonFoulds,
  InfoRobinsonFoulds,
  KendallColijn # List last: requires rooted trees.
)
# Check that FUNC normalizes identical-tree comparisons to exactly 1.
# treesSSBB8 contains each of treeSym8 and treeBal8 twice; entries 1 and 6
# of the pairwise result are the (sym, sym) and (bal, bal) comparisons.
NormalizationTest <- function (FUNC, ...) {
  normalized <- FUNC(treesSSBB8, normalize = TRUE, ...)
  expect_equal(c(1L, 1L), normalized[c(1, 6)], tolerance = 1e-7)
}
test_that('Bad labels cause error', {
  # 'D' matches no label in treeSym8, so every method must refuse the pair.
  treeBadLabel8 <- ape::read.tree(text='((a, b, c, D), (e, f, g, h));')
  for (Method in methodsToTest) {
    expect_error(Method(treeSym8, treeBadLabel8))
  }
})
test_that('Size mismatch causes error', {
  # Comparing trees with different numbers of leaves must fail loudly,
  # in either argument order.
  treeSym7 <- ape::read.tree(text='((e, (f, g)), (((a, b), c), d));')
  splits7 <- as.Splits(treeSym7)
  splits8 <- as.Splits(treeSym8)
  for (Method in methodsToTest) {
    expect_error(Method(treeSym8, treeSym7))
    expect_error(Method(treeSym7, treeSym8))
  }
  expect_error(MeilaVariationOfInformation(splits7, splits8))
  # The low-level C++ routines must also reject mismatched split sets.
  ExpectCppError <- function (CppFunc) {
    expect_error(CppFunc(splits8, as.Splits(BalancedTree(9)), 8))
  }
  ExpectCppError(cpp_robinson_foulds_distance)
  ExpectCppError(cpp_robinson_foulds_info)
  ExpectCppError(cpp_matching_split_distance)
  ExpectCppError(cpp_jaccard_similarity)
  ExpectCppError(cpp_msi_distance)
  ExpectCppError(cpp_mutual_clustering)
  ExpectCppError(cpp_shared_phylo)
})
test_that('Metrics handle polytomies', {
  # A fully unresolved star tree contains no splits, so every method should
  # report zero shared information / distance without erroring.
  polytomy8 <- ape::read.tree(text='(a, b, c, d, e, f, g, h);')
  for (Method in list(SharedPhylogeneticInfo, MutualClusteringInfo,
                      MatchingSplitDistance, NyeSimilarity)) {
    expect_equal(0, Method(treeSym8, polytomy8))
  }
})
# Func <- ClusteringInfoDistance  # Uncomment to debug the tests below interactively
test_that('Output dimensions are correct', {
  # Each method should accept a single tree or a named list of trees in
  # either argument, and all invocation styles should yield the same
  # matrix of pairwise scores with dimnames taken from the inputs.
  list1 <- list(sym = treeSym8, bal = treeBal8)
  list2 <- list(sym = treeSym8, abc = treeAbc.Defgh, abcd = treeAbcd.Efgh)
  dimNames <- list(c('sym', 'bal'), c('sym', 'abc', 'abcd'))
  Test <- function (Func) {
    # Reference matrix built score by score; matrix() fills column-major,
    # so each consecutive pair of scores forms one column.
    allPhylo <- matrix(
      c(Func(treeSym8, treeSym8), Func(treeBal8, treeSym8),
        Func(treeSym8, treeAbc.Defgh), Func(treeBal8, treeAbc.Defgh),
        Func(treeSym8, treeAbcd.Efgh), Func(treeBal8, treeAbcd.Efgh)),
      2L, 3L, dimnames = dimNames)
    # Single tree vs list: each call supplies one row
    phylo1 <- matrix(c(Func(treeSym8, list2), Func(treeBal8, list2)),
                     byrow = TRUE, 2L, 3L, dimnames = dimNames)
    # List vs single tree: each call supplies one column
    phylo2 <- matrix(c(Func(list1, treeSym8), Func(list1, treeAbc.Defgh),
                       Func(list1, treeAbcd.Efgh)), 2L, 3L, dimnames = dimNames)
    # List vs list: the full matrix in a single call
    noPhylo <- Func(list1, list2)
    expect_equal(allPhylo, phylo1)
    expect_equal(allPhylo, phylo2)
    expect_equal(allPhylo, noPhylo)
  }
  lapply(methodsToTest, Test)
})
test_that('Robinson Foulds Distance is correctly calculated', {
  # Check against phangorn's independent implementation, and confirm that
  # RobinsonFouldsMatching() equals RobinsonFoulds(reportMatching = TRUE)
  # once the numeric pair scores are converted to logical flags.
  RFTest <- function (t1, t2) {
    expect_equal(suppressMessages(phangorn::RF.dist(t1, t2)),
                 RobinsonFoulds(t1, t2))
    expected <- RobinsonFoulds(t1, t2, reportMatching = TRUE, similarity = TRUE)
    attr(expected, 'pairScores') <- attr(expected, 'pairScores') == 0L
    expect_equal(expected, RobinsonFouldsMatching(t1, t2))
  }
  RFTest(treeSym8, treeSym8)
  RFTest(treeSym8, treeStar8)
  RFTest(treeStar8, treeStar8)
  RFTest(treeAb.Cdefgh, treeAbc.Defgh)
  RFTest(treeAb.Cdefgh, treeAbcd.Efgh)
  # at 2020-10, RF uses Day algorithm if tree2 = null; old algo if tree2 = tree1.
  expect_equal(RobinsonFoulds(testTrees, testTrees),
               as.matrix(RobinsonFoulds(testTrees)),
               ignore_attr = TRUE)
  # Invariant to tree description order
  sq_pectinate <- ape::read.tree(text='((((((1, 2), 3), 4), 5), 6), (7, (8, (9, (10, 11)))));')
  shuffle1 <- ape::read.tree(text='(((((1, 5), 2), 6), (3, 4)), ((8, (7, 9)), (10, 11)));')
  shuffle2 <- ape::read.tree(text='(((8, (7, 9)), (10, 11)), ((((1, 5), 2), 6), (3, 4)));')
  RFTest(shuffle1, sq_pectinate)
  RFTest(sq_pectinate, shuffle1)
  RFTest(shuffle1, shuffle2)
  RFTest(shuffle1, sq_pectinate) # NOTE(review): duplicates the call three lines up
  RFTest(shuffle2, sq_pectinate)
})
test_that('Shared Phylogenetic Info is correctly calculated', {
  # Hand-calculated scores for single pairs of splits on eight leaves.
  # 10395 = (2 * 8 - 5)!!, the number of unrooted binary eight-leaf trees,
  # underlies the probabilities from which information content is derived.
  expect_equal(5.529821, tolerance = 1e-7,
               cpp_shared_phylo(
                 as.Splits(as.logical(c(1, 1, 1, 1, 0, 0, 0, 0))),
                 as.Splits(as.logical(c(1, 1, 1, 1, 0, 0, 0, 0))),
                 8L)$score)
  expect_equal(0.2895066, tolerance = 1e-7,
               cpp_shared_phylo(
                 as.Splits(as.logical(c(1, 1, 0, 0, 0, 0, 0, 0))),
                 as.Splits(as.logical(c(0, 0, 1, 1, 0, 0, 0, 0))),
                 8L)$score)
  expect_equal(1.137504, tolerance = 1e-6,
               cpp_shared_phylo(
                 as.Splits(as.logical(c(1, 1, 0, 0, 0, 0, 0, 0))),
                 as.Splits(as.logical(c(1, 1, 1, 1, 0, 0, 0, 0))),
                 8L)$score)
  expect_equal(3.45943, tolerance = 1e-6,
               cpp_shared_phylo(
                 as.Splits(as.logical(c(1, 1, 0, 0, 0, 0, 0, 0))),
                 as.Splits(as.logical(c(1, 1, 0, 0, 0, 0, 0, 0))),
                 8L)$score)
  # Whole-tree scores: self-comparison, normalization, and star trees
  expect_equal(22.53747, tolerance = 1e-05,
               SharedPhylogeneticInfo(treeSym8, treeSym8, normalize = FALSE))
  expect_equal(1, tolerance = 1e-05,
               SharedPhylogeneticInfo(treeSym8, treeSym8, normalize = TRUE))
  expect_equal(0,
               SharedPhylogeneticInfo(treeSym8, treeStar8, normalize = TRUE))
  expect_equal(0,
               SharedPhylogeneticInfo(treeStar8, treeStar8, normalize = FALSE))
  expect_equal(NaN, # Division by zero: star trees contain no information
               SharedPhylogeneticInfo(treeStar8, treeStar8, normalize = TRUE))
  expect_equal(13.75284, SharedPhylogeneticInfo(treeSym8, treeBal8), tolerance=1e-05)
  # DifferentPhylogeneticInfo should be symmetric, zero for identical trees,
  # and equal to the total information minus twice the shared information.
  expect_equal(DifferentPhylogeneticInfo(treeSym8, treeAcd.Befgh),
               DifferentPhylogeneticInfo(treeAcd.Befgh, treeSym8), tolerance=1e-05)
  expect_equal(0, DifferentPhylogeneticInfo(treeSym8, treeSym8, normalize = TRUE))
  infoSymBal <- SplitwiseInfo(treeSym8) + SplitwiseInfo(treeBal8)
  expect_equal(infoSymBal - 13.75284 - 13.75284, tolerance = 1e-05,
               DifferentPhylogeneticInfo(treeSym8, treeBal8, normalize = TRUE) * infoSymBal)
  expect_equal(22.53747 + SharedPhylogeneticInfo(treeAcd.Befgh, treeAcd.Befgh) -
                 (2 * SharedPhylogeneticInfo(treeSym8, treeAcd.Befgh)),
               DifferentPhylogeneticInfo(treeSym8, treeAcd.Befgh),
               tolerance=1e-06)
  # 945 of the 10395 eight-leaf trees are consistent with a 2|6 split
  expect_equal(-log2(945/10395),
               SharedPhylogeneticInfo(treeSym8, treeAb.Cdefgh),
               tolerance = 1e-06)
  expect_equal(22.53747 + SharedPhylogeneticInfo(treeBal8, treeBal8) - 13.75284 - 13.75284,
               DifferentPhylogeneticInfo(treeSym8, treeBal8), tolerance=1e-05)
  # NOTE(review): duplicates the -log2(945/10395) expectation above
  expect_equal(-log2(945/10395),
               SharedPhylogeneticInfo(treeSym8, treeAb.Cdefgh),
               tolerance = 1e-06)
  expect_equal(-log2(315/10395),
               SharedPhylogeneticInfo(treeSym8, treeAbc.Defgh),
               tolerance = 1e-06)
  expect_equal(0, DifferentPhylogeneticInfo(treeSym8, treeSym8))
  expect_equal(SplitwiseInfo(treeSym8) - SplitwiseInfo(treeAcd.Befgh),
               DifferentPhylogeneticInfo(treeSym8, treeAbc.Defgh),
               tolerance = 1e-06)
  # Test symmetry of small vs large splits
  expect_equal(SharedPhylogeneticInfo(treeSym8, treeAbc.Defgh),
               SharedPhylogeneticInfo(treeAbc.Defgh, treeSym8))
  expect_equal(-log2(225/10395), SharedPhylogeneticInfo(treeSym8, treeAbcd.Efgh))
  expect_equal(-log2(225/10395) - log2(945/10395),
               SharedPhylogeneticInfo(treeSym8, treeTwoSplits),
               tolerance = 1e-7)
  # Agreement with the analytic split-information functions
  expect_equal(SplitSharedInformation(8, 4, 3),
               SharedPhylogeneticInfo(treeTwoSplits, treeAbc.Defgh),
               tolerance = 1e-7)
  expect_equal(SplitInformation(4, 4) + SplitInformation (3, 5) -
                 (2 * SplitSharedInformation(8, 4, 3)),
               SplitDifferentInformation(8, 4, 3),
               tolerance=1e-07)
  # Tree-vs-list should equal list-vs-tree
  expect_equal(SharedPhylogeneticInfo(treeSym8, list(treeSym8, treeBal8)),
               SharedPhylogeneticInfo(list(treeSym8, treeBal8), treeSym8),
               tolerance = 1e-7)
  # Test tree too large to cache: symmetry must still hold
  set.seed(101)
  t1 <- ape::rtree(101)
  t2 <- ape::rtree(101, rooted = FALSE)
  expect_equal(SharedPhylogeneticInfo(t1, t2),
               SharedPhylogeneticInfo(t2, t1))
})
test_that('MatchingSplitInfo() is correctly calculated', {
  # (Fix: removed `BinaryToSplit`, a local helper that was defined but
  # never used anywhere in this test.)
  # Hand-calculated score (log2(3)) for a pair of two-leaf splits on eight
  # leaves; reversing one split should not change the score.
  expect_equal(log2(3),
               MatchingSplitInfoSplits(
                 as.Splits(c(rep(TRUE, 2), rep(FALSE, 6))),
                 as.Splits(c(FALSE, FALSE, rep(TRUE, 2), rep(FALSE, 4)))),
               tolerance = 1e-7)
  expect_equal(log2(3),
               MatchingSplitInfoSplits(
                 as.Splits(c(rep(FALSE, 6), rep(TRUE, 2))),
                 as.Splits(c(FALSE, FALSE, rep(TRUE, 2), rep(FALSE, 4)))),
               tolerance = 1e-7)
  # The C++ backend should agree with the R interface
  expect_equal(log2(3), cpp_msi_distance(
    as.Splits(c(rep(TRUE, 2), rep(FALSE, 6))),
    as.Splits(c(FALSE, FALSE, rep(TRUE, 2), rep(FALSE, 4))),
    8L)$score, tolerance = 1e-7)
  expect_equal(log2(3), cpp_msi_distance(
    as.Splits(rep(c(FALSE, TRUE), each = 4L)),
    as.Splits(rep(c(FALSE, TRUE), 4L)),
    8L)$score, tolerance = 1e-7)
  # Self-comparison recovers the shared phylogenetic information
  expect_equal(SharedPhylogeneticInfo(treeSym8, treeSym8),
               MatchingSplitInfo(treeSym8, treeSym8), tolerance = 1e-05)
  # Star trees contain no splits, so nothing can be matched
  expect_equal(0, MatchingSplitInfo(treeSym8, treeStar8))
  expect_equal(0, MatchingSplitInfo(treeStar8, treeStar8))
  # The measure should be symmetric in its arguments
  expect_equal(MatchingSplitInfo(treeAb.Cdefgh, treeAbc.Defgh),
               MatchingSplitInfo(treeAbc.Defgh, treeAb.Cdefgh))
  expect_equal(MatchingSplitInfo(treeAbcd.Efgh, treeAb.Cdefgh),
               MatchingSplitInfo(treeAb.Cdefgh, treeAbcd.Efgh))
  # Analytic expectation via counts of trees matching the split pair
  expect_equal(-(TreeTools::Log2TreesMatchingSplit(2, 5) - Log2Unrooted.int(7)),
               MatchingSplitInfo(treeAb.Cdefgh, treeAbc.Defgh),
               tolerance = 1e-7)
  # A more similar tree should score higher than a less similar one
  expect_true(MatchingSplitInfo(treeSym8, treeBal8) >
                MatchingSplitInfo(treeSym8, treeOpp8))
  expect_equal(0, MatchingSplitInfoDistance(treeSym8, treeSym8))
  NormalizationTest(MatchingSplitInfo)
})
test_that("Shared Phylogenetic Information is correctly estimated", {
exp <- ExpectedVariation(treeSym8, treeAbc.Defgh, samples = 1000L)
tol <- exp[, 'Std. Err.'] * 2
# Expected values calculated with 100k samples
expect_equal(1.175422, exp['SharedPhylogeneticInfo', 'Estimate'],
tolerance = tol[1])
expect_equal(3.099776, exp['MatchingSplitInfo', 'Estimate'],
tolerance = tol[2])
expect_equal(25.231023, exp['DifferentPhylogeneticInfo', 'Estimate'],
tolerance = tol[3])
expect_equal(21.382314, exp['MatchingSplitInfoDistance', 'Estimate'],
tolerance = tol[4])
expect_equal(exp[, 'sd'], exp[, 'Std. Err.'] * sqrt(exp[, 'n']))
})
test_that('Clustering information is correctly calculated', {
  # Split-pair scores: sum of the marginal entropies of each bipartition
  # minus the joint entropy of their four-cell association table.
  expect_equal(Entropy(c(3, 5) / 8) * 2 - Entropy(c(0, 0, 3, 5) / 8),
               cpp_mutual_clustering(
                 as.Splits(as.logical(c(1, 1, 1, 0, 0, 0, 0, 0))),
                 as.Splits(as.logical(c(1, 1, 1, 0, 0, 0, 0, 0))),
                 8L)$score,
               tolerance = 1e-7)
  expect_equal(Entropy(c(2, 6) / 8) * 2 - Entropy(c(0, 2, 2, 4) / 8),
               cpp_mutual_clustering(
                 as.Splits(as.logical(c(1, 1, 0, 0, 0, 0, 0, 0))),
                 as.Splits(as.logical(c(0, 0, 1, 1, 0, 0, 0, 0))),
                 8L)$score, tolerance = 1e-7)
  expect_equal(Entropy(c(5, 4) / 9) + Entropy(c(3, 6) / 9) -
                 Entropy(c(3, 2, 0, 4) / 9),
               cpp_mutual_clustering(
                 as.Splits(as.logical(c(1, 1, 1, 1, 1, 0, 0, 0, 0))),
                 as.Splits(as.logical(c(0, 0, 1, 1, 1, 0, 0, 0, 0))),
                 9L)$score,
               tolerance = 1e-7)
  # Statistically independent splits: the expectation evaluates to zero
  expect_equal(Entropy(c(4, 4) / 8) * 2 - Entropy(c(2, 2, 2, 2) / 8),
               cpp_mutual_clustering(
                 as.Splits(as.logical(c(1, 1, 1, 1, 0, 0, 0, 0))),
                 as.Splits(as.logical(c(1, 0, 1, 0, 1, 0, 1, 0))),
                 8L)$score,
               tolerance = 1e-7)
  # Identical splits: all marginal entropy is shared
  expect_equal(Entropy(c(4, 4) / 8) * 2 - Entropy(c(0, 0, 4, 4) / 8),
               cpp_mutual_clustering(
                 as.Splits(as.logical(c(1, 1, 1, 1, 0, 0, 0, 0))),
                 as.Splits(as.logical(c(1, 1, 1, 1, 0, 0, 0, 0))),
                 8L)$score,
               tolerance = 1e-7)
  # Whole-tree scores: self-comparison, star trees, normalization
  expect_equal(ClusteringEntropy(treeSym8),
               MutualClusteringInfo(treeSym8, treeSym8),
               tolerance = 1e-05)
  expect_equal(8 * ClusteringEntropy(treeSym8), ClusteringInfo(treeSym8))
  expect_equal(0, MutualClusteringInfo(treeSym8, treeStar8))
  expect_equal(0, MutualClusteringInfo(treeStar8, treeStar8))
  # TreeDistance() is expected to agree with the normalized clustering
  # information distance
  expect_equal(TreeDistance(treeSym8, treeBal8),
               ClusteringInfoDistance(treeSym8, treeBal8, normalize = TRUE))
  expect_equal(1, MutualClusteringInfo(treeSym8, treeSym8, normalize = TRUE),
               tolerance = 1e-7)
  # Normalizing by the smaller total gives a larger normalized score
  expect_true(MutualClusteringInfo(treeSym8, treeBal8, normalize = pmin) >
                MutualClusteringInfo(treeSym8, treeBal8, normalize = pmax))
  # Distance = total entropy minus twice the mutual information
  expect_equal(ClusteringEntropy(treeSym8) + ClusteringEntropy(treeBal8) -
                 (2 * MutualClusteringInfo(treeBal8, treeSym8)),
               ClusteringInfoDistance(treeSym8, treeBal8), tolerance = 1e-05)
  expect_equal(MutualClusteringInfo(treeAb.Cdefgh, treeAbc.Defgh),
               MutualClusteringInfo(treeAbc.Defgh, treeAb.Cdefgh),
               tolerance = 1e-05)
  # Different resolution
  # Hard-coded 20-leaf fixtures: a random binary tree and a polytomous tree
  # (the same fixtures appear again in the "Matchings are correct" test)
  randomBif20 <- structure(list(
    edge = structure(c(21L, 21L, 22L, 23L, 24L, 25L, 26L, 27L, 28L, 29L, 30L,
                       31L, 32L, 32L, 31L, 30L, 29L, 33L, 34L, 34L, 33L, 28L,
                       35L, 36L, 36L, 35L, 27L, 26L, 37L, 37L, 25L, 38L, 38L,
                       39L, 39L, 24L, 23L, 22L, 1L, 22L, 23L, 24L, 25L, 26L,
                       27L, 28L, 29L, 30L, 31L, 32L, 2L, 14L, 7L, 10L, 33L, 34L,
                       4L, 6L, 8L, 35L, 36L, 13L, 16L, 18L, 17L, 37L, 5L, 15L,
                       38L, 11L, 39L, 12L, 19L, 9L, 3L, 20L),
                     .Dim = c(38L, 2L)), Nnode = 19L,
    tip.label = c("t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9", "t10",
                  "t11", "t12", "t13", "t14", "t15", "t16", "t17", "t18", "t19",
                  "t20"), br = NULL), class = "phylo")
  threeAwayPoly <- structure(
    list(edge = structure(c(21L, 22L, 23L, 24L, 25L, 26L, 27L, 28L, 29L, 29L,
                            28L, 27L, 26L, 30L, 30L, 30L, 26L, 31L, 31L, 25L,
                            32L, 33L, 33L, 32L, 25L, 25L, 24L, 34L, 34L, 34L,
                            23L, 22L, 21L, 22L, 23L, 24L, 25L, 26L, 27L, 28L,
                            29L, 2L, 8L, 14L, 10L, 30L, 13L, 16L, 18L, 31L, 4L,
                            6L, 32L, 33L, 15L, 20L, 5L, 7L, 17L, 34L, 11L, 12L,
                            19L, 9L, 3L, 1L), .Dim = c(33L, 2L)),
         tip.label = c("t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9",
                       "t10", "t11", "t12", "t13", "t14", "t15", "t16", "t17",
                       "t18", "t19", "t20"),
         Nnode = 14L), class = "phylo")
  # Trees of different resolution should still compare symmetrically
  expect_equal(
    MutualClusteringInfo(threeAwayPoly, randomBif20),
    MutualClusteringInfo(randomBif20, threeAwayPoly))
  # Matching reported split-by-split; NA = split left unmatched
  match <- MutualClusteringInfo(randomBif20, threeAwayPoly, reportMatching = TRUE)
  expect_equal(c(NA, NA, 1, 2, NA, 3, 7, 11, 10, 4, 6, 9, 8, NA, 5, 12, NA),
               attr(match, 'matching'))
  # Multiple bins, calculated expectation
  library('TreeTools', quietly = TRUE, warn.conflicts = FALSE)
  # Three 65-leaf trees built by adding a tip at different positions of a
  # balanced 64-leaf tree
  b65m <- lapply(c(1, 2, 70), AddTip, tree = BalancedTree(64))
  self <- ClusteringEntropy(b65m)
  diff <- ClusteringEntropy(b65m[[1]], sum = FALSE)["72"]
  # Copied from C: an R transcription of the package's C scoring kernel;
  # its style (`=` assignments, semicolons) deliberately mirrors the original.
  ic_element <- function (nkK, nk, nK, n) {
    if (nkK && nk && nK) {
      if (nkK == nk && nkK == nK && nkK + nkK == n) return (nkK);
      numerator = nkK * n
      denominator = nk * nK
      if (numerator == denominator) return (0);
      nkK * (log2(numerator) - log2(denominator));
    } else 0;
  }
  expect_equal(diff,
               (ic_element(63, 63, 63, 65) +
                  ic_element(00, 63, 02, 65) +
                  ic_element(00, 02, 63, 65) +
                  ic_element(02, 02, 02, 65)) / 65,
               ignore_attr = TRUE)
  new <- (ic_element(65-3, 63, 63, 65) +
            ic_element(1, 63, 02, 65) +
            ic_element(1, 02, 63, 65) +
            ic_element(1, 02, 02, 65)) / 65
  other <- self[1] - diff[1] + new # Calc'd = 20.45412
  expect_equal(other, MutualClusteringInfo(b65m[[1]], b65m[[2]]),
               ignore_attr = TRUE)
  # Pairwise matrix: off-diagonal scores equal `other`, diagonal = self-scores
  expectation <- matrix(other, 3, 3)
  diag(expectation) <- self
  expect_equal(expectation, MutualClusteringInfo(b65m), ignore_attr = TRUE)
  # Sanity checks on larger trees (64 and 644 leaves)
  expect_equal(ClusteringEntropy(BalancedTree(64)),
               MutualClusteringInfo(BalancedTree(64), BalancedTree(64)))
  expect_equal(ClusteringEntropy(BalancedTree(644)),
               MutualClusteringInfo(BalancedTree(644), BalancedTree(644)))
  expect_gt(ClusteringEntropy(BalancedTree(64)),
            MutualClusteringInfo(BalancedTree(64), PectinateTree(64)))
  expect_gt(ClusteringEntropy(BalancedTree(644)),
            MutualClusteringInfo(BalancedTree(644), PectinateTree(644)))
  NormalizationTest(MutualClusteringInfo)
})
test_that("Matchings are correct", {
# Different resolution: used to cause memory leak
randomBif20 <- structure(list(
edge = structure(c(21L, 21L, 22L, 23L, 24L, 25L, 26L, 27L, 28L, 29L, 30L,
31L, 32L, 32L, 31L, 30L, 29L, 33L, 34L, 34L, 33L, 28L,
35L, 36L, 36L, 35L, 27L, 26L, 37L, 37L, 25L, 38L, 38L,
39L, 39L, 24L, 23L, 22L, 1L, 22L, 23L, 24L, 25L, 26L,
27L, 28L, 29L, 30L, 31L, 32L, 2L, 14L, 7L, 10L, 33L, 34L,
4L, 6L, 8L, 35L, 36L, 13L, 16L, 18L, 17L, 37L, 5L, 15L,
38L, 11L, 39L, 12L, 19L, 9L, 3L, 20L),
.Dim = c(38L, 2L)), Nnode = 19L,
tip.label = c("t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9", "t10",
"t11", "t12", "t13", "t14", "t15", "t16", "t17", "t18", "t19",
"t20"), br = NULL), class = "phylo")
threeAwayPoly <- structure(
list(edge = structure(c(21L, 22L, 23L, 24L, 25L, 26L, 27L, 28L, 29L, 29L,
28L, 27L, 26L, 30L, 30L, 30L, 26L, 31L, 31L, 25L,
32L, 33L, 33L, 32L, 25L, 25L, 24L, 34L, 34L, 34L,
23L, 22L, 21L, 22L, 23L, 24L, 25L, 26L, 27L, 28L,
29L, 2L, 8L, 14L, 10L, 30L, 13L, 16L, 18L, 31L, 4L,
6L, 32L, 33L, 15L, 20L, 5L, 7L, 17L, 34L, 11L, 12L,
19L, 9L, 3L, 1L), .Dim = c(33L, 2L)),
tip.label = c("t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9",
"t10", "t11", "t12", "t13", "t14", "t15", "t16", "t17",
"t18", "t19", "t20"),
Nnode = 14L), class = "phylo")
expect_equal(
MutualClusteringInfo(threeAwayPoly, randomBif20),
MutualClusteringInfo(randomBif20, threeAwayPoly))
t1 <- PectinateTree(letters[1:11])
t2 <- ape::read.tree(text = '(a, (c, (b, (d, e, ((g, h, f), (k, (j, i)))))));')
t3 <- CollapseNode(PectinateTree(c(letters[11], letters[1:10])), 16:19)
s1 <- as.Splits(t1)
s2 <- as.Splits(t2, t1)
s3 <- as.Splits(t3, t1)
n1 <- dim(s1)[1]
n2 <- dim(s2)[1]
n3 <- dim(s3)[1]
n <- NTip(s1)
# Plot
# par(mfrow = 2:1, cex = 0.9, mar = rep(0,4))
# JRF2T <- function(...) JaccardRobinsonFoulds(..., k = 2)
# JRF2F <- function(...) JaccardRobinsonFoulds(..., k = 2, allowConflict = FALSE)
# VisualizeMatching(MatchingSplitDistance, t1, t2, setPar=F)
# LabelSplits(t2, setNames(1:6, names(s2)), adj = 2)
# VisualizeMatching(MatchingSplitDistance, t2, t1, setPar=F)
# LabelSplits(t1, setNames(1:8, names(s1)), adj = 2)
Test <- function (CppFn, x12, x21, ...) {
r12 <- CppFn(s1, s2, n, ...)
r21 <- CppFn(s2, s1, n, ...)
r13 <- CppFn(s1, s3, n, ...)
r31 <- CppFn(s3, s1, n, ...)
expect_equal(r12$score, r21$score)
expect_equal(r13$score, r31$score)
m12 <- r12$matching
m21 <- r21$matching
expect_equal(n1, length(m12))
expect_equal(length(m12[!is.na(m12)]), length(unique(m12[!is.na(m12)])))
expect_equal(n2, length(m21))
expect_equal(length(m21[!is.na(m21)]), length(unique(m21[!is.na(m21)])))
expect_lte(dim(s1)[1] - dim(s2)[1], sum(is.na(m12)))
m13 <- r13$matching
m31 <- r31$matching
expect_equal(n1, length(m13))
expect_equal(length(m13[!is.na(m13)]), length(unique(m13[!is.na(m13)])))
expect_equal(n3, length(m31))
expect_equal(length(m31[!is.na(m31)]), length(unique(m31[!is.na(m31)])))
expect_lte(dim(s1)[1] - dim(s3)[1], sum(is.na(m13)))
for (i in seq_along(m12)) expect_true(m12[i] %in% x12[[i]])
for (i in seq_along(m21)) expect_true(m21[i] %in% x21[[i]])
}
Test(TreeDist:::cpp_robinson_foulds_distance,
list(NA, 2, NA, 3, NA, NA, 5, NA),
list(NA, 2, 4, NA, 7, NA)
)
Test(TreeDist:::cpp_robinson_foulds_info,
list(NA, 2, NA, 3, NA, NA, 5, NA),
list(NA, 2, 4, NA, 7, NA)
)
Test(TreeDist:::cpp_matching_split_distance,
list(1, 2, 4, 3, NA, NA, 5, 6),
list(1, 2, 5, 4, 7, 6)
)
Test(TreeDist:::cpp_jaccard_similarity,
list(NA, 2, 1, 3, 4, 6, 5, NA),
list(3, 2, 4, 5, 7, 6),
k = 2,
allowConflict = TRUE)
Test(TreeDist:::cpp_jaccard_similarity,
list(NA, 2, 1, 3, NA, 6, 5, 4),
list(3, 2, 4, 1, 7, 6),
k = 2,
allowConflict = FALSE)
Test(TreeDist:::cpp_msi_distance,
list(NA, 2, 1, 4, 3, 6, 5, NA),
list(3, 2, c(4, 5), c(4, 5), c(6, 7), c(7, 6))
)
Test(TreeDist:::cpp_shared_phylo,
list(NA, 2, 4, 3, 1, 6, 5, NA),
list(5, 2, 4, 3, 7, 6)
)
Test(TreeDist:::cpp_mutual_clustering,
list(4, 2, NA, 3, 6, NA, 5, 1),
list(8, 2, 4, 5, 7, 1)
)
})
test_that('Matching Split Distance is correctly calculated', {
  # Identical trees, and trees with no splits to match, score zero
  expect_equal(0L, MatchingSplitDistance(treeSym8, treeSym8))
  expect_equal(0L, MatchingSplitDistance(treeStar8, treeSym8))
  expect_equal(0L, MatchingSplitDistance(treeStar8, treeStar8))
  # With nothing to match, the reported matching and attributes are empty
  match0 <- MatchingSplitDistance(treeStar8, treeStar8, reportMatching = TRUE)
  expect_equal(rep(0L, 4), c(match0, vapply(attributes(match0), length, 0)),
               ignore_attr = TRUE)
  expect_equal(1L, MatchingSplitDistance(treeAb.Cdefgh, treeAbc.Defgh))
  expect_equal(2L, MatchingSplitDistance(treeAb.Cdefgh, treeAbcd.Efgh))
  # Hand-calculated distances between individual splits on nine leaves
  splitAB <- as.Splits(c(rep(TRUE, 2), rep(FALSE, 7)))
  splitABC <- as.Splits(c(rep(TRUE, 3), rep(FALSE, 6)))
  splitAEF <- as.Splits(c(TRUE, rep(FALSE, 3), TRUE, TRUE, rep(FALSE, 3)))
  splitABCD <- as.Splits(c(rep(TRUE, 4), rep(FALSE, 5)))
  splitABCDE <- as.Splits(c(rep(TRUE, 5), rep(FALSE, 4)))
  splitAI <- as.Splits(c(TRUE, rep(FALSE, 7), TRUE))
  expect_equal(2L, MatchingSplitDistanceSplits(splitAB, splitAI))
  expect_equal(2L, MatchingSplitDistanceSplits(splitAB, splitABCD))
  expect_equal(3L, MatchingSplitDistanceSplits(splitAB, splitABCDE))
  expect_equal(4L, MatchingSplitDistanceSplits(splitABC, splitAEF))
  # Symmetric in its arguments
  expect_equal(MatchingSplitDistanceSplits(splitABC, splitAEF),
               MatchingSplitDistanceSplits(splitAEF, splitABC))
  # Invariant to tree description order
  sq_pectinate <- ape::read.tree(text='((((((1, 2), 3), 4), 5), 6), (7, (8, (9, (10, 11)))));')
  shuffle1 <- ape::read.tree(text='(((((1, 5), 2), 6), (3, 4)), ((8, (7, 9)), (10, 11)));')
  shuffle2 <- ape::read.tree(text='(((8, (7, 9)), (10, 11)), ((((1, 5), 2), 6), (3, 4)));')
  expect_equal(MatchingSplitDistance(shuffle1, sq_pectinate),
               MatchingSplitDistance(sq_pectinate, shuffle1))
  expect_equal(0L, MatchingSplitDistance(shuffle1, shuffle2))
  expect_equal(MatchingSplitDistance(shuffle1, sq_pectinate),
               MatchingSplitDistance(shuffle2, sq_pectinate))
})
test_that('NyeSimilarity is correctly calculated, and matches JRF', {
  listBalSym <- list(treeBal8, treeSym8)
  # Jaccard-Robinson-Foulds with k = 1 and conflicts allowed corresponds to
  # the Nye et al. similarity, so the two should normalize identically
  JRF <- function (..., sim = TRUE)
    JaccardRobinsonFoulds(..., k = 1, similarity = sim, allowConflict = TRUE)
  # Self-similarity equals the number of splits in treeSym8 (five)
  expect_equal(5L, NyeSimilarity(as.Splits(treeSym8), treeSym8))
  expect_equal(1, NyeSimilarity(treeSym8, treeSym8, normalize = TRUE))
  expect_equal(1, JRF(treeSym8, treeSym8, normalize = TRUE))
  # Star trees: no splits, so no similarity; normalizing by zero gives NaN
  expect_equal(0, NyeSimilarity(treeSym8, treeStar8, normalize = FALSE))
  expect_equal(0, NyeSimilarity(treeSym8, treeStar8, normalize = TRUE))
  expect_equal(0, JRF(treeSym8, treeStar8, normalize = TRUE))
  expect_equal(0, NyeSimilarity(treeStar8, treeStar8, normalize = FALSE))
  expect_equal(NaN, NyeSimilarity(treeStar8, treeStar8, normalize = TRUE,
                                  normalizeMax = FALSE))
  expect_equal(c(3.8, 5), NyeSimilarity(treeSym8, listBalSym))
  expect_equal(2 / 3, NyeSimilarity(treeAb.Cdefgh, treeAbc.Defgh),
               tolerance = 1e-7)
  expect_equal(2 * (1 / 3), tolerance = 1e-7,
               NyeSimilarity(treeAb.Cdefgh, treeAbc.Defgh, similarity = FALSE))
  # normalize may be FALSE, a numeric divisor, or TRUE (default behaviour)
  expect_equal(1L, NyeSimilarity(treeSym8, treeAbcd.Efgh, normalize = FALSE))
  expect_equal(1L / 5L, NyeSimilarity(treeSym8, treeAbcd.Efgh, normalize = 5L))
  expect_equal(0.2, JRF(treeSym8, treeAbcd.Efgh, normalize = 5L * 2L))
  expect_equal(1/3, NyeSimilarity(treeSym8, treeAbcd.Efgh, normalize = TRUE))
  expect_equal(1/3, JRF(treeSym8, treeAbcd.Efgh, normalize = TRUE))
  expect_equal(2/3, NyeSimilarity(treeSym8, treeAbcd.Efgh, similarity = FALSE,
                                  normalize = TRUE))
  expect_equal(2/3, JRF(treeSym8, treeAbcd.Efgh, sim = FALSE, normalize = TRUE))
  # normalize = TRUE divides by the mean split count of the two trees
  expect_equal(1L / ((5L + 1L) / 2L),
               NyeSimilarity(treeSym8, treeAbcd.Efgh, normalize = TRUE))
  # A more similar tree should score higher than a less similar one
  expect_true(NyeSimilarity(treeSym8, treeBal8) >
                NyeSimilarity(treeSym8, treeOpp8))
  NormalizationTest(NyeSimilarity)
})
test_that('Jaccard RF extremes tend to equivalent functions', {
  bothTrees <- list(treeBal8, treeSym8)
  # With k = 1 and conflicts permitted, the JRF similarity is exactly twice
  # the Nye et al. similarity
  expect_equal(JaccardRobinsonFoulds(treeSym8, bothTrees,
                                     similarity = TRUE, k = 1L,
                                     allowConflict = TRUE),
               NyeSimilarity(treeSym8, bothTrees) * 2L)
  # As k grows, the JRF distance converges on plain Robinson-Foulds;
  # k = Inf and a very large finite k should both reproduce it
  rfReference <- RobinsonFoulds(treeSym8, bothTrees)
  expect_equal(JaccardRobinsonFoulds(treeSym8, bothTrees,
                                     similarity = FALSE, k = Inf),
               rfReference)
  expect_equal(JaccardRobinsonFoulds(treeSym8, bothTrees,
                                     similarity = FALSE, k = 999999),
               rfReference)
})
test_that('Jaccard RF is correctly calculated', {
  # Self-similarity is twice the number of splits (five) in treeSym8
  expect_equal(5L * 2L, JaccardRobinsonFoulds(treeSym8, treeSym8,
                                              k = 2, similarity = TRUE))
  expect_equal(c(3.32, 5) * 2L,
               JaccardRobinsonFoulds(treeSym8, list(treeBal8, treeSym8),
                                     similarity = TRUE, k = 2))
  expect_equal(2 * 2, 3 * JaccardRobinsonFoulds(treeAb.Cdefgh, treeAbc.Defgh,
                                                similarity = TRUE),
               tolerance = 1e-7)
  # Normalized self-comparison: similarity 1, distance 0
  expect_equal(1, JaccardRobinsonFoulds(treeSym8, treeSym8,
                                        similarity = TRUE, normalize = TRUE))
  expect_equal(0, JaccardRobinsonFoulds(treeSym8, treeSym8,
                                        similarity = FALSE, normalize = TRUE))
  expect_equal(1L * 2L,
               JaccardRobinsonFoulds(treeSym8, treeAbcd.Efgh, similarity = TRUE,
                                     normalize = FALSE, k = 2))
  expect_equal(1L * 2L / 6L,
               JaccardRobinsonFoulds(treeSym8, treeAbcd.Efgh, similarity = TRUE,
                                     normalize = TRUE, k = 2))
  # A more similar tree should be a smaller distance away
  expect_lt(JaccardRobinsonFoulds(treeSym8, treeBal8, k = 2),
            JaccardRobinsonFoulds(treeSym8, treeOpp8, k = 2))
  # Larger k concentrates the score, increasing the distance
  expect_lt(JaccardRobinsonFoulds(treeSym8, treeBal8, k = 3L),
            JaccardRobinsonFoulds(treeSym8, treeBal8, k = 4L))
  # Forbidding conflicting pairings cannot decrease the distance
  expect_lt(JaccardRobinsonFoulds(treeCat8, treeTac8, allowConflict = TRUE),
            JaccardRobinsonFoulds(treeCat8, treeTac8, allowConflict = FALSE))
  # Larger trees: zero for identical topologies, positive otherwise
  expect_equal(0, JaccardRobinsonFoulds(BalancedTree(64), BalancedTree(64)))
  expect_lt(0, JaccardRobinsonFoulds(BalancedTree(64), PectinateTree(64)))
  expect_equal(0, JaccardRobinsonFoulds(BalancedTree(264), BalancedTree(264)))
  expect_lt(0, JaccardRobinsonFoulds(BalancedTree(264), PectinateTree(264)))
})
test_that('RobinsonFoulds() is correctly calculated', {
  # phangorn's RF.dist() serves as an independent reference implementation
  ExpectMatchesPhangorn <- function (tree1, tree2) {
    reference <- suppressMessages(phangorn::RF.dist(tree1, tree2))
    expect_equal(reference, RobinsonFoulds(tree1, tree2))
  }
  ExpectMatchesPhangorn(treeSym8, treeSym8)
  ExpectMatchesPhangorn(treeBal8, treeSym8)
  expect_equal(c(4, 0), RobinsonFoulds(treeSym8, list(treeBal8, treeSym8)))
  ExpectMatchesPhangorn(treeAb.Cdefgh, treeAbc.Defgh)
  expect_equal(0, RobinsonFoulds(treeSym8, treeSym8, normalize = TRUE))
  expect_equal(4L / 6L,
               RobinsonFoulds(treeSym8, treeAbcd.Efgh, normalize = TRUE))
  ExpectMatchesPhangorn(treeSym8, treeOpp8)
  # Exercise a range of leaf counts, including values at and just past
  # powers of two, pairing a reversed-label pectinate tree against a
  # balanced tree of the same size
  for (nTip in c(10, 32, 50, 64, 67, 128, 1024, 1027)) {
    reversedLeaves <- paste0('t', rev(seq_len(nTip)))
    ExpectMatchesPhangorn(TreeTools::PectinateTree(reversedLeaves),
                          TreeTools::BalancedTree(nTip))
  }
  NormalizationTest(RobinsonFoulds, similarity = TRUE)
  # TODO: we may wish to revise this test once we implement diag = TRUE to
  # allow similarities to be calculated on the diagonal.
  expect_equal(numeric(0), RobinsonFoulds(treeSym8, normalize = TRUE))
})
test_that('Robinson Foulds Info is correctly calculated', {
  # Self-comparison: similarity is twice the tree's own splitwise information
  # (22.53747, per the expectation below); normalized distance / similarity
  # are 0 / 1 respectively.
  expect_equal(22.53747 * 2L, tolerance = 1e-05,
               InfoRobinsonFoulds(treeSym8, treeSym8, similarity = TRUE,
                                  normalize = FALSE))
  expect_equal(0, tolerance = 1e-05,
               InfoRobinsonFoulds(treeSym8, treeSym8, normalize = TRUE))
  expect_equal(1, tolerance = 1e-05,
               InfoRobinsonFoulds(treeSym8, treeSym8, similarity = TRUE,
                                  normalize = TRUE))
  expect_equal(24.9, tolerance = 0.01,
               InfoRobinsonFoulds(treeSym8, treeBal8, similarity = TRUE))
  # Similarity and distance must partition the total splitwise information of
  # the two trees.
  expect_equal(SplitwiseInfo(treeSym8) + SplitwiseInfo(treeBal8) -
                 InfoRobinsonFoulds(treeSym8, treeBal8, similarity = FALSE),
               InfoRobinsonFoulds(treeSym8, treeBal8, similarity = TRUE))
  # 10395 = (2 * 8 - 5)!! is the number of unrooted binary eight-leaf trees;
  # -log2(k / 10395) is the information content of a split found in k of them.
  expect_equal(-log2(945/10395) * 2,
               InfoRobinsonFoulds(treeSym8, treeAb.Cdefgh, similarity = TRUE))
  # NOTE(review): the next expectation duplicates the previous one exactly --
  # possibly a different tree was intended; confirm.
  expect_equal(-log2(945/10395) * 2,
               InfoRobinsonFoulds(treeSym8, treeAb.Cdefgh, similarity = TRUE))
  expect_equal(-log2(315/10395) * 2,
               InfoRobinsonFoulds(treeSym8, treeAbc.Defgh, similarity = TRUE))
  # Test symmetry of small vs large splits
  expect_equal(InfoRobinsonFoulds(treeSym8, treeAbc.Defgh),
               InfoRobinsonFoulds(treeAbc.Defgh, treeSym8))
  expect_equal(-log2(225/10395) * 2,
               InfoRobinsonFoulds(treeSym8, treeAbcd.Efgh, similarity = TRUE))
  expect_equal((-log2(225/10395) - log2(945/10395)) * 2,
               InfoRobinsonFoulds(treeSym8, treeTwoSplits, similarity = TRUE))
  # InfoRobinsonFoulds and the RobinsonFouldsInfo alias must agree, whichever
  # argument is the list.
  expect_equal(InfoRobinsonFoulds(treeSym8, list(treeSym8, treeBal8)),
               RobinsonFouldsInfo(list(treeSym8, treeBal8), treeSym8))
  # Check that large trees work
  expect_equal(0, InfoRobinsonFoulds(BalancedTree(64), BalancedTree(64)))
  expect_lt(0, InfoRobinsonFoulds(BalancedTree(64), PectinateTree(64)))
  expect_equal(0, InfoRobinsonFoulds(BalancedTree(129), BalancedTree(129)))
  expect_lt(0, InfoRobinsonFoulds(BalancedTree(129), PectinateTree(129)))
})
test_that('Kendall-Colijn distance is correctly calculated', {
  # Expected values calculated using treespace::treeDist(treeSym8, treeBal8)
  ExpectKC <- function (expected, a, b) {
    expect_equal(expected, KendallColijn(a, b), tolerance = 1e-06)
  }
  ExpectKC(2.828427, treeSym8, treeBal8)
  ExpectKC(2.828427, treeCat8, treeBal8)
  ExpectKC(7.211103, treeSym8, treeOpp8)
  # List arguments yield a matrix of pairwise distances.
  expect_equal(matrix(c(0L, 8L), nrow=2, ncol=2, byrow=TRUE),
               KendallColijn(list(treeSym8, treeCat8), list(treeCat8, treeTac8)), tolerance=1e-06)
  ExpectKC(8L, treeCat8, treeTac8)
  ExpectKC(0L, treeSym8, treeCat8)
  ExpectKC(8L, treeSym8, treeTac8)
  ExpectKC(8L, treeCat8, treeTac8) # repeats an earlier check; kept as in original
  ExpectKC(5.291503, treeSym8, treeAb.Cdefgh)
  ExpectKC(4.358899, treeSym8, treeAbc.Defgh)
  ExpectKC(5L, treeSym8, treeAcd.Befgh)
  ExpectKC(3.464102, treeSym8, treeAbcd.Efgh)
  ExpectKC(3L, treeSym8, treeTwoSplits)
  ExpectKC(2.828427, treeAbc.Defgh, treeTwoSplits)
})
test_that('Multiple comparisons are correctly ordered', {
  nTrees <- 6L
  nTip <- 16L
  # Fixed seed: the statements below must stay in this order so the random
  # trees are reproducible.
  set.seed(0)
  trees <- lapply(rep(nTip, nTrees), ape::rtree, br=NULL)
  # Replace the first and penultimate trees with deterministic topologies.
  trees[[1]] <- TreeTools::BalancedTree(nTip)
  trees[[nTrees - 1L]] <- TreeTools::PectinateTree(nTip)
  class(trees) <- 'multiPhylo'
  # All-pairs distances must agree with phangorn's reference implementation,
  # ignoring any attribute differences.
  expect_equal(phangorn::RF.dist(trees), RobinsonFoulds(trees),
               ignore_attr = TRUE)
  # Test CompareAll
  expect_equal(as.matrix(phangorn::RF.dist(trees)),
               as.matrix(CompareAll(trees, phangorn::RF.dist, 0L)),
               ignore_attr = TRUE)
  # CompareAll on a scalar extractor must match the corresponding component of
  # CompareAll on the full NNIDist result.
  NNILoose <- function (x, y) NNIDist(x, y)['loose_upper']
  expect_equal(CompareAll(trees, NNILoose),
               CompareAll(trees, NNIDist)$loose_upper,
               ignore_attr = TRUE)
})
test_that('Normalization occurs as documented', {
  library('TreeTools')
  balanced <- BalancedTree(8)
  partResolved <- CollapseNode(PectinateTree(8), 12:13)
  # Normalization denominators: summed independent per-tree information /
  # entropy (19.367 + 11.963 and 4.245 + 2.577 respectively).
  totalInfo <- SplitwiseInfo(balanced) + SplitwiseInfo(partResolved)
  totalEntropy <- ClusteringEntropy(balanced) + ClusteringEntropy(partResolved)
  # Phylogenetic information: shared counts towards both trees, so
  # 2 * shared + different == total.
  shared <- SharedPhylogeneticInfo(balanced, partResolved, normalize = FALSE) # 9.64
  different <- DifferentPhylogeneticInfo(balanced, partResolved, normalize = FALSE) # 12.04
  expect_equal(shared + shared + different, totalInfo)
  expect_equal(SharedPhylogeneticInfo(balanced, partResolved, normalize = TRUE),
               (shared + shared) / totalInfo)
  expect_equal(PhylogeneticInfoDistance(balanced, partResolved, normalize = TRUE),
               different / totalInfo)
  # Matching split information follows the same partition.
  msShared <- MatchingSplitInfo(balanced, partResolved, normalize = FALSE)
  msDistance <- MatchingSplitInfoDistance(balanced, partResolved, normalize = FALSE)
  expect_equal(msShared + msShared + msDistance, totalInfo)
  expect_equal(MatchingSplitInfo(balanced, partResolved, normalize = TRUE),
               (msShared + msShared) / totalInfo)
  expect_equal(MatchingSplitInfoDistance(balanced, partResolved, normalize = TRUE),
               msDistance / totalInfo)
  # Clustering information partitions the summed entropy analogously.
  mutual <- MutualClusteringInfo(balanced, partResolved, normalize = FALSE)
  ciDistance <- ClusteringInfoDistance(balanced, partResolved, normalize = FALSE)
  expect_equal(mutual + mutual + ciDistance, totalEntropy)
  expect_equal(MutualClusteringInfo(balanced, partResolved, normalize = TRUE),
               (mutual + mutual) / totalEntropy)
  expect_equal(ClusteringInfoDistance(balanced, partResolved, normalize = TRUE),
               ciDistance / totalEntropy)
})
test_that("Independent of root position", {
  library('TreeTools')
  # Balanced and pectinate eight-leaf trees, each rooted two different ways,
  # then unrooted: every distance below must be invariant to the old root.
  bal8 <- BalancedTree(8)
  pec8 <- PectinateTree(8)
  trees <- lapply(list(bal8, RootTree(bal8, 't4'),
                       pec8, RootTree(pec8, 't4')), UnrootTree)
  # methodsToTest is defined elsewhere in this test file; its final entry is
  # excluded here -- presumably covered by the Test() calls below (confirm).
  lapply(methodsToTest[-length(methodsToTest)], function (Method) {
    dists <- as.matrix(Method(trees))
    # Entries 1-2 are the same unrooted tree, as are entries 3-4.
    expect_equal(dists[1, 1], dists[1, 2])
    expect_equal(dists[1, 3], dists[1, 4])
    expect_equal(dists[1, 3], dists[2, 4])
    expect_equal(dists[2, 3], dists[2, 4])
    expect_equal(dists[3, 3], dists[3, 4])
  })
  # Self-distance, and distance between alternative rootings of the same tree,
  # should equal `score` for the given method.
  Test <- function(Method, score = 0L, ...) {
    expect_equal(score, Method(trees[[1]], trees[[1]], ...))
    expect_equal(score, Method(trees[[1]], trees[[2]], ...))
    expect_equal(score, Method(trees[[3]], trees[[3]], ...))
  }
  Test(MASTSize, 8L, rooted = FALSE)
  # Tested further for NNIDist in test-tree_distance_nni.R
  Test(NNIDist, c(lower = 0, best_lower = 0, tight_upper = 0, best_upper = 0,
                  loose_upper = 0, fack_upper = 0, li_upper = 0))
  Test(SPRDist, c(spr = 0))
})
|
#' @title Perform a preliminary simulation study to estimate number of replicates
#' @description This is a function that, given lists of parameters, performs a preliminary simulation study to estimate the number of replicates. It functions similarly to the main simulation study function simstudy, except that it computes the standard deviations of the simulated quantities of interest to approximate the number of replicates necessary for estimating these parameters to a certain precision.
#' @param tol The maximum standard deviation tolerable for all parameters of interest.
#' @param parameternames The names of the parameter combinations supplied. Used only for identification.
#' @param nsims A list giving the number of simulations for each parameter setting.
#' @param seed seed for reproducibility
#' @param cellcounts A list giving the number of cells for each parameter setting.
#' @param genecounts A list giving the number of genes for each parameter setting.
#' @param xmeans A list giving the vectors of mean x-coordinates of the three cell types for each parameter setting.
#' @param xsdss A list giving the vectors of the standard deviations of x-coordinates of the three cell types for each parameter setting
#' @param ymeans A list giving the vectors of mean y-coordinates of the three cell types for each parameter setting.
#' @param ysdss A list giving the vectors of the standard deviations of y-coordinates of the three cell types for each parameter setting
#' @param propsbatch1 A list giving the vectors of proportions of the three cell types in the first batch for each parameter setting.
#' @param propsbatch2 A list giving the vectors of proportions of the three cell types in the second batch for each parameter setting.
#' @param pkeep Whether or not to keep "bad" simulation replicates where a cell type is represented by less than a certain number of cells in a batch: see mycutoff. By default =F.
#' @param mycutoff A number: if the number of cells for any cell type is represented by fewer than cutoff cells in a simulated batch, then this simulation replicate is deemed to be of bad quality. By default=5.
#' @param mycore The number of computing cores to use for parallelizing the simulation.
#' @param dgeneratedata The function to use for generating data. By default this equals generatedata. Highly recommended not to modify this argument.
#' @param ddocluster The function to use for clustering data. By default this equals docluster. Highly recommended not to modify this argument.
#' @export
#' @return A list of simulation replicate numbers calculated such that the standard deviation of each quantity (mean of silhouette scores for all cells, mean of silhouette scores for each cell type) for each batch correction method is no greater than the tol argument for each parameter setting.
#' @examples \dontrun{
#' parameternames=list('Original', 'Smaller Differences', 'More Genes')
#' nsims=list(50,50,50)
#' seed=0
#' cellcounts=list(500,500,500)
#' genecounts=list(100,100,5000)
#' xmeans=list(c(0,5,5),c(0,2,2),c(0,5,5))
#' xsdss=list(c(1,0.1,1),c(1,0.1,1),c(1,0.1,1))
#' ymeans=list(c(5,5,0),c(2,2,0),c(5,5,0))
#' ysdss=list(c(1,0.1,1),c(1,0.1,1),c(1,0.1,1))
#' propsbatch1=list(c(0.3,0.5,0.2),c(0.3,0.5,0.2),c(0.3,0.5,0.2))
#' propsbatch2=list(c(0.65,0.3,0.05),c(0.65,0.3,0.05),c(0.65,0.3,0.05))
#'
#' nsims=prelimstudy(
#' tol=0.01,parameternames=parameternames,
#' nsims=nsims,seed=seed,
#' cellcounts=cellcounts,
#' genecounts=genecounts,
#' xmeans=xmeans,xsdss=xsdss,ymeans=ymeans,
#' ysdss=ysdss,propsbatch1=propsbatch1,
#' propsbatch2=propsbatch2,mycore=1
#' )
#'
#' nsims
#' }
#' @importFrom stats rnorm sd
#'
prelimstudy=function(tol=0.01,parameternames,nsims,seed,cellcounts,genecounts,xmeans,xsdss,ymeans,ysdss,propsbatch1,propsbatch2,pkeep=FALSE,mycutoff=5,mycore=1,dgeneratedata=generatedata,ddocluster=docluster) {
  # All per-setting parameter lists must have equal length; sd() of the
  # lengths is zero exactly when they all agree.
  if(sd(sapply(list(parameternames,nsims,cellcounts,genecounts,xmeans,xsdss,ymeans,ysdss,propsbatch1,propsbatch2),length))!=0) {
    return(warning('All parameter lists should be the same length. Check arguments to make sure this is true.'))
  }
  set.seed(seed)
  # Derive one sub-seed per parameter setting so each dosim() run is reproducible.
  seedset=abs(round(rnorm(length(nsims),1000,200)))
  # Worst-case standard deviations per setting, one vector per result matrix
  # returned by dosim() (mysim[[1]]..mysim[[4]]).
  sduc=sdmnn=sdlm=sdcombat=vector(length=length(nsims))
  names(sduc)=names(sdmnn)=names(sdlm)=names(sdcombat)=as.vector(parameternames)
  # Assume every setting failed until its simulation succeeds below.
  failedsims=seq_along(nsims)
  for(i in seq_along(nsims)) {
    subseed=seedset[i]
    mysim=dosim(nsim=nsims[[i]],
                ncells=cellcounts[[i]],ngenes=genecounts[[i]],
                xmus=xmeans[[i]],xsds=xsdss[[i]],
                ymus=ymeans[[i]],ysds=ysdss[[i]],
                prop1=propsbatch1[[i]],prop2=propsbatch2[[i]],
                keep=pkeep,cutoff=mycutoff,ncore=mycore,s.seed=subseed,
                dgeneratedata = dgeneratedata,ddocluster=ddocluster)
    if(nrow(mysim[[1]])>1) {
      # Largest column-wise sd across the simulated quantities of interest.
      sduc[i]=max(apply(mysim[[1]],MARGIN=2,sd))
      sdmnn[i]=max(apply(mysim[[2]],MARGIN=2,sd))
      sdlm[i]=max(apply(mysim[[3]],MARGIN=2,sd))
      sdcombat[i]=max(apply(mysim[[4]],MARGIN=2,sd))
      failedsims=setdiff(failedsims,i)
    } else {
      warning(paste0('Preliminary Simulation Study Failed for Parameter Set: "', parameternames[[i]] ,'" Due to insufficient valid replicates \n Please Check Parameters \n Returning Supplied replicate number for this Parameter Set'))
    }
  }
  sds=data.frame(sduc,sdmnn,sdlm,sdcombat)
  # Worst (largest) sd across the four methods for each parameter setting.
  maxsds=apply(sds,MARGIN=1,max)
  # Replicates needed so the standard error sd/sqrt(n) <= tol: n >= (sd/tol)^2.
  newnsims=ceiling((maxsds/tol)^2)
  # BUG FIX: the original tested length(failedsims > 0) -- the length of a
  # logical vector, which only worked by coincidence; test the count directly.
  if(length(failedsims) > 0) {
    # Failed settings fall back to the replicate counts the caller supplied.
    newnsims[failedsims]=unlist(nsims)[failedsims]
  }
  return(as.list(newnsims))
}
| /R/prelimstudy.R | no_license | jlakkis/mnnsim | R | false | false | 5,719 | r | #' @title Perform a preliminary simulation study to estimate number of replicates
#' @description This is a function that, given lists of parameters, performs a preliminary simulation study to estimate the number of replicates. It functions similarly to the main simulation study function simstudy, except that it computes the standard deviations of the simulated quantities of interest to approximate the number of replicates necessary for estimating these parameters to a certain precision.
#' @param tol The maximum standard deviation tolerable for all parameters of interest.
#' @param parameternames The names of the parameter combinations supplied. Used only for identification.
#' @param nsims A list giving the number of simulations for each parameter setting.
#' @param seed seed for reproducibility
#' @param cellcounts A list giving the number of cells for each parameter setting.
#' @param genecounts A list giving the number of genes for each parameter setting.
#' @param xmeans A list giving the vectors of mean x-coordinates of the three cell types for each parameter setting.
#' @param xsdss A list giving the vectors of the standard deviations of x-coordinates of the three cell types for each parameter setting
#' @param ymeans A list giving the vectors of mean y-coordinates of the three cell types for each parameter setting.
#' @param ysdss A list giving the vectors of the standard deviations of y-coordinates of the three cell types for each parameter setting
#' @param propsbatch1 A list giving the vectors of proportions of the three cell types in the first batch for each parameter setting.
#' @param propsbatch2 A list giving the vectors of proportions of the three cell types in the second batch for each parameter setting.
#' @param pkeep Whether or not to keep "bad" simulation replicates where a cell type is represented by less than a certain number of cells in a batch: see mycutoff. By default =F.
#' @param mycutoff A number: if the number of cells for any cell type is represented by fewer than cutoff cells in a simulated batch, then this simulation replicate is deemed to be of bad quality. By default=5.
#' @param mycore The number of computing cores to use for parallelizing the simulation.
#' @param dgeneratedata The function to use for generating data. By default this equals generatedata. Highly recommended not to modify this argument.
#' @param ddocluster The function to use for clustering data. By default this equals docluster. Highly recommended not to modify this argument.
#' @export
#' @return A list of simulation replicate numbers calculated such that the standard deviation of each quantity (mean of silhouette scores for all cells, mean of silhouette scores for each cell type) for each batch correction method is no greater than the tol argument for each parameter setting.
#' @examples \dontrun{
#' parameternames=list('Original', 'Smaller Differences', 'More Genes')
#' nsims=list(50,50,50)
#' seed=0
#' cellcounts=list(500,500,500)
#' genecounts=list(100,100,5000)
#' xmeans=list(c(0,5,5),c(0,2,2),c(0,5,5))
#' xsdss=list(c(1,0.1,1),c(1,0.1,1),c(1,0.1,1))
#' ymeans=list(c(5,5,0),c(2,2,0),c(5,5,0))
#' ysdss=list(c(1,0.1,1),c(1,0.1,1),c(1,0.1,1))
#' propsbatch1=list(c(0.3,0.5,0.2),c(0.3,0.5,0.2),c(0.3,0.5,0.2))
#' propsbatch2=list(c(0.65,0.3,0.05),c(0.65,0.3,0.05),c(0.65,0.3,0.05))
#'
#' nsims=prelimstudy(
#' tol=0.01,parameternames=parameternames,
#' nsims=nsims,seed=seed,
#' cellcounts=cellcounts,
#' genecounts=genecounts,
#' xmeans=xmeans,xsdss=xsdss,ymeans=ymeans,
#' ysdss=ysdss,propsbatch1=propsbatch1,
#' propsbatch2=propsbatch2,mycore=1
#' )
#'
#' nsims
#' }
#' @importFrom stats rnorm sd
#'
prelimstudy=function(tol=0.01,parameternames,nsims,seed,cellcounts,genecounts,xmeans,xsdss,ymeans,ysdss,propsbatch1,propsbatch2,pkeep=FALSE,mycutoff=5,mycore=1,dgeneratedata=generatedata,ddocluster=docluster) {
  # All per-setting parameter lists must have equal length; sd() of the
  # lengths is zero exactly when they all agree.
  if(sd(sapply(list(parameternames,nsims,cellcounts,genecounts,xmeans,xsdss,ymeans,ysdss,propsbatch1,propsbatch2),length))!=0) {
    return(warning('All parameter lists should be the same length. Check arguments to make sure this is true.'))
  }
  set.seed(seed)
  # Derive one sub-seed per parameter setting so each dosim() run is reproducible.
  seedset=abs(round(rnorm(length(nsims),1000,200)))
  # Worst-case standard deviations per setting, one vector per result matrix
  # returned by dosim() (mysim[[1]]..mysim[[4]]).
  sduc=sdmnn=sdlm=sdcombat=vector(length=length(nsims))
  names(sduc)=names(sdmnn)=names(sdlm)=names(sdcombat)=as.vector(parameternames)
  # Assume every setting failed until its simulation succeeds below.
  failedsims=seq_along(nsims)
  for(i in seq_along(nsims)) {
    subseed=seedset[i]
    mysim=dosim(nsim=nsims[[i]],
                ncells=cellcounts[[i]],ngenes=genecounts[[i]],
                xmus=xmeans[[i]],xsds=xsdss[[i]],
                ymus=ymeans[[i]],ysds=ysdss[[i]],
                prop1=propsbatch1[[i]],prop2=propsbatch2[[i]],
                keep=pkeep,cutoff=mycutoff,ncore=mycore,s.seed=subseed,
                dgeneratedata = dgeneratedata,ddocluster=ddocluster)
    if(nrow(mysim[[1]])>1) {
      # Largest column-wise sd across the simulated quantities of interest.
      sduc[i]=max(apply(mysim[[1]],MARGIN=2,sd))
      sdmnn[i]=max(apply(mysim[[2]],MARGIN=2,sd))
      sdlm[i]=max(apply(mysim[[3]],MARGIN=2,sd))
      sdcombat[i]=max(apply(mysim[[4]],MARGIN=2,sd))
      failedsims=setdiff(failedsims,i)
    } else {
      warning(paste0('Preliminary Simulation Study Failed for Parameter Set: "', parameternames[[i]] ,'" Due to insufficient valid replicates \n Please Check Parameters \n Returning Supplied replicate number for this Parameter Set'))
    }
  }
  sds=data.frame(sduc,sdmnn,sdlm,sdcombat)
  # Worst (largest) sd across the four methods for each parameter setting.
  maxsds=apply(sds,MARGIN=1,max)
  # Replicates needed so the standard error sd/sqrt(n) <= tol: n >= (sd/tol)^2.
  newnsims=ceiling((maxsds/tol)^2)
  # BUG FIX: the original tested length(failedsims > 0) -- the length of a
  # logical vector, which only worked by coincidence; test the count directly.
  if(length(failedsims) > 0) {
    # Failed settings fall back to the replicate counts the caller supplied.
    newnsims[failedsims]=unlist(nsims)[failedsims]
  }
  return(as.list(newnsims))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/commonSV_source.R
\name{sampleAR1}
\alias{sampleAR1}
\title{Sample the AR(1) coefficient(s)}
\usage{
sampleAR1(h_yc, h_phi, h_sigma_eta_t, prior_dhs_phi = NULL)
}
\arguments{
\item{h_yc}{the \code{T x p} matrix of centered log-volatilities
(i.e., the log-vols minus the unconditional means \code{dhs_mean})}
\item{h_phi}{the \code{p x 1} vector of previous AR(1) coefficient(s)}
\item{h_sigma_eta_t}{the \code{T x p} matrix of log-vol innovation standard deviations}
\item{prior_dhs_phi}{the parameters of the prior for the log-volatility AR(1) coefficient \code{dhs_phi};
either \code{NULL} for uniform on [-1,1] or a 2-dimensional vector of (shape1, shape2) for a Beta prior
on \code{[(dhs_phi + 1)/2]}}
}
\value{
\code{p x 1} vector of sampled AR(1) coefficient(s)
}
\description{
Compute one draw of the AR(1) coefficient in a model with Gaussian innovations
and time-dependent innovation variances. In particular, we use the sampler for the
log-volatility AR(1) process with the parameter-expanded Polya-Gamma sampler. The sampler also applies
to a multivariate case with independent components.
}
\note{
For the standard AR(1) case, \code{p = 1}. However, the function applies more
generally for sampling \code{p > 1} independent AR(1) processes (jointly).
}
| /man/sampleAR1.Rd | no_license | drkowal/dfosr | R | false | true | 1,345 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/commonSV_source.R
\name{sampleAR1}
\alias{sampleAR1}
\title{Sample the AR(1) coefficient(s)}
\usage{
sampleAR1(h_yc, h_phi, h_sigma_eta_t, prior_dhs_phi = NULL)
}
\arguments{
\item{h_yc}{the \code{T x p} matrix of centered log-volatilities
(i.e., the log-vols minus the unconditional means \code{dhs_mean})}
\item{h_phi}{the \code{p x 1} vector of previous AR(1) coefficient(s)}
\item{h_sigma_eta_t}{the \code{T x p} matrix of log-vol innovation standard deviations}
\item{prior_dhs_phi}{the parameters of the prior for the log-volatility AR(1) coefficient \code{dhs_phi};
either \code{NULL} for uniform on [-1,1] or a 2-dimensional vector of (shape1, shape2) for a Beta prior
on \code{[(dhs_phi + 1)/2]}}
}
\value{
\code{p x 1} vector of sampled AR(1) coefficient(s)
}
\description{
Compute one draw of the AR(1) coefficient in a model with Gaussian innovations
and time-dependent innovation variances. In particular, we use the sampler for the
log-volatility AR(1) process with the parameter-expanded Polya-Gamma sampler. The sampler also applies
to a multivariate case with independent components.
}
\note{
For the standard AR(1) case, \code{p = 1}. However, the function applies more
generally for sampling \code{p > 1} independent AR(1) processes (jointly).
}
|
\name{CTF}
\alias{CTF}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Cease-to-flow (CTF) spell statistics
}
\description{
Calculates summary statistics describing cease-to-flow spell characteristics.}
\usage{
CTF(flow.ts, threshold = 0.1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{flow.ts}{
Dataframe with date and discharge data in columns named "Date" and "Q" respectively. Date must be in POSIX format (see ts.format).}
\item{threshold}{
values below this threshold (default 0.1) are treated as zero for the purpose of defining cease to flow spells to account for the fact that cease to flow levels are poorly defined for many gauging sites.}
}
\value{
A dataframe with 5 columns (see below).
\item{p.CTF }{Fraction of time during which cease-to-flow conditions occur}
\item{avg.CTF }{Average cease-to-flow spell duration}
\item{med.CTF }{Median cease-to-flow spell duration}
\item{min.CTF }{Minimum cease-to-flow spell duration}
\item{max.CTF }{Maximum cease-to-flow spell duration}
}
\author{
Nick Bond <n.bond@griffith.edu.au>
}
\examples{
data(Cooper)
Cooper<-ts.format(Cooper)
CTF(Cooper)
CTF(Cooper, threshold=0)
}
| /man/CTF.Rd | no_license | bheudorfer/hydrostats | R | false | false | 1,169 | rd | \name{CTF}
\alias{CTF}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Cease-to-flow (CTF) spell statistics
}
\description{
Calculates summary statistics describing cease-to-flow spell characteristics.}
\usage{
CTF(flow.ts, threshold = 0.1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{flow.ts}{
Dataframe with date and discharge data in columns named "Date" and "Q" respectively. Date must be in POSIX format (see ts.format).}
\item{threshold}{
values below this threshold (default 0.1) are treated as zero for the purpose of defining cease to flow spells to account for the fact that cease to flow levels are poorly defined for many gauging sites.}
}
\value{
A dataframe with 5 columns (see below).
\item{p.CTF }{Fraction of time during which cease-to-flow conditions occur}
\item{avg.CTF }{Average cease-to-flow spell duration}
\item{med.CTF }{Median cease-to-flow spell duration}
\item{min.CTF }{Minimum cease-to-flow spell duration}
\item{max.CTF }{Maximum cease-to-flow spell duration}
}
\author{
Nick Bond <n.bond@griffith.edu.au>
}
\examples{
data(Cooper)
Cooper<-ts.format(Cooper)
CTF(Cooper)
CTF(Cooper, threshold=0)
}
|
library(randomForest);library(forestFloor)
#simulate data: six uniform features on [-2, 2]
X = data.frame(replicate(6,4*(runif(3000)-.5)))
Xtest = data.frame(replicate(6,4*(runif(1500)-.5)))
y = with(X,X1^2+sin(X2*2*pi)+X3*X4) + rnorm(3000)/3
# BUG FIX: Xtest has 1500 rows but the noise term was rnorm(3000); silent
# recycling made ytest length 3000, mismatching the 1500 test predictions
# used later (e.g. in plot.simpleBoost). Use noise of matching length.
# NOTE(review): the test signal uses sin(X2*6*pi) where training uses 2*pi --
# confirm whether the frequency difference is intentional.
ytest = with(Xtest,X1^2+sin(X2*6*pi)+X3*X4) + rnorm(1500)/3
#define boosted tree wrapper
# Fit a naive gradient-boosting ensemble: each iteration grows a single
# randomForest tree on the current residuals, shrinks its node predictions by
# the learning rate v, and finally combines all trees into one
# randomForest-compatible object of class c("simpleBoost", "randomForest").
simpleBoost = function(
  X,y, #training data
  M=100, #boosting iterations and ntrees
  v=.1, #learning rate
  ...) { #other parameters passed to randomForest
  y_hat = y * 0 #latest ensemble prediction (starts at zero)
  res_hat = 0 #predicted residuals from the most recent tree
  Fx = list() #list collecting the individual trees
  for(m in 1:M) {
    y_hat = y_hat + res_hat * v #update prediction, by learning rate
    res = y - y_hat #compute residuals
    hx = randomForest(X,res,ntree=1,keep.inbag=T,...) #grow tree on residuals
    res_hat = predict(hx,X) #predict residuals
    cat("SD=",sd(res), "\n") #print residual sd to show training progress
    hx$forest$nodepred = hx$forest$nodepred * v #multiply nodepredictions by learning rate
    Fx[[m]] = hx #append tree to forest
  }
  Fx = do.call(combine,Fx) #combine trees with randomForest::combine()
  Fx$y = y #append y
  Fx$oob.times = apply(Fx$inbag,1,function(x) sum(!x)) #update oob.times
  class(Fx) = c("simpleBoost","randomForest") #make simpleBoost a subclass of randomForest
  return(Fx)
}
# Predict from a simpleBoost ensemble: boosted trees are additive, so the
# per-tree predictions are summed (not averaged as in a random forest).
predict.simpleBoost = function(Fx,X) {
  # Dispatch as a plain randomForest to obtain per-tree predictions.
  class(Fx) = "randomForest"
  predMatrix = predict(Fx, X, predict.all = TRUE)$individual
  # rowSums() replaces the original apply(predMatrix, 1, sum); the unused
  # `ntrees` local has been removed.
  rowSums(predMatrix)
}
# Plot the learning curve of a simpleBoost ensemble: the residual sd of the
# cumulative ensemble prediction against the number of boosting iterations.
# Set add = TRUE to overlay the curve on an existing plot.
plot.simpleBoost = function(Fx,X,ytest,add=FALSE,...) {
  class(Fx) = "randomForest"
  predMatrix = predict(Fx, X, predict.all = TRUE)$individual
  ntrees = ncol(predMatrix)
  # Row t of allPreds is the cumulative (first t trees) prediction per observation.
  allPreds = apply(predMatrix, 1, cumsum)
  curve = apply(allPreds, 1, function(pred) sd(ytest - pred))
  # Branch explicitly instead of the original `plot = points` shadowing trick.
  if (add) {
    points(seq_len(ntrees), curve, ...)
  } else {
    plot(seq_len(ntrees), curve, ...)
  }
  invisible(NULL)
}
# Build the gradient-boosted forest: 300 shrunken trees, learning rate 0.005.
rb = simpleBoost(X,y,M=300,replace=F,mtry=6,sampsize=500,v=0.005)
# Compute forestFloor feature contributions for the training and test sets.
ffb = forestFloor(rb,X,Xtest)
# Correct for the fact that tree votes of gradient boosts are summed, not
# averaged: forestFloor will by default divide by the same numbers that are
# multiplied in here (oob.times for training rows, ntree for test rows).
ffb$FCmatrix = ffb$FCmatrix * c(rb$oob.times,rep(rb$ntree,sum(!ffb$isTrain)))
# Plot forestFloor for OOB-CV (test) and regular (training) feature contributions.
plot(ffb,plotTest=T,col=fcol(ffb,3,plotTest = TRUE))
plot(ffb,plotTest=F,col=fcol(ffb,1,plotTest = FALSE))
# Validate model structure: fitted vs observed, learning curve, 2-D surface.
pred = predict(rb,X)
predtest = predict(rb,Xtest) # NOTE(review): computed but never used below
plot(y,pred,col="#00000034")
plot(rb,Xtest,ytest,log="x")
vec.plot(rb,X,i.var=1:2)
# Export the test-set contribution plot to a PNG file.
png(file = "ffGradientBoost.png", bg = "transparent",width=800,height = 500)
plot(ffb,plotTest=T,col=fcol(ffb,1))
rect(1, 5, 3, 7, col = "white")
dev.off()
dev.off() | /inst/examples/ffgradientBoost.R | no_license | dejavu2010/forestFloor | R | false | false | 2,850 | r | library(randomForest);library(forestFloor)
#simulate data
X = data.frame(replicate(6,4*(runif(3000)-.5)))
Xtest = data.frame(replicate(6,4*(runif(1500)-.5)))
y = with(X,X1^2+sin(X2*2*pi)+X3*X4) + rnorm(3000)/3
ytest = with(Xtest,X1^2+sin(X2*6*pi)+X3*X4) + rnorm(3000)/3
#define boosted tree wrapper
simpleBoost = function(
X,y, #training data
M=100, #boosting iterations and ntrees
v=.1, #learning rate
...) { #other parameters passed to randomForest
y_hat = y * 0 #latest ensemble prediction
res_hat = 0 #residuals hereof...
Fx = list() #list for trees
for(m in 1:M) {
y_hat = y_hat + res_hat * v #update prediction, by learning rate
res = y - y_hat #compute residuals
hx = randomForest(X,res,ntree=1,keep.inbag=T,...) #grow tree on residuals
res_hat = predict(hx,X) #predict residuals
cat("SD=",sd(res), "\n") #print
hx$forest$nodepred = hx$forest$nodepred * v #multiply nodepredictions by learning rate
Fx[[m]] = hx #append tree to forest
}
Fx = do.call(combine,Fx) #combine trees with randomForest::combine()
Fx$y = y #append y
Fx$oob.times = apply(Fx$inbag,1,function(x) sum(!x)) #update oob.times
class(Fx) = c("simpleBoost","randomForest") #make simpleBoost a subclass of randomForest
return(Fx)
}
predict.simpleBoost = function(Fx,X) {
class(Fx) = "randomForest"
predMatrix = predict(Fx,X,predict.all = T)$individual
ntrees = dim(predMatrix)[2]
return(apply(predMatrix,1,sum))
}
plot.simpleBoost = function(Fx,X,ytest,add=F,...) { #plots learning curve
class(Fx) = "randomForest"
predMatrix = predict(Fx,X,predict.all = T)$individual
ntrees = dim(predMatrix)[2]
allPreds = apply(predMatrix,1,cumsum)
preds = apply(allPreds,1,function(pred) sd(ytest-pred))
if(add) plot=points
plot(1:ntrees,preds,...)
return()
}
#build gradient boosted forest
rb = simpleBoost(X,y,M=300,replace=F,mtry=6,sampsize=500,v=0.005)
#make forestFloor plots
ffb = forestFloor(rb,X,Xtest)
#correct for that tree votes of gradient boosts are summed, not averaged.
#forestFloor will as default divide by the same number as here multiplied with
ffb$FCmatrix = ffb$FCmatrix * c(rb$oob.times,rep(rb$ntree,sum(!ffb$isTrain)))
#plot forestFloor for OOB-CV feature contributions and regular feature contributions
plot(ffb,plotTest=T,col=fcol(ffb,3,plotTest = TRUE))
plot(ffb,plotTest=F,col=fcol(ffb,1,plotTest = FALSE))
#validate model structure
pred = predict(rb,X)
predtest = predict(rb,Xtest)
plot(y,pred,col="#00000034")
plot(rb,Xtest,ytest,log="x")
vec.plot(rb,X,i.var=1:2)
#export plot
png(file = "ffGradientBoost.png", bg = "transparent",width=800,height = 500)
plot(ffb,plotTest=T,col=fcol(ffb,1))
rect(1, 5, 3, 7, col = "white")
dev.off() |
# read column labels
col_labels <- read.table("./features.txt")
# Read training data
train_data <- read.table("./train//X_train.txt",header=F)
train_subject <- read.table("./train/subject_train.txt",header=F)
train_y <- read.table("./train//y_train.txt",header=F)
# Read test data
test_data <- read.table("./test/X_test.txt",header=F)
test_subject <- read.table("./test//subject_test.txt",header=F)
test_y <- read.table("./test//y_test.txt",header=F)
# merging both the training data and the test data
training_data <- cbind(train_subject,train_y,train_data)
testing_data <- cbind(test_subject,test_y,test_data)
full_data <- rbind(training_data,testing_data)
mean_index <- grep("mean|std",col_labels$V2)
mean_index_p_2 <- mean_index + 2
subset_data <- full_data[,c(1,2,mean_index_p_2)]
colnames(subset_data) <- c("subject","y",as.character(col_labels[mean_index,2]))
subset_data$subject <- as.factor(subset_data$subject)
activity_labels <- read.table("./activity_labels.txt",header=F)
subset_data$activity_label <- factor(subset_data$y,labels=activity_labels$V2)
require(reshape2)
melted_data <- melt(subset_data,id=c("subject","y","activity_label"),measure.vars=colnames(subset_data)[-c(1,2,82)])
casted_data <- dcast(melted_data,subject+activity_label+y~variable,mean)
write.table(casted_data,file="./tidy_data.txt",col.names=T,row.names=F,sep="\t")
| /run_analysis.R | no_license | shoaibm/peerassignment | R | false | false | 1,356 | r | # read column labels
col_labels <- read.table("./features.txt")
# Read training data
train_data <- read.table("./train//X_train.txt",header=F)
train_subject <- read.table("./train/subject_train.txt",header=F)
train_y <- read.table("./train//y_train.txt",header=F)
# Read test data
test_data <- read.table("./test/X_test.txt",header=F)
test_subject <- read.table("./test//subject_test.txt",header=F)
test_y <- read.table("./test//y_test.txt",header=F)
# merging both the training data and the test data
training_data <- cbind(train_subject,train_y,train_data)
testing_data <- cbind(test_subject,test_y,test_data)
full_data <- rbind(training_data,testing_data)
mean_index <- grep("mean|std",col_labels$V2)
mean_index_p_2 <- mean_index + 2
subset_data <- full_data[,c(1,2,mean_index_p_2)]
colnames(subset_data) <- c("subject","y",as.character(col_labels[mean_index,2]))
subset_data$subject <- as.factor(subset_data$subject)
activity_labels <- read.table("./activity_labels.txt",header=F)
subset_data$activity_label <- factor(subset_data$y,labels=activity_labels$V2)
require(reshape2)
melted_data <- melt(subset_data,id=c("subject","y","activity_label"),measure.vars=colnames(subset_data)[-c(1,2,82)])
casted_data <- dcast(melted_data,subject+activity_label+y~variable,mean)
write.table(casted_data,file="./tidy_data.txt",col.names=T,row.names=F,sep="\t")
|
context("test status")
source("constants.R")
# real tests
testthat::test_that("test status API call", {
testthat::skip_if(SKIP_REAL_JPREDAPI, "Skipping tests that hit the real JPred API server.")
submit_response <- jpredapir::submit(mode="single", user_format="raw", seq="MQVWPIEGIKKFETLSYLPP")
result_url <- httr::headers(submit_response)$location
jobid <- stringr::str_match(string = result_url, pattern = "(jp_.*)$")[2]
status_response <- jpredapir::status(jobid = jobid, results_dir_path = NULL, extract = FALSE, silent = FALSE, host = HOST, jpred4 = JPRED4)
testthat::expect_equal(status_response$status_code, 200)
})
testthat::test_that("test status API call", {
testthat::skip_if(SKIP_REAL_JPREDAPI, "Skipping tests that hit the real JPred API server.")
submit_response <- jpredapir::submit(mode="single", user_format="raw", seq="MQVWPIEGIKKFETLSYLPP")
result_url <- httr::headers(submit_response)$location
jobid <- stringr::str_match(string = result_url, pattern = "(jp_.*)$")[2]
status_response <- jpredapir::status(jobid = jobid, results_dir_path = "jpred_results", extract = FALSE, silent = FALSE, host = HOST, jpred4 = JPRED4)
testthat::expect_equal(status_response$status_code, 200)
})
testthat::test_that("test status API call", {
testthat::skip_if(SKIP_REAL_JPREDAPI, "Skipping tests that hit the real JPred API server.")
submit_response <- jpredapir::submit(mode="single", user_format="raw", seq="MQVWPIEGIKKFETLSYLPP")
result_url <- httr::headers(submit_response)$location
jobid <- stringr::str_match(string = result_url, pattern = "(jp_.*)$")[2]
status_response <- jpredapir::status(jobid = jobid, results_dir_path = "jpred_results", extract = TRUE, silent = FALSE, host = HOST, jpred4 = JPRED4)
testthat::expect_equal(status_response$status_code, 200)
})
# mock tests
testthat::test_that("test mock status API call", {
testthat::with_mock(
status = function(jobid, results_dir_path, extract, silent, host, jpred4) {return(list(success = TRUE, status_code = 200))},
status_response <- status(jobid = "jp_mock", results_dir_path = NULL, extract = FALSE, silent = FALSE, host = HOST, jpred4 = JPRED4),
testthat::expect_equal(status_response$status_code, 200))
})
testthat::test_that("test mock status API call", {
testthat::with_mock(
status = function(jobid, results_dir_path, extract, silent, host, jpred4) {return(list(success = TRUE, status_code = 200))},
status_response <- status(jobid = "jp_mock", results_dir_path = "jpred_results", extract = FALSE, silent = FALSE, host = HOST, jpred4 = JPRED4),
testthat::expect_equal(status_response$status_code, 200))
})
testthat::test_that("test mock status API call", {
testthat::with_mock(
status = function(jobid, results_dir_path, extract, silent, host, jpred4) {return(list(success = TRUE, status_code = 200))},
status_response <- status(jobid = "jp_mock", results_dir_path = "jpred_results", extract = TRUE, silent = FALSE, host = HOST, jpred4 = JPRED4),
testthat::expect_equal(status_response$status_code, 200))
})
| /tests/testthat/test_status.R | permissive | MoseleyBioinformaticsLab/jpredapir | R | false | false | 3,124 | r | context("test status")
source("constants.R")
# real tests
testthat::test_that("test status API call", {
testthat::skip_if(SKIP_REAL_JPREDAPI, "Skipping tests that hit the real JPred API server.")
submit_response <- jpredapir::submit(mode="single", user_format="raw", seq="MQVWPIEGIKKFETLSYLPP")
result_url <- httr::headers(submit_response)$location
jobid <- stringr::str_match(string = result_url, pattern = "(jp_.*)$")[2]
status_response <- jpredapir::status(jobid = jobid, results_dir_path = NULL, extract = FALSE, silent = FALSE, host = HOST, jpred4 = JPRED4)
testthat::expect_equal(status_response$status_code, 200)
})
testthat::test_that("test status API call", {
testthat::skip_if(SKIP_REAL_JPREDAPI, "Skipping tests that hit the real JPred API server.")
submit_response <- jpredapir::submit(mode="single", user_format="raw", seq="MQVWPIEGIKKFETLSYLPP")
result_url <- httr::headers(submit_response)$location
jobid <- stringr::str_match(string = result_url, pattern = "(jp_.*)$")[2]
status_response <- jpredapir::status(jobid = jobid, results_dir_path = "jpred_results", extract = FALSE, silent = FALSE, host = HOST, jpred4 = JPRED4)
testthat::expect_equal(status_response$status_code, 200)
})
testthat::test_that("test status API call", {
testthat::skip_if(SKIP_REAL_JPREDAPI, "Skipping tests that hit the real JPred API server.")
submit_response <- jpredapir::submit(mode="single", user_format="raw", seq="MQVWPIEGIKKFETLSYLPP")
result_url <- httr::headers(submit_response)$location
jobid <- stringr::str_match(string = result_url, pattern = "(jp_.*)$")[2]
status_response <- jpredapir::status(jobid = jobid, results_dir_path = "jpred_results", extract = TRUE, silent = FALSE, host = HOST, jpred4 = JPRED4)
testthat::expect_equal(status_response$status_code, 200)
})
# mock tests
testthat::test_that("test mock status API call", {
testthat::with_mock(
status = function(jobid, results_dir_path, extract, silent, host, jpred4) {return(list(success = TRUE, status_code = 200))},
status_response <- status(jobid = "jp_mock", results_dir_path = NULL, extract = FALSE, silent = FALSE, host = HOST, jpred4 = JPRED4),
testthat::expect_equal(status_response$status_code, 200))
})
testthat::test_that("test mock status API call", {
testthat::with_mock(
status = function(jobid, results_dir_path, extract, silent, host, jpred4) {return(list(success = TRUE, status_code = 200))},
status_response <- status(jobid = "jp_mock", results_dir_path = "jpred_results", extract = FALSE, silent = FALSE, host = HOST, jpred4 = JPRED4),
testthat::expect_equal(status_response$status_code, 200))
})
testthat::test_that("test mock status API call", {
testthat::with_mock(
status = function(jobid, results_dir_path, extract, silent, host, jpred4) {return(list(success = TRUE, status_code = 200))},
status_response <- status(jobid = "jp_mock", results_dir_path = "jpred_results", extract = TRUE, silent = FALSE, host = HOST, jpred4 = JPRED4),
testthat::expect_equal(status_response$status_code, 200))
})
|
library(cmdstanr)
library(LambertW)
# normal h transform -----------------------------------------------------------
N <- 100
mu <- 0
sigma <- 1
y <- LambertW::rLambertW(N, "normal", theta=list(beta=c(mu, sigma), gamma=0.1, alpha=1, delta=c(0,0)))
fp <- file.path(paste(getwd(), "/week37/lambertw_normal_h.stan", sep=""))
mod <- cmdstan_model(fp, force_recompile = F)
mod_out <- mod$sample(data=list(N=N, y=y, mu=mu, sigma=sigma), parallel_chains=4)
mod_out$summary()
| /test_many_models.R | no_license | SteveBronder/gsoc | R | false | false | 473 | r | library(cmdstanr)
library(LambertW)
# normal h transform -----------------------------------------------------------
N <- 100
mu <- 0
sigma <- 1
y <- LambertW::rLambertW(N, "normal", theta=list(beta=c(mu, sigma), gamma=0.1, alpha=1, delta=c(0,0)))
fp <- file.path(paste(getwd(), "/week37/lambertw_normal_h.stan", sep=""))
mod <- cmdstan_model(fp, force_recompile = F)
mod_out <- mod$sample(data=list(N=N, y=y, mu=mu, sigma=sigma), parallel_chains=4)
mod_out$summary()
|
\name{hist.massvector}
\alias{hist.massvector}
\title{ Histograms}
\description{ Histograms }
\usage{\method{hist}{massvector}(x,accur = 0.1,abund = 0, main=info(x) ,xlab="m/z",xlim=c(min(mass(x)),max(mass(x))),add=FALSE,col=1,...)}
\arguments{
\item{...}{ further plotting arguments.}
\item{abund}{ draws a horizontal line at the frequency given by abund.}
\item{accur}{ sets the bin width of the histogramm.}
\item{add}{ T-adds the histogram to an existing image.}
\item{col}{ the color of the histogram.}
\item{main}{}
\item{x}{}
\item{xlab}{ sets the xlabels.}
\item{xlim}{ sets the min and max value to be displayed.}
}
\author{Witold Wolski \email{wolski@molgen.mpg.de}}
\seealso{\code{\link{hist}},
}
\examples{
data(mv1)
hist(mv1)
}
\keyword{misc}
| /man/hist.massvector.Rd | no_license | cran/mscalib | R | false | false | 782 | rd | \name{hist.massvector}
\alias{hist.massvector}
\title{ Histograms}
\description{ Histograms }
\usage{\method{hist}{massvector}(x,accur = 0.1,abund = 0, main=info(x) ,xlab="m/z",xlim=c(min(mass(x)),max(mass(x))),add=FALSE,col=1,...)}
\arguments{
\item{...}{ further plotting arguments.}
\item{abund}{ draws a horizontal line at the frequency given by abund.}
\item{accur}{ sets the bin width of the histogramm.}
\item{add}{ T-adds the histogram to an existing image.}
\item{col}{ the color of the histogram.}
\item{main}{}
\item{x}{}
\item{xlab}{ sets the xlabels.}
\item{xlim}{ sets the min and max value to be displayed.}
}
\author{Witold Wolski \email{wolski@molgen.mpg.de}}
\seealso{\code{\link{hist}},
}
\examples{
data(mv1)
hist(mv1)
}
\keyword{misc}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.