content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
###### Load packages ######
# Load necessary packages for single cell RNA-Seq analysis including packages for downstream Gene Ontology Analysis
suppressPackageStartupMessages({
library(devtools)
library(stringr)
library(scales)
library(dtw)
library(monocle)
library(reshape2)
library(GSA)
library(limma)
library(DBI)
library(MASS)
library(plyr)
library(dplyr)
library(tidyr)
library(matrixStats)
library(cluster)
library(pheatmap)
library(grid)
library(RColorBrewer)
library(viridis)
library(ggrepel)})
##### Load and define necessary functions #####
source("Pseudospace_support_functions.R")
# Run monocle's standard preprocessing on a CellDataSet:
# flag detectably expressed genes, then estimate size factors
# and dispersions required by downstream tests.
preprocess_cds <- function(cds) {
  processed <- detectGenes(cds, min_expr = 0.1)
  processed <- estimateSizeFactors(processed)
  estimateDispersions(processed)
}
# Build a 2-D pseudospatial trajectory from a CellDataSet using the
# supplied ordering genes, then order cells along it.
getPseudospaceTrajectory <- function(cds, sig_genes) {
  trajectory_cds <- setOrderingFilter(cds, sig_genes)
  trajectory_cds <- reduceDimension(trajectory_cds, max_components = 2,
                                    norm_method = "log")
  orderCells(trajectory_cds, reverse = FALSE)
}
## Need to update function in pseudospace_support_functions to specify which columns of pData to keep after alignment

# Align a query pseudospatial trajectory to a reference trajectory with
# dynamic time warping (DTW) and return a single CellDataSet that places
# both samples on a common (warped) pseudospatial axis.
#
# Args:
#   query_cds, ref_cds: ordered monocle CellDataSets (Pseudotime in pData,
#     use_for_ordering set in fData).
#   ref, query: labels written to pData$Cell.Type for each sample.
#   expressed_genes: genes kept in the combined expression matrix.
#   cores: passed to genSmoothCurves for parallel curve fitting.
# Returns: a new CellDataSet combining both samples on shared pseudospace.
getDTWcds <- function(query_cds, ref_cds, ref, query, expressed_genes, cores = 1){
# Only genes used for ordering in BOTH trajectories drive the alignment
alignment_genes <- intersect(row.names(subset(fData(ref_cds), use_for_ordering)),
row.names(subset(fData(query_cds), use_for_ordering)))
ref_align_cds <- ref_cds[alignment_genes]
query_align_cds <- query_cds[alignment_genes]
### Set a consistent Pseudospace between both ordering sets
message("Normalizing pseudospace for each sample")
# Rescale each sample's Pseudotime to [0, 100] and sort cells along it
pData(ref_align_cds)$cell_id <- row.names(pData(ref_align_cds))
pData(ref_align_cds)$Pseudotime <- 100 * pData(ref_align_cds)$Pseudotime / max(pData(ref_align_cds)$Pseudotime)
ref_align_cds <- ref_align_cds[alignment_genes,as.character(arrange(pData(ref_align_cds), Pseudotime)$cell_id)]
pData(query_align_cds)$cell_id <- row.names(pData(query_align_cds))
pData(query_align_cds)$Pseudotime <- 100 * pData(query_align_cds)$Pseudotime / max(pData(query_align_cds)$Pseudotime)
query_align_cds <- query_align_cds[alignment_genes,as.character(arrange(pData(query_align_cds), Pseudotime)$cell_id)]
# Fit a smoothed curve to each alignment gene across pseudospace
message("Fitting smooth curves across pseudospace")
#closeAllConnections()
smoothed_ref_exprs <- genSmoothCurves(ref_align_cds[alignment_genes], data.frame(Pseudotime=seq(0,100, by=1)), cores= cores)
# Drop genes whose smoothed curve contains NA before variance stabilization
smoothed_ref_exprs <- smoothed_ref_exprs[rowSums(is.na(smoothed_ref_exprs)) == 0,]
vst_smoothed_ref_exprs <- vstExprs(ref_cds, expr_matrix=smoothed_ref_exprs)
#closeAllConnections()
smoothed_query_exprs <- genSmoothCurves(query_align_cds[alignment_genes], data.frame(Pseudotime=seq(0,100, by=1)), cores= cores)
smoothed_query_exprs <- smoothed_query_exprs[rowSums(is.na(smoothed_query_exprs)) == 0,]
vst_smoothed_query_exprs <- vstExprs(query_cds, expr_matrix=smoothed_query_exprs)
# Re-intersect: the NA filter may have removed genes from either side
alignment_genes <- intersect(row.names(vst_smoothed_ref_exprs), row.names(vst_smoothed_query_exprs))
# z-score each gene (rows) before computing the DTW cost
ref_matrix <- t(scale(t(vst_smoothed_ref_exprs[alignment_genes,])))
query_matrix <- t(scale(t(vst_smoothed_query_exprs[alignment_genes,])))
message("Aligning pseudospatial trajectories with dynamic time warping")
ref_query_dtw <- align_cells(ref_matrix, query_matrix, step_pattern=rabinerJuangStepPattern(3, "c"), open.begin=FALSE, open.end=FALSE)
message("Warping pseudospace")
align_res <- warp_pseudotime(ref_align_cds, query_align_cds, ref_query_dtw)
query_ref_aligned <- align_res$query_cds
# Replace the query's Pseudotime with the warped coordinate
pData(query_ref_aligned)$Pseudotime <- pData(query_ref_aligned)$Alignment_Pseudotime
# warp_pseudotime can add a synthetic "duplicate_root" cell; exclude it
ref_aligned_cell_ids <- setdiff(row.names(pData(ref_align_cds)), "duplicate_root")
query_aligned_cell_ids <- setdiff(row.names(pData(query_align_cds)), "duplicate_root")
# FIX: Matrix::cBind() is defunct; base cbind() dispatches correctly on
# both dense and sparse (S4) matrices in current R/Matrix versions.
combined_exprs <- cbind(Biobase::exprs(query_cds[expressed_genes,query_aligned_cell_ids]),
Biobase::exprs(ref_cds[expressed_genes,ref_aligned_cell_ids]))
# Keep a fixed set of phenotype columns from each sample and tag its origin
pData_ref <- pData(ref_align_cds)[,c("gene","all_gene","barcode","proportion","guide_count","condition","treatment", "position", "Pseudotime")]
pData_ref$Cell.Type <- ref
pData_query_aligned <- pData(query_ref_aligned)[,c("gene","all_gene","barcode","proportion","guide_count","condition","treatment", "position", "Pseudotime")]
pData_query_aligned$Cell.Type <- query
combined_pData <- rbind(pData_query_aligned, pData_ref)
# Match pData row order to the combined expression matrix columns
combined_pData <- combined_pData[colnames(combined_exprs),]
combined_pd <- new("AnnotatedDataFrame", data = combined_pData)
fd <- new("AnnotatedDataFrame", data = fData(ref_cds)[row.names(combined_exprs),1:2])
message("Creating a new cds object with a common pseudospatial axes")
ref_queryToRef_combined_cds <- newCellDataSet(combined_exprs,
phenoData = combined_pd,
featureData = fd,
expressionFamily=negbinomial.size(),
lowerDetectionLimit=1)
pData(ref_queryToRef_combined_cds)$cell_id <- row.names(pData(ref_queryToRef_combined_cds))
return(ref_queryToRef_combined_cds)
}
# Expectation-maximization model to correct for differing knockout
# efficiencies across sgRNAs targeting the same gene.
#
# Args:
#   mat: guides x regions integer count matrix (one row per sgRNA).
#   ntc.dist: distribution of non-targeting control cells over the same
#     regions (non-negative, sums to 1).
#   n.iterations: number of EM iterations to run.
# Returns: numeric vector, one entry per guide, giving the estimated
#   proportion of cells exhibiting the loss-of-function (LOF) distribution
#   (mixture weight of the shared LOF component for that guide).
get.guide.weights = function(mat, ntc.dist, n.iterations = 30) {
  n.guides = nrow(mat)
  n.cells = rowSums(mat)
  # Per-guide empirical distribution over regions
  empirical.dist = sweep(mat, 1, n.cells, "/")
  # Initialize every guide as a 50/50 LOF / NTC-like mixture
  lof.prop = rep(0.5, n.guides)
  expected.n.lof = n.cells * lof.prop
  for (i in seq_len(n.iterations)) {
    # E-step: back out each guide's implied LOF distribution given the
    # current mixture weight p
    lof.dist = sapply(seq_len(n.guides), function(guide) {
      p = lof.prop[guide]
      (empirical.dist[guide,] - (1-p) * ntc.dist) / p
    })
    # Average across guides, weighted by the expected number of LOF cells
    lof.dist = rowSums(sweep(lof.dist, 2, expected.n.lof / sum(expected.n.lof), "*"))
    # Clamp negative mass and renormalize to a valid distribution
    lof.dist = ifelse(lof.dist < 0, 0, lof.dist)
    lof.dist = lof.dist / sum(lof.dist)
    # M-step: per guide, find the mixture weight maximizing the
    # multinomial log-likelihood of the observed counts
    lof.prop = sapply(seq_len(n.guides), function(guide) {
      optimize(function(p) dmultinom(mat[guide,], prob = p * lof.dist + (1-p) * ntc.dist, log = TRUE),
               c(0.0, 1.0), maximum = TRUE)$maximum
    })
    expected.n.lof = n.cells * lof.prop
  }
  return(lof.prop)
}
# Empirically estimate the FDR of the region-enrichment chi-squared test.
# Each iteration relabels a random subset of 50 non-targeting control (NTC)
# cells as a decoy target ("NTC_decoy"), reruns the per-target chi-squared
# test against the NTC region distribution, and records the BH-adjusted
# q-values so the decoy's rank among real targets can be used as a null.
#
# Args:
#   cds: named list of aligned CellDataSets (expects "Mock" and "TGFB").
#   iterations: number of random decoy draws.
# Returns: list (one element per iteration) of per-sample q-value vectors,
#   floored at 1e-50.
#
# NOTE(review): this function also reads globals prepared by the
# surrounding script -- cds.aligned.list, analysis.guides and
# weighted.target.region.mat. TODO: pass these in explicitly.
calculate_ntc_empirical_fdr <- function(cds, iterations){
chisq_qval.list <- list()
# Median cells per target gene (computed but not used further below)
median_NTC.list <- list()
median_NTC.list[["Mock"]] <- median((pData(cds.aligned.list[["Mock"]]) %>%
group_by(gene) %>%
summarize(n = n()))$n)
median_NTC.list[["TGFB"]] <- median((pData(cds.aligned.list[["TGFB"]]) %>%
group_by(gene) %>%
summarize(n = n()))$n)
# All NTC cell ids per sample; decoys are drawn from these
NTC_cell_subset.list <- list()
for(sample in names(cds.aligned.list)){
NTC_cell_subset.list[[sample]] <- row.names(subset(pData(cds.aligned.list[[sample]]),
gene == "NONTARGETING"))
}
for(i in 1:iterations){
# FIX: progress checkpoint had a duplicated 100 where 1000 was intended
if(i %in% c(1,10,100,250,500,750,1000)){message(paste0("Iteration ", i," of ",as.character(iterations)))}
cds_list <- cds
# Draw a reproducible random set of 50 NTC cells to act as the decoy
random_NTC_subset.list <- list()
for(sample in names(cds_list)){
set.seed(i)
random_NTC_subset.list[[sample]] <- sample(NTC_cell_subset.list[[sample]], 50, replace = FALSE)
}
# Reassign the sampled cells to the "NTC_decoy" pseudo-target
new_gene_assignments.list <- list()
for(sample in names(cds_list)){
new_gene_assignments.list[[sample]] <- sapply(pData(cds_list[[sample]])$cell,function(x){
if(x %in% random_NTC_subset.list[[sample]]) return("NTC_decoy")
return(pData(cds_list[[sample]])[x,]$gene)
})
}
pData(cds_list[["Mock"]])$gene <- new_gene_assignments.list[["Mock"]]
pData(cds_list[["TGFB"]])$gene <- new_gene_assignments.list[["TGFB"]]
# Targets with >= 15 cells and at least one analysis guide (decoy included)
analysis.targets = list()
analysis.targets[["Mock"]] = as.data.frame(pData(cds_list[["Mock"]]) %>% filter(gene != "NONTARGETING") %>%
group_by(gene) %>% summarize(
n.cells = n(),
n.guides = length(intersect(unique(barcode), analysis.guides[["Mock"]]))) %>%
filter(n.cells >= 15, n.guides >= 1) %>% dplyr::select(gene))[,1]
analysis.targets[["TGFB"]] = as.data.frame(pData(cds_list[["TGFB"]]) %>% filter(gene != "NONTARGETING") %>%
group_by(gene) %>% summarize(
n.cells = n(),
n.guides = length(intersect(unique(barcode), analysis.guides[["TGFB"]]))) %>%
filter(n.cells >= 15, n.guides >= 1) %>% dplyr::select(gene))[,1]
target.to.guide.map <- list()
for (target in analysis.targets[["Mock"]]) {
target.to.guide.map[["Mock"]][[target]] =
sort(unique(as.data.frame(pData(cds_list[["Mock"]]) %>%
filter(gene == target, barcode %in% analysis.guides[["Mock"]]) %>%
dplyr::select(barcode))[, 1]))
}
for (target in analysis.targets[["TGFB"]]) {
target.to.guide.map[["TGFB"]][[target]] =
sort(unique(as.data.frame(pData(cds_list[["TGFB"]]) %>%
filter(gene == target, barcode %in% analysis.guides[["TGFB"]]) %>%
dplyr::select(barcode))[, 1]))
}
guide.to.target.map = list()
for(sample in names(cds_list)){
guide.to.target.map[[sample]] = list()
for (target in analysis.targets[[sample]]) {
for (guide in target.to.guide.map[[sample]][[target]]) {
guide.to.target.map[[sample]][[guide]] = target
}
}
}
# Target x region cell-count matrices under this iteration's relabeling
target.region.mat = list()
target.region.mat[["Mock"]] = acast(
pData(cds_list[["Mock"]]) %>%
filter(barcode %in% analysis.guides[["Mock"]] | gene == "NONTARGETING") %>%
mutate(dummy = 1) %>% dplyr::select(gene, region, dummy),
gene ~ region, value.var = "dummy", fun.aggregate = sum, fill = 0)
target.region.mat[["TGFB"]] = acast(
pData(cds_list[["TGFB"]]) %>%
filter(barcode %in% analysis.guides[["TGFB"]] | gene == "NONTARGETING") %>%
mutate(dummy = 1) %>% dplyr::select(gene, region, dummy),
gene ~ region, value.var = "dummy", fun.aggregate = sum, fill = 0)
NTC_decoy.region.mat <- list()
NTC_decoy.region.mat[["Mock"]] <- matrix(target.region.mat[["Mock"]][row.names(target.region.mat[["Mock"]]) == "NTC_decoy",],
nrow = 1)
row.names(NTC_decoy.region.mat[["Mock"]]) <- "NTC_decoy"
NTC_decoy.region.mat[["TGFB"]] <- matrix(target.region.mat[["TGFB"]][row.names(target.region.mat[["TGFB"]]) == "NTC_decoy",],
nrow = 1)
row.names(NTC_decoy.region.mat[["TGFB"]]) <- "NTC_decoy"
# BUG FIX: the original appended the decoy row to a function-local copy of
# the global weighted.target.region.mat that persisted across iterations,
# so from iteration 2 onward the stale iteration-1 "NTC_decoy" row was the
# one matched by row name. Build a fresh per-iteration copy instead.
weighted.mat.iter <- list()
weighted.mat.iter[["Mock"]] <- rbind(weighted.target.region.mat[["Mock"]],
NTC_decoy.region.mat[["Mock"]])
weighted.mat.iter[["TGFB"]] <- rbind(weighted.target.region.mat[["TGFB"]],
NTC_decoy.region.mat[["TGFB"]])
# Per-region NTC counts with a 0.1 pseudocount for empty regions
NTC.region.p = list()
for(sample in names(cds_list)){
pData(cds_list[[sample]])$gene <- as.factor(pData(cds_list[[sample]])$gene)
pData(cds_list[[sample]])$region <- as.factor(pData(cds_list[[sample]])$region)
NTC.region.p[[sample]] <- pData(cds_list[[sample]]) %>%
group_by(gene, region) %>%
summarize(n = n()) %>%
tidyr::complete(region, fill = list(n = 0.1)) %>%
filter(gene == "NONTARGETING")
}
ntc.distribution = list()
for(sample in names(cds_list)){
ntc.distribution[[sample]] = weighted.mat.iter[[sample]]["NONTARGETING",]
ntc.distribution[[sample]] = ntc.distribution[[sample]] / sum(ntc.distribution[[sample]])
}
# Chi-squared test of each target's region distribution vs. the NTC one.
# NOTE(review): B is ignored when simulate.p.value = FALSE.
initial.target.level.chisq.pval = list()
for(sample in names(cds_list)){
set.seed(42)
initial.target.level.chisq.pval[[sample]] = sapply(
analysis.targets[[sample]], function(target) {
suppressWarnings({chisq.test(
weighted.mat.iter[[sample]][target,],
p = NTC.region.p[[sample]]$n,
simulate.p.value = FALSE, rescale.p = TRUE, B = 1000)$p.value})
})
}
# BH-adjust and floor q-values at 1e-50
initial.target.level.chisq.qval <- list()
for(sample in names(cds_list)){
initial.target.level.chisq.qval[[sample]] <- p.adjust(initial.target.level.chisq.pval[[sample]], method = "BH")
initial.target.level.chisq.qval[[sample]] <- sapply(initial.target.level.chisq.qval[[sample]],
function(x){if(x < 1e-50){return(1e-50)}else{return(x)}})
}
chisq_qval.list[[i]] <- initial.target.level.chisq.qval
}
return(chisq_qval.list)
}
#### Load data ####
# Read the CROP-seq loss-of-function CellDataSet from disk
Pseudospace_lof_cds <- readRDS("CROPseq_pseudospace_cds.rds")
# Create a cds subset for each stimulation condition that contains spatially isolated cells
# Keep only cells with a non-missing spatial proportion and exactly one guide
cds.list <- list()
cds.list[["Mock"]] <- Pseudospace_lof_cds[,!is.na(pData(Pseudospace_lof_cds)$proportion) &
pData(Pseudospace_lof_cds)$guide_count == 1 &
pData(Pseudospace_lof_cds)$treatment == "mock"]
cds.list[["TGFB"]] <- Pseudospace_lof_cds[,!is.na(pData(Pseudospace_lof_cds)$proportion) &
pData(Pseudospace_lof_cds)$guide_count == 1 &
pData(Pseudospace_lof_cds)$treatment == "tgfb"]
# Print cell counts per condition.
# NOTE(review): group_by(sample) groups by a pData column named "sample" if
# one exists; otherwise it would capture the loop variable -- verify intent.
for(sample in names(cds.list)){
print(pData(cds.list[[sample]]) %>%
group_by(sample) %>% summarize(n = n()))
}
# Identify genes detected (count > 0) in more than 50 cells per condition
expressed_genes.list <- list()
expressed_genes.list[["Mock"]] <- row.names(fData(cds.list[["Mock"]])[Matrix::rowSums(Biobase::exprs(cds.list[["Mock"]]) > 0) > 50 ,])
length(expressed_genes.list[["Mock"]])
expressed_genes.list[["TGFB"]] <- row.names(fData(cds.list[["TGFB"]])[Matrix::rowSums(Biobase::exprs(cds.list[["TGFB"]]) > 0) > 50 ,])
length(expressed_genes.list[["TGFB"]])
# Estimate size factors and dispersions for each subset
for(sample in names(cds.list)) {
cds.list[[sample]] <- preprocess_cds(cds.list[[sample]])
}
# Identify genes that vary significantly between inner and outer CROPseq cell fractions
# Likelihood-ratio test: full model ~position vs. intercept-only reduced model
Spatial.DEG.test.list <- list()
for(sample in names(cds.list)){
Spatial.DEG.test.list[[sample]] <- differentialGeneTest(cds.list[[sample]][expressed_genes.list[[sample]]],
fullModelFormulaStr = "~position",
reducedModelFormulaStr = "~1",
cores = 1)
}
# Calculate fold change in expression levels of significant genes between CROPseq cell fractions isolated by space
# diff_foldChange() comes from Pseudospace_support_functions.R; "inner" is
# used as the reference level for the log2 fold change.
for(sample in names(Spatial.DEG.test.list)){
diff_test_genes <- row.names(Spatial.DEG.test.list[[sample]])
diff_cds <- cds.list[[sample]][diff_test_genes]
diff_FC <- diff_foldChange(diff_cds, "position","inner")
Spatial.DEG.test.list[[sample]]$log2_foldChange <- diff_FC$log2FC_outer
rm(diff_test_genes,diff_cds,diff_FC)
}
# Keep genes with q <= 1e-6 and at least a 2-fold change between fractions
Spatial_sig_genes.list <- list()
for(sample in names(Spatial.DEG.test.list)){
Spatial_sig_genes.list[[sample]] <- row.names(subset(Spatial.DEG.test.list[[sample]], qval <= 1e-6 &
abs(log2_foldChange) >= 1))
print(length(Spatial_sig_genes.list[[sample]]))
}
# Create pseudospatial trajectories and examine the distribution of inner and outer cells within them
for(sample in names(cds.list)){
cds.list[[sample]] <- getPseudospaceTrajectory(cds.list[[sample]],
Spatial_sig_genes.list[[sample]])
}
# Re-run ordering on each trajectory (reverse = F keeps the default root).
# NOTE(review): getPseudospaceTrajectory already called orderCells; this
# repeat looks redundant -- confirm it is intentional.
cds.list[["Mock"]] <- orderCells(cds.list[["Mock"]], reverse = F)
cds.list[["TGFB"]] <- orderCells(cds.list[["TGFB"]], reverse = F)
# Trajectory plot colored by spatial position (Mock).
# NOTE(review): appending "+ ggsave(...)" to a ggplot chain relies on
# ggsave() being evaluated eagerly and saving the last plot -- fragile idiom.
plot_cell_trajectory(cds.list[["Mock"]], color_by = "position",show_branch_points = FALSE) +
theme(legend.position="top", text=element_text(size=20), legend.direction = "vertical") +
scale_color_manual(labels = c("inner colony", "outer colony"), values = c("#0075F2", "#D62828"),
name = "Spatial Context") +
ggsave(file = "MCF10A_Mock_loss_of_function_PseudospatialTrajectory.png", height = 6, width = 6)
# Density of inner vs. outer cells along Mock pseudospace
ggplot(pData(cds.list[["Mock"]]), aes(x = Pseudotime, fill = position, color = position)) +
geom_density() +
facet_wrap(~position, ncol = 1) +
theme_classic() +
scale_color_manual("Spatial Context", labels = c("inner colony", "outer colony"),
values = c("#000000","#000000")) +
scale_fill_manual("Spatial Context", labels = c("inner colony", "outer colony")
, values = c("#0075F2","#D62828")) +
xlab("Pseudospace") +
ylab("Cell density") +
monocle:::monocle_theme_opts() +
theme(legend.position = "top", legend.direction = "vertical", text=element_text(size=20)) +
ggsave("Mock_loss_of_function_cell_density_accross_pseudospace_geom_density.png", height = 6, width = 5)
# Same two plots for the TGFB condition
plot_cell_trajectory(cds.list[["TGFB"]], color_by = "position",show_branch_points = FALSE) +
theme(legend.position="top", text=element_text(size=20), legend.direction = "vertical") +
scale_color_manual(labels = c("inner colony", "outer colony"), values = c("#70163C", "#38726C"),
name = "Spatial Context") +
ggsave(file = "MCF10A_TGFB_loss_of_function_PseudospatialTrajectory.png", height = 6, width = 6)
ggplot(pData(cds.list[["TGFB"]]), aes(x = Pseudotime, fill = position, color = position)) +
geom_density() +
facet_wrap(~position, ncol = 1) +
theme_classic() +
scale_color_manual("Spatial Context", labels = c("inner colony", "outer colony"),
values = c("#000000","#000000")) +
scale_fill_manual("Spatial Context", labels = c("inner colony", "outer colony")
, values = c("#70163C", "#38726C")) +
xlab("Pseudospace") +
ylab("Cell density") +
monocle:::monocle_theme_opts() +
theme(legend.position = "top", legend.direction = "vertical", text=element_text(size=20)) +
ggsave("TGFB_loss_of_function_cell_density_accross_pseudospace_geom_density.png", height = 6, width = 5)
# CDH1 (E-cadherin) expression along pseudospace as a sanity check
plot_genes_in_pseudotime(cds.list[["Mock"]][fData(cds.list[["Mock"]])$gene_short_name == "CDH1",],
color_by = "position", min_expr = 0.1) +
theme(text=element_text(size=20)) +
scale_color_manual(labels = c("inner colony", "outer colony"), values = c("#0075F2", "#D62828"),
name = "Spatial Context")
plot_genes_in_pseudotime(cds.list[["TGFB"]][fData(cds.list[["TGFB"]])$gene_short_name == "CDH1",],
color_by = "position", min_expr = 0.1)+
theme(text=element_text(size=20)) +
scale_color_manual(labels = c("inner colony", "outer colony"), values = c("#38726C", "#70163C"),
name = "Spatial Context")
# Genes expressed in either condition (union() already de-duplicates;
# the unique() wrapper is redundant but harmless)
expressed_genes <- unique(union(expressed_genes.list[["Mock"]],expressed_genes.list[["TGFB"]]))
# Plot the expression of known EMT markers across pseudospace
Mock_Figure1_Mar <- cds.list[["Mock"]][row.names(subset(fData(cds.list[["Mock"]]), gene_short_name %in%
c("CDH1","CRB3","DSP", "CDH2","FN1","VIM"))),]
plot_genes_in_pseudotime(Mock_Figure1_Mar, color_by = "spatial_id", ncol = 2, min_expr = 0.1,
panel_order = c("CDH1","CDH2","CRB3","FN1","DSP","VIM")) +
xlab("Pseudospace") +
theme(legend.position = "none",text=element_text(size=20)) +
scale_color_manual(values = c("inner" = "#0075F2","outer"="#D62828")) +
ggsave("MCF10A_Mock_CFG_Figure1Markers_byPseudospace.png", width = 6, height =5)
TGFB_Figure1_Mar <- cds.list[["TGFB"]][row.names(subset(fData(cds.list[["TGFB"]]), gene_short_name %in%
c("CDH1","CRB3","DSP", "CDH2","FN1","VIM"))),]
plot_genes_in_pseudotime(TGFB_Figure1_Mar, color_by = "spatial_id", ncol = 2, min_expr = 0.1,
panel_order = c("CDH1","CDH2","CRB3","FN1","DSP","VIM")) +
xlab("Pseudospace") +
theme(legend.position = "none",text=element_text(size=20)) +
scale_color_manual(values = c("inner" = "#70163C","outer"="#38726C")) +
ggsave("MCF10A_TGFB_CFG_Figure1Markers_byPseudospace.png", width = 6, height =5)
# Use dynamic time warping to align Mock and TGFB pseudospatial trajectories and create a cds object of aligned trajectories
TGFB.to.Mock.CFG.aligned.cds <- getDTWcds(cds.list[["TGFB"]],cds.list[["Mock"]],
ref = "Mock", query = "TGFB",
expressed_genes = expressed_genes, cores = 1)
TGFB.to.Mock.CFG.aligned.cds <- estimateSizeFactors(TGFB.to.Mock.CFG.aligned.cds)
# Divide the aligned cds by treatment to test accumulation of knockouts along pseudospace independently
cds.aligned.list <- list()
cds.aligned.list[["Mock"]] <- TGFB.to.Mock.CFG.aligned.cds[,pData(TGFB.to.Mock.CFG.aligned.cds)$Cell.Type == "Mock"]
cds.aligned.list[["TGFB"]] <- TGFB.to.Mock.CFG.aligned.cds[,pData(TGFB.to.Mock.CFG.aligned.cds)$Cell.Type == "TGFB"]
for(sample in names(cds.aligned.list)){
cds.aligned.list[[sample]] <- preprocess_cds(cds.aligned.list[[sample]])
}
# Stuff the 1-D aligned pseudospace coordinate into reducedDimA so that
# densityPeak clustering below partitions cells along pseudospace itself
for(sample in names(cds.aligned.list)){
cds.aligned.list[[sample]]@reducedDimA <- t(as.matrix(pData(cds.aligned.list[[sample]])$Pseudotime))
colnames(cds.aligned.list[[sample]]@reducedDimA) <- row.names(pData(cds.aligned.list[[sample]]))
}
# First pass with default thresholds, used to inspect rho/delta below
for(sample in names(cds.aligned.list)){
cds.aligned.list[[sample]] <- clusterCells(cds.aligned.list[[sample]], method = "densityPeak")
}
# Inspect rho/delta decision plots to pick clustering thresholds
plot_rho_delta(cds.aligned.list[["Mock"]],rho_threshold = 50, delta_threshold = 5) +
scale_y_continuous(breaks = scales::pretty_breaks(n = 50)) +
scale_x_continuous(breaks = scales::pretty_breaks(n = 10))
plot_rho_delta(cds.aligned.list[["TGFB"]], rho_threshold = 10, delta_threshold = 5) +
scale_y_continuous(breaks = scales::pretty_breaks(n = 50)) +
scale_x_continuous(breaks = scales::pretty_breaks(n = 10))
# Thresholds chosen from the plots above: c(rho, delta)
rho_delta.list <- list()
rho_delta.list[["Mock"]] <- c(50,5)
rho_delta.list[["TGFB"]] <- c(10,5)
# Re-cluster with the manually selected thresholds
for(sample in names(cds.aligned.list)){
cds.aligned.list[[sample]] <- clusterCells(cds.aligned.list[[sample]], method = "densityPeak",
verbose = T,
rho_threshold = rho_delta.list[[sample]][1],
delta_threshold = rho_delta.list[[sample]][2],
skip_rho_sigma = T)
}
# Visualize cluster occupancy along pseudospace
ggplot(pData(cds.aligned.list[["Mock"]]), aes(x = Pseudotime, fill = Cluster)) +
geom_density() + monocle:::monocle_theme_opts()
ggplot(pData(cds.aligned.list[["TGFB"]]), aes(x = Pseudotime, fill = Cluster)) +
geom_density() + monocle:::monocle_theme_opts()
# Re-order regions to be in order from low to high pseudospace
# NOTE(review): these cluster-id -> region mappings were read off the
# density plots above and are specific to this dataset/seed. An unmatched
# cluster id falls through all if() branches and yields NULL -- verify the
# mapping covers every cluster present.
region.list <- list()
region.list[["Mock"]] <- sapply(pData(cds.aligned.list[["Mock"]])$Cluster, function(x){
if(x == "3")return("1")
if(x == "6")return("2")
if(x == "7")return("3")
if(x == "1")return("4")
if(x == "4")return("5")
if(x == "5")return("6")
if(x == "2")return("7")
})
region.list[["TGFB"]] <- sapply(pData(cds.aligned.list[["TGFB"]])$Cluster, function(x){
if(x == "3")return("1")
if(x == "7")return("2")
if(x == "8")return("3")
if(x == "6")return("4")
if(x == "4")return("5")
if(x == "1")return("6")
if(x == "5")return("7")
if(x == "2")return("8")
})
for(sample in names(cds.aligned.list)){
pData(cds.aligned.list[[sample]])$region <- region.list[[sample]]
}
# Check region ordering along pseudospace after relabeling
ggplot(pData(cds.aligned.list[["Mock"]]), aes(x = Pseudotime, fill = region)) +
geom_density() + monocle:::monocle_theme_opts()
ggplot(pData(cds.aligned.list[["TGFB"]]), aes(x = Pseudotime, fill = region)) +
geom_density() + monocle:::monocle_theme_opts()
# Copy region labels back onto the unaligned cds objects.
# NOTE(review): assumes cell ids are identical between cds.list and
# cds.aligned.list -- unmatched ids would return NA; confirm.
mock_regions <- sapply(row.names(pData(cds.list[["Mock"]])), function(x){
return(pData(cds.aligned.list[["Mock"]])[x,]$region)
})
tgfb_regions <- sapply(row.names(pData(cds.list[["TGFB"]])), function(x){
return(pData(cds.aligned.list[["TGFB"]])[x,]$region)
})
pData(cds.list[["Mock"]])$region <- mock_regions
pData(cds.list[["TGFB"]])$region <- tgfb_regions
# Guides with >= 10 single-guide cells per sample are kept for analysis.
# NOTE(review): the first pipeline in the loop (ending in head(10)) is not
# printed or assigned inside the for loop, so it is a no-op preview.
analysis.guides = list()
for(sample in names(cds.aligned.list)){
pData(cds.aligned.list[[sample]]) %>% filter(guide_count == 1) %>% group_by(gene, barcode) %>%
summarize(n.guide.cells = n()) %>% group_by(gene) %>% mutate(n.target.cells = sum(n.guide.cells)) %>%
filter(n.guide.cells >= 10) %>% ungroup() %>%
arrange(-n.target.cells, -n.guide.cells) %>% head(10)
analysis.guides[[sample]] =
(pData(cds.aligned.list[[sample]]) %>% filter(guide_count == 1) %>% group_by(gene, barcode) %>%
summarize(n.guide.cells = n()) %>% group_by(gene) %>% mutate(n.target.cells = sum(n.guide.cells)) %>%
filter(n.guide.cells >= 10) %>% ungroup())$barcode
}
# Target genes with >= 15 cells and at least one retained guide
analysis.targets = list()
analysis.targets[["Mock"]] = as.data.frame(pData(cds.aligned.list[["Mock"]]) %>% filter(gene != "NONTARGETING") %>%
group_by(gene) %>% summarize(
n.cells = n(),
n.guides = length(intersect(unique(barcode), analysis.guides[["Mock"]]))) %>%
filter(n.cells >= 15, n.guides >= 1) %>% dplyr::select(gene))[,1]
analysis.targets[["TGFB"]] = as.data.frame(pData(cds.aligned.list[["TGFB"]]) %>% filter(gene != "NONTARGETING") %>%
group_by(gene) %>% summarize(
n.cells = n(),
n.guides = length(intersect(unique(barcode), analysis.guides[["TGFB"]]))) %>%
filter(n.cells >= 15, n.guides >= 1) %>% dplyr::select(gene))[,1]
# Target gene x pseudospace-region cell-count matrices (NTC row included)
target.region.mat = list()
target.region.mat[["Mock"]] = acast(
pData(cds.aligned.list[["Mock"]]) %>%
filter(barcode %in% analysis.guides[["Mock"]] | gene == "NONTARGETING") %>%
mutate(dummy = 1) %>% dplyr::select(gene, region, dummy),
gene ~ region, value.var = "dummy", fun.aggregate = sum, fill = 0)
target.region.mat[["TGFB"]] = acast(
pData(cds.aligned.list[["TGFB"]]) %>%
filter(barcode %in% analysis.guides[["TGFB"]] | gene == "NONTARGETING") %>%
mutate(dummy = 1) %>% dplyr::select(gene, region, dummy),
gene ~ region, value.var = "dummy", fun.aggregate = sum, fill = 0)
# Map each target to its retained guides (sorted, unique)
target.to.guide.map <- list()
for (target in analysis.targets[["Mock"]]) {
target.to.guide.map[["Mock"]][[target]] =
sort(unique(as.data.frame(pData(cds.aligned.list[["Mock"]]) %>%
filter(gene == target, barcode %in% analysis.guides[["Mock"]]) %>%
dplyr::select(barcode))[, 1]))
}
for (target in analysis.targets[["TGFB"]]) {
target.to.guide.map[["TGFB"]][[target]] =
sort(unique(as.data.frame(pData(cds.aligned.list[["TGFB"]]) %>%
filter(gene == target, barcode %in% analysis.guides[["TGFB"]]) %>%
dplyr::select(barcode))[, 1]))
}
# Non-targeting control barcodes (excluded from the per-guide matrix below)
NTC.guides <- unique(pData(cds.aligned.list[["Mock"]])[pData(cds.aligned.list[["Mock"]])$gene == "NONTARGETING",]$barcode)
# Guide barcode x region cell-count matrices for non-NTC guides
guide.region.mat = list()
guide.region.mat[["Mock"]] = acast(
pData(cds.aligned.list[["Mock"]]) %>% filter(barcode %in% analysis.guides[["Mock"]][!(analysis.guides[["Mock"]] %in% NTC.guides)]) %>%
mutate(dummy = 1) %>% dplyr::select(barcode, region, dummy),
barcode ~ region, value.var = "dummy", fun.aggregate = sum, fill = 0)
guide.region.mat[["TGFB"]] = acast(
pData(cds.aligned.list[["TGFB"]]) %>% filter(barcode %in% analysis.guides[["TGFB"]][!(analysis.guides[["TGFB"]] %in% NTC.guides)]) %>%
mutate(dummy = 1) %>% dplyr::select(barcode, region, dummy),
barcode ~ region, value.var = "dummy", fun.aggregate = sum, fill = 0)
# Normalized NTC distribution across regions, used as the EM null
ntc.distribution = list()
for(sample in names(cds.aligned.list)){
ntc.distribution[[sample]] = target.region.mat[[sample]]["NONTARGETING",]
ntc.distribution[[sample]] = ntc.distribution[[sample]] / sum(ntc.distribution[[sample]])
}
# Build per-target region counts weighted by estimated guide efficiency:
# single-guide targets keep raw counts; multi-guide targets are combined
# with EM-derived guide weights (normalized so the best guide has weight 1).
weighted.target.region.mat <- list()
for (condition in c("Mock", "TGFB")) {
weighted.target.region.mat[[condition]] = t(sapply(analysis.targets[[condition]],
function(target) {
guides = target.to.guide.map[[condition]][[target]]
if (length(guides) == 1) {
return(target.region.mat[[condition]][target,])
} else {
mat = guide.region.mat[[condition]][guides,]
guide.weights = get.guide.weights(mat, ntc.distribution[[condition]])
guide.weights = guide.weights / max(guide.weights)
print(condition)
print(target)
print(round(guide.weights, 3))
#return(target.cluster.mat[[condition]][target,])
return(round(colSums(sweep(mat, 1, guide.weights, "*"))))
}
}))
}
# Append the raw NONTARGETING row so the weighted matrix also carries the
# control distribution (used by calculate_ntc_empirical_fdr)
NTC.region.mat <- list()
NTC.region.mat[["Mock"]] <- matrix(target.region.mat[["Mock"]][row.names(target.region.mat[["Mock"]]) == "NONTARGETING",],
nrow = 1)
row.names(NTC.region.mat[["Mock"]]) <- "NONTARGETING"
NTC.region.mat[["TGFB"]] <- matrix(target.region.mat[["TGFB"]][row.names(target.region.mat[["TGFB"]]) == "NONTARGETING",],
nrow = 1)
row.names(NTC.region.mat[["TGFB"]]) <- "NONTARGETING"
weighted.target.region.mat[["Mock"]] <- rbind(weighted.target.region.mat[["Mock"]],
NTC.region.mat[["Mock"]])
weighted.target.region.mat[["TGFB"]] <- rbind(weighted.target.region.mat[["TGFB"]],
NTC.region.mat[["TGFB"]])
# Interactive checks: median cells per gene (TGFB) and weighted-matrix rows
median((pData(cds.aligned.list[["TGFB"]]) %>%
group_by(gene) %>%
summarize(n = n()))$n)
row.names(weighted.target.region.mat[["Mock"]])
# Calculate empirical FDR
# Run 1000 decoy iterations; each iteration yields per-sample q-values
chisq_qval.list <- calculate_ntc_empirical_fdr(cds.aligned.list, 1000)
# Split results by sample: element 1 = Mock, element 2 = TGFB
mock_chisq_pval.list <- lapply(chisq_qval.list, `[[`, 1)
tgfb_chisq_pval.list <- lapply(chisq_qval.list, `[[`, 2)
# Rows = targets, columns = iterations after the transpose
mock_chisq_pval_df <- do.call("rbind",mock_chisq_pval.list)
mock_chisq_pval_df <- t(mock_chisq_pval_df)
tgfb_chisq_pval_df <- do.call("rbind",tgfb_chisq_pval.list)
tgfb_chisq_pval_df <- t(tgfb_chisq_pval_df)
mock_chisq_pval_df_test <- as.data.frame(mock_chisq_pval_df)
tgfb_chisq_pval_df_test <- as.data.frame(tgfb_chisq_pval_df)
# Decoy q-values per iteration, as single-column matrices
mock_chisq_pval_NTC_df <- t(mock_chisq_pval_df_test["NTC_decoy",])
colnames(mock_chisq_pval_NTC_df) <- "mock_NTC_decoy"
tgfb_chisq_pval_NTC_df <- t(tgfb_chisq_pval_df_test["NTC_decoy",])
colnames(tgfb_chisq_pval_NTC_df) <- "tgfb_NTC_decoy"
mock_chisq_pval_df_test["NTC_decoy",]
chisq_pval_NTC_df <- merge(mock_chisq_pval_NTC_df,tgfb_chisq_pval_NTC_df, by = "row.names")
# NOTE(review): row 22 is a magic index -- presumably a specific target of
# interest; confirm which gene this is before relying on it.
met <- tgfb_chisq_pval_df[22,]
length(met[met < 0.005])
# Distribution of decoy q-values across iterations (should be ~uniform)
ggplot(chisq_pval_NTC_df, aes(x = mock_NTC_decoy)) +
geom_histogram() +
xlim(0,1)
ggplot(chisq_pval_NTC_df, aes(x = tgfb_NTC_decoy)) +
geom_histogram() +
xlim(0,1)
# Per target and iteration: TRUE when the target's q-value is no smaller
# than the decoy's, i.e. the decoy "beats" the target in that iteration
mock_empirical_FDR_df <- apply(mock_chisq_pval_df,2,function(x){
x >= x["NTC_decoy"]
})
tgfb_empirical_FDR_df <- apply(tgfb_chisq_pval_df,2,function(x){
x >= x["NTC_decoy"]
})
# Empirical FDR = fraction of the 1000 iterations the decoy beat the target
mock_empirical_FDR <- rowSums(mock_empirical_FDR_df)/1000
tgfb_empirical_FDR <- rowSums(tgfb_empirical_FDR_df)/1000
# NOTE(review): cbind() of names plus numerics coerces to character, and
# melt() on an all-character data.frame has no measure columns -- verify
# the resulting shape matches the three column names assigned below.
empirical_FDR_df <- as.data.frame(cbind(names(mock_empirical_FDR),mock_empirical_FDR,tgfb_empirical_FDR))
empirical_FDR_df <- melt(empirical_FDR_df)
colnames(empirical_FDR_df) <- c("target", "mock_FDR","TGFB_FDR")
empirical_FDR_df$target <- as.character(empirical_FDR_df$target)
empirical_FDR_df$mock_FDR <- as.numeric(as.character(empirical_FDR_df$mock_FDR))
empirical_FDR_df$TGFB_FDR <- as.numeric(as.character(empirical_FDR_df$TGFB_FDR))
# Plot FDR by KO at the target and individual guide levels
# Order targets by Mock FDR and lock that order in via factor levels
empirical_FDR_df <- empirical_FDR_df[order(empirical_FDR_df$mock_FDR, decreasing = FALSE),]
empirical_FDR_df$target <- factor(empirical_FDR_df$target, levels = empirical_FDR_df$target)
ggplot(empirical_FDR_df,aes( x = as.factor(target), y = mock_FDR, fill = mock_FDR < 0.1)) +
geom_bar(stat = "identity") +
geom_hline(yintercept = 0.1, linetype = "dashed", color = "dimgrey") +
xlab("Target") +
ylab("FDR\n(empirically determined)") +
ggtitle("Spontaneous EMT") +
scale_fill_manual("FDR < 0.1", values = c("TRUE" = "red","FALSE" = "black")) +
theme(text = element_text(size = 6),
axis.text.x = element_text(angle = 90, hjust = 1),
plot.title = element_text(hjust = 0.5)) +
monocle:::monocle_theme_opts() +
ggsave("Mock_empirical_target_level_FDR.png", height = 2, width = 4, units = "in")
# Same bar plot, ordered by TGFB FDR
empirical_FDR_df <- empirical_FDR_df[order(empirical_FDR_df$TGFB_FDR, decreasing = FALSE),]
empirical_FDR_df$target <- factor(empirical_FDR_df$target, levels = empirical_FDR_df$target)
ggplot(empirical_FDR_df,aes( x = as.factor(target), y = TGFB_FDR, fill = TGFB_FDR < 0.1)) +
geom_bar(stat = "identity") +
geom_hline(yintercept = 0.1, linetype = "dashed", color = "dimgrey") +
xlab("Target") +
ylab("FDR\n(empirically determined)") +
ggtitle("TGF-B-driven EMT") +
scale_fill_manual("FDR < 0.1",values = c("TRUE" = "red","FALSE" = "black")) +
theme(text = element_text(size = 6),
axis.text.x = element_text(angle = 90, hjust = 1),
plot.title = element_text(hjust = 0.5)) +
monocle:::monocle_theme_opts() +
ggsave("TGFB_empirical_target_level_FDR.png", height = 2, width = 4, units = "in")
# NTC cell counts per region with a 0.1 pseudocount for empty regions
NTC.region.p = list()
for(sample in names(cds.aligned.list)){
NTC.region.p[[sample]] <- pData(cds.aligned.list[[sample]]) %>%
group_by(region) %>%
filter(gene == "NONTARGETING") %>%
summarize(n = n()) %>%
complete(region, fill = list(n = 0.1))
}
# Targets passing the empirical FDR < 0.1 screen in each condition
pass.target.level.screen = list()
pass.target.level.screen[["Mock"]] <- as.character(empirical_FDR_df[empirical_FDR_df$mock_FDR < 0.1,]$target)
pass.target.level.screen[["TGFB"]] <- as.character(empirical_FDR_df[empirical_FDR_df$TGFB_FDR < 0.1,]$target)
print(pass.target.level.screen[["Mock"]])
print(pass.target.level.screen[["TGFB"]])
targets.passing.initial.screen = list()
for(sample in names(cds.aligned.list)){
targets.passing.initial.screen[[sample]] = pass.target.level.screen[[sample]]
}
# Summary counts: per condition, shared, and union of passing targets
length(targets.passing.initial.screen[["Mock"]])
length(targets.passing.initial.screen[["TGFB"]])
length(intersect(targets.passing.initial.screen[["Mock"]],
targets.passing.initial.screen[["TGFB"]]))
length(unique(union(targets.passing.initial.screen[["Mock"]],
targets.passing.initial.screen[["TGFB"]])))
# Re-weigh guides for calculating log2 odds ratios and for plotting enrichment heatmaps
# For each screen hit, combine its guides' region counts into one row:
# single-guide targets are taken as-is; multi-guide targets are re-weighted
# by the EM-estimated per-guide efficiency (get.guide.weights), normalized
# so the best guide has weight 1.
weighted.target.region.mat = list()
for (condition in c("Mock", "TGFB")) {
weighted.target.region.mat[[condition]] = t(sapply(targets.passing.initial.screen[[condition]],
function(target) {
guides = target.to.guide.map[[condition]][[target]]
if (length(guides) == 1) {
return(target.region.mat[[condition]][target,])
} else {
mat = guide.region.mat[[condition]][guides,]
guide.weights = get.guide.weights(mat, ntc.distribution[[condition]])
guide.weights = guide.weights / max(guide.weights)
# Progress/diagnostic output: which guides dominate each target.
print(condition)
print(target)
print(round(guide.weights, 3))
#return(target.cluster.mat[[condition]][target,])
# Weighted sum of guide counts per region, rounded back to integer counts.
return(round(colSums(sweep(mat, 1, guide.weights, "*"))))
}
}))
}
# Per-target, per-region Fisher's exact tests of knockout cells vs NTC cells:
# 2x2 table of (cells in region, cells elsewhere) for target vs NTC.
region.enrichment.df = list()
for (condition in c("Mock", "TGFB")) {
weighted.mat = weighted.target.region.mat[[condition]]
ntc.counts = target.region.mat[[condition]]["NONTARGETING",]
region.enrichment.df[[condition]] = do.call(rbind, lapply(rownames(weighted.mat), function(target) {
do.call(rbind, lapply(1:ncol(weighted.mat), function(region) {
test = fisher.test(cbind(
c(weighted.mat[target, region], sum(weighted.mat[target, -region])),
c(ntc.counts[region], sum(ntc.counts[-region]))))
data.frame(
target = target,
region = region,
odds.ratio = unname(test$estimate),
p.value = test$p.value)
}))
}))
# BH-correct across all target x region tests within a condition.
region.enrichment.df[[condition]]$q.value = p.adjust(region.enrichment.df[[condition]]$p.value, "BH")
# log2 odds for plotting; a zero odds ratio (log2 = -Inf) is capped at -5.
region.enrichment.df[[condition]]$log2.odds = with(region.enrichment.df[[condition]],
ifelse(odds.ratio == 0, -5, round(log2(odds.ratio),2)))
}
region.enrichment.df[["Mock"]]$target <- as.character(region.enrichment.df[["Mock"]]$target)
region.enrichment.df[["TGFB"]]$target <- as.character(region.enrichment.df[["TGFB"]]$target)
# Clamp log2 odds to [-2, 2] so extreme values do not dominate the heatmap
# color scale, then reshape each condition into a target x region matrix of
# clamped log2 odds for pheatmap.
region.enrichment.heatmap.df <- region.enrichment.df
region.enrichment.heatmap.matrix <- list()
for (condition in names(region.enrichment.heatmap.df)) {
  clamped_log2_odds <- pmin(pmax(region.enrichment.heatmap.df[[condition]]$log2.odds, -2), 2)
  region.enrichment.heatmap.df[[condition]]$log2.odds <- clamped_log2_odds
  wide_form <- recast(region.enrichment.heatmap.df[[condition]],
                      target ~ region, measure.var = "log2.odds")
  row.names(wide_form) <- wide_form$target
  # Drop the `target` id column; rows are already named by target.
  region.enrichment.heatmap.matrix[[condition]] <- wide_form[, -1]
}
# Plot enrichment heatmaps
# Mock condition: per-target log2 odds of region enrichment (clamped to
# [-2, 2]) relative to NTC cells. Rows (targets) are clustered; pseudospace
# regions keep their natural order.
# Fix: `show_colanmes` was a typo — pheatmap silently drops unknown names
# into `...` — and `col`/`cluster_col`/`cluster_row`/`file` relied on
# partial argument matching; all are spelled out in full here.
pheatmap(region.enrichment.heatmap.matrix[["Mock"]],
clustering_method = "ward.D2",
show_rownames = TRUE,
show_colnames = TRUE,
na_col = "grey90",
color = colorspace::diverge_hsv(30),
cluster_cols = FALSE,
cluster_rows = TRUE,
width = 4,
height = 6,
filename = "Mock_region_enrichment_heatmap.png")
# TGFB condition: same enrichment heatmap as the mock plot above.
# Fix: `show_colanmes` typo corrected to `show_colnames`; partially-matched
# argument names (`col`, `cluster_col`, `cluster_row`, `file`) spelled out.
pheatmap(region.enrichment.heatmap.matrix[["TGFB"]],
clustering_method = "ward.D2",
show_rownames = TRUE,
show_colnames = TRUE,
na_col = "grey90",
color = colorspace::diverge_hsv(30),
cluster_cols = FALSE,
cluster_rows = TRUE,
width = 4,
height = 6,
filename = "TGFB_region_enrichment_heatmap.png")
# Highlight examples of accumulation across pseudospace for EGFR and MET in spontaneous EMT and TGFBRs in TGF-B driven EMT
# Pseudospace interval spanned by region 3 of the mock trajectory, used to
# shade the region of interest in the plot below.
region_3_min_pseudospace_value <- min(pData(cds.aligned.list[["Mock"]])[pData(cds.aligned.list[["Mock"]])$region == "3",]$Pseudotime)
region_3_max_pseudospace_value <- max(pData(cds.aligned.list[["Mock"]])[pData(cds.aligned.list[["Mock"]])$region == "3",]$Pseudotime)
# Density of EGFR/MET knockout cells vs NTC controls along mock pseudospace.
# geom_rect is given constant (non-aes) bounds so a single shaded rectangle
# spans every facet.
ggplot(pData(cds.aligned.list[["Mock"]])[pData(cds.aligned.list[["Mock"]])$gene %in% c("NONTARGETING", "EGFR", "MET"),],
aes(x = Pseudotime, fill = gene)) +
geom_rect(xmin = region_3_min_pseudospace_value, xmax = region_3_max_pseudospace_value, ymin = 0, ymax = Inf,
fill = "slategray1", alpha = 0.01) +
geom_density() +
facet_wrap(~factor(gene, levels = c("EGFR", "MET","NONTARGETING")), scales = "free_y", ncol = 1) +
theme(legend.position = "none", strip.text.x=element_text(size=18),
axis.text.x = element_text(size=10), axis.text.y = element_text(size=10),
axis.title.x = element_text(size=18), axis.title.y = element_text(size=18)) +
scale_fill_manual(values = c("EGFR" = "firebrick3", "MET" = "brown4", "TGFBR2" = "navy", "NONTARGETING" = "dimgrey")) +
monocle:::monocle_theme_opts() +
xlab("Pseudospace") +
ggsave("Density_of_EGFR_MET_NTC_across_spontaneous_EMT.png", width = 5, height = 10)
# Same visualization for TGF-B-driven EMT: TGFBR1/TGFBR2 knockouts vs NTC,
# shading pseudospace from 0 to the end of region 4.
region_4_max_pseudospace_value <- max(pData(cds.aligned.list[["TGFB"]])[pData(cds.aligned.list[["TGFB"]])$region == "4",]$Pseudotime)
ggplot(pData(cds.aligned.list[["TGFB"]])[pData(cds.aligned.list[["TGFB"]])$gene %in% c("NONTARGETING", "TGFBR1", "TGFBR2"),],
aes(x = Pseudotime, fill = gene)) +
geom_rect(xmin = 0, xmax = region_4_max_pseudospace_value, ymin = 0, ymax = Inf,
fill = "slategray1", alpha = 0.01) +
geom_density() +
facet_wrap(~factor(gene, levels = c("TGFBR2", "TGFBR1", "NONTARGETING")), scales = "free_y", ncol = 1) +
theme(legend.position = "none", strip.text.x=element_text(size=18),
axis.text.x = element_text(size=10), axis.text.y = element_text(size=10),
axis.title.x = element_text(size=18), axis.title.y = element_text(size=18)) +
scale_fill_manual(values = c("TGFBR2" = "firebrick3", "TGFBR1" = "brown4", "ITGAV" = "navy", "NONTARGETING" = "dimgrey")) +
monocle:::monocle_theme_opts() +
xlab("Pseudospace") +
ggsave("Density_of_TGFBR1_TGFBR2_NTC_across_TGFB_driven_EMT.png", width = 5, height = 10)
# For supplemental
# Determine the fraction of CDH1 single positive, CDH1/VIM double positive and VIM single positive cells upon confluence dependent EMT
# Subset each condition to CDH1/VIM and to cells with a pseudospace region,
# then size-factor-normalize the counts.
mock_CDH1_VIM_cds_subset <- cds.aligned.list[["Mock"]][fData(cds.aligned.list[["Mock"]])$gene_short_name %in% c("CDH1","VIM"),
!is.na(pData(cds.aligned.list[["Mock"]])$region)]
mock_CDH1_VIM_cds_exprs <- Biobase::exprs(mock_CDH1_VIM_cds_subset)
mock_CDH1_VIM_cds_exprs <- Matrix::t(Matrix::t(mock_CDH1_VIM_cds_exprs)/sizeFactors(mock_CDH1_VIM_cds_subset))
tgfb_CDH1_VIM_cds_subset <- cds.aligned.list[["TGFB"]][fData(cds.aligned.list[["TGFB"]])$gene_short_name %in% c("CDH1","VIM"),
!is.na(pData(cds.aligned.list[["TGFB"]])$region)]
tgfb_CDH1_VIM_cds_exprs <- Biobase::exprs(tgfb_CDH1_VIM_cds_subset)
tgfb_CDH1_VIM_cds_exprs <- Matrix::t(Matrix::t(tgfb_CDH1_VIM_cds_exprs)/sizeFactors(tgfb_CDH1_VIM_cds_subset))
# Positivity thresholds = mean normalized expression in the mock condition.
# NOTE(review): all indexing below assumes row 1 = VIM and row 2 = CDH1,
# i.e. the feature order of the cds subset — confirm against fData ordering.
CDH1_expression_cutoff <- mean(mock_CDH1_VIM_cds_exprs[2,])
VIM_expression_cutoff <- mean(mock_CDH1_VIM_cds_exprs[1,])
CDH1_expression_cutoff
VIM_expression_cutoff
# Partition cells into double-positive and mutually exclusive single-positive
# sets (single-positive sets exclude the double positives).
mock_CDH1_VIM_double_positive_cells <- colnames(mock_CDH1_VIM_cds_exprs[,mock_CDH1_VIM_cds_exprs[1,] > VIM_expression_cutoff &
mock_CDH1_VIM_cds_exprs[2,] > CDH1_expression_cutoff])
mock_CDH1_positive_cells <- colnames(mock_CDH1_VIM_cds_exprs[,mock_CDH1_VIM_cds_exprs[2,] > CDH1_expression_cutoff &
!(colnames(mock_CDH1_VIM_cds_exprs) %in% mock_CDH1_VIM_double_positive_cells)])
mock_VIM_positive_cells <- colnames(mock_CDH1_VIM_cds_exprs[,mock_CDH1_VIM_cds_exprs[1,] > VIM_expression_cutoff &
!(colnames(mock_CDH1_VIM_cds_exprs) %in% mock_CDH1_VIM_double_positive_cells)])
tgfb_CDH1_VIM_double_positive_cells <- colnames(tgfb_CDH1_VIM_cds_exprs[,tgfb_CDH1_VIM_cds_exprs[1,] > VIM_expression_cutoff &
tgfb_CDH1_VIM_cds_exprs[2,] > CDH1_expression_cutoff])
tgfb_CDH1_positive_cells <- colnames(tgfb_CDH1_VIM_cds_exprs[,tgfb_CDH1_VIM_cds_exprs[2,] > CDH1_expression_cutoff &
!(colnames(tgfb_CDH1_VIM_cds_exprs) %in% tgfb_CDH1_VIM_double_positive_cells)])
tgfb_VIM_positive_cells <- colnames(tgfb_CDH1_VIM_cds_exprs[,tgfb_CDH1_VIM_cds_exprs[1,] > VIM_expression_cutoff &
!(colnames(tgfb_CDH1_VIM_cds_exprs) %in% tgfb_CDH1_VIM_double_positive_cells)])
# Label each spatially-resolved mock cell by its CDH1/VIM marker status;
# cells in none of the three sets get NA.
mock_pData <- pData(cds.aligned.list[["Mock"]][,!is.na(pData(cds.aligned.list[["Mock"]])$region)])
mock_pData$positive_marker <- sapply(mock_pData$cell, function(cell_id){
  if (cell_id %in% mock_CDH1_VIM_double_positive_cells) {
    "CDH1/VIM double positive"
  } else if (cell_id %in% mock_CDH1_positive_cells) {
    "CDH1 single positive"
  } else if (cell_id %in% mock_VIM_positive_cells) {
    "VIM single positive"
  } else {
    NA
  }
})
# Label each spatially-resolved TGFB cell by its CDH1/VIM marker status;
# cells in none of the three sets get NA.
tgfb_pData <- pData(cds.aligned.list[["TGFB"]][,!is.na(pData(cds.aligned.list[["TGFB"]])$region)])
tgfb_pData$positive_marker <- sapply(tgfb_pData$cell, function(cell_id){
  if (cell_id %in% tgfb_CDH1_VIM_double_positive_cells) {
    "CDH1/VIM double positive"
  } else if (cell_id %in% tgfb_CDH1_positive_cells) {
    "CDH1 single positive"
  } else if (cell_id %in% tgfb_VIM_positive_cells) {
    "VIM single positive"
  } else {
    NA
  }
})
# Pseudospace density of each marker class (CDH1+, CDH1/VIM double+, VIM+)
# per condition; six plots total, each written to its own PNG.
# Mock: CDH1 single positive cells.
ggplot(pData(cds.aligned.list[["Mock"]])[mock_CDH1_positive_cells,], aes(x = Pseudotime)) +
geom_density(fill = "gray70", color = "black") +
theme_classic() +
monocle:::monocle_theme_opts() +
xlab("Pseudospace") +
ylab("Cell density") +
ggtitle("CDH1 single positive cells") +
theme(legend.position = "none",
axis.title.y = element_text(size = 14),axis.title.x = element_text(size = 24),
axis.text.y = element_text(size = 14), plot.title = element_text(size = 24, hjust = 0.5)) +
ggsave("Mock_loss_of_function_cell_density_accross_pseudospace_CDH1_positive_cells_geom_density.png", height = 3, width = 6)
# Mock: CDH1/VIM double positive cells.
ggplot(pData(cds.aligned.list[["Mock"]])[mock_CDH1_VIM_double_positive_cells,], aes(x = Pseudotime)) +
geom_density(fill = "gray70", color = "black") +
theme_classic() +
monocle:::monocle_theme_opts() +
xlab("Pseudospace") +
ylab("Cell density") +
ggtitle("CDH1/VIM double positive cells") +
theme(legend.position = "none",
axis.title.y = element_text(size = 14),axis.title.x = element_text(size = 24),
axis.text.y = element_text(size = 14), plot.title = element_text(size = 24, hjust = 0.5)) +
ggsave("Mock_loss_of_function_cell_density_accross_pseudospace_CDH1_VIM_double_positive_cells_geom_density.png", height = 3, width = 6)
# Mock: VIM single positive cells.
ggplot(pData(cds.aligned.list[["Mock"]])[mock_VIM_positive_cells,], aes(x = Pseudotime)) +
geom_density(fill = "gray70", color = "black") +
theme_classic() +
monocle:::monocle_theme_opts() +
xlab("Pseudospace") +
ylab("Cell density") +
ggtitle("VIM single positive cells") +
theme(legend.position = "none",
axis.title.y = element_text(size = 14), axis.title.x = element_text(size = 24),
axis.text.y = element_text(size = 14), plot.title = element_text(size = 24, hjust = 0.5)) +
ggsave("Mock_loss_of_function_cell_density_accross_pseudospace_VIM_positive_cells_geom_density.png", height = 3, width = 6)
# TGFB: CDH1 single positive cells.
ggplot(pData(cds.aligned.list[["TGFB"]])[tgfb_CDH1_positive_cells,], aes(x = Pseudotime)) +
geom_density(fill = "gray70", color = "black") +
theme_classic() +
monocle:::monocle_theme_opts() +
xlab("Pseudospace") +
ylab("Cell density") +
ggtitle("CDH1 single positive cells") +
theme(legend.position = "none",
axis.title.y = element_text(size = 14),axis.title.x = element_text(size = 24),
axis.text.y = element_text(size = 14), plot.title = element_text(size = 24, hjust = 0.5)) +
ggsave("TGFB_loss_of_function_cell_density_accross_pseudospace_CDH1_positive_cells_geom_density.png", height = 3, width = 6)
# TGFB: CDH1/VIM double positive cells.
ggplot(pData(cds.aligned.list[["TGFB"]])[tgfb_CDH1_VIM_double_positive_cells,], aes(x = Pseudotime)) +
geom_density(fill = "gray70", color = "black") +
theme_classic() +
monocle:::monocle_theme_opts() +
xlab("Pseudospace") +
ylab("Cell density") +
ggtitle("CDH1/VIM double positive cells") +
theme(legend.position = "none",
axis.title.y = element_text(size = 14),axis.title.x = element_text(size = 24),
axis.text.y = element_text(size = 14), plot.title = element_text(size = 24, hjust = 0.5)) +
ggsave("TGFB_loss_of_function_cell_density_accross_pseudospace_CDH1_VIM_double_positive_cells_geom_density.png", height = 3, width = 6)
# TGFB: VIM single positive cells.
ggplot(pData(cds.aligned.list[["TGFB"]])[tgfb_VIM_positive_cells,], aes(x = Pseudotime)) +
geom_density(fill = "gray70", color = "black") +
theme_classic() +
monocle:::monocle_theme_opts() +
xlab("Pseudospace") +
ylab("Cell density") +
ggtitle("VIM single positive cells") +
theme(legend.position = "none",
axis.title.y = element_text(size = 14), axis.title.x = element_text(size = 24),
axis.text.y = element_text(size = 14), plot.title = element_text(size = 24, hjust = 0.5)) +
ggsave("TGFB_loss_of_function_cell_density_accross_pseudospace_VIM_positive_cells_geom_density.png", height = 3, width = 6)
# Identify differentially expressed genes across Pseudospace for every ko vs NTC combination
# For each knockout target, test target cells + NTC cells together: the full
# model adds a `gene` (target vs NTC) term on top of the pseudospace spline,
# so significant genes differ between knockout and control.
mock_target_NTC_diff_test.list <- list()
for(target in analysis.targets[["Mock"]]){
message("Obtaining differentially expressed genes between ",target, " and NTC")
subset_cds <- cds.aligned.list[["Mock"]][,pData(cds.aligned.list[["Mock"]])$gene == target |
pData(cds.aligned.list[["Mock"]])$gene == "NONTARGETING"]
# Dispersions are re-estimated per subset because the cell set changes.
subset_cds <- estimateDispersions(subset_cds)
mock_target_NTC_diff_test.list[[target]] <- myDifferentialGeneTest(subset_cds[expressed_genes.list[["Mock"]]],
fullModelFormulaStr = "~sm.ns(Pseudotime, df=3)+gene",
reducedModelFormulaStr = "~sm.ns(Pseudotime, df=3)",
cores = 1)
rm(subset_cds)
message("Done")
}
# Same per-target test for the TGFB condition.
tgfb_target_NTC_diff_test.list <- list()
for(target in analysis.targets[["TGFB"]]){
message("Obtaining differentially expressed genes between ",target, " and NTC")
subset_cds <- cds.aligned.list[["TGFB"]][,pData(cds.aligned.list[["TGFB"]])$gene == target |
pData(cds.aligned.list[["TGFB"]])$gene == "NONTARGETING"]
subset_cds <- estimateDispersions(subset_cds)
tgfb_target_NTC_diff_test.list[[target]] <- myDifferentialGeneTest(subset_cds[expressed_genes.list[["TGFB"]]],
fullModelFormulaStr = "~sm.ns(Pseudotime, df=3)+gene",
reducedModelFormulaStr = "~sm.ns(Pseudotime, df=3)",
cores = 1)
rm(subset_cds)
message("Done")
}
# Tag each per-target result table with its target before concatenating.
for(target in names(mock_target_NTC_diff_test.list)){
mock_target_NTC_diff_test.list[[target]]$target <- rep(target,
length(row.names(mock_target_NTC_diff_test.list[[target]])))
}
for(target in names(tgfb_target_NTC_diff_test.list)){
tgfb_target_NTC_diff_test.list[[target]]$target <- rep(target,
length(row.names(tgfb_target_NTC_diff_test.list[[target]])))
}
mock_target_NTC_diff_test <- do.call("rbind", mock_target_NTC_diff_test.list)
tgfb_target_NTC_diff_test <- do.call("rbind", tgfb_target_NTC_diff_test.list)
# Re-adjust p-values jointly across all targets within a condition.
mock_target_NTC_diff_test$qval <- p.adjust(mock_target_NTC_diff_test$pval, method = "BH")
tgfb_target_NTC_diff_test$qval <- p.adjust(tgfb_target_NTC_diff_test$pval, method = "BH")
mock_target_NTC_sig_genes <- unique(subset(mock_target_NTC_diff_test, qval < 0.05)$id)
length(mock_target_NTC_sig_genes)
tgfb_target_NTC_sig_genes <- unique(subset(tgfb_target_NTC_diff_test, qval < 0.05)$id)
length(tgfb_target_NTC_sig_genes)
mock_target_NTC_dif_test_sig_subset <- mock_target_NTC_diff_test[mock_target_NTC_diff_test$id %in% mock_target_NTC_sig_genes,]
tgfb_target_NTC_dif_test_sig_subset <- tgfb_target_NTC_diff_test[tgfb_target_NTC_diff_test$id %in% tgfb_target_NTC_sig_genes,]
# Summaries: DEG count per target and cell count per target, merged below to
# check whether DEG counts simply track cell numbers.
mock_diff_test_summary <- mock_target_NTC_diff_test %>% filter(qval < 0.05) %>% group_by(target) %>%
summarize(n = n()) %>% arrange(desc(n))
colnames(mock_diff_test_summary) <- c("target","total_degs")
mock_cell_number_summary <- pData(cds.aligned.list[["Mock"]]) %>% filter(gene != "NONTARGETING") %>%
group_by(gene) %>% summarize(n = n()) %>% arrange(desc(n))
tgfb_diff_test_summary <- tgfb_target_NTC_diff_test %>% filter(qval < 0.05) %>% group_by(target) %>%
summarize(n = n()) %>% arrange(desc(n))
colnames(tgfb_diff_test_summary) <- c("target","total_degs")
tgfb_cell_number_summary <- pData(cds.aligned.list[["TGFB"]]) %>% filter(gene != "NONTARGETING") %>%
group_by(gene) %>% summarize(n = n()) %>% arrange(desc(n))
mock_summary_df <- merge(mock_diff_test_summary, mock_cell_number_summary, by.x = "target", by.y = "gene")
tgfb_summary_df <- merge(tgfb_diff_test_summary, tgfb_cell_number_summary, by.x = "target", by.y = "gene")
# Plot number of DEGs for every KO vs number of KO cells in the experiment
ggplot(mock_summary_df, aes(x = n, y = total_degs, label = target)) +
geom_point() +
monocle:::monocle_theme_opts() +
theme(text = element_text(size = 24)) +
xlab("Total number of target cells") +
ylab("Total number of DEGs") +
ggsave("Mock_CROPseq_TargetvsNTC_degs.png", width = 6, height = 6)
ggplot(tgfb_summary_df, aes(x = n, y = total_degs, label = target)) +
geom_point() +
monocle:::monocle_theme_opts() +
theme(text = element_text(size = 24)) +
xlab("Total number of target cells") +
ylab("Total number of DEGs") +
ggsave("TGFB_CROPseq_TargetvsNTC_degs.png", width = 6, height = 6)
# Same TGFB scatter excluding the TGF-B receptors, which would otherwise
# dominate the y-axis.
ggplot(tgfb_summary_df[!(tgfb_summary_df$target %in% c("TGFBR1","TGFBR2")),], aes(x = n, y = total_degs, label = target)) +
geom_point() +
monocle:::monocle_theme_opts() +
theme(text = element_text(size = 24)) +
xlab("Total number of target cells") +
ylab("Total number of DEGs") +
ggsave("TGFB_CROPseq_TargetvsNTC_degs_woTGFRBs.png", width = 6, height = 6)
|
/code/Determining_enrichment_of_CROPseq_pertubed_MCF10A_cells_across_pseudospace.R
|
no_license
|
cole-trapnell-lab/pseudospace
|
R
| false
| false
| 54,496
|
r
|
###### Load packages ######
# Load necessary packages for single cell RNA-Seq analysis including packages for downstream Gene Ontology Analysis
suppressPackageStartupMessages({
library(devtools)
library(stringr)
library(scales)
library(dtw)
library(monocle)
library(reshape2)
library(GSA)
library(limma)
library(DBI)
library(MASS)
library(plyr)
library(dplyr)
library(tidyr)
library(matrixStats)
library(cluster)
library(pheatmap)
library(grid)
library(RColorBrewer)
library(viridis)
library(ggrepel)})
##### Load and define necessary functions #####
source("Pseudospace_support_functions.R")
preprocess_cds <- function(cds){
  # Standard monocle preprocessing: flag expressed genes, then fit size
  # factors and dispersions (dispersions require size factors, so the
  # order of the three calls matters).
  processed <- detectGenes(cds, min_expr = 0.1)
  processed <- estimateSizeFactors(processed)
  processed <- estimateDispersions(processed)
  processed
}
getPseudospaceTrajectory <- function(cds, sig_genes){
  # Build a pseudospace trajectory: restrict ordering to sig_genes,
  # embed into 2 components (log-normalized), then order cells along
  # the resulting trajectory.
  trajectory_cds <- setOrderingFilter(cds, sig_genes)
  trajectory_cds <- reduceDimension(trajectory_cds, max_components = 2, norm_method = "log")
  orderCells(trajectory_cds, reverse = FALSE)
}
## Need to update function in pseudospace_support_functions to specify which columns of pData to keep after alignment
# Align a query pseudospatial trajectory onto a reference trajectory with
# dynamic time warping (DTW) and return a combined CellDataSet that shares
# one pseudospace axis.
#   query_cds, ref_cds : monocle CDS objects with Pseudotime already set
#   ref, query         : labels stored in pData$Cell.Type for each source
#   expressed_genes    : genes retained in the combined expression matrix
#   cores              : parallelism forwarded to genSmoothCurves
# Fix: `cBind` (defunct in Matrix >= 1.2) replaced with base `cbind`, which
# dispatches to Matrix::cbind2 for sparse matrices.
getDTWcds <- function(query_cds, ref_cds, ref, query, expressed_genes, cores = 1){
# Alignment is driven by genes used for ordering in BOTH trajectories.
alignment_genes <- intersect(row.names(subset(fData(ref_cds), use_for_ordering)),
row.names(subset(fData(query_cds), use_for_ordering)))
ref_align_cds <- ref_cds[alignment_genes]
query_align_cds <- query_cds[alignment_genes]
### Set a consistent Pseudospace between both ordering sets
message("Normalizing pseudospace for each sample")
# Rescale each trajectory's Pseudotime to [0, 100] and sort cells by it.
pData(ref_align_cds)$cell_id <- row.names(pData(ref_align_cds))
pData(ref_align_cds)$Pseudotime <- 100 * pData(ref_align_cds)$Pseudotime / max(pData(ref_align_cds)$Pseudotime)
ref_align_cds <- ref_align_cds[alignment_genes,as.character(arrange(pData(ref_align_cds), Pseudotime)$cell_id)]
pData(query_align_cds)$cell_id <- row.names(pData(query_align_cds))
pData(query_align_cds)$Pseudotime <- 100 * pData(query_align_cds)$Pseudotime / max(pData(query_align_cds)$Pseudotime)
query_align_cds <- query_align_cds[alignment_genes,as.character(arrange(pData(query_align_cds), Pseudotime)$cell_id)]
# Fit smoothed expression curves over a shared 0..100 pseudospace grid and
# variance-stabilize them; genes whose fit produced NAs are dropped.
message("Fitting smooth curves across pseudospace")
#closeAllConnections()
smoothed_ref_exprs <- genSmoothCurves(ref_align_cds[alignment_genes], data.frame(Pseudotime=seq(0,100, by=1)), cores= cores)
smoothed_ref_exprs <- smoothed_ref_exprs[rowSums(is.na(smoothed_ref_exprs)) == 0,]
vst_smoothed_ref_exprs <- vstExprs(ref_cds, expr_matrix=smoothed_ref_exprs)
#closeAllConnections()
smoothed_query_exprs <- genSmoothCurves(query_align_cds[alignment_genes], data.frame(Pseudotime=seq(0,100, by=1)), cores= cores)
smoothed_query_exprs <- smoothed_query_exprs[rowSums(is.na(smoothed_query_exprs)) == 0,]
vst_smoothed_query_exprs <- vstExprs(query_cds, expr_matrix=smoothed_query_exprs)
alignment_genes <- intersect(row.names(vst_smoothed_ref_exprs), row.names(vst_smoothed_query_exprs))
# z-score each gene across the grid so DTW compares expression shapes.
ref_matrix <- t(scale(t(vst_smoothed_ref_exprs[alignment_genes,])))
query_matrix <- t(scale(t(vst_smoothed_query_exprs[alignment_genes,])))
message("Aligning pseudospatial trajectories with dynamic time warping")
ref_query_dtw <- align_cells(ref_matrix, query_matrix, step_pattern=rabinerJuangStepPattern(3, "c"), open.begin=F, open.end=F)
message("Warping pseudospace")
# Warp query Pseudotime onto the reference axis using the DTW path.
align_res <- warp_pseudotime(ref_align_cds, query_align_cds, ref_query_dtw)
query_ref_aligned <- align_res$query_cds
pData(query_ref_aligned)$Pseudotime <- pData(query_ref_aligned)$Alignment_Pseudotime
# Drop the synthetic "duplicate_root" cell warp_pseudotime can introduce.
ref_aligned_cell_ids <- setdiff(row.names(pData(ref_align_cds)), "duplicate_root")
query_aligned_cell_ids <- setdiff(row.names(pData(query_align_cds)), "duplicate_root")
combined_exprs <- cbind(Biobase::exprs(query_cds[expressed_genes,query_aligned_cell_ids]),
Biobase::exprs(ref_cds[expressed_genes,ref_aligned_cell_ids]))
# Keep a fixed set of pData columns so the two sources rbind cleanly.
pData_ref <- pData(ref_align_cds)[,c("gene","all_gene","barcode","proportion","guide_count","condition","treatment", "position", "Pseudotime")]
pData_ref$Cell.Type <- ref
pData_query_aligned <- pData(query_ref_aligned)[,c("gene","all_gene","barcode","proportion","guide_count","condition","treatment", "position", "Pseudotime")]
pData_query_aligned$Cell.Type <- query
combined_pData <- rbind(pData_query_aligned, pData_ref)
combined_pData <- combined_pData[colnames(combined_exprs),]
combined_pd <- new("AnnotatedDataFrame", data = combined_pData)
fd <- new("AnnotatedDataFrame", data = fData(ref_cds)[row.names(combined_exprs),1:2])
message("Creating a new cds object with a common pseudospatial axes")
ref_queryToRef_combined_cds <- newCellDataSet(combined_exprs,
phenoData = combined_pd,
featureData = fd,
expressionFamily=negbinomial.size(),
lowerDetectionLimit=1)
pData(ref_queryToRef_combined_cds)$cell_id <- row.names(pData(ref_queryToRef_combined_cds))
return(ref_queryToRef_combined_cds)
}
# Expectation-maximization model to correct for different efficiencies across sgRNAs
get.guide.weights <- function(mat, ntc.dist, n.iterations = 30) {
  # EM-style estimation of per-guide loss-of-function (LOF) proportions.
  #   mat      : guides x regions count matrix (at least 2 guides)
  #   ntc.dist : region distribution of non-targeting controls (sums to 1)
  # Returns a numeric vector of estimated LOF proportions, one per guide.
  n.guides.total <- nrow(mat)
  cells.per.guide <- rowSums(mat)
  observed.dist <- sweep(mat, 1, cells.per.guide, "/")
  lof.proportion <- rep(0.5, n.guides.total)
  expected.lof.cells <- cells.per.guide * lof.proportion

  for (iteration in seq_len(n.iterations)) {
    # E-step: recover the LOF region distribution implied by each guide
    # (observed mixture minus the NTC component), then pool across guides
    # weighted by each guide's expected number of LOF cells.
    implied.lof <- sapply(seq_len(n.guides.total), function(g) {
      p <- lof.proportion[g]
      (observed.dist[g, ] - (1 - p) * ntc.dist) / p
    })
    pooled.lof <- rowSums(sweep(implied.lof, 2, expected.lof.cells / sum(expected.lof.cells), "*"))
    pooled.lof <- ifelse(pooled.lof < 0, 0, pooled.lof)
    pooled.lof <- pooled.lof / sum(pooled.lof)
    # M-step: per guide, find the mixture weight p maximizing the
    # multinomial log-likelihood of its counts under p*LOF + (1-p)*NTC.
    lof.proportion <- sapply(seq_len(n.guides.total), function(g) {
      optimize(function(p) dmultinom(mat[g, ], prob = p * pooled.lof + (1 - p) * ntc.dist, log = TRUE),
               c(0.0, 1.0), maximum = TRUE)$maximum
    })
    expected.lof.cells <- cells.per.guide * lof.proportion
  }
  lof.proportion
}
# Estimate an empirical FDR by repeatedly relabeling 50 random NTC cells as a
# decoy "target" ("NTC_decoy") and re-running the target-level chi-square
# screen; the decoy's q-values calibrate the real targets' q-values.
#   cds        : named list of aligned CDS objects (expects "Mock", "TGFB")
#   iterations : number of random decoy draws
# Returns a list (one entry per iteration) of per-condition BH q-value vectors.
# NOTE(review): several steps read `cds.aligned.list` and
# `weighted.target.region.mat` from the enclosing environment rather than the
# `cds` argument — confirm the caller always passes cds.aligned.list.
calculate_ntc_empirical_fdr <- function(cds, iterations){
chisq_qval.list <- list()
# Median cells-per-target, per condition (computed from the global
# cds.aligned.list, not the `cds` argument).
median_NTC.list <- list()
median_NTC.list[["Mock"]] <- median((pData(cds.aligned.list[["Mock"]]) %>%
group_by(gene) %>%
summarize(n = n()))$n)
median_NTC.list[["TGFB"]] <- median((pData(cds.aligned.list[["TGFB"]]) %>%
group_by(gene) %>%
summarize(n = n()))$n)
# Pool of NTC cell ids to draw decoys from, per condition.
NTC_cell_subset.list <- list()
for(sample in names(cds.aligned.list)){
NTC_cell_subset.list[[sample]] <- row.names(subset(pData(cds.aligned.list[[sample]]),
gene == "NONTARGETING"))
}
for(i in 1:iterations){
# Progress messages at selected iterations.
# NOTE(review): 100 appears twice in this vector; the final entry was
# likely meant to be 1000 — confirm.
if(i %in% c(1,10,100,250,500,750,100)){message(paste0("Iteration ", i," of ",as.character(iterations)))}
cds_list <- cds
# Draw 50 NTC cells (seeded by iteration index for reproducibility) and
# relabel them as the decoy target.
random_NTC_subset.list <- list()
for(sample in names(cds_list)){
set.seed(i)
random_NTC_subset.list[[sample]] <- sample(NTC_cell_subset.list[[sample]], 50, replace = FALSE)
}
new_gene_assignments.list <- list()
for(sample in names(cds_list)){
new_gene_assignments.list[[sample]] <- sapply(pData(cds_list[[sample]])$cell,function(x){
if(x %in% random_NTC_subset.list[[sample]]) return("NTC_decoy")
return(pData(cds_list[[sample]])[x,]$gene)
})
}
pData(cds_list[["Mock"]])$gene <- new_gene_assignments.list[["Mock"]]
pData(cds_list[["TGFB"]])$gene <- new_gene_assignments.list[["TGFB"]]
# Targets eligible for testing: >= 15 cells and >= 1 analysis guide.
analysis.targets = list()
analysis.targets[["Mock"]] = as.data.frame(pData(cds_list[["Mock"]]) %>% filter(gene != "NONTARGETING") %>%
group_by(gene) %>% summarize(
n.cells = n(),
n.guides = length(intersect(unique(barcode), analysis.guides[["Mock"]]))) %>%
filter(n.cells >= 15, n.guides >= 1) %>% dplyr::select(gene))[,1]
analysis.targets[["TGFB"]] = as.data.frame(pData(cds_list[["TGFB"]]) %>% filter(gene != "NONTARGETING") %>%
group_by(gene) %>% summarize(
n.cells = n(),
n.guides = length(intersect(unique(barcode), analysis.guides[["TGFB"]]))) %>%
filter(n.cells >= 15, n.guides >= 1) %>% dplyr::select(gene))[,1]
# Rebuild target<->guide lookup tables for the relabeled data.
target.to.guide.map <- list()
for (target in analysis.targets[["Mock"]]) {
target.to.guide.map[["Mock"]][[target]] =
sort(unique(as.data.frame(pData(cds_list[["Mock"]]) %>%
filter(gene == target, barcode %in% analysis.guides[["Mock"]]) %>%
dplyr::select(barcode))[, 1]))
}
for (target in analysis.targets[["TGFB"]]) {
target.to.guide.map[["TGFB"]][[target]] =
sort(unique(as.data.frame(pData(cds_list[["TGFB"]]) %>%
filter(gene == target, barcode %in% analysis.guides[["TGFB"]]) %>%
dplyr::select(barcode))[, 1]))
}
guide.to.target.map = list()
for(sample in names(cds_list)){
guide.to.target.map[[sample]] = list()
for (target in analysis.targets[[sample]]) {
for (guide in target.to.guide.map[[sample]][[target]]) {
guide.to.target.map[[sample]][[guide]] = target
}
}
}
# Target x region cell-count matrices (NTC rows included).
target.region.mat = list()
target.region.mat[["Mock"]] = acast(
pData(cds_list[["Mock"]]) %>%
filter(barcode %in% analysis.guides[["Mock"]] | gene == "NONTARGETING") %>%
mutate(dummy = 1) %>% dplyr::select(gene, region, dummy),
gene ~ region, value.var = "dummy", fun.aggregate = sum, fill = 0)
target.region.mat[["TGFB"]] = acast(
pData(cds_list[["TGFB"]]) %>%
filter(barcode %in% analysis.guides[["TGFB"]] | gene == "NONTARGETING") %>%
mutate(dummy = 1) %>% dplyr::select(gene, region, dummy),
gene ~ region, value.var = "dummy", fun.aggregate = sum, fill = 0)
# Extract the decoy's region counts as a 1-row matrix.
NTC_decoy.region.mat <- list()
NTC_decoy.region.mat[["Mock"]] <- matrix(target.region.mat[["Mock"]][row.names(target.region.mat[["Mock"]]) == "NTC_decoy",],
nrow = 1)
row.names(NTC_decoy.region.mat[["Mock"]]) <- "NTC_decoy"
NTC_decoy.region.mat[["TGFB"]] <- matrix(target.region.mat[["TGFB"]][row.names(target.region.mat[["TGFB"]]) == "NTC_decoy",],
nrow = 1)
row.names(NTC_decoy.region.mat[["TGFB"]]) <- "NTC_decoy"
# NOTE(review): weighted.target.region.mat comes from the enclosing
# environment; this rbind re-binds it locally and appends a new
# "NTC_decoy" row on EVERY iteration, while name-based indexing below
# returns only the first such row — confirm this accumulation is intended.
weighted.target.region.mat[["Mock"]] <- rbind(weighted.target.region.mat[["Mock"]],
NTC_decoy.region.mat[["Mock"]])
weighted.target.region.mat[["TGFB"]] <- rbind(weighted.target.region.mat[["TGFB"]],
NTC_decoy.region.mat[["TGFB"]])
# NTC cell counts per region (0.1 pseudocount for empty regions); region and
# gene are coerced to factors so tidyr::complete restores missing regions.
NTC.region.p = list()
for(sample in names(cds_list)){
pData(cds_list[[sample]])$gene <- as.factor(pData(cds_list[[sample]])$gene)
pData(cds_list[[sample]])$region <- as.factor(pData(cds_list[[sample]])$region)
NTC.region.p[[sample]] <- pData(cds_list[[sample]]) %>%
group_by(gene, region) %>%
summarize(n = n()) %>%
tidyr::complete(region, fill = list(n = 0.1)) %>%
filter(gene == "NONTARGETING")
}
# Normalized NTC region distribution from the weighted matrix.
ntc.distribution = list()
for(sample in names(cds_list)){
ntc.distribution[[sample]] = weighted.target.region.mat[[sample]]["NONTARGETING",]
ntc.distribution[[sample]] = ntc.distribution[[sample]] / sum(ntc.distribution[[sample]])
}
# Chi-square goodness-of-fit of each target's region counts against the NTC
# distribution (analytic p-values; simulate.p.value is FALSE, so B is unused).
initial.target.level.chisq.pval = list()
for(sample in names(cds_list)){
set.seed(42)
initial.target.level.chisq.pval[[sample]] = sapply(
analysis.targets[[sample]], function(target) {
suppressWarnings({chisq.test(
weighted.target.region.mat[[sample]][target,],
p = NTC.region.p[[sample]]$n,
simulate.p.value = F, rescale.p = T, B = 1000)$p.value})
})
}
# BH-adjust and floor q-values at 1e-50 to avoid numeric underflow.
initial.target.level.chisq.qval <- list()
for(sample in names(cds_list)){
initial.target.level.chisq.qval[[sample]] <- p.adjust(initial.target.level.chisq.pval[[sample]], method = "BH")
initial.target.level.chisq.qval[[sample]] <- sapply(initial.target.level.chisq.qval[[sample]],
function(x){if(x < 1e-50){return(1e-50)}else{return(x)}})
}
chisq_qval.list[[i]] <- initial.target.level.chisq.qval
}
return(chisq_qval.list)
}
#### Load data ####
Pseudospace_lof_cds <- readRDS("CROPseq_pseudospace_cds.rds")
# Create a cds subset for each stimulation condition that contains spatially isolated cells
# Keep cells with a spatial proportion value and exactly one detected guide.
cds.list <- list()
cds.list[["Mock"]] <- Pseudospace_lof_cds[,!is.na(pData(Pseudospace_lof_cds)$proportion) &
pData(Pseudospace_lof_cds)$guide_count == 1 &
pData(Pseudospace_lof_cds)$treatment == "mock"]
cds.list[["TGFB"]] <- Pseudospace_lof_cds[,!is.na(pData(Pseudospace_lof_cds)$proportion) &
pData(Pseudospace_lof_cds)$guide_count == 1 &
pData(Pseudospace_lof_cds)$treatment == "tgfb"]
# Print cell counts per condition.
# NOTE(review): group_by(sample) resolves to a pData column named "sample"
# if one exists; otherwise it falls back to the loop variable (a length-1
# string), yielding a single group — confirm which is intended.
for(sample in names(cds.list)){
print(pData(cds.list[[sample]]) %>%
group_by(sample) %>% summarize(n = n()))
}
# Identify genes that are expressed in at least 50 cells
expressed_genes.list <- list()
expressed_genes.list[["Mock"]] <- row.names(fData(cds.list[["Mock"]])[Matrix::rowSums(Biobase::exprs(cds.list[["Mock"]]) > 0) > 50 ,])
length(expressed_genes.list[["Mock"]])
expressed_genes.list[["TGFB"]] <- row.names(fData(cds.list[["TGFB"]])[Matrix::rowSums(Biobase::exprs(cds.list[["TGFB"]]) > 0) > 50 ,])
length(expressed_genes.list[["TGFB"]])
# Size factors / dispersions / expressed-gene detection per condition.
for(sample in names(cds.list)) {
cds.list[[sample]] <- preprocess_cds(cds.list[[sample]])
}
# Identify genes that vary significantly between inner and outer CROPseq cell fractions
Spatial.DEG.test.list <- list()
for(sample in names(cds.list)){
Spatial.DEG.test.list[[sample]] <- differentialGeneTest(cds.list[[sample]][expressed_genes.list[[sample]]],
fullModelFormulaStr = "~position",
reducedModelFormulaStr = "~1",
cores = 1)
}
# Calculate fold change in expression levels of significant genes between CROPseq cell fractions isolated by space
for(sample in names(Spatial.DEG.test.list)){
diff_test_genes <- row.names(Spatial.DEG.test.list[[sample]])
diff_cds <- cds.list[[sample]][diff_test_genes]
diff_FC <- diff_foldChange(diff_cds, "position","inner")
Spatial.DEG.test.list[[sample]]$log2_foldChange <- diff_FC$log2FC_outer
rm(diff_test_genes,diff_cds,diff_FC)
}
# Ordering genes: stringent q-value cutoff plus |log2 FC| >= 1.
Spatial_sig_genes.list <- list()
for(sample in names(Spatial.DEG.test.list)){
Spatial_sig_genes.list[[sample]] <- row.names(subset(Spatial.DEG.test.list[[sample]], qval <= 1e-6 &
abs(log2_foldChange) >= 1))
print(length(Spatial_sig_genes.list[[sample]]))
}
# Create pseudospatial trajectories and examine the distribution of inner and outer cells within them
for(sample in names(cds.list)){
cds.list[[sample]] <- getPseudospaceTrajectory(cds.list[[sample]],
Spatial_sig_genes.list[[sample]])
}
# NOTE(review): getPseudospaceTrajectory already calls orderCells with
# reverse = FALSE, so these two calls appear redundant — confirm.
cds.list[["Mock"]] <- orderCells(cds.list[["Mock"]], reverse = F)
cds.list[["TGFB"]] <- orderCells(cds.list[["TGFB"]], reverse = F)
plot_cell_trajectory(cds.list[["Mock"]], color_by = "position",show_branch_points = FALSE) +
theme(legend.position="top", text=element_text(size=20), legend.direction = "vertical") +
scale_color_manual(labels = c("inner colony", "outer colony"), values = c("#0075F2", "#D62828"),
name = "Spatial Context") +
ggsave(file = "MCF10A_Mock_loss_of_function_PseudospatialTrajectory.png", height = 6, width = 6)
ggplot(pData(cds.list[["Mock"]]), aes(x = Pseudotime, fill = position, color = position)) +
geom_density() +
facet_wrap(~position, ncol = 1) +
theme_classic() +
scale_color_manual("Spatial Context", labels = c("inner colony", "outer colony"),
values = c("#000000","#000000")) +
scale_fill_manual("Spatial Context", labels = c("inner colony", "outer colony")
, values = c("#0075F2","#D62828")) +
xlab("Pseudospace") +
ylab("Cell density") +
monocle:::monocle_theme_opts() +
theme(legend.position = "top", legend.direction = "vertical", text=element_text(size=20)) +
ggsave("Mock_loss_of_function_cell_density_accross_pseudospace_geom_density.png", height = 6, width = 5)
plot_cell_trajectory(cds.list[["TGFB"]], color_by = "position",show_branch_points = FALSE) +
theme(legend.position="top", text=element_text(size=20), legend.direction = "vertical") +
scale_color_manual(labels = c("inner colony", "outer colony"), values = c("#70163C", "#38726C"),
name = "Spatial Context") +
ggsave(file = "MCF10A_TGFB_loss_of_function_PseudospatialTrajectory.png", height = 6, width = 6)
ggplot(pData(cds.list[["TGFB"]]), aes(x = Pseudotime, fill = position, color = position)) +
geom_density() +
facet_wrap(~position, ncol = 1) +
theme_classic() +
scale_color_manual("Spatial Context", labels = c("inner colony", "outer colony"),
values = c("#000000","#000000")) +
scale_fill_manual("Spatial Context", labels = c("inner colony", "outer colony")
, values = c("#70163C", "#38726C")) +
xlab("Pseudospace") +
ylab("Cell density") +
monocle:::monocle_theme_opts() +
theme(legend.position = "top", legend.direction = "vertical", text=element_text(size=20)) +
ggsave("TGFB_loss_of_function_cell_density_accross_pseudospace_geom_density.png", height = 6, width = 5)
# CDH1 expression along pseudospace for the Mock condition, colored by
# spatial context (inner vs outer colony).
plot_genes_in_pseudotime(cds.list[["Mock"]][fData(cds.list[["Mock"]])$gene_short_name == "CDH1",],
color_by = "position", min_expr = 0.1) +
theme(text=element_text(size=20)) +
scale_color_manual(labels = c("inner colony", "outer colony"), values = c("#0075F2", "#D62828"),
name = "Spatial Context")
# Same for the TGFB condition (different color palette).
plot_genes_in_pseudotime(cds.list[["TGFB"]][fData(cds.list[["TGFB"]])$gene_short_name == "CDH1",],
color_by = "position", min_expr = 0.1)+
theme(text=element_text(size=20)) +
scale_color_manual(labels = c("inner colony", "outer colony"), values = c("#38726C", "#70163C"),
name = "Spatial Context")
# Genes expressed in either condition. union() already returns unique values,
# so the extra unique() wrapper in the original was redundant.
expressed_genes <- union(expressed_genes.list[["Mock"]], expressed_genes.list[["TGFB"]])
# Plot the expression of known EMT markers across pseudospace (Mock).
Mock_Figure1_Mar <- cds.list[["Mock"]][row.names(subset(fData(cds.list[["Mock"]]), gene_short_name %in%
                                                          c("CDH1","CRB3","DSP", "CDH2","FN1","VIM"))),]
mock_marker_plot <- plot_genes_in_pseudotime(Mock_Figure1_Mar, color_by = "spatial_id", ncol = 2, min_expr = 0.1,
                                             panel_order = c("CDH1","CDH2","CRB3","FN1","DSP","VIM")) +
  xlab("Pseudospace") +
  theme(legend.position = "none",text=element_text(size=20)) +
  scale_color_manual(values = c("inner" = "#0075F2","outer"="#D62828"))
mock_marker_plot
# Save explicitly rather than chaining ggsave() with `+` (deprecated ggplot2 behavior).
ggsave("MCF10A_Mock_CFG_Figure1Markers_byPseudospace.png", plot = mock_marker_plot, width = 6, height = 5)
# Plot the expression of known EMT markers across pseudospace (TGFB).
TGFB_Figure1_Mar <- cds.list[["TGFB"]][row.names(subset(fData(cds.list[["TGFB"]]), gene_short_name %in%
                                                          c("CDH1","CRB3","DSP", "CDH2","FN1","VIM"))),]
tgfb_marker_plot <- plot_genes_in_pseudotime(TGFB_Figure1_Mar, color_by = "spatial_id", ncol = 2, min_expr = 0.1,
                                             panel_order = c("CDH1","CDH2","CRB3","FN1","DSP","VIM")) +
  xlab("Pseudospace") +
  theme(legend.position = "none",text=element_text(size=20)) +
  scale_color_manual(values = c("inner" = "#70163C","outer"="#38726C"))
tgfb_marker_plot
# Save explicitly rather than chaining ggsave() with `+` (deprecated ggplot2 behavior).
ggsave("MCF10A_TGFB_CFG_Figure1Markers_byPseudospace.png", plot = tgfb_marker_plot, width = 6, height = 5)
# Use dynamic time warping to align Mock and TGFB pseudospatial trajectories and create a cds object of aligned trajectories
TGFB.to.Mock.CFG.aligned.cds <- getDTWcds(cds.list[["TGFB"]],cds.list[["Mock"]],
ref = "Mock", query = "TGFB",
expressed_genes = expressed_genes, cores = 1)
TGFB.to.Mock.CFG.aligned.cds <- estimateSizeFactors(TGFB.to.Mock.CFG.aligned.cds)
# Divide the aligned cds by treatment to test accumulation of knockouts along pseudospace independently
cds.aligned.list <- list()
cds.aligned.list[["Mock"]] <- TGFB.to.Mock.CFG.aligned.cds[,pData(TGFB.to.Mock.CFG.aligned.cds)$Cell.Type == "Mock"]
cds.aligned.list[["TGFB"]] <- TGFB.to.Mock.CFG.aligned.cds[,pData(TGFB.to.Mock.CFG.aligned.cds)$Cell.Type == "TGFB"]
# Re-run gene detection / size factors / dispersions on each condition subset.
for(sample in names(cds.aligned.list)){
cds.aligned.list[[sample]] <- preprocess_cds(cds.aligned.list[[sample]])
}
# Overwrite the low-dimensional embedding slot (@reducedDimA) with the 1-D
# aligned pseudospace coordinate so that the densityPeak clustering below
# groups cells by their position along pseudospace rather than by a 2-D
# embedding. NOTE(review): this mutates a monocle-internal slot directly.
for(sample in names(cds.aligned.list)){
cds.aligned.list[[sample]]@reducedDimA <- t(as.matrix(pData(cds.aligned.list[[sample]])$Pseudotime))
colnames(cds.aligned.list[[sample]]@reducedDimA) <- row.names(pData(cds.aligned.list[[sample]]))
}
# Initial clustering with default thresholds; re-run below with thresholds
# chosen from the rho/delta plots.
for(sample in names(cds.aligned.list)){
cds.aligned.list[[sample]] <- clusterCells(cds.aligned.list[[sample]], method = "densityPeak")
}
# Inspect rho/delta decision plots to pick densityPeak thresholds per condition.
plot_rho_delta(cds.aligned.list[["Mock"]],rho_threshold = 50, delta_threshold = 5) +
scale_y_continuous(breaks = scales::pretty_breaks(n = 50)) +
scale_x_continuous(breaks = scales::pretty_breaks(n = 10))
plot_rho_delta(cds.aligned.list[["TGFB"]], rho_threshold = 10, delta_threshold = 5) +
scale_y_continuous(breaks = scales::pretty_breaks(n = 50)) +
scale_x_continuous(breaks = scales::pretty_breaks(n = 10))
# Thresholds chosen from the plots above: c(rho, delta) per condition.
rho_delta.list <- list()
rho_delta.list[["Mock"]] <- c(50,5)
rho_delta.list[["TGFB"]] <- c(10,5)
# Re-cluster with the chosen thresholds (skip_rho_sigma reuses the rho/sigma
# computation from the first clusterCells pass).
for(sample in names(cds.aligned.list)){
cds.aligned.list[[sample]] <- clusterCells(cds.aligned.list[[sample]], method = "densityPeak",
verbose = T,
rho_threshold = rho_delta.list[[sample]][1],
delta_threshold = rho_delta.list[[sample]][2],
skip_rho_sigma = T)
}
# Visualize where each densityPeak cluster falls along pseudospace; these
# plots inform the cluster -> region re-ordering below.
ggplot(pData(cds.aligned.list[["Mock"]]), aes(x = Pseudotime, fill = Cluster)) +
geom_density() + monocle:::monocle_theme_opts()
ggplot(pData(cds.aligned.list[["TGFB"]]), aes(x = Pseudotime, fill = Cluster)) +
geom_density() + monocle:::monocle_theme_opts()
# Re-order regions to be in order from low to high pseudospace.
# Named lookup vectors (cluster label -> region label) replace the original
# per-element if() chains, which silently returned NULL for any unmatched
# cluster; unmatched clusters now trigger an explicit error.
cluster.to.region <- list()
cluster.to.region[["Mock"]] <- c("3" = "1", "6" = "2", "7" = "3", "1" = "4",
                                 "4" = "5", "5" = "6", "2" = "7")
cluster.to.region[["TGFB"]] <- c("3" = "1", "7" = "2", "8" = "3", "6" = "4",
                                 "4" = "5", "1" = "6", "5" = "7", "2" = "8")
region.list <- list()
for(sample in names(cluster.to.region)){
  clusters <- as.character(pData(cds.aligned.list[[sample]])$Cluster)
  region.list[[sample]] <- unname(cluster.to.region[[sample]][clusters])
  # Fail loudly if clusterCells produced a cluster label not covered above.
  stopifnot(!any(is.na(region.list[[sample]])))
}
for(sample in names(cds.aligned.list)){
  pData(cds.aligned.list[[sample]])$region <- region.list[[sample]]
}
# Sanity check: region labels should now increase monotonically along pseudospace.
ggplot(pData(cds.aligned.list[["Mock"]]), aes(x = Pseudotime, fill = region)) +
geom_density() + monocle:::monocle_theme_opts()
ggplot(pData(cds.aligned.list[["TGFB"]]), aes(x = Pseudotime, fill = region)) +
geom_density() + monocle:::monocle_theme_opts()
# Transfer region assignments from the aligned cds objects back onto the
# original (unaligned) cds objects, matching cells by row name. Direct
# row-name indexing replaces the original per-cell sapply() lookups; cells
# absent from the aligned object get NA, as before.
pData(cds.list[["Mock"]])$region <-
  pData(cds.aligned.list[["Mock"]])[row.names(pData(cds.list[["Mock"]])), "region"]
pData(cds.list[["TGFB"]])$region <-
  pData(cds.aligned.list[["TGFB"]])[row.names(pData(cds.list[["TGFB"]])), "region"]
# For each condition, keep guide barcodes that (a) come from cells carrying
# exactly one guide (guide_count == 1) and (b) are represented by at least 10
# such cells. (The original loop also built a top-10 summary pipeline whose
# result was discarded inside the loop -- dead code, removed.)
analysis.guides = list()
for(sample in names(cds.aligned.list)){
  analysis.guides[[sample]] =
    (pData(cds.aligned.list[[sample]]) %>% filter(guide_count == 1) %>% group_by(gene, barcode) %>%
       summarize(n.guide.cells = n()) %>% group_by(gene) %>% mutate(n.target.cells = sum(n.guide.cells)) %>%
       filter(n.guide.cells >= 10) %>% ungroup())$barcode
}
# Targets analyzed per condition: genes (excluding non-targeting controls)
# with at least 15 cells and at least one guide passing the per-guide filter.
# A loop over conditions replaces the original Mock/TGFB copy-paste.
analysis.targets = list()
for (condition in c("Mock", "TGFB")) {
  analysis.targets[[condition]] = as.data.frame(
    pData(cds.aligned.list[[condition]]) %>% filter(gene != "NONTARGETING") %>%
      group_by(gene) %>% summarize(
        n.cells = n(),
        n.guides = length(intersect(unique(barcode), analysis.guides[[condition]]))) %>%
      filter(n.cells >= 15, n.guides >= 1) %>% dplyr::select(gene))[,1]
}
# Cell counts per target gene x pseudospace region (NTC cells included so the
# "NONTARGETING" row can serve as the reference distribution downstream).
# A loop over conditions replaces the original Mock/TGFB copy-paste.
target.region.mat = list()
for (condition in c("Mock", "TGFB")) {
  target.region.mat[[condition]] = acast(
    pData(cds.aligned.list[[condition]]) %>%
      filter(barcode %in% analysis.guides[[condition]] | gene == "NONTARGETING") %>%
      mutate(dummy = 1) %>% dplyr::select(gene, region, dummy),
    gene ~ region, value.var = "dummy", fun.aggregate = sum, fill = 0)
}
# Map each analyzed target gene to its sorted, de-duplicated set of guides
# passing the per-guide filter. A nested loop over conditions replaces the
# original Mock/TGFB copy-paste.
target.to.guide.map <- list()
for (condition in c("Mock", "TGFB")) {
  for (target in analysis.targets[[condition]]) {
    target.to.guide.map[[condition]][[target]] =
      sort(unique(as.data.frame(pData(cds.aligned.list[[condition]]) %>%
                                  filter(gene == target, barcode %in% analysis.guides[[condition]]) %>%
                                  dplyr::select(barcode))[, 1]))
  }
}
# Non-targeting control guide barcodes (taken from the Mock condition).
NTC.guides <- unique(pData(cds.aligned.list[["Mock"]])[pData(cds.aligned.list[["Mock"]])$gene == "NONTARGETING",]$barcode)
# Cell counts per guide barcode x region, excluding NTC guides.
# A loop over conditions replaces the original Mock/TGFB copy-paste.
guide.region.mat = list()
for (condition in c("Mock", "TGFB")) {
  guide.region.mat[[condition]] = acast(
    pData(cds.aligned.list[[condition]]) %>%
      filter(barcode %in% analysis.guides[[condition]][!(analysis.guides[[condition]] %in% NTC.guides)]) %>%
      mutate(dummy = 1) %>% dplyr::select(barcode, region, dummy),
    barcode ~ region, value.var = "dummy", fun.aggregate = sum, fill = 0)
}
# Fraction of NTC cells falling in each pseudospace region, per condition.
ntc.distribution = list()
for (sample in names(cds.aligned.list)) {
  ntc.counts <- target.region.mat[[sample]]["NONTARGETING", ]
  ntc.distribution[[sample]] <- ntc.counts / sum(ntc.counts)
}
# Collapse per-guide region counts into a per-target region profile. Guides
# are weighted by get.guide.weights (presumably from
# Pseudospace_support_functions.R, sourced at the top of the file -- confirm),
# which compares each guide's distribution to the NTC distribution; weights
# are scaled so the strongest guide has weight 1. Single-guide targets keep
# their raw counts.
weighted.target.region.mat <- list()
for (condition in c("Mock", "TGFB")) {
weighted.target.region.mat[[condition]] = t(sapply(analysis.targets[[condition]],
function(target) {
guides = target.to.guide.map[[condition]][[target]]
if (length(guides) == 1) {
return(target.region.mat[[condition]][target,])
} else {
mat = guide.region.mat[[condition]][guides,]
guide.weights = get.guide.weights(mat, ntc.distribution[[condition]])
# Scale so the maximum guide weight is 1.
guide.weights = guide.weights / max(guide.weights)
print(condition)
print(target)
print(round(guide.weights, 3))
#return(target.cluster.mat[[condition]][target,])
# Weighted, rounded sum of region counts across the target's guides.
return(round(colSums(sweep(mat, 1, guide.weights, "*"))))
}
}))
}
# Extract the (unweighted) NTC counts as a 1-row matrix and append it to each
# weighted target x region matrix so downstream code can compare targets
# against the NTC distribution. A loop over conditions replaces the original
# Mock/TGFB copy-paste.
NTC.region.mat <- list()
for (condition in c("Mock", "TGFB")) {
  NTC.region.mat[[condition]] <- matrix(
    target.region.mat[[condition]][row.names(target.region.mat[[condition]]) == "NONTARGETING", ],
    nrow = 1)
  row.names(NTC.region.mat[[condition]]) <- "NONTARGETING"
  weighted.target.region.mat[[condition]] <- rbind(weighted.target.region.mat[[condition]],
                                                   NTC.region.mat[[condition]])
}
# Quick QC readout: median number of cells per target gene (TGFB condition).
median((pData(cds.aligned.list[["TGFB"]]) %>%
group_by(gene) %>%
summarize(n = n()))$n)
# Inspect which targets (plus the NTC row) are in the weighted Mock matrix.
row.names(weighted.target.region.mat[["Mock"]])
# Calculate empirical FDR
# calculate_ntc_empirical_fdr (support functions) is run with 1000 iterations;
# each list element holds a pair of per-target chi-squared p-value vectors
# (element 1 = Mock, element 2 = TGFB), including an "NTC_decoy" row.
chisq_qval.list <- calculate_ntc_empirical_fdr(cds.aligned.list, 1000)
mock_chisq_pval.list <- lapply(chisq_qval.list, `[[`, 1)
tgfb_chisq_pval.list <- lapply(chisq_qval.list, `[[`, 2)
# Assemble targets x iterations p-value matrices (rbind then transpose).
mock_chisq_pval_df <- do.call("rbind",mock_chisq_pval.list)
mock_chisq_pval_df <- t(mock_chisq_pval_df)
tgfb_chisq_pval_df <- do.call("rbind",tgfb_chisq_pval.list)
tgfb_chisq_pval_df <- t(tgfb_chisq_pval_df)
mock_chisq_pval_df_test <- as.data.frame(mock_chisq_pval_df)
tgfb_chisq_pval_df_test <- as.data.frame(tgfb_chisq_pval_df)
# Pull out the NTC decoy rows (null distribution across iterations).
mock_chisq_pval_NTC_df <- t(mock_chisq_pval_df_test["NTC_decoy",])
colnames(mock_chisq_pval_NTC_df) <- "mock_NTC_decoy"
tgfb_chisq_pval_NTC_df <- t(tgfb_chisq_pval_df_test["NTC_decoy",])
colnames(tgfb_chisq_pval_NTC_df) <- "tgfb_NTC_decoy"
mock_chisq_pval_df_test["NTC_decoy",]
chisq_pval_NTC_df <- merge(mock_chisq_pval_NTC_df,tgfb_chisq_pval_NTC_df, by = "row.names")
# NOTE(review): row 22 is a hard-coded positional index (presumably the MET
# target) -- fragile; confirm and prefer indexing by row name.
met <- tgfb_chisq_pval_df[22,]
length(met[met < 0.005])
# Null p-value distributions for the NTC decoy should look roughly uniform.
ggplot(chisq_pval_NTC_df, aes(x = mock_NTC_decoy)) +
geom_histogram() +
xlim(0,1)
ggplot(chisq_pval_NTC_df, aes(x = tgfb_NTC_decoy)) +
geom_histogram() +
xlim(0,1)
# Per iteration, flag targets whose p-value is >= the NTC decoy's p-value;
# the empirical FDR is the fraction of the 1000 iterations in which the decoy
# beat (or tied) the target.
mock_empirical_FDR_df <- apply(mock_chisq_pval_df,2,function(x){
x >= x["NTC_decoy"]
})
tgfb_empirical_FDR_df <- apply(tgfb_chisq_pval_df,2,function(x){
x >= x["NTC_decoy"]
})
mock_empirical_FDR <- rowSums(mock_empirical_FDR_df)/1000
tgfb_empirical_FDR <- rowSums(tgfb_empirical_FDR_df)/1000
empirical_FDR_df <- as.data.frame(cbind(names(mock_empirical_FDR),mock_empirical_FDR,tgfb_empirical_FDR))
# NOTE(review): all three columns are character here, so reshape2::melt()
# treats them all as id variables and appears to be a no-op -- verify intent.
empirical_FDR_df <- melt(empirical_FDR_df)
colnames(empirical_FDR_df) <- c("target", "mock_FDR","TGFB_FDR")
# cbind() coerced everything to character; restore numeric FDR columns.
empirical_FDR_df$target <- as.character(empirical_FDR_df$target)
empirical_FDR_df$mock_FDR <- as.numeric(as.character(empirical_FDR_df$mock_FDR))
empirical_FDR_df$TGFB_FDR <- as.numeric(as.character(empirical_FDR_df$TGFB_FDR))
# Plot FDR by KO at the target level (spontaneous EMT), ordered by FDR.
empirical_FDR_df <- empirical_FDR_df[order(empirical_FDR_df$mock_FDR, decreasing = FALSE),]
empirical_FDR_df$target <- factor(empirical_FDR_df$target, levels = empirical_FDR_df$target)
mock_fdr_plot <- ggplot(empirical_FDR_df,aes( x = as.factor(target), y = mock_FDR, fill = mock_FDR < 0.1)) +
  geom_bar(stat = "identity") +
  geom_hline(yintercept = 0.1, linetype = "dashed", color = "dimgrey") +
  xlab("Target") +
  ylab("FDR\n(empirically determined)") +
  ggtitle("Spontaneous EMT") +
  scale_fill_manual("FDR < 0.1", values = c("TRUE" = "red","FALSE" = "black")) +
  theme(text = element_text(size = 6),
        axis.text.x = element_text(angle = 90, hjust = 1),
        plot.title = element_text(hjust = 0.5)) +
  monocle:::monocle_theme_opts()
mock_fdr_plot
# Save explicitly rather than chaining ggsave() with `+` (deprecated ggplot2 behavior).
ggsave("Mock_empirical_target_level_FDR.png", plot = mock_fdr_plot,
       height = 2, width = 4, units = "in")
# Same target-level FDR bar plot for the TGF-B-driven EMT condition.
empirical_FDR_df <- empirical_FDR_df[order(empirical_FDR_df$TGFB_FDR, decreasing = FALSE),]
empirical_FDR_df$target <- factor(empirical_FDR_df$target, levels = empirical_FDR_df$target)
tgfb_fdr_plot <- ggplot(empirical_FDR_df,aes( x = as.factor(target), y = TGFB_FDR, fill = TGFB_FDR < 0.1)) +
  geom_bar(stat = "identity") +
  geom_hline(yintercept = 0.1, linetype = "dashed", color = "dimgrey") +
  xlab("Target") +
  ylab("FDR\n(empirically determined)") +
  ggtitle("TGF-B-driven EMT") +
  scale_fill_manual("FDR < 0.1",values = c("TRUE" = "red","FALSE" = "black")) +
  theme(text = element_text(size = 6),
        axis.text.x = element_text(angle = 90, hjust = 1),
        plot.title = element_text(hjust = 0.5)) +
  monocle:::monocle_theme_opts()
tgfb_fdr_plot
# Save explicitly rather than chaining ggsave() with `+` (deprecated ggplot2 behavior).
ggsave("TGFB_empirical_target_level_FDR.png", plot = tgfb_fdr_plot,
       height = 2, width = 4, units = "in")
# NTC cell counts per region; complete() fills regions with no NTC cells with
# a pseudo-count of 0.1 to avoid zeros downstream.
NTC.region.p = list()
for(sample in names(cds.aligned.list)){
NTC.region.p[[sample]] <- pData(cds.aligned.list[[sample]]) %>%
group_by(region) %>%
filter(gene == "NONTARGETING") %>%
summarize(n = n()) %>%
complete(region, fill = list(n = 0.1))
}
# Targets passing the target-level screen at empirical FDR < 0.1, per condition.
pass.target.level.screen = list()
pass.target.level.screen[["Mock"]] <- as.character(empirical_FDR_df[empirical_FDR_df$mock_FDR < 0.1,]$target)
pass.target.level.screen[["TGFB"]] <- as.character(empirical_FDR_df[empirical_FDR_df$TGFB_FDR < 0.1,]$target)
print(pass.target.level.screen[["Mock"]])
print(pass.target.level.screen[["TGFB"]])
# Copy into the name used downstream (kept for compatibility with later code).
targets.passing.initial.screen = list()
for(sample in names(cds.aligned.list)){
targets.passing.initial.screen[[sample]] = pass.target.level.screen[[sample]]
}
# Summary counts: per-condition hits, their intersection, and their union.
length(targets.passing.initial.screen[["Mock"]])
length(targets.passing.initial.screen[["TGFB"]])
length(intersect(targets.passing.initial.screen[["Mock"]],
targets.passing.initial.screen[["TGFB"]]))
length(unique(union(targets.passing.initial.screen[["Mock"]],
targets.passing.initial.screen[["TGFB"]])))
# Re-weigh guides for calculating log2 odds ratios and for plotting enrichemnt heatmaps
# Same weighting scheme as the earlier loop, but restricted to targets that
# passed the target-level screen; overwrites weighted.target.region.mat.
weighted.target.region.mat = list()
for (condition in c("Mock", "TGFB")) {
weighted.target.region.mat[[condition]] = t(sapply(targets.passing.initial.screen[[condition]],
function(target) {
guides = target.to.guide.map[[condition]][[target]]
if (length(guides) == 1) {
return(target.region.mat[[condition]][target,])
} else {
mat = guide.region.mat[[condition]][guides,]
guide.weights = get.guide.weights(mat, ntc.distribution[[condition]])
# Scale so the maximum guide weight is 1.
guide.weights = guide.weights / max(guide.weights)
print(condition)
print(target)
print(round(guide.weights, 3))
#return(target.cluster.mat[[condition]][target,])
return(round(colSums(sweep(mat, 1, guide.weights, "*"))))
}
}))
}
# Per target and region, Fisher's exact test on the 2x2 table of
# (target cells in region / out of region) vs (NTC cells in / out of region),
# using the guide-weighted counts. q-values are BH-adjusted within condition;
# log2 odds of exactly-zero odds ratios are floored at -5.
region.enrichment.df = list()
for (condition in c("Mock", "TGFB")) {
weighted.mat = weighted.target.region.mat[[condition]]
ntc.counts = target.region.mat[[condition]]["NONTARGETING",]
region.enrichment.df[[condition]] = do.call(rbind, lapply(rownames(weighted.mat), function(target) {
do.call(rbind, lapply(1:ncol(weighted.mat), function(region) {
test = fisher.test(cbind(
c(weighted.mat[target, region], sum(weighted.mat[target, -region])),
c(ntc.counts[region], sum(ntc.counts[-region]))))
data.frame(
target = target,
region = region,
odds.ratio = unname(test$estimate),
p.value = test$p.value)
}))
}))
region.enrichment.df[[condition]]$q.value = p.adjust(region.enrichment.df[[condition]]$p.value, "BH")
region.enrichment.df[[condition]]$log2.odds = with(region.enrichment.df[[condition]],
ifelse(odds.ratio == 0, -5, round(log2(odds.ratio),2)))
}
region.enrichment.df[["Mock"]]$target <- as.character(region.enrichment.df[["Mock"]]$target)
region.enrichment.df[["TGFB"]]$target <- as.character(region.enrichment.df[["TGFB"]]$target)
# Clamp log2 odds ratios to [-2, 2] for display; pmin/pmax replaces the
# original pair of element-wise replacement statements.
region.enrichment.heatmap.df <- region.enrichment.df
for(sample in names(region.enrichment.heatmap.df)){
  region.enrichment.heatmap.df[[sample]]$log2.odds <-
    pmax(pmin(region.enrichment.heatmap.df[[sample]]$log2.odds, 2), -2)
}
# Reshape the long data frames into target x region matrices of clamped
# log2 odds for the heatmaps below.
region.enrichment.heatmap.matrix <- list()
for(sample in names(region.enrichment.heatmap.df)){
  region.enrichment.heatmap.matrix[[sample]] <- recast(region.enrichment.heatmap.df[[sample]],
                                                       target ~ region, measure.var = "log2.odds")
  row.names(region.enrichment.heatmap.matrix[[sample]]) <- region.enrichment.heatmap.matrix[[sample]]$target
  # Drop the `target` id column, leaving only region columns.
  region.enrichment.heatmap.matrix[[sample]] <- region.enrichment.heatmap.matrix[[sample]][,-1]
}
# Plot enrichment heatmaps
# Rows (targets) are clustered; columns (regions) keep pseudospace order.
# Bug fix: the original passed the misspelled argument `show_colanmes`, which
# pheatmap silently ignored via `...`; corrected to `show_colnames`.
pheatmap(region.enrichment.heatmap.matrix[["Mock"]],
         clustering_method = "ward.D2",
         show_rownames = T,
         show_colnames = T,
         na_col = "grey90",
         col = colorspace::diverge_hsv(30),
         cluster_col= FALSE,
         cluster_row = TRUE,
         useRaster = TRUE,
         width = 4,
         height = 6,
         file = "Mock_region_enrichment_heatmap.png")
pheatmap(region.enrichment.heatmap.matrix[["TGFB"]],
         clustering_method = "ward.D2",
         show_rownames = T,
         show_colnames = T,
         na_col = "grey90",
         col = colorspace::diverge_hsv(30),
         cluster_col= FALSE,
         cluster_row = TRUE,
         useRaster = TRUE,
         width = 4,
         height = 6,
         file = "TGFB_region_enrichment_heatmap.png")
# Highlight examples of accumulation across pseudospace for EGFR and MET in spontaneous EMT and TGFBRs in TGF-B driven EMT
# Pseudospace bounds of region 3, used to shade that region in the plot.
region_3_min_pseudospace_value <- min(pData(cds.aligned.list[["Mock"]])[pData(cds.aligned.list[["Mock"]])$region == "3",]$Pseudotime)
region_3_max_pseudospace_value <- max(pData(cds.aligned.list[["Mock"]])[pData(cds.aligned.list[["Mock"]])$region == "3",]$Pseudotime)
# NOTE(review): geom_rect with constant aesthetics is drawn once per data row,
# so the tiny alpha (0.01) accumulates across rows -- confirm shading intent.
# NOTE(review): chaining ggsave() with `+` relies on deprecated ggplot2
# behavior; prefer assigning the plot and calling ggsave(plot = ...).
ggplot(pData(cds.aligned.list[["Mock"]])[pData(cds.aligned.list[["Mock"]])$gene %in% c("NONTARGETING", "EGFR", "MET"),],
aes(x = Pseudotime, fill = gene)) +
geom_rect(xmin = region_3_min_pseudospace_value, xmax = region_3_max_pseudospace_value, ymin = 0, ymax = Inf,
fill = "slategray1", alpha = 0.01) +
geom_density() +
facet_wrap(~factor(gene, levels = c("EGFR", "MET","NONTARGETING")), scales = "free_y", ncol = 1) +
theme(legend.position = "none", strip.text.x=element_text(size=18),
axis.text.x = element_text(size=10), axis.text.y = element_text(size=10),
axis.title.x = element_text(size=18), axis.title.y = element_text(size=18)) +
scale_fill_manual(values = c("EGFR" = "firebrick3", "MET" = "brown4", "TGFBR2" = "navy", "NONTARGETING" = "dimgrey")) +
monocle:::monocle_theme_opts() +
xlab("Pseudospace") +
ggsave("Density_of_EGFR_MET_NTC_across_spontaneous_EMT.png", width = 5, height = 10)
# Upper pseudospace bound of TGFB region 4; the shaded band runs from 0 to it.
region_4_max_pseudospace_value <- max(pData(cds.aligned.list[["TGFB"]])[pData(cds.aligned.list[["TGFB"]])$region == "4",]$Pseudotime)
# Density of TGFBR1/TGFBR2 knockout cells vs NTC cells along pseudospace.
# NOTE(review): as above, geom_rect is drawn once per data row, and chaining
# ggsave() with `+` relies on deprecated ggplot2 behavior.
ggplot(pData(cds.aligned.list[["TGFB"]])[pData(cds.aligned.list[["TGFB"]])$gene %in% c("NONTARGETING", "TGFBR1", "TGFBR2"),],
aes(x = Pseudotime, fill = gene)) +
geom_rect(xmin = 0, xmax = region_4_max_pseudospace_value, ymin = 0, ymax = Inf,
fill = "slategray1", alpha = 0.01) +
geom_density() +
facet_wrap(~factor(gene, levels = c("TGFBR2", "TGFBR1", "NONTARGETING")), scales = "free_y", ncol = 1) +
theme(legend.position = "none", strip.text.x=element_text(size=18),
axis.text.x = element_text(size=10), axis.text.y = element_text(size=10),
axis.title.x = element_text(size=18), axis.title.y = element_text(size=18)) +
scale_fill_manual(values = c("TGFBR2" = "firebrick3", "TGFBR1" = "brown4", "ITGAV" = "navy", "NONTARGETING" = "dimgrey")) +
monocle:::monocle_theme_opts() +
xlab("Pseudospace") +
ggsave("Density_of_TGFBR1_TGFBR2_NTC_across_TGFB_driven_EMT.png", width = 5, height = 10)
# For supplemental figures:
# Determine the fraction of CDH1 single positive, CDH1/VIM double positive and
# VIM single positive cells upon confluence dependent EMT.
mock_CDH1_VIM_cds_subset <- cds.aligned.list[["Mock"]][fData(cds.aligned.list[["Mock"]])$gene_short_name %in% c("CDH1","VIM"),
                                                       !is.na(pData(cds.aligned.list[["Mock"]])$region)]
mock_CDH1_VIM_cds_exprs <- Biobase::exprs(mock_CDH1_VIM_cds_subset)
# Size-factor normalize so expression cutoffs are comparable across cells.
mock_CDH1_VIM_cds_exprs <- Matrix::t(Matrix::t(mock_CDH1_VIM_cds_exprs)/sizeFactors(mock_CDH1_VIM_cds_subset))
tgfb_CDH1_VIM_cds_subset <- cds.aligned.list[["TGFB"]][fData(cds.aligned.list[["TGFB"]])$gene_short_name %in% c("CDH1","VIM"),
                                                       !is.na(pData(cds.aligned.list[["TGFB"]])$region)]
tgfb_CDH1_VIM_cds_exprs <- Biobase::exprs(tgfb_CDH1_VIM_cds_subset)
tgfb_CDH1_VIM_cds_exprs <- Matrix::t(Matrix::t(tgfb_CDH1_VIM_cds_exprs)/sizeFactors(tgfb_CDH1_VIM_cds_subset))
# Resolve matrix rows by gene symbol rather than hard-coded positions (the
# original assumed row 1 = VIM and row 2 = CDH1, which silently breaks if the
# feature order ever changes).
mock_VIM_row <- which(fData(mock_CDH1_VIM_cds_subset)$gene_short_name == "VIM")
mock_CDH1_row <- which(fData(mock_CDH1_VIM_cds_subset)$gene_short_name == "CDH1")
tgfb_VIM_row <- which(fData(tgfb_CDH1_VIM_cds_subset)$gene_short_name == "VIM")
tgfb_CDH1_row <- which(fData(tgfb_CDH1_VIM_cds_subset)$gene_short_name == "CDH1")
# Cutoffs are mean normalized expression in Mock cells; the same cutoffs are
# applied to TGFB cells so both conditions share one classification scale.
CDH1_expression_cutoff <- mean(mock_CDH1_VIM_cds_exprs[mock_CDH1_row,])
VIM_expression_cutoff <- mean(mock_CDH1_VIM_cds_exprs[mock_VIM_row,])
CDH1_expression_cutoff
VIM_expression_cutoff
# Partition cells into double positive, CDH1-only, and VIM-only sets
# (single-positive sets explicitly exclude double positives).
mock_CDH1_VIM_double_positive_cells <- colnames(mock_CDH1_VIM_cds_exprs[,mock_CDH1_VIM_cds_exprs[mock_VIM_row,] > VIM_expression_cutoff &
                                                                          mock_CDH1_VIM_cds_exprs[mock_CDH1_row,] > CDH1_expression_cutoff])
mock_CDH1_positive_cells <- colnames(mock_CDH1_VIM_cds_exprs[,mock_CDH1_VIM_cds_exprs[mock_CDH1_row,] > CDH1_expression_cutoff &
                                                               !(colnames(mock_CDH1_VIM_cds_exprs) %in% mock_CDH1_VIM_double_positive_cells)])
mock_VIM_positive_cells <- colnames(mock_CDH1_VIM_cds_exprs[,mock_CDH1_VIM_cds_exprs[mock_VIM_row,] > VIM_expression_cutoff &
                                                              !(colnames(mock_CDH1_VIM_cds_exprs) %in% mock_CDH1_VIM_double_positive_cells)])
tgfb_CDH1_VIM_double_positive_cells <- colnames(tgfb_CDH1_VIM_cds_exprs[,tgfb_CDH1_VIM_cds_exprs[tgfb_VIM_row,] > VIM_expression_cutoff &
                                                                          tgfb_CDH1_VIM_cds_exprs[tgfb_CDH1_row,] > CDH1_expression_cutoff])
tgfb_CDH1_positive_cells <- colnames(tgfb_CDH1_VIM_cds_exprs[,tgfb_CDH1_VIM_cds_exprs[tgfb_CDH1_row,] > CDH1_expression_cutoff &
                                                               !(colnames(tgfb_CDH1_VIM_cds_exprs) %in% tgfb_CDH1_VIM_double_positive_cells)])
tgfb_VIM_positive_cells <- colnames(tgfb_CDH1_VIM_cds_exprs[,tgfb_CDH1_VIM_cds_exprs[tgfb_VIM_row,] > VIM_expression_cutoff &
                                                              !(colnames(tgfb_CDH1_VIM_cds_exprs) %in% tgfb_CDH1_VIM_double_positive_cells)])
# Label each cell (with a region assignment) by its marker class: CDH1/VIM
# double positive, CDH1 single positive, VIM single positive, or NA (neither).
# NOTE(review): assumes pData has a `cell` column matching the expression
# matrix column names -- confirm against the upstream pData construction.
mock_pData <- pData(cds.aligned.list[["Mock"]][,!is.na(pData(cds.aligned.list[["Mock"]])$region)])
mock_pData$positive_marker <- sapply(mock_pData$cell, function(x){
if(x %in% mock_CDH1_VIM_double_positive_cells){
return("CDH1/VIM double positive")
}
if(x %in% mock_CDH1_positive_cells){
return("CDH1 single positive")
}
if(x %in% mock_VIM_positive_cells){
return("VIM single positive")
}
return(NA)
})
tgfb_pData <- pData(cds.aligned.list[["TGFB"]][,!is.na(pData(cds.aligned.list[["TGFB"]])$region)])
tgfb_pData$positive_marker <- sapply(tgfb_pData$cell, function(x){
if(x %in% tgfb_CDH1_VIM_double_positive_cells){
return("CDH1/VIM double positive")
}
if(x %in% tgfb_CDH1_positive_cells){
return("CDH1 single positive")
}
if(x %in% tgfb_VIM_positive_cells){
return("VIM single positive")
}
return(NA)
})
# Pseudospace density of each Mock marker population (rows of pData selected
# by the cell-name vectors computed above).
# NOTE(review): chaining ggsave() with `+` relies on deprecated ggplot2
# behavior; prefer assigning the plot and calling ggsave(plot = ...).
ggplot(pData(cds.aligned.list[["Mock"]])[mock_CDH1_positive_cells,], aes(x = Pseudotime)) +
geom_density(fill = "gray70", color = "black") +
theme_classic() +
monocle:::monocle_theme_opts() +
xlab("Pseudospace") +
ylab("Cell density") +
ggtitle("CDH1 single positive cells") +
theme(legend.position = "none",
axis.title.y = element_text(size = 14),axis.title.x = element_text(size = 24),
axis.text.y = element_text(size = 14), plot.title = element_text(size = 24, hjust = 0.5)) +
ggsave("Mock_loss_of_function_cell_density_accross_pseudospace_CDH1_positive_cells_geom_density.png", height = 3, width = 6)
# CDH1/VIM double positive population.
ggplot(pData(cds.aligned.list[["Mock"]])[mock_CDH1_VIM_double_positive_cells,], aes(x = Pseudotime)) +
geom_density(fill = "gray70", color = "black") +
theme_classic() +
monocle:::monocle_theme_opts() +
xlab("Pseudospace") +
ylab("Cell density") +
ggtitle("CDH1/VIM double positive cells") +
theme(legend.position = "none",
axis.title.y = element_text(size = 14),axis.title.x = element_text(size = 24),
axis.text.y = element_text(size = 14), plot.title = element_text(size = 24, hjust = 0.5)) +
ggsave("Mock_loss_of_function_cell_density_accross_pseudospace_CDH1_VIM_double_positive_cells_geom_density.png", height = 3, width = 6)
# VIM single positive population.
ggplot(pData(cds.aligned.list[["Mock"]])[mock_VIM_positive_cells,], aes(x = Pseudotime)) +
geom_density(fill = "gray70", color = "black") +
theme_classic() +
monocle:::monocle_theme_opts() +
xlab("Pseudospace") +
ylab("Cell density") +
ggtitle("VIM single positive cells") +
theme(legend.position = "none",
axis.title.y = element_text(size = 14), axis.title.x = element_text(size = 24),
axis.text.y = element_text(size = 14), plot.title = element_text(size = 24, hjust = 0.5)) +
ggsave("Mock_loss_of_function_cell_density_accross_pseudospace_VIM_positive_cells_geom_density.png", height = 3, width = 6)
# Pseudospace density of each TGFB marker population (same layout as the Mock
# plots above).
# NOTE(review): chaining ggsave() with `+` relies on deprecated ggplot2
# behavior; prefer assigning the plot and calling ggsave(plot = ...).
ggplot(pData(cds.aligned.list[["TGFB"]])[tgfb_CDH1_positive_cells,], aes(x = Pseudotime)) +
geom_density(fill = "gray70", color = "black") +
theme_classic() +
monocle:::monocle_theme_opts() +
xlab("Pseudospace") +
ylab("Cell density") +
ggtitle("CDH1 single positive cells") +
theme(legend.position = "none",
axis.title.y = element_text(size = 14),axis.title.x = element_text(size = 24),
axis.text.y = element_text(size = 14), plot.title = element_text(size = 24, hjust = 0.5)) +
ggsave("TGFB_loss_of_function_cell_density_accross_pseudospace_CDH1_positive_cells_geom_density.png", height = 3, width = 6)
# CDH1/VIM double positive population.
ggplot(pData(cds.aligned.list[["TGFB"]])[tgfb_CDH1_VIM_double_positive_cells,], aes(x = Pseudotime)) +
geom_density(fill = "gray70", color = "black") +
theme_classic() +
monocle:::monocle_theme_opts() +
xlab("Pseudospace") +
ylab("Cell density") +
ggtitle("CDH1/VIM double positive cells") +
theme(legend.position = "none",
axis.title.y = element_text(size = 14),axis.title.x = element_text(size = 24),
axis.text.y = element_text(size = 14), plot.title = element_text(size = 24, hjust = 0.5)) +
ggsave("TGFB_loss_of_function_cell_density_accross_pseudospace_CDH1_VIM_double_positive_cells_geom_density.png", height = 3, width = 6)
# VIM single positive population.
ggplot(pData(cds.aligned.list[["TGFB"]])[tgfb_VIM_positive_cells,], aes(x = Pseudotime)) +
geom_density(fill = "gray70", color = "black") +
theme_classic() +
monocle:::monocle_theme_opts() +
xlab("Pseudospace") +
ylab("Cell density") +
ggtitle("VIM single positive cells") +
theme(legend.position = "none",
axis.title.y = element_text(size = 14), axis.title.x = element_text(size = 24),
axis.text.y = element_text(size = 14), plot.title = element_text(size = 24, hjust = 0.5)) +
ggsave("TGFB_loss_of_function_cell_density_accross_pseudospace_VIM_positive_cells_geom_density.png", height = 3, width = 6)
# Identify differentially expressed genes across Pseudospace for every ko vs NTC combination
# For each target: subset to that target's cells plus NTC cells, re-estimate
# dispersions on the subset, then test whether adding a `gene` (target vs NTC)
# term improves on a spline-only (df=3) pseudospace model.
# NOTE(review): myDifferentialGeneTest is presumably defined in
# Pseudospace_support_functions.R -- confirm.
mock_target_NTC_diff_test.list <- list()
for(target in analysis.targets[["Mock"]]){
message("Obtaining differentially expressed genes between ",target, " and NTC")
subset_cds <- cds.aligned.list[["Mock"]][,pData(cds.aligned.list[["Mock"]])$gene == target |
pData(cds.aligned.list[["Mock"]])$gene == "NONTARGETING"]
subset_cds <- estimateDispersions(subset_cds)
mock_target_NTC_diff_test.list[[target]] <- myDifferentialGeneTest(subset_cds[expressed_genes.list[["Mock"]]],
fullModelFormulaStr = "~sm.ns(Pseudotime, df=3)+gene",
reducedModelFormulaStr = "~sm.ns(Pseudotime, df=3)",
cores = 1)
rm(subset_cds)
message("Done")
}
# Same per-target testing loop for the TGFB condition.
tgfb_target_NTC_diff_test.list <- list()
for(target in analysis.targets[["TGFB"]]){
message("Obtaining differentially expressed genes between ",target, " and NTC")
subset_cds <- cds.aligned.list[["TGFB"]][,pData(cds.aligned.list[["TGFB"]])$gene == target |
pData(cds.aligned.list[["TGFB"]])$gene == "NONTARGETING"]
subset_cds <- estimateDispersions(subset_cds)
tgfb_target_NTC_diff_test.list[[target]] <- myDifferentialGeneTest(subset_cds[expressed_genes.list[["TGFB"]]],
fullModelFormulaStr = "~sm.ns(Pseudotime, df=3)+gene",
reducedModelFormulaStr = "~sm.ns(Pseudotime, df=3)",
cores = 1)
rm(subset_cds)
message("Done")
}
# Label each per-target test table with its target gene; rep(target, nrow(...))
# is clearer than the original rep(target, length(row.names(...))) and still
# handles zero-row tables.
for(target in names(mock_target_NTC_diff_test.list)){
  mock_target_NTC_diff_test.list[[target]]$target <- rep(target, nrow(mock_target_NTC_diff_test.list[[target]]))
}
for(target in names(tgfb_target_NTC_diff_test.list)){
  tgfb_target_NTC_diff_test.list[[target]]$target <- rep(target, nrow(tgfb_target_NTC_diff_test.list[[target]]))
}
# Pool all per-target results and re-adjust p-values (BH) across every test.
mock_target_NTC_diff_test <- do.call("rbind", mock_target_NTC_diff_test.list)
tgfb_target_NTC_diff_test <- do.call("rbind", tgfb_target_NTC_diff_test.list)
mock_target_NTC_diff_test$qval <- p.adjust(mock_target_NTC_diff_test$pval, method = "BH")
tgfb_target_NTC_diff_test$qval <- p.adjust(tgfb_target_NTC_diff_test$pval, method = "BH")
# Genes significant (q < 0.05) against NTC in at least one target comparison.
mock_target_NTC_sig_genes <- unique(subset(mock_target_NTC_diff_test, qval < 0.05)$id)
length(mock_target_NTC_sig_genes)
tgfb_target_NTC_sig_genes <- unique(subset(tgfb_target_NTC_diff_test, qval < 0.05)$id)
length(tgfb_target_NTC_sig_genes)
# Restrict the pooled tables to those significant genes.
mock_target_NTC_dif_test_sig_subset <- mock_target_NTC_diff_test[mock_target_NTC_diff_test$id %in% mock_target_NTC_sig_genes,]
tgfb_target_NTC_dif_test_sig_subset <- tgfb_target_NTC_diff_test[tgfb_target_NTC_diff_test$id %in% tgfb_target_NTC_sig_genes,]
# Summaries for plotting: DEG counts (q < 0.05) per target and cell counts
# per target, merged into one table per condition.
mock_diff_test_summary <- mock_target_NTC_diff_test %>% filter(qval < 0.05) %>% group_by(target) %>%
summarize(n = n()) %>% arrange(desc(n))
colnames(mock_diff_test_summary) <- c("target","total_degs")
mock_cell_number_summary <- pData(cds.aligned.list[["Mock"]]) %>% filter(gene != "NONTARGETING") %>%
group_by(gene) %>% summarize(n = n()) %>% arrange(desc(n))
tgfb_diff_test_summary <- tgfb_target_NTC_diff_test %>% filter(qval < 0.05) %>% group_by(target) %>%
summarize(n = n()) %>% arrange(desc(n))
colnames(tgfb_diff_test_summary) <- c("target","total_degs")
tgfb_cell_number_summary <- pData(cds.aligned.list[["TGFB"]]) %>% filter(gene != "NONTARGETING") %>%
group_by(gene) %>% summarize(n = n()) %>% arrange(desc(n))
mock_summary_df <- merge(mock_diff_test_summary, mock_cell_number_summary, by.x = "target", by.y = "gene")
tgfb_summary_df <- merge(tgfb_diff_test_summary, tgfb_cell_number_summary, by.x = "target", by.y = "gene")
# Plot number of DEGs for every KO vs number of KO cells in the experiment.
# A small helper removes the three-fold copy-paste and saves each plot
# explicitly (chaining ggsave() with `+` relies on deprecated ggplot2 behavior).
plot_deg_vs_cells <- function(summary_df, file) {
  p <- ggplot(summary_df, aes(x = n, y = total_degs, label = target)) +
    geom_point() +
    monocle:::monocle_theme_opts() +
    theme(text = element_text(size = 24)) +
    xlab("Total number of target cells") +
    ylab("Total number of DEGs")
  ggsave(file, plot = p, width = 6, height = 6)
  p
}
plot_deg_vs_cells(mock_summary_df, "Mock_CROPseq_TargetvsNTC_degs.png")
plot_deg_vs_cells(tgfb_summary_df, "TGFB_CROPseq_TargetvsNTC_degs.png")
# Repeat for TGFB with the dominant TGF-B receptor targets removed.
plot_deg_vs_cells(tgfb_summary_df[!(tgfb_summary_df$target %in% c("TGFBR1","TGFBR2")),],
                  "TGFB_CROPseq_TargetvsNTC_degs_woTGFRBs.png")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ibdData.R
\name{het.freq}
\alias{het.freq}
\title{Heterozygosity}
\usage{
het.freq(x, dim = c(1, 2))
}
\arguments{
\item{x}{ibdData object}
\item{dim}{integer for dimension}
}
\description{
Heterozygosity
}
|
/man/het.freq.Rd
|
no_license
|
QTCAT/AMPRIL
|
R
| false
| true
| 287
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ibdData.R
\name{het.freq}
\alias{het.freq}
\title{Heterozygosity}
\usage{
het.freq(x, dim = c(1, 2))
}
\arguments{
\item{x}{ibdData object}
\item{dim}{integer for dimension}
}
\description{
Heterozygosity
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scope.R
\name{scope_dir}
\alias{scope_dir}
\title{Scope to Directory}
\usage{
scope_dir(directory)
}
\arguments{
\item{directory}{The working directory to use.}
}
\description{
Sets the working directory, and resets it at the end
of the active scope.
}
\seealso{
Other scope-related functions: \code{\link{defer}},
\code{\link{scope_env_vars}}, \code{\link{scope_locale}},
\code{\link{scope_options}}, \code{\link{scope_path}}
}
|
/man/scope_dir.Rd
|
no_license
|
kevinushey/later
|
R
| false
| true
| 512
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scope.R
\name{scope_dir}
\alias{scope_dir}
\title{Scope to Directory}
\usage{
scope_dir(directory)
}
\arguments{
\item{directory}{The working directory to use.}
}
\description{
Sets the working directory, and resets it at the end
of the active scope.
}
\seealso{
Other scope-related functions: \code{\link{defer}},
\code{\link{scope_env_vars}}, \code{\link{scope_locale}},
\code{\link{scope_options}}, \code{\link{scope_path}}
}
|
# shiny UI for the "Two Dice Roll Distribution Viewer" app.
# Output IDs (instruction, diceRollMean, ...) are rendered by the matching
# server.R; input IDs (numberOfGames, diceRollsPerGame, rethrowDice) feed it.
library(shiny)
shinyUI(fluidPage(
  # Application title
  titlePanel("Two Dice Roll Distribution Viewer"),
  # Sidebar with a slider input for the number of bins
  sidebarLayout(
    sidebarPanel(
      htmlOutput("instruction"),
      sliderInput("numberOfGames",
                  "Number of Games:",
                  min = 10,
                  max = 1000,
                  value = 50),
      sliderInput("diceRollsPerGame",
                  "Dice roll per game",
                  min = 20,
                  max = 100,
                  value = 50),
      htmlOutput("rethrowInfo"),
      # Re-runs the simulation with a fresh random seed.
      actionButton(label="Rethrow", inputId="rethrowDice")
    ),
    # Show a plot of the generated distribution
    mainPanel(
      htmlOutput("introduction"),
      plotOutput("diceRollMean"),
      htmlOutput("summary1"),
      plotOutput("diceRollData"),
      htmlOutput("summary2")
    )
  )
))
|
/ui.R
|
no_license
|
robertpmatson/DevelopingDataProducts
|
R
| false
| false
| 912
|
r
|
# shiny UI for the "Two Dice Roll Distribution Viewer" app.
# Output IDs (instruction, diceRollMean, ...) are rendered by the matching
# server.R; input IDs (numberOfGames, diceRollsPerGame, rethrowDice) feed it.
library(shiny)
shinyUI(fluidPage(
  # Application title
  titlePanel("Two Dice Roll Distribution Viewer"),
  # Sidebar with a slider input for the number of bins
  sidebarLayout(
    sidebarPanel(
      htmlOutput("instruction"),
      sliderInput("numberOfGames",
                  "Number of Games:",
                  min = 10,
                  max = 1000,
                  value = 50),
      sliderInput("diceRollsPerGame",
                  "Dice roll per game",
                  min = 20,
                  max = 100,
                  value = 50),
      htmlOutput("rethrowInfo"),
      # Re-runs the simulation with a fresh random seed.
      actionButton(label="Rethrow", inputId="rethrowDice")
    ),
    # Show a plot of the generated distribution
    mainPanel(
      htmlOutput("introduction"),
      plotOutput("diceRollMean"),
      htmlOutput("summary1"),
      plotOutput("diceRollData"),
      htmlOutput("summary2")
    )
  )
))
|
#############################################
###############Whole DataSet#################
#############################################
# Mine pairwise association rules (feature => drug response) from the master
# categorical matrix, then derive a permutation-based lift threshold per
# (support, confidence) stratum and keep only rules exceeding it (FDR ~ 0.05).
library(arules)

load("~/Projects/MBA/new_data/RData/MASTER_MATRIX_CATEGORICAL.RData")  # provides m.final
load("~/Projects/MBA/new_data/RData/MASTER_MATRIX_PERMUTED.RData")     # provides m.final.perm

b.t <- as(as.data.frame(m.final), "transactions")
b.t.perm <- as(as.data.frame(m.final.perm), "transactions")

# RHS is restricted to drug-response items (IC50 columns, Sensitive/Resistant).
drug.idx <- grep("IC50", labels(m.final)[[2]])
drugs <- labels(m.final)[[2]][drug.idx]
drugs.total <- c(paste(drugs, "Sensitive", sep = "="), paste(drugs, "Resistant", sep = "="))

rules <- apriori(b.t,
                 parameter = list(support = (3 / length(b.t)), confidence = 3 / length(b.t),
                                  minlen = 2, maxlen = 2),
                 appearance = list(rhs = drugs.total, default = "lhs"))

# One row per unique (support, confidence) pair; the lift column is reset and
# will hold the permutation-derived lift threshold for that stratum.
temp.quality <- quality(rules)
temp.quality[, 3] <- NA
temp.quality <- temp.quality[!duplicated(temp.quality), ]
gc()

for (i in seq_len(nrow(temp.quality))) {
  print(i)
  rules.perm <- apriori(b.t.perm,
                        parameter = list(support = temp.quality$support[i],
                                         confidence = temp.quality$confidence[i],
                                         minlen = 2, maxlen = 2))
  if (length(rules.perm) == 0) next
  # Approximate the 95th percentile of the null (permuted) log-lift
  # distribution from a histogram, then back-transform.
  temp <- hist(log(quality(rules.perm)[, 3]), breaks = 80, plot = FALSE)
  lift.estim.0.05 <- exp(temp$breaks[which(cumsum(temp$counts) > sum(temp$counts) * 0.95)[1]])
  temp.quality$lift[i] <- lift.estim.0.05
  rm(rules.perm)
  gc()
}

# Thresholds <= 1 are uninformative; replace them (and strata with no permuted
# rules) with the smallest informative threshold observed.
temp.quality$lift[temp.quality$lift <= 1] <- NA
temp.quality$lift[is.na(temp.quality$lift)] <- min(temp.quality$lift, na.rm = TRUE)
gc()

# Flag each mined rule pass/fail against its stratum's lift threshold.
# (Original used sapply() with an assignment inside the anonymous function;
# a vectorized ifelse() yields the same character vector.)
status <- rep(NA, length(rules))
quality.rules <- quality(rules)
for (i in seq_len(nrow(temp.quality))) {
  print(i)
  temp.idx <- which(quality.rules$support == temp.quality$support[i] &
                      quality.rules$confidence == temp.quality$confidence[i])
  status[temp.idx] <- ifelse(quality.rules$lift[temp.idx] > temp.quality$lift[i], "pass", "fail")
}
gc()

rules.sig <- rules[which(status == "pass")]
save(rules.sig, file = "~/Projects/MBA/new_data/RData/RULES_SIGNIFICANT_Sup3_Conf3in1001_DYNAMIC_THRESHOLD_FDR0.05.RData")
rm(rules)
gc()
|
/scripts/script_dynamic_thresholding.R
|
no_license
|
kvougas/Vougas_DeepLearning
|
R
| false
| false
| 1,975
|
r
|
#############################################
###############Whole DataSet#################
#############################################
# Mine pairwise association rules (feature => drug response) from the master
# categorical matrix, then derive a permutation-based lift threshold per
# (support, confidence) stratum and keep only rules exceeding it (FDR ~ 0.05).
library(arules)

load("~/Projects/MBA/new_data/RData/MASTER_MATRIX_CATEGORICAL.RData")  # provides m.final
load("~/Projects/MBA/new_data/RData/MASTER_MATRIX_PERMUTED.RData")     # provides m.final.perm

b.t <- as(as.data.frame(m.final), "transactions")
b.t.perm <- as(as.data.frame(m.final.perm), "transactions")

# RHS is restricted to drug-response items (IC50 columns, Sensitive/Resistant).
drug.idx <- grep("IC50", labels(m.final)[[2]])
drugs <- labels(m.final)[[2]][drug.idx]
drugs.total <- c(paste(drugs, "Sensitive", sep = "="), paste(drugs, "Resistant", sep = "="))

rules <- apriori(b.t,
                 parameter = list(support = (3 / length(b.t)), confidence = 3 / length(b.t),
                                  minlen = 2, maxlen = 2),
                 appearance = list(rhs = drugs.total, default = "lhs"))

# One row per unique (support, confidence) pair; the lift column is reset and
# will hold the permutation-derived lift threshold for that stratum.
temp.quality <- quality(rules)
temp.quality[, 3] <- NA
temp.quality <- temp.quality[!duplicated(temp.quality), ]
gc()

for (i in seq_len(nrow(temp.quality))) {
  print(i)
  rules.perm <- apriori(b.t.perm,
                        parameter = list(support = temp.quality$support[i],
                                         confidence = temp.quality$confidence[i],
                                         minlen = 2, maxlen = 2))
  if (length(rules.perm) == 0) next
  # Approximate the 95th percentile of the null (permuted) log-lift
  # distribution from a histogram, then back-transform.
  temp <- hist(log(quality(rules.perm)[, 3]), breaks = 80, plot = FALSE)
  lift.estim.0.05 <- exp(temp$breaks[which(cumsum(temp$counts) > sum(temp$counts) * 0.95)[1]])
  temp.quality$lift[i] <- lift.estim.0.05
  rm(rules.perm)
  gc()
}

# Thresholds <= 1 are uninformative; replace them (and strata with no permuted
# rules) with the smallest informative threshold observed.
temp.quality$lift[temp.quality$lift <= 1] <- NA
temp.quality$lift[is.na(temp.quality$lift)] <- min(temp.quality$lift, na.rm = TRUE)
gc()

# Flag each mined rule pass/fail against its stratum's lift threshold.
# (Original used sapply() with an assignment inside the anonymous function;
# a vectorized ifelse() yields the same character vector.)
status <- rep(NA, length(rules))
quality.rules <- quality(rules)
for (i in seq_len(nrow(temp.quality))) {
  print(i)
  temp.idx <- which(quality.rules$support == temp.quality$support[i] &
                      quality.rules$confidence == temp.quality$confidence[i])
  status[temp.idx] <- ifelse(quality.rules$lift[temp.idx] > temp.quality$lift[i], "pass", "fail")
}
gc()

rules.sig <- rules[which(status == "pass")]
save(rules.sig, file = "~/Projects/MBA/new_data/RData/RULES_SIGNIFICANT_Sup3_Conf3in1001_DYNAMIC_THRESHOLD_FDR0.05.RData")
rm(rules)
gc()
|
# Convert every dBase (.dbf) file in the target directory to a CSV of the
# same base name.
library(foreign)

setwd("/Volumes/KINGSTON/AVA01/Untitled Folder/")

# Original used the glob "*.dbf" as a regex and an unanchored sub(".dbf", ...):
# in a regex '.' matches any character and the pattern is not anchored, so
# names like "mydbf.txt" or "a.dbf.bak" could match/strip incorrectly.
# Anchor at end of name and escape the dot.
allFiles <- list.files(pattern = "\\.dbf$", ignore.case = TRUE)
for (i in allFiles) {
  name <- sub("\\.dbf$", "", i, ignore.case = TRUE)
  fileName <- paste0(name, ".csv")
  write.csv(x = read.dbf(i), file = fileName)
}
|
/dbaseReader.R
|
no_license
|
Gelinator/FinancialDataMart
|
R
| false
| false
| 328
|
r
|
# Convert every dBase (.dbf) file in the target directory to a CSV of the
# same base name.
library(foreign)

setwd("/Volumes/KINGSTON/AVA01/Untitled Folder/")

# Original used the glob "*.dbf" as a regex and an unanchored sub(".dbf", ...):
# in a regex '.' matches any character and the pattern is not anchored, so
# names like "mydbf.txt" or "a.dbf.bak" could match/strip incorrectly.
# Anchor at end of name and escape the dot.
allFiles <- list.files(pattern = "\\.dbf$", ignore.case = TRUE)
for (i in allFiles) {
  name <- sub("\\.dbf$", "", i, ignore.case = TRUE)
  fileName <- paste0(name, ".csv")
  write.csv(x = read.dbf(i), file = fileName)
}
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 4036
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 4036
c
c Input Parameter (command line, file):
c input filename QBFLIB/Biere/Counter/cnt15e.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 1519
c no.of clauses 4036
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 4036
c
c QBFLIB/Biere/Counter/cnt15e.qdimacs 1519 4036 E1 [] 0 15 1504 4036 NONE
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Biere/Counter/cnt15e/cnt15e.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false
| false
| 601
|
r
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 4036
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 4036
c
c Input Parameter (command line, file):
c input filename QBFLIB/Biere/Counter/cnt15e.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 1519
c no.of clauses 4036
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 4036
c
c QBFLIB/Biere/Counter/cnt15e.qdimacs 1519 4036 E1 [] 0 15 1504 4036 NONE
|
# @useDynLib VALUE
# NULL
#' @title Compute p-value of the two sample Kolmogorov-Smirnov test
#' @description Function to compute the p-value of the K-S test, optionally performing a correction by the effective sample size
#' @template templateMeasureParams
#' @param corrected Logical flag. Should the p-value be corrected by the effective sample size?. Default to \code{TRUE}
#' @param dates Ignored. Introduced for compatibility with the rest of measures
#' @return A float number corresponding to the p-value of the K-S test
#' @seealso The atomic function \code{\link{measure.ks}}, returning the KS statistic
#' @details The two-sample Kolmogorov-Smirnov test has the null hypothesis (H0) that x and y were drawn from the same continuous distribution.
#' Therefore, the null hypothesis can be rejected only when p-values obtained are \dQuote{small} (i.e. < 0.05 with ci=0.95). Larger values will indicate
#' the H0 can't be rejected.
#' Since the daily time series often used are serially correlated, this function calculates their effective sample size before estimating the p value of the
#' KS statistic in order to avoid the inflation of type I error (i.e. erroneous rejection of the H0). Under the assumption that the underlying time
#' series follow a first-order autoregressive process (Wilks 2006), the effective sample size, neff is defined as follows: neff=n(1-p1)/(1+p1), where p1 is the
#' lag-1 autocorrelation coefficient.
#'
#' @author J. Bedia, S. Brands
#' @keywords internal
#' @references Wilks, D. (2006) Statistical methods in the atmospheric sciences, 2nd ed. Elsevier, Amsterdam
#' @import stats
#' @export
measure.ks.pval <- function(indexObs = NULL, indexPrd = NULL, obs, prd, dates = NULL, corrected = TRUE) {
    # Drop missing values from both series before testing.
    x <- prd[!is.na(prd)]
    y <- obs[!is.na(obs)]
    KSstatistic <- suppressWarnings(valueMeasure1D(obs = y, prd = x, measure.codes = "ts.ks"))
    if (corrected) {
        # Effective sample sizes under an AR(1) assumption: neff = n(1-p1)/(1+p1),
        # with p1 the lag-1 autocorrelation (index code "AC1").
        x.acf1 <- valueIndex1D(ts = x, index.codes = "AC1")
        n.x = unname(length(x)*((1 - x.acf1)/(1 + x.acf1)))
        y.acf1 <- valueIndex1D(ts = y, index.codes = "AC1")
        n.y = unname(length(y)*((1 - y.acf1)/(1 + y.acf1)))
        # NOTE(review): relies on the unexported internal stats:::C_pSmirnov2x;
        # fragile across R versions — confirm availability on the target R.
        pval <- 1 - .Call(stats:::C_pSmirnov2x, KSstatistic, n.x, n.y)
    } else {
        # Uncorrected: standard two-sample KS test p-value.
        pval <- unname(ks.test(y, x)$p.value)
    }
    return(pval)
}
|
/R/measure.ks.pval.R
|
no_license
|
SantanderMetGroup/VALUE
|
R
| false
| false
| 2,321
|
r
|
# @useDynLib VALUE
# NULL
#' @title Compute p-value of the two sample Kolmogorov-Smirnov test
#' @description Function to compute the p-value of the K-S test, optionally performing a correction by the effective sample size
#' @template templateMeasureParams
#' @param corrected Logical flag. Should the p-value be corrected by the effective sample size?. Default to \code{TRUE}
#' @param dates Ignored. Introduced for compatibility with the rest of measures
#' @return A float number corresponding to the p-value of the K-S test
#' @seealso The atomic function \code{\link{measure.ks}}, returning the KS statistic
#' @details The two-sample Kolmogorov-Smirnov test has the null hypothesis (H0) that x and y were drawn from the same continuous distribution.
#' Therefore, the null hypothesis can be rejected only when p-values obtained are \dQuote{small} (i.e. < 0.05 with ci=0.95). Larger values will indicate
#' the H0 can't be rejected.
#' Since the daily time series often used are serially correlated, this function calculates their effective sample size before estimating the p value of the
#' KS statistic in order to avoid the inflation of type I error (i.e. erroneous rejection of the H0). Under the assumption that the underlying time
#' series follow a first-order autoregressive process (Wilks 2006), the effective sample size, neff is defined as follows: neff=n(1-p1)/(1+p1), where p1 is the
#' lag-1 autocorrelation coefficient.
#'
#' @author J. Bedia, S. Brands
#' @keywords internal
#' @references Wilks, D. (2006) Statistical methods in the atmospheric sciences, 2nd ed. Elsevier, Amsterdam
#' @import stats
#' @export
measure.ks.pval <- function(indexObs = NULL, indexPrd = NULL, obs, prd, dates = NULL, corrected = TRUE) {
    # Drop missing values from both series before testing.
    x <- prd[!is.na(prd)]
    y <- obs[!is.na(obs)]
    KSstatistic <- suppressWarnings(valueMeasure1D(obs = y, prd = x, measure.codes = "ts.ks"))
    if (corrected) {
        # Effective sample sizes under an AR(1) assumption: neff = n(1-p1)/(1+p1),
        # with p1 the lag-1 autocorrelation (index code "AC1").
        x.acf1 <- valueIndex1D(ts = x, index.codes = "AC1")
        n.x = unname(length(x)*((1 - x.acf1)/(1 + x.acf1)))
        y.acf1 <- valueIndex1D(ts = y, index.codes = "AC1")
        n.y = unname(length(y)*((1 - y.acf1)/(1 + y.acf1)))
        # NOTE(review): relies on the unexported internal stats:::C_pSmirnov2x;
        # fragile across R versions — confirm availability on the target R.
        pval <- 1 - .Call(stats:::C_pSmirnov2x, KSstatistic, n.x, n.y)
    } else {
        # Uncorrected: standard two-sample KS test p-value.
        pval <- unname(ks.test(y, x)$p.value)
    }
    return(pval)
}
|
# Shiny app: malaria elimination strategy simulator for the GMS model.
library(deSolve)  # ode() solver used by runGMS()
library(shiny)
library(TSA)
library(Rcpp)
#library(ggplot2)
# Compile the C++ model core; exposes modGMSrcpp(), called from runGMS().
sourceCpp("modGMS.cpp")
# UI: a tabset of parameter panels (baseline, interventions, MDA/MSAT trials,
# download/restore, help) above the model output plot. Input IDs here are read
# by parametersR()/scenario_iR() in the server.
ui <- fluidPage(
  tags$head(includeScript("google-analytics.js")),
  tabsetPanel(
    id="panels",
    # Baseline epidemiological settings (pre-2018).
    tabPanel(title = strong("Baseline"),
             column(3,
                    sliderInput(inputId="API", label = "baseline API", value = 10, min=1, max=100,step=0.5),
                    sliderInput(inputId="bh_max", label = "number of mosquito bites per human per night (peak season)", value = 20, min=0, max=80,step=1), #change range 0-80, Dan's data
                    sliderInput(inputId="eta", label = "% of all infections that are caught outside the village (forest)", value = 30, min=0, max=100,step=10),
                    sliderInput(inputId="covEDAT0", label = "baseline % of all clinical cases treated", value = 25, min=0, max=100)
             ),
             column(3,
                    sliderInput(inputId="covITN0", label = "baseline coverage of ITN (%) ", value = 70, min=0, max=90,step=.5),
                    sliderInput(inputId="effITN", label = "% of infections averted due to ownership of ITN ", value = 30, min=0, max=50),
                    sliderInput(inputId="covIRS0", label = "baseline coverage of IRS (%) ", value = 0, min=0, max=90,step=10),
                    sliderInput(inputId="effIRS", label = "% reduction in biting rate due to IRS ", value = 15, min=0, max=25,step=5)
             ),
             column(3,
                    sliderInput(inputId="muC", label = "imported clinical cases per 1000 population per year ", value = 1, min=0, max=10,step=1),
                    sliderInput(inputId="muA", label = "imported asymptomatic microscopically detectable carriers per 1000 population per year ", value = 1, min=0, max=100,step=1),
                    sliderInput(inputId="muU", label = "imported asymptomatic microscopically undetectable carriers per 1000 population per year ", value = 1, min=0, max=100,step=1)
             ),
             column(3,
                    sliderInput(inputId="percfail2018", label = "% of cases failing treatment in 2018 and before ", value = 5, min=0, max=100,step=5),
                    sliderInput(inputId="percfail2019", label = "% of cases failing treatment in 2019 ", value = 15, min=0, max=100,step=5),
                    sliderInput(inputId="percfail2020", label = "% of cases failing treatment in 2020 and after ", value = 30, min=0, max=100,step=5)
             )
    ),
    # Scale-up of currently available interventions (EDAT, LLIN, IRS).
    tabPanel(title = strong("Interventions currently available"),
             column(4,
                    wellPanel(
                      h3("Early Diagnosis and Treatment"),
                      checkboxInput(inputId="EDATon", label = "switch on scale up of EDAT ", value = FALSE),
                      checkboxInput(inputId="primon", label = "ACT+primaquine for EDAT and MDA ", value = FALSE), #under EDAT checkbox
                      sliderInput(inputId="EDATscale", label = "years to scale up EDAT ", value = 1, min=.25, max=3, step=.25),
                      sliderInput(inputId="covEDATi", label = "new % of all clinical cases treated", value = 70, min=0, max=100,step=5)
                    )),
             column(4,wellPanel(
               h3("Insecticide Treated Net (LLIN)"),
               checkboxInput(inputId="ITNon", label = "switch on scale up of LLIN", value = FALSE),
               sliderInput(inputId="ITNscale", label = "years to universal access to LLIN", value = 1, min=.25, max=3, step=.25),
               sliderInput(inputId="covITNi", label = "new bed-net use of LLIN (%)", value = 90, min=0, max=90,step=5)
             )),
             column(4,wellPanel(
               h3("Indoor Residual Spray"),
               checkboxInput(inputId="IRSon", label = "switch on scale up of IRS ", value = FALSE),
               sliderInput(inputId="IRSscale", label = "years to scale up IRS ", value = 1, min=.25, max=3, step=.25),
               sliderInput(inputId="covIRSi", label = "new coverage of IRS (%) ", value = 90, min=0, max=90,step=5)
             ))
    ),
    # Focal mass drug administration (+ optional vaccination) trial settings.
    tabPanel(title = strong("Interventions under trial: Focal MVDA (hotspot)"),
             column(3,
                    checkboxInput(inputId="MDAon", label = "switch on MDA", value = FALSE), #6
                    sliderInput(inputId="lossd", label = "days prophylaxis provided by the ACT", value = 30, min=15, max=30,step=1),
                    sliderInput(inputId="dm", label = "months to complete each round ", value = 6, min=1, max=24,step=0.5)
             ),
             column(3,
                    sliderInput(inputId="cmda_1", label = "effective population coverage of focal MDA in round 1 ", value = 50, min=0, max=100,step=10),
                    sliderInput(inputId="cmda_2", label = "effective population coverage of focal MDA in round 2 ", value = 50, min=0, max=100,step=10),
                    sliderInput(inputId="cmda_3", label = "effective population coverage of focal MDA in round 3 ", value = 50, min=0, max=100,step=10)
             ),
             column(3,
                    sliderInput(inputId="tm_1", label = "timing of 1st round [2018+ no. of month, 1 means Jan'2018, 13 means Jan'2019]", value = 9, min=1, max=36,step=1),
                    sliderInput(inputId="tm_2", label = "timing of 2nd round [2018+ no. of month]", value = 10, min=2, max=36,step=1),
                    sliderInput(inputId="tm_3", label = "timing of 3rd round [2018+ no. of month]", value = 11, min=3, max=36,step=1)
             ),
             column(3,
                    radioButtons(inputId="VACon", label = "With vaccination: ", choices = c("No"=0, "Yes"=1), selected = 0, inline=TRUE),
                    sliderInput(inputId="effv_1", label = "% protective efficacy of RTS,S with 1st dose", value = 75, min=0, max=100),
                    sliderInput(inputId="effv_2", label = "% protective efficacy of RTS,S with 2nd dose", value = 80, min=0, max=100),
                    sliderInput(inputId="effv_3", label = "% protective efficacy of RTS,S with 3rd dose", value = 92, min=0, max=100),
                    sliderInput(inputId="vh", label = "half-life of vaccine protection (days)", value = 90, min=10, max=500,step=10)
             )
    ),
    # Focal mass screen-and-treat trial settings.
    tabPanel(title = strong("Interventions under trial: Focal MSAT (mobile)"),
             column(3,
                    checkboxInput(inputId="MSATon", label = "switch on MSAT for imported cases", value = FALSE),
                    sliderInput(inputId="MSATscale", label = "years to scale up MSAT ", value = 1, min=.25, max=3, step=.25),
                    sliderInput(inputId="covMSATi", label = "new coverage of MSAT (%)", value = 90, min=0, max=100,step=10)
             ),
             column(3,
                    sliderInput(inputId="MSATsensC", label = "sensitivity HS RDT (clinical) ", value = 99, min=0, max=100,step=5),
                    sliderInput(inputId="MSATsensA", label = "sensitivity HS RDT (micro detectable, asym)", value = 87, min=0, max=100,step=5),
                    sliderInput(inputId="MSATsensU", label = "sensitivity HS RDT (micro undetectable, asym)", value = 4, min=0, max=100,step=5)
             )
    ),
    # Export current parameter values / figure.
    tabPanel(title= strong("Download"),
             br(),
             downloadButton("downloadTable", "Download current values of parameters"),
             downloadButton("downloadplot","Download high resolution figure")),
    # Re-import a previously downloaded parameter CSV.
    tabPanel(title= strong("Restore your parameters"),
             wellPanel(
               fileInput(inputId = "file", label ="Your input file:", accept = c(".csv"))
             )
    ),
    tabPanel(title=strong("User Manual & Help"),
             br(),
             tags$ul(tags$li(strong(a(href="https://www.dropbox.com/s/d5q4ldkxtm2az6m/RAI_strategydesigntool_usermanual_03032017.pdf?dl=0", "Download User Manual")))),
             strong("Contact the developers for any questions and feedback"),
             tags$ul(
               tags$li(a(href="http://www.tropmedres.ac/sai-thein-than-tun","Sai Thein Than Tun, "), a(href="mailto:sai@tropmedres.ac","sai@tropmedres.ac")),
               tags$li(a(href="http://www.tropmedres.ac/researchers/researcher/sompob-saralamba","Sompob Saralamba, "),a(href="mailto:sompob@tropmedres.ac","sompob@tropmedres.ac")),
               tags$li("Shwe Sin Kyaw"),
               tags$li("Phetsavanh Chanthavilay"),
               tags$li("Olivier Celhay, ", a(href="mailto:olivier.celhay@gmail.com","olivier.celhay@gmail.com")),
               tags$li("Trần Đăng Nguyên"),
               tags$li("Trần Nguyễn Anh Thư"),
               tags$li("Daniel M Parker"),
               tags$li("Professor Maciej F Boni"),
               tags$li("Professor Arjen M Dondorp"),
               tags$li(a(href="http://www.tropmedres.ac/researchers/researcher/lisa-white","Professor Lisa White, "), a(href="mailto:lisa@tropmedres.ac","lisa@tropmedres.ac"))
             ))
  ),
  # Model output plot rendered by output$MODEL in the server.
  fluidRow(plotOutput(outputId = "MODEL")),
  br(),
  br(),
  br(),
  br(),
  br(),
  br(),
  br(),
  br(),
  br(),
  br(),
  br(),
  br(),
  br(),
  br(),
  br(),
  br(),
  br(),
  br(),
  br(),
  br(),
  hr(),
  fluidRow(h4(" Legend")),
  fluidRow(h4(" Grey solid line: baseline scenario. Blue solid line: elimination strategy scenario.")),
  fluidRow(h4(" Dark blue solid line: target baseline API. Grey dashed lines: start and end of elimination activities.")),
  fluidRow(h4(" Red dashed line: pre-elimination threshold (API = 1 per 1000 per year)"))
)
#non-reactive parameters
# define the number of weeks to run the model
dt<-1/12                       # time step: one month, expressed in years
startyear<-2007                # simulation start (calendar year)
stopyear<-2023                 # simulation end (calendar year)
maxt<-stopyear-startyear       # horizon in years
times <- seq(0, maxt, by = dt) # solver time grid (years since startyear)
tsteps<-length(times)
# Parameter display names/units, aligned row-by-row with the parameter vector
# exported via tableContentR() in the server.
ParLabel <- read.table('ParLabel.csv', sep=",", as.is=TRUE)
#non-reactive function runGMS is now outside of the server function
# Run the GMS transmission model once.
#   initprev: initial parasite prevalence (proportion, 0-1)
#   scenario: named vector of intervention on/off switches
#   param:    named vector of reactive model parameters from the UI
# Returns a matrix: col 1 = calendar time, col 2 = detected clinical incidence
# per 1000/month, col 3 = total clinical incidence per 1000/month,
# col 4 = % prevalence, cols 5-44 = raw state variables.
runGMS<-function(initprev, scenario, param)
{
  #MODEL PARAMETERS
  # Fixed (non-UI) parameters are concatenated with the scenario switches and
  # the UI-driven parameter vector before being passed to the C++ core.
  parameters <- c(scenario,
                  timei = 2018,
                  nuTr = 14,    # days of infectiosness after treatment ACT [N]
                  nuTrp = 7,    # days of infectiosness after treatment ACT+primaquine [N]
                  alpha = 0.7,  # relative amplitude seasonality [N]
                  phi = 0.0,    # phase angle seasonality [N]
                  epsilonh=0.23, # per bite probability of an infectious mosquito infecting a human
                  epsilonm=0.5,  # per bite probability of an infectious human infecting a mosquito
                  b=365/3,       # per mosquito rate of biting
                  deltam=365/14, #
                  gammam=365/10, #Rate of becoming infectious from the latent phase for mosquitos, Kai Matuschewski: Getting infectious
                  cm_1=80,
                  cm_2=95,
                  cm_3=95,
                  covMSAT0=0,
                  omega = 2,   # average duration of immunity (years) [N]
                  nuC = 3,     # days of symptoms in the absence of treatment [N], #change 9 -> 3
                  nuA = 60,    # days of asymptomatic microscopically detectable carriage [N]
                  nuU = 100,   # days of asymptomatic microscopically undetectable carriage [N], #change 60 -> 100, Mean duration of a malaria untreated infection: 160 days,
                  rhoa = 55,   # relative infectivity of asymptomatic microscopically detectable carriers compared with clinical infections (%) [N]
                  rhou = 17,   # relative infectivity of asymptomatic microscopically undetectable carriers compared with clinical infections (%) [N]
                  ps = 90,     # % of all non-immune new infections that are clinical [N]
                  pr = 20,     # % of all immune new infections that are clinical [N]
                  mu = 50,     # life expectancy (years) [N]
                  param)
  # MODEL INITIAL CONDITIONS
  # population size
  initP<-10000
  # Initial prevalence is placed entirely in IA_0; the remainder is split
  # evenly between susceptible (S_0) and recovered/immune (R_0).
  initS_0<-0.5*(1-initprev)*initP
  initIC_0<-0
  initIA_0<-initprev*initP
  initIU_0<-0
  initR_0<-0.5*(1-initprev)*initP
  initTr_0<-0
  # State layout: Y + two cumulative incidence counters, then 5 strata
  # (suffix _0.._4) of 8 compartments each (S, IC, IA, IU, R, Tr, Sm, Rm).
  state <- c(Y = 0, Cinc_det = 0, Cinc_tot = 0,
             S_0 = initS_0, IC_0 = initIC_0, IA_0 = initIA_0, IU_0 = initIU_0, R_0 = initR_0, Tr_0 = initTr_0, Sm_0 = 0, Rm_0 = 0,
             S_1 = 0, IC_1 = 0, IA_1 = 0, IU_1 = 0, R_1 = 0, Tr_1 = 0, Sm_1 = 0, Rm_1 = 0,
             S_2 = 0, IC_2 = 0, IA_2 = 0, IU_2 = 0, R_2 = 0, Tr_2 = 0, Sm_2 = 0, Rm_2 = 0,
             S_3 = 0, IC_3 = 0, IA_3 = 0, IU_3 = 0, R_3 = 0, Tr_3 = 0, Sm_3 = 0, Rm_3 = 0,
             S_4 = 0, IC_4 = 0, IA_4 = 0, IU_4 = 0, R_4 = 0, Tr_4 = 0, Sm_4 = 0, Rm_4 = 0
  )
  #out <- ode(y = state, times = times, func = modGMS, parms = parameters)
  # Thin wrapper: deSolve expects the RHS to return a list.
  WmodGMSrcpp<-function(t,state,parameters){
    tmp<-modGMSrcpp(t,state,parameters)
    return(list(tmp))
  }
  #out <- ode(y = state, times = times, func = WmodGMSrcpp, parms = parameters)
  out <- ode(y = state, times = times, func = WmodGMSrcpp, parms = parameters, method="vode")
  # MODEL OUTPUTS
  # Column index sets into `out` (col 1 is time, states start at col 2).
  ipop <- 5:44       # all human compartments (population total)
  iinc_det <- 3      # cumulative detected clinical incidence counter
  iinc_tot <- 4      # cumulative total clinical incidence counter
  iprev <- c(6, 7, 8, 10, 14, 15, 16, 18, 22, 23, 24, 26, 30, 31, 32, 34, 38, 39, 40, 42)  # infected compartments
  # testing purpose
  # NOTE(review): round0..round4 are computed but not used below.
  round0 <- seq(from=5,length.out = 8)
  round1 <- seq(from=13,length.out = 8)
  round2 <- seq(from=21,length.out = 8)
  round3 <- seq(from=29,length.out = 8)
  round4 <- seq(from=37,length.out = 8)
  # population
  times<-out[,1]+startyear   # shadows the global grid; now calendar years
  pop<-rowSums(out[,ipop])
  # clinical incidence detected per 1000 per month
  # (difference of the cumulative counter between successive time steps)
  tci_det <- out[,iinc_det]
  clinmonth_det <- tci_det
  clinmonth_det[1] <- 0
  clinmonth_det[2:length(times)] <- 1000*(tci_det[2:length(times)] - tci_det[1:(length(times)-1)])/pop[2:length(times)]
  # clinical incidence total per 1000 per month
  tci_tot <- out[,iinc_tot]
  clinmonth_tot <- tci_tot
  clinmonth_tot[1] <- 0
  clinmonth_tot[2:length(times)] <- 1000*(tci_tot[2:length(times)] - tci_tot[1:(length(times)-1)])/pop[2:length(times)]
  # % prevalence
  prevalence <- 100*rowSums(out[,iprev])/pop # Additional file: Equation no.13
  # Assemble the output matrix described in the header comment.
  GMSout<-matrix(NA,nrow=length(times),ncol=44)
  GMSout[,1]<-times
  GMSout[,2]<-clinmonth_det
  GMSout[,3]<-clinmonth_tot
  GMSout[,4]<-prevalence
  for(i in 5:44){
    GMSout[,i]<-out[,i]
  }
  return(GMSout)
}
# Shiny server: builds the parameter/scenario vectors from the inputs, runs
# the model for the baseline (all interventions off) and the user scenario,
# and renders/downloads the comparison plots and parameter table.
server <- function(input, output, session) {
  # Baseline scenario: every intervention switched off.
  scenario_0<-c(EDATon = 0,
                ITNon = 0,
                IRSon = 0,
                MDAon = 0,
                primon = 0,
                MSATon = 0,
                VACon = 0)
  # User scenario, rebuilt whenever any switch changes.
  scenario_iR<-reactive(c(EDATon = input$EDATon,
                          ITNon = input$ITNon,
                          IRSon = input$IRSon,
                          MDAon = input$MDAon,
                          primon = input$primon,
                          MSATon = input$MSATon,
                          VACon = as.numeric(input$VACon)))
  # All UI-driven model parameters in the order expected by runGMS().
  parametersR <- reactive(c(
    bh_max = input$bh_max, # bites per human per night
    eta = input$eta,
    covEDAT0 = input$covEDAT0,
    covITN0 = input$covITN0,
    effITN = input$effITN,
    covIRS0 = input$covIRS0,
    effIRS = input$effIRS,
    muC = input$muC,
    muA = input$muA,
    muU = input$muU,
    percfail2018 = input$percfail2018,
    percfail2019 = input$percfail2019,
    percfail2020 = input$percfail2020,
    EDATscale = input$EDATscale,
    covEDATi = input$covEDATi,
    ITNscale = input$ITNscale,
    covITNi = input$covITNi,
    IRSscale = input$IRSscale,
    covIRSi = input$covIRSi,
    cmda_1 = input$cmda_1,
    cmda_2 = input$cmda_2,
    cmda_3 = input$cmda_3,
    tm_1 = input$tm_1, # timing of 1st round [2018 to 2021 - 1 month steps]
    tm_2 = input$tm_2, # timing of 2nd round [2018+(1/12) to 2021 - 1 month steps]
    tm_3 = input$tm_3, # timing of 3rd round [2018+(2/12) to 2021 - 1 month steps]
    dm = input$dm,
    lossd = input$lossd,
    MSATscale = input$MSATscale,
    covMSATi = input$covMSATi,
    MSATsensC = input$MSATsensC,
    MSATsensA = input$MSATsensA,
    MSATsensU = input$MSATsensU,
    effv_1 = input$effv_1,
    effv_2 = input$effv_2,
    effv_3 = input$effv_3,
    vh = input$vh
  ))
  #getting back previous parameters
  # Restore tab: read a previously downloaded CSV and push its second column
  # back into the widgets, positionally (must match the download order).
  data <- reactive({read.csv(input$file$datapath)})
  datavalue <- reactive(data()[,2])
  observeEvent(input$file,{
    updateCheckboxInput(session, "EDATon", value = datavalue()[1])
    updateCheckboxInput(session, "ITNon", value = datavalue()[2])
    updateCheckboxInput(session, "IRSon", value = datavalue()[3])
    updateCheckboxInput(session, "MDAon", value = datavalue()[4])
    updateCheckboxInput(session, "primon", value = datavalue()[5])
    updateCheckboxInput(session, "MSATon", value = datavalue()[6])
    updateSliderInput(session, "VACon", value = datavalue()[7])
    updateSliderInput(session, "API", value = datavalue()[8])
    updateSliderInput(session, "bh_max", value = datavalue()[9])
    updateSliderInput(session, "eta", value = datavalue()[10])
    updateSliderInput(session, "covEDAT0", value = datavalue()[11])
    updateSliderInput(session, "covITN0", value = datavalue()[12])
    updateSliderInput(session, "effITN", value = datavalue()[13])
    updateSliderInput(session, "covIRS0", value = datavalue()[14])
    updateSliderInput(session, "effIRS", value = datavalue()[15])
    updateSliderInput(session, "muC", value = datavalue()[16])
    updateSliderInput(session, "muA", value = datavalue()[17])
    updateSliderInput(session, "muU", value = datavalue()[18])
    updateSliderInput(session, "percfail2018", value = datavalue()[19])
    updateSliderInput(session, "percfail2019", value = datavalue()[20])
    updateSliderInput(session, "percfail2020", value = datavalue()[21])
    updateSliderInput(session, "EDATscale", value = datavalue()[22])
    updateSliderInput(session, "covEDATi", value = datavalue()[23])
    updateSliderInput(session, "ITNscale", value = datavalue()[24])
    updateSliderInput(session, "covITNi", value = datavalue()[25])
    updateSliderInput(session, "IRSscale", value = datavalue()[26])
    updateSliderInput(session, "covIRSi", value = datavalue()[27])
    updateSliderInput(session, "cmda_1", value = datavalue()[28])
    updateSliderInput(session, "cmda_2", value = datavalue()[29])
    updateSliderInput(session, "cmda_3", value = datavalue()[30])
    updateSliderInput(session, "tm_1", value = datavalue()[31])
    updateSliderInput(session, "tm_2", value = datavalue()[32])
    updateSliderInput(session, "tm_3", value = datavalue()[33])
    updateSliderInput(session, "dm", value = datavalue()[34])
    updateSliderInput(session, "lossd", value = datavalue()[35])
    updateSliderInput(session, "MSATscale", value = datavalue()[36])
    updateSliderInput(session, "covMSATi", value = datavalue()[37])
    updateSliderInput(session, "MSATsensC", value = datavalue()[38])
    updateSliderInput(session, "MSATsensA", value = datavalue()[39])
    updateSliderInput(session, "MSATsensU", value = datavalue()[40])
    updateSliderInput(session, "effv_1", value = datavalue()[41])
    updateSliderInput(session, "effv_2", value = datavalue()[42])
    updateSliderInput(session, "effv_3", value = datavalue()[43])
    updateSliderInput(session, "vh", value = datavalue()[44])
  })
  # initial prevalence
  # API (per 1000/year) is used directly as a prevalence proportion here.
  initprevR <- reactive(0.001*input$API)
  # Baseline run (all off) vs user scenario run.
  GMSout0R <- reactive(runGMS(initprevR(), scenario_0,parametersR()))
  GMSoutiR <- reactive(runGMS(initprevR(), scenario_iR(),parametersR()))
  # Diagnostic view: one small panel per raw state variable (cols 5-44),
  # dropping the first 125 time steps (burn-in).
  plotR2 <- function()
  {
    GMSouti<-GMSoutiR()
    par(mfrow=c(5,8))
    for(i in 5:44){
      plot(GMSouti[-c(1:125),i], type='l')
    }
  }
  # Main two-panel view: monthly incidence (detected vs total band) and
  # predicted prevalence, baseline (grey) vs scenario (blue), from 2016 on.
  plotR <- function()
  {
    GMSout0<-GMSout0R()
    GMSouti<-GMSoutiR()
    times<-GMSout0[,1]
    clinmonth_det<-cbind(GMSout0[,2],GMSouti[,2])
    clinmonth_tot<-cbind(GMSout0[,3],GMSouti[,3])
    prevalence<-cbind(GMSout0[,4],GMSouti[,4])
    runin<-(2016-startyear)/dt
    finclin<-max(clinmonth_tot[(runin:length(clinmonth_det[,1])),])
    finprev<-max(prevalence[(runin:length(prevalence[,1])),])
    # PLOTTING
    par(mfrow=c(1,2), cex=1.5)
    maxy<-max(finclin,input$API/12)
    x<-times[(runin:length(clinmonth_det[,1]))]
    y1<-clinmonth_det[runin:length(clinmonth_det[,1]),1]
    y2<-clinmonth_tot[runin:length(clinmonth_tot[,1]),1]
    plot(x,y1, type='l',lty=1,col=rgb(0,0,0,alpha=0.1),xlab = "Time",ylab="incidence per 1000 per month",main="Monthly cases per 1000 population",ylim=c(0,maxy),lwd=2)
    lines(x,y2, type='l',lty=1,col=rgb(0,0,0,alpha=0.1),lwd=2)
    polygon(c(x,rev(x)),c(y2,rev(y1)),col=rgb(0,0,0,alpha=0.1),border=NA)
    y1<-clinmonth_det[runin:length(clinmonth_det[,1]),2]
    y2<-clinmonth_tot[runin:length(clinmonth_tot[,1]),2]
    lines(x,y1, type='l',lty=1,col=rgb(0,0,1,alpha=0.4),lwd=2)
    lines(x,y2, type='l',lty=1,col=rgb(0,0,1,alpha=0.4),lwd=2)
    polygon(c(x,rev(x)),c(y2,rev(y1)),col=rgb(0,0,1,alpha=0.4),border=NA)
    lines(c(2018,2018),c(-maxy,2*maxy),col="dark grey",lty=3,lwd=2)
    abline(h=input$API/12,col="dark blue",lty=1,lwd=1)
    abline(h=1/12,col="red",lty=3,lwd=3)
    maxy<-finprev
    plot(times[(runin:length(prevalence[,1]))],prevalence[(runin:length(prevalence[,1])),1], type='l',lty=1,col=rgb(0,0,0,alpha=0.25),xlab = "Time",ylab="% prevalence",main="Predicted true prevalence",ylim=c(0,maxy),lwd=6)
    lines(times[(runin:length(prevalence[,1]))],prevalence[(runin:length(prevalence[,1])),2], type='l',lty=1,col=rgb(0,0,1,alpha=0.6),xlab = "Time",ylab="% prevalence",main="Predicted true prevalence",ylim=c(0,maxy),lwd=6)
    lines(c(2018,2018),c(-maxy,2*maxy),col="dark grey",lty=3,lwd=2)
  }
  # Currently renders the diagnostic grid rather than the main view.
  output$MODEL <- renderPlot({
    #plotR()
    plotR2()
  })
  # High-resolution PNG export of whichever plot function is active.
  output$downloadplot <- downloadHandler(
    filename = function(){paste('MalMod_',gsub("\\:","",Sys.time()),'.png',sep='')},
    content = function(file) {
      png(filename=file, height= 4800, width=14400, units= "px", res=300) #if(...=="png"){png(file)} else if(...=="pdf"){pdf(file)}
      #plotR()
      plotR2()
      dev.off()
    })
  # Parameter table (label, value, unit, variable name) for CSV export;
  # row order must match the positional restore in observeEvent above.
  tableContentR <- reactive({
    tmp <- c(scenario_iR(), input$API, parametersR())
    tmp2 <- cbind(ParLabel[,1], tmp, ParLabel[,2], names(tmp))
    colnames(tmp2) <- c("Name","Value","Unit","VarName")
    tmp2
  })
  output$downloadTable <- downloadHandler(
    filename = function(){paste('MalMod_',gsub("\\:","",Sys.time()),'.csv',sep='')},
    content = function(file) {
      write.csv(tableContentR(), file, row.names = FALSE)
    })
}
# Launch the Shiny application with the UI and server defined above
shinyApp(ui = ui, server = server)
|
/dynamics at each MDA round/app.R
|
no_license
|
MAEMOD-MORU/lmrm
|
R
| false
| false
| 23,369
|
r
|
library(deSolve)
library(shiny)
library(TSA)
library(Rcpp)
#library(ggplot2)
sourceCpp("modGMS.cpp")
# Shiny UI: a tabset of parameter panels followed by the model plot and a
# figure legend. All inputId values are read by the server's reactives.
ui <- fluidPage(
# Google Analytics tracking snippet (file must ship alongside app.R)
tags$head(includeScript("google-analytics.js")),
tabsetPanel(
id="panels",
# --- Tab 1: baseline epidemiological setting ---
tabPanel(title = strong("Baseline"),
column(3,
sliderInput(inputId="API", label = "baseline API", value = 10, min=1, max=100,step=0.5),
sliderInput(inputId="bh_max", label = "number of mosquito bites per human per night (peak season)", value = 20, min=0, max=80,step=1), #change range 0-80, Dan's data
sliderInput(inputId="eta", label = "% of all infections that are caught outside the village (forest)", value = 30, min=0, max=100,step=10),
sliderInput(inputId="covEDAT0", label = "baseline % of all clinical cases treated", value = 25, min=0, max=100)
),
column(3,
sliderInput(inputId="covITN0", label = "baseline coverage of ITN (%) ", value = 70, min=0, max=90,step=.5),
sliderInput(inputId="effITN", label = "% of infections averted due to ownership of ITN ", value = 30, min=0, max=50),
sliderInput(inputId="covIRS0", label = "baseline coverage of IRS (%) ", value = 0, min=0, max=90,step=10),
sliderInput(inputId="effIRS", label = "% reduction in biting rate due to IRS ", value = 15, min=0, max=25,step=5)
),
column(3,
sliderInput(inputId="muC", label = "imported clinical cases per 1000 population per year ", value = 1, min=0, max=10,step=1),
sliderInput(inputId="muA", label = "imported asymptomatic microscopically detectable carriers per 1000 population per year ", value = 1, min=0, max=100,step=1),
sliderInput(inputId="muU", label = "imported asymptomatic microscopically undetectable carriers per 1000 population per year ", value = 1, min=0, max=100,step=1)
),
column(3,
sliderInput(inputId="percfail2018", label = "% of cases failing treatment in 2018 and before ", value = 5, min=0, max=100,step=5),
sliderInput(inputId="percfail2019", label = "% of cases failing treatment in 2019 ", value = 15, min=0, max=100,step=5),
sliderInput(inputId="percfail2020", label = "% of cases failing treatment in 2020 and after ", value = 30, min=0, max=100,step=5)
)
),
# --- Tab 2: routinely available interventions (EDAT / LLIN / IRS) ---
tabPanel(title = strong("Interventions currently available"),
column(4,
wellPanel(
h3("Early Diagnosis and Treatment"),
checkboxInput(inputId="EDATon", label = "switch on scale up of EDAT ", value = FALSE),
checkboxInput(inputId="primon", label = "ACT+primaquine for EDAT and MDA ", value = FALSE), #under EDAT checkbox
sliderInput(inputId="EDATscale", label = "years to scale up EDAT ", value = 1, min=.25, max=3, step=.25),
sliderInput(inputId="covEDATi", label = "new % of all clinical cases treated", value = 70, min=0, max=100,step=5)
)),
column(4,wellPanel(
h3("Insecticide Treated Net (LLIN)"),
checkboxInput(inputId="ITNon", label = "switch on scale up of LLIN", value = FALSE),
sliderInput(inputId="ITNscale", label = "years to universal access to LLIN", value = 1, min=.25, max=3, step=.25),
sliderInput(inputId="covITNi", label = "new bed-net use of LLIN (%)", value = 90, min=0, max=90,step=5)
)),
column(4,wellPanel(
h3("Indoor Residual Spray"),
checkboxInput(inputId="IRSon", label = "switch on scale up of IRS ", value = FALSE),
sliderInput(inputId="IRSscale", label = "years to scale up IRS ", value = 1, min=.25, max=3, step=.25),
sliderInput(inputId="covIRSi", label = "new coverage of IRS (%) ", value = 90, min=0, max=90,step=5)
))
),
# --- Tab 3: trial intervention - focal mass drug/vaccine administration ---
tabPanel(title = strong("Interventions under trial: Focal MVDA (hotspot)"),
column(3,
checkboxInput(inputId="MDAon", label = "switch on MDA", value = FALSE), #6
sliderInput(inputId="lossd", label = "days prophylaxis provided by the ACT", value = 30, min=15, max=30,step=1),
sliderInput(inputId="dm", label = "months to complete each round ", value = 6, min=1, max=24,step=0.5)
),
column(3,
sliderInput(inputId="cmda_1", label = "effective population coverage of focal MDA in round 1 ", value = 50, min=0, max=100,step=10),
sliderInput(inputId="cmda_2", label = "effective population coverage of focal MDA in round 2 ", value = 50, min=0, max=100,step=10),
sliderInput(inputId="cmda_3", label = "effective population coverage of focal MDA in round 3 ", value = 50, min=0, max=100,step=10)
),
column(3,
sliderInput(inputId="tm_1", label = "timing of 1st round [2018+ no. of month, 1 means Jan'2018, 13 means Jan'2019]", value = 9, min=1, max=36,step=1),
sliderInput(inputId="tm_2", label = "timing of 2nd round [2018+ no. of month]", value = 10, min=2, max=36,step=1),
sliderInput(inputId="tm_3", label = "timing of 3rd round [2018+ no. of month]", value = 11, min=3, max=36,step=1)
),
column(3,
radioButtons(inputId="VACon", label = "With vaccination: ", choices = c("No"=0, "Yes"=1), selected = 0, inline=TRUE),
sliderInput(inputId="effv_1", label = "% protective efficacy of RTS,S with 1st dose", value = 75, min=0, max=100),
sliderInput(inputId="effv_2", label = "% protective efficacy of RTS,S with 2nd dose", value = 80, min=0, max=100),
sliderInput(inputId="effv_3", label = "% protective efficacy of RTS,S with 3rd dose", value = 92, min=0, max=100),
sliderInput(inputId="vh", label = "half-life of vaccine protection (days)", value = 90, min=10, max=500,step=10)
)
),
# --- Tab 4: trial intervention - focal mass screen and treat ---
tabPanel(title = strong("Interventions under trial: Focal MSAT (mobile)"),
column(3,
checkboxInput(inputId="MSATon", label = "switch on MSAT for imported cases", value = FALSE),
sliderInput(inputId="MSATscale", label = "years to scale up MSAT ", value = 1, min=.25, max=3, step=.25),
sliderInput(inputId="covMSATi", label = "new coverage of MSAT (%)", value = 90, min=0, max=100,step=10)
),
column(3,
sliderInput(inputId="MSATsensC", label = "sensitivity HS RDT (clinical) ", value = 99, min=0, max=100,step=5),
sliderInput(inputId="MSATsensA", label = "sensitivity HS RDT (micro detectable, asym)", value = 87, min=0, max=100,step=5),
sliderInput(inputId="MSATsensU", label = "sensitivity HS RDT (micro undetectable, asym)", value = 4, min=0, max=100,step=5)
)
),
# --- Tab 5: export current parameter values / high-resolution figure ---
tabPanel(title= strong("Download"),
br(),
downloadButton("downloadTable", "Download current values of parameters"),
downloadButton("downloadplot","Download high resolution figure")),
# --- Tab 6: re-load a previously downloaded parameter CSV ---
tabPanel(title= strong("Restore your parameters"),
wellPanel(
fileInput(inputId = "file", label ="Your input file:", accept = c(".csv"))
)
),
# --- Tab 7: documentation link and developer contacts ---
tabPanel(title=strong("User Manual & Help"),
br(),
tags$ul(tags$li(strong(a(href="https://www.dropbox.com/s/d5q4ldkxtm2az6m/RAI_strategydesigntool_usermanual_03032017.pdf?dl=0", "Download User Manual")))),
strong("Contact the developers for any questions and feedback"),
tags$ul(
tags$li(a(href="http://www.tropmedres.ac/sai-thein-than-tun","Sai Thein Than Tun, "), a(href="mailto:sai@tropmedres.ac","sai@tropmedres.ac")),
tags$li(a(href="http://www.tropmedres.ac/researchers/researcher/sompob-saralamba","Sompob Saralamba, "),a(href="mailto:sompob@tropmedres.ac","sompob@tropmedres.ac")),
tags$li("Shwe Sin Kyaw"),
tags$li("Phetsavanh Chanthavilay"),
tags$li("Olivier Celhay, ", a(href="mailto:olivier.celhay@gmail.com","olivier.celhay@gmail.com")),
tags$li("Trần Đăng Nguyên"),
tags$li("Trần Nguyễn Anh Thư"),
tags$li("Daniel M Parker"),
tags$li("Professor Maciej F Boni"),
tags$li("Professor Arjen M Dondorp"),
tags$li(a(href="http://www.tropmedres.ac/researchers/researcher/lisa-white","Professor Lisa White, "), a(href="mailto:lisa@tropmedres.ac","lisa@tropmedres.ac"))
))
),
# Main output area: model plot, vertical spacing, then the figure legend
fluidRow(plotOutput(outputId = "MODEL")),
br(),
br(),
br(),
br(),
br(),
br(),
br(),
br(),
br(),
br(),
br(),
br(),
br(),
br(),
br(),
br(),
br(),
br(),
br(),
br(),
hr(),
fluidRow(h4(" Legend")),
fluidRow(h4(" Grey solid line: baseline scenario. Blue solid line: elimination strategy scenario.")),
fluidRow(h4(" Dark blue solid line: target baseline API. Grey dashed lines: start and end of elimination activities.")),
fluidRow(h4(" Red dashed line: pre-elimination threshold (API = 1 per 1000 per year)"))
)
#non-reactive parameters
# Simulation time grid shared by the UI-independent model code (monthly steps)
dt<-1/12 # time step in years (monthly resolution)
startyear<-2007 # first simulated calendar year
stopyear<-2023 # last simulated calendar year
maxt<-stopyear-startyear # total simulated duration (years)
times <- seq(0, maxt, by = dt) # solver time points, in years since startyear
tsteps<-length(times)
# Parameter display names and units, used by the download-table feature
ParLabel <- read.table('ParLabel.csv', sep=",", as.is=TRUE)
#non-reactive function runGMS is now outside of the server function
# Run the GMS malaria transmission model once.
#
# Arguments:
#   initprev - initial true prevalence as a proportion (e.g. 0.01)
#   scenario - named vector of on/off intervention switches
#   param    - named vector of user-adjustable model parameters
#
# Returns a matrix with one row per time step:
#   [,1]    calendar time (years)
#   [,2]    detected clinical incidence per 1000 per month
#   [,3]    total clinical incidence per 1000 per month
#   [,4]    true prevalence (%)
#   [,5:44] raw compartment values from the ODE solution
runGMS<-function(initprev, scenario, param)
{
  # MODEL PARAMETERS: scenario switches + fixed constants + user parameters
  parameters <- c(scenario,
                  timei = 2018,     # year interventions start
                  nuTr = 14,        # days of infectiousness after treatment ACT [N]
                  nuTrp = 7,        # days of infectiousness after treatment ACT+primaquine [N]
                  alpha = 0.7,      # relative amplitude seasonality [N]
                  phi = 0.0,        # phase angle seasonality [N]
                  epsilonh=0.23,    # per bite probability of an infectious mosquito infecting a human
                  epsilonm=0.5,     # per bite probability of an infectious human infecting a mosquito
                  b=365/3,          # per mosquito rate of biting
                  deltam=365/14,
                  gammam=365/10,    # rate of becoming infectious from the latent phase for mosquitos
                  cm_1=80,
                  cm_2=95,
                  cm_3=95,
                  covMSAT0=0,
                  omega = 2,        # average duration of immunity (years) [N]
                  nuC = 3,          # days of symptoms in the absence of treatment [N]
                  nuA = 60,         # days of asymptomatic microscopically detectable carriage [N]
                  nuU = 100,        # days of asymptomatic microscopically undetectable carriage [N]
                  rhoa = 55,        # relative infectivity of asymptomatic detectable carriers vs clinical (%) [N]
                  rhou = 17,        # relative infectivity of asymptomatic undetectable carriers vs clinical (%) [N]
                  ps = 90,          # % of all non-immune new infections that are clinical [N]
                  pr = 20,          # % of all immune new infections that are clinical [N]
                  mu = 50,          # life expectancy (years) [N]
                  param)
  # MODEL INITIAL CONDITIONS: 10,000 people split between susceptible,
  # asymptomatic-detectable and recovered compartments
  initP<-10000
  initS_0<-0.5*(1-initprev)*initP
  initIC_0<-0
  initIA_0<-initprev*initP
  initIU_0<-0
  initR_0<-0.5*(1-initprev)*initP
  initTr_0<-0
  state <- c(Y = 0, Cinc_det = 0, Cinc_tot = 0,
             S_0 = initS_0, IC_0 = initIC_0, IA_0 = initIA_0, IU_0 = initIU_0, R_0 = initR_0, Tr_0 = initTr_0, Sm_0 = 0, Rm_0 = 0,
             S_1 = 0, IC_1 = 0, IA_1 = 0, IU_1 = 0, R_1 = 0, Tr_1 = 0, Sm_1 = 0, Rm_1 = 0,
             S_2 = 0, IC_2 = 0, IA_2 = 0, IU_2 = 0, R_2 = 0, Tr_2 = 0, Sm_2 = 0, Rm_2 = 0,
             S_3 = 0, IC_3 = 0, IA_3 = 0, IU_3 = 0, R_3 = 0, Tr_3 = 0, Sm_3 = 0, Rm_3 = 0,
             S_4 = 0, IC_4 = 0, IA_4 = 0, IU_4 = 0, R_4 = 0, Tr_4 = 0, Sm_4 = 0, Rm_4 = 0
  )
  # Wrap the compiled Rcpp derivative in the list-returning signature that
  # deSolve::ode expects
  WmodGMSrcpp<-function(t,state,parameters){
    list(modGMSrcpp(t,state,parameters))
  }
  out <- ode(y = state, times = times, func = WmodGMSrcpp, parms = parameters, method="vode")
  # MODEL OUTPUTS: column indices into `out`
  ipop <- 5:44       # all human compartments (population total)
  iinc_det <- 3      # cumulative detected clinical incidence counter
  iinc_tot <- 4      # cumulative total clinical incidence counter
  iprev <- c(6, 7, 8, 10, 14, 15, 16, 18, 22, 23, 24, 26, 30, 31, 32, 34, 38, 39, 40, 42) # infected compartments
  # calendar time and population size at each step
  cal_time<-out[,1]+startyear
  pop<-rowSums(out[,ipop])
  # monthly clinical incidence per 1000 = first difference of the cumulative
  # counters, scaled by the population at each step (first step set to 0)
  tci_det <- out[,iinc_det]
  clinmonth_det <- c(0, 1000*diff(tci_det)/pop[-1])
  tci_tot <- out[,iinc_tot]
  clinmonth_tot <- c(0, 1000*diff(tci_tot)/pop[-1])
  # % true prevalence across all infected compartments (Additional file: Equation no.13)
  prevalence <- 100*rowSums(out[,iprev])/pop
  # Assemble the output matrix (summary columns 1:4, raw states 5:44)
  GMSout<-matrix(NA,nrow=length(cal_time),ncol=44)
  GMSout[,1]<-cal_time
  GMSout[,2]<-clinmonth_det
  GMSout[,3]<-clinmonth_tot
  GMSout[,4]<-prevalence
  GMSout[,5:44]<-out[,5:44]
  return(GMSout)
}
# Shiny server: assembles scenario/parameter vectors from the UI, runs the
# model for baseline and intervention scenarios, renders plots and handles
# parameter save/restore.
server <- function(input, output, session) {
# Baseline scenario: every intervention switched off
scenario_0<-c(EDATon = 0,
ITNon = 0,
IRSon = 0,
MDAon = 0,
primon = 0,
MSATon = 0,
VACon = 0)
# Intervention scenario driven by the UI switches (reactive)
scenario_iR<-reactive(c(EDATon = input$EDATon,
ITNon = input$ITNon,
IRSon = input$IRSon,
MDAon = input$MDAon,
primon = input$primon,
MSATon = input$MSATon,
VACon = as.numeric(input$VACon)))
# All user-tunable model parameters gathered from the UI (reactive)
parametersR <- reactive(c(
bh_max = input$bh_max, # bites per human per night
eta = input$eta,
covEDAT0 = input$covEDAT0,
covITN0 = input$covITN0,
effITN = input$effITN,
covIRS0 = input$covIRS0,
effIRS = input$effIRS,
muC = input$muC,
muA = input$muA,
muU = input$muU,
percfail2018 = input$percfail2018,
percfail2019 = input$percfail2019,
percfail2020 = input$percfail2020,
EDATscale = input$EDATscale,
covEDATi = input$covEDATi,
ITNscale = input$ITNscale,
covITNi = input$covITNi,
IRSscale = input$IRSscale,
covIRSi = input$covIRSi,
cmda_1 = input$cmda_1,
cmda_2 = input$cmda_2,
cmda_3 = input$cmda_3,
tm_1 = input$tm_1, # timing of 1st round [2018 to 2021 - 1 month steps]
tm_2 = input$tm_2, # timing of 2nd round [2018+(1/12) to 2021 - 1 month steps]
tm_3 = input$tm_3, # timing of 3rd round [2018+(2/12) to 2021 - 1 month steps]
dm = input$dm,
lossd = input$lossd,
MSATscale = input$MSATscale,
covMSATi = input$covMSATi,
MSATsensC = input$MSATsensC,
MSATsensA = input$MSATsensA,
MSATsensU = input$MSATsensU,
effv_1 = input$effv_1,
effv_2 = input$effv_2,
effv_3 = input$effv_3,
vh = input$vh
))
#getting back previous parameters
# Restore UI state from a previously downloaded parameter CSV. The positional
# indices below must match the row order produced by tableContentR().
data <- reactive({read.csv(input$file$datapath)})
datavalue <- reactive(data()[,2])
observeEvent(input$file,{
updateCheckboxInput(session, "EDATon", value = datavalue()[1])
updateCheckboxInput(session, "ITNon", value = datavalue()[2])
updateCheckboxInput(session, "IRSon", value = datavalue()[3])
updateCheckboxInput(session, "MDAon", value = datavalue()[4])
updateCheckboxInput(session, "primon", value = datavalue()[5])
updateCheckboxInput(session, "MSATon", value = datavalue()[6])
updateSliderInput(session, "VACon", value = datavalue()[7])
updateSliderInput(session, "API", value = datavalue()[8])
updateSliderInput(session, "bh_max", value = datavalue()[9])
updateSliderInput(session, "eta", value = datavalue()[10])
updateSliderInput(session, "covEDAT0", value = datavalue()[11])
updateSliderInput(session, "covITN0", value = datavalue()[12])
updateSliderInput(session, "effITN", value = datavalue()[13])
updateSliderInput(session, "covIRS0", value = datavalue()[14])
updateSliderInput(session, "effIRS", value = datavalue()[15])
updateSliderInput(session, "muC", value = datavalue()[16])
updateSliderInput(session, "muA", value = datavalue()[17])
updateSliderInput(session, "muU", value = datavalue()[18])
updateSliderInput(session, "percfail2018", value = datavalue()[19])
updateSliderInput(session, "percfail2019", value = datavalue()[20])
updateSliderInput(session, "percfail2020", value = datavalue()[21])
updateSliderInput(session, "EDATscale", value = datavalue()[22])
updateSliderInput(session, "covEDATi", value = datavalue()[23])
updateSliderInput(session, "ITNscale", value = datavalue()[24])
updateSliderInput(session, "covITNi", value = datavalue()[25])
updateSliderInput(session, "IRSscale", value = datavalue()[26])
updateSliderInput(session, "covIRSi", value = datavalue()[27])
updateSliderInput(session, "cmda_1", value = datavalue()[28])
updateSliderInput(session, "cmda_2", value = datavalue()[29])
updateSliderInput(session, "cmda_3", value = datavalue()[30])
updateSliderInput(session, "tm_1", value = datavalue()[31])
updateSliderInput(session, "tm_2", value = datavalue()[32])
updateSliderInput(session, "tm_3", value = datavalue()[33])
updateSliderInput(session, "dm", value = datavalue()[34])
updateSliderInput(session, "lossd", value = datavalue()[35])
updateSliderInput(session, "MSATscale", value = datavalue()[36])
updateSliderInput(session, "covMSATi", value = datavalue()[37])
updateSliderInput(session, "MSATsensC", value = datavalue()[38])
updateSliderInput(session, "MSATsensA", value = datavalue()[39])
updateSliderInput(session, "MSATsensU", value = datavalue()[40])
updateSliderInput(session, "effv_1", value = datavalue()[41])
updateSliderInput(session, "effv_2", value = datavalue()[42])
updateSliderInput(session, "effv_3", value = datavalue()[43])
updateSliderInput(session, "vh", value = datavalue()[44])
})
# initial prevalence
initprevR <- reactive(0.001*input$API)
# Model runs: baseline (all interventions off) vs selected scenario
GMSout0R <- reactive(runGMS(initprevR(), scenario_0,parametersR()))
GMSoutiR <- reactive(runGMS(initprevR(), scenario_iR(),parametersR()))
# Diagnostic plot: one small panel per raw state variable (columns 5:44),
# dropping the first 125 time steps of burn-in
plotR2 <- function()
{
GMSouti<-GMSoutiR()
par(mfrow=c(5,8))
for(i in 5:44){
plot(GMSouti[-c(1:125),i], type='l')
}
}
# Production plot: monthly incidence and true prevalence from 2016 onwards,
# baseline in grey vs intervention scenario in blue
plotR <- function()
{
GMSout0<-GMSout0R()
GMSouti<-GMSoutiR()
times<-GMSout0[,1]
clinmonth_det<-cbind(GMSout0[,2],GMSouti[,2])
clinmonth_tot<-cbind(GMSout0[,3],GMSouti[,3])
prevalence<-cbind(GMSout0[,4],GMSouti[,4])
runin<-(2016-startyear)/dt
finclin<-max(clinmonth_tot[(runin:length(clinmonth_det[,1])),])
finprev<-max(prevalence[(runin:length(prevalence[,1])),])
# PLOTTING
par(mfrow=c(1,2), cex=1.5)
maxy<-max(finclin,input$API/12)
x<-times[(runin:length(clinmonth_det[,1]))]
y1<-clinmonth_det[runin:length(clinmonth_det[,1]),1]
y2<-clinmonth_tot[runin:length(clinmonth_tot[,1]),1]
# baseline scenario: shaded band between detected and total incidence
plot(x,y1, type='l',lty=1,col=rgb(0,0,0,alpha=0.1),xlab = "Time",ylab="incidence per 1000 per month",main="Monthly cases per 1000 population",ylim=c(0,maxy),lwd=2)
lines(x,y2, type='l',lty=1,col=rgb(0,0,0,alpha=0.1),lwd=2)
polygon(c(x,rev(x)),c(y2,rev(y1)),col=rgb(0,0,0,alpha=0.1),border=NA)
y1<-clinmonth_det[runin:length(clinmonth_det[,1]),2]
y2<-clinmonth_tot[runin:length(clinmonth_tot[,1]),2]
# intervention scenario band
lines(x,y1, type='l',lty=1,col=rgb(0,0,1,alpha=0.4),lwd=2)
lines(x,y2, type='l',lty=1,col=rgb(0,0,1,alpha=0.4),lwd=2)
polygon(c(x,rev(x)),c(y2,rev(y1)),col=rgb(0,0,1,alpha=0.4),border=NA)
# reference lines: intervention start (2018), baseline API, pre-elimination threshold
lines(c(2018,2018),c(-maxy,2*maxy),col="dark grey",lty=3,lwd=2)
abline(h=input$API/12,col="dark blue",lty=1,lwd=1)
abline(h=1/12,col="red",lty=3,lwd=3)
maxy<-finprev
plot(times[(runin:length(prevalence[,1]))],prevalence[(runin:length(prevalence[,1])),1], type='l',lty=1,col=rgb(0,0,0,alpha=0.25),xlab = "Time",ylab="% prevalence",main="Predicted true prevalence",ylim=c(0,maxy),lwd=6)
lines(times[(runin:length(prevalence[,1]))],prevalence[(runin:length(prevalence[,1])),2], type='l',lty=1,col=rgb(0,0,1,alpha=0.6),xlab = "Time",ylab="% prevalence",main="Predicted true prevalence",ylim=c(0,maxy),lwd=6)
lines(c(2018,2018),c(-maxy,2*maxy),col="dark grey",lty=3,lwd=2)
}
# NOTE(review): the diagnostic plotR2() is wired to the output instead of the
# production plotR() -- looks like a leftover debugging switch; confirm intent
output$MODEL <- renderPlot({
#plotR()
plotR2()
})
# High-resolution PNG export of the current figure
output$downloadplot <- downloadHandler(
filename = function(){paste('MalMod_',gsub("\\:","",Sys.time()),'.png',sep='')},
content = function(file) {
png(filename=file, height= 4800, width=14400, units= "px", res=300) #if(...=="png"){png(file)} else if(...=="pdf"){pdf(file)}
#plotR()
plotR2()
dev.off()
})
# Table of current scenario + parameter values, labelled via ParLabel; the row
# order defines the positional indices used by the restore logic above
tableContentR <- reactive({
tmp <- c(scenario_iR(), input$API, parametersR())
tmp2 <- cbind(ParLabel[,1], tmp, ParLabel[,2], names(tmp))
colnames(tmp2) <- c("Name","Value","Unit","VarName")
tmp2
})
# CSV export of the parameter table (re-loadable via the Restore tab)
output$downloadTable <- downloadHandler(
filename = function(){paste('MalMod_',gsub("\\:","",Sys.time()),'.csv',sep='')},
content = function(file) {
write.csv(tableContentR(), file, row.names = FALSE)
})
}
# Launch the Shiny application with the UI and server defined above
shinyApp(ui = ui, server = server)
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/html_dependency.R
\name{copyDependencyToDir}
\alias{copyDependencyToDir}
\title{Copy an HTML dependency to a directory}
\usage{
copyDependencyToDir(dependency, outputDir, mustWork = TRUE)
}
\arguments{
\item{dependency}{A single HTML dependency object.}
\item{outputDir}{The directory in which a subdirectory should be created for
this dependency.}
\item{mustWork}{If \code{TRUE} and \code{dependency} does not point to a
directory on disk (but rather a URL location), an error is raised. If
\code{FALSE} then non-disk dependencies are returned without modification.}
}
\value{
The dependency with its \code{src} value updated to the new
location's absolute path.
}
\description{
Copies an HTML dependency to a subdirectory of the given directory. The
subdirectory name will be \emph{name}-\emph{version} (for example,
"outputDir/jquery-1.11.0").
}
\details{
In order for disk-based dependencies to work with static HTML files, it's
generally necessary to copy them to either the directory of the referencing
HTML file, or to a subdirectory of that directory. This function makes it
easier to perform that copy.
If a subdirectory named \emph{name}-\emph{version} already exists in
\code{outputDir}, then copying is not performed; the existing contents are
assumed to be up-to-date.
}
\seealso{
\code{\link{makeDependencyRelative}} can be used with the returned
value to make the path relative to a specific directory.
}
|
/man/copyDependencyToDir.Rd
|
no_license
|
datastorm-open/htmltools
|
R
| false
| false
| 1,518
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/html_dependency.R
\name{copyDependencyToDir}
\alias{copyDependencyToDir}
\title{Copy an HTML dependency to a directory}
\usage{
copyDependencyToDir(dependency, outputDir, mustWork = TRUE)
}
\arguments{
\item{dependency}{A single HTML dependency object.}
\item{outputDir}{The directory in which a subdirectory should be created for
this dependency.}
\item{mustWork}{If \code{TRUE} and \code{dependency} does not point to a
directory on disk (but rather a URL location), an error is raised. If
\code{FALSE} then non-disk dependencies are returned without modification.}
}
\value{
The dependency with its \code{src} value updated to the new
location's absolute path.
}
\description{
Copies an HTML dependency to a subdirectory of the given directory. The
subdirectory name will be \emph{name}-\emph{version} (for example,
"outputDir/jquery-1.11.0").
}
\details{
In order for disk-based dependencies to work with static HTML files, it's
generally necessary to copy them to either the directory of the referencing
HTML file, or to a subdirectory of that directory. This function makes it
easier to perform that copy.
If a subdirectory named \emph{name}-\emph{version} already exists in
\code{outputDir}, then copying is not performed; the existing contents are
assumed to be up-to-date.
}
\seealso{
\code{\link{makeDependencyRelative}} can be used with the returned
value to make the path relative to a specific directory.
}
|
#!/usr/bin/env Rscript
# table-cast: reshape a thin/long tab-delimited table (ID, sampleID, value)
# into a wide table with one row per ID and one column per sampleID.
# optparse must be attached (not just checked) because its functions are
# called unqualified below.
if(!suppressPackageStartupMessages(require("optparse", quietly=TRUE))) {
  stop("the 'optparse' package is needed in order to run this script")
}
option_list <-
  list(
    make_option("--input", help = "input file name (use '-' for stdin) [default: -]",
                default = '-')
  )
parser <- OptionParser(usage = "%prog --input=inputFileName",
                       description = "cast a thin, long tab-delimited table into a fat, large table",
                       option_list = option_list)
opts <- parse_args(parser, positional_arguments = FALSE)
inputFileName <- opts$input
# Open the input as a raw text connection (named file or stdin)
if(inputFileName != "-") {
  stopifnot(file.exists(inputFileName))
  fin <- file(inputFileName, open = "r", raw = TRUE)
} else {
  fin <- file("stdin", open = "r", raw = TRUE)
}
# Peek at the first line to count columns, then un-read it
temp <- readLines(fin, n = 1)
if(length(temp) == 0) {
  # robustness fix: empty input used to crash on strsplit(...)[[1]]
  message("empty input: terminating")
  quit(save = "no", status = 0)
}
pushBack(temp, fin)
numInputColumns <- length(strsplit(temp, "\t")[[1]])
message(numInputColumns, " columns detected in input")
if(numInputColumns > 3) {
  stop("not implemented yet")
}
if(numInputColumns < 3) {
  # typo fix in user-facing message: "noting" -> "nothing"
  message("nothing to possibly expand to: terminating")
  quit(save = "no", status = 0)
}
# One ID column per input column beyond (sampleID, value)
outputIDs <- numInputColumns - 2
idVarNames <- paste0("ID", if(outputIDs > 1) seq_len(outputIDs) else "")
##
## read all columns data from the 1st output row
##
# Scan forward until the regressor ID (1st column) changes: this discovers the
# full set of sample IDs that make up one output row. All consumed lines are
# pushed back afterwards so the casting pass re-reads them.
message("\nscanning input data for IDs...")
firstRegressorID <- c()
samplesIDs <- c()
lns <- c()
repeat {
line <- readLines(fin, n = 1)
if(length(line) != 1) {
stop("error while scanning input data: expecting to read in 1 row, got ",
length(line), " instead")
}
lns <- c(lns, line)
fields <- strsplit(line, "\t")[[1]]
if(length(fields) != 3) {
stop("invalid input data format: was expecting 3 tab-delimited columns")
}
if(length(firstRegressorID) > 0) {
# stop as soon as a second regressor ID shows up
if(fields[1] != firstRegressorID) {
break
}
} else {
firstRegressorID <- fields[1]
}
if(fields[2] %in% samplesIDs) {
stop("samples ids (2nd column) must be unique")
}
samplesIDs <- c(samplesIDs, fields[2])
}
# un-read the scanned lines so the casting pass starts at the beginning
pushBack(lns, fin)
samplesIDs <- sort(samplesIDs)
message("OK. detected ", length(samplesIDs), " samples:")
sink(stderr())
str(samplesIDs)
sink()
message("")
# Named slots: one per ID column plus one per sample; reused for every row
outputTemplate <- character(length(samplesIDs) + length(idVarNames))
names(outputTemplate) <- c(idVarNames, samplesIDs)
outputHeader <- paste(names(outputTemplate), collapse = "\t")
writeLines(outputHeader)
# Number of input lines read per readLines() call in readRow()
CHUNK_SIZE <- 100L
# Read from `con` all consecutive rows sharing the next regressor ID.
#
# Lines are consumed in chunks of `chunk_size` (defaults to the script-level
# CHUNK_SIZE); lines belonging to a different ID are pushed back onto the
# connection so the next call starts exactly at the following regressor.
# Returns a character data.frame with columns ID / sampleID / value
# (zero rows at end of input).
#
# Bug fix: the original kept rows of *intermediate* IDs whenever one chunk
# spanned three or more distinct IDs (it only discarded the chunk's last ID).
# Rows are now filtered against the first ID seen.
readRow <- function(con, chunk_size = CHUNK_SIZE) {
  ans <- data.frame(ID = character(0), sampleID = character(0), value = character(0),
                    stringsAsFactors = FALSE)
  firstID <- NULL
  repeat {
    txt <- readLines(con, chunk_size, warn = FALSE)
    if(length(txt) == 0) {
      break
    }
    txtCon <- textConnection(txt)
    chunk <- read.table(txtCon,
                        sep = "\t",
                        header = FALSE,
                        quote = "",
                        comment.char = "",
                        colClasses = c("character", "character", "character"),
                        col.names = c("ID", "sampleID", "value"))
    close(txtCon)
    if(is.null(firstID)) {
      firstID <- chunk$ID[1]
    }
    # keep only rows of the current regressor; push everything else back
    keep.flag <- chunk$ID == firstID
    ans <- rbind(ans, chunk[keep.flag, , drop = FALSE])
    if(!all(keep.flag)) {
      pushBack(txt[!keep.flag], con)
      break
    }
  }
  return(ans)
}
# Casting pass: read one regressor's rows at a time and emit a single wide
# output line, reporting progress roughly once per minute.
rowsCounter <- 0
TIME_INTERVAL <- as.difftime(1, units = "mins")
TIME_ORIG <- TIME_START <- Sys.time()
repeat {
row <- readRow(fin)
if(nrow(row) == 0) {
break
}
rowsCounter <- rowsCounter + 1L
outputTemplate['ID'] <- row$ID[1]
# fill sample slots by name -- NOTE(review): the template is not reset per
# row, so samples missing for this ID silently keep the previous row's
# values; confirm inputs always cover every sample
outputTemplate[row$sampleID] <- row$value
writeLines(paste(outputTemplate, collapse = "\t"))
TIME_CUR <- Sys.time()
TIME_ELAPSED <- TIME_CUR - TIME_START
if(TIME_ELAPSED > TIME_INTERVAL) {
TIME_START <- TIME_CUR
message(rowsCounter, " rows processed in ", format(TIME_CUR - TIME_ORIG))
gc()
}
}
TIME_CUR <- Sys.time()
message(rowsCounter, " rows processed in ", format(TIME_CUR - TIME_ORIG))
message("conversion completed.")
|
/table-cast
|
no_license
|
antoniofabio/eqtl-ranef
|
R
| false
| false
| 4,393
|
#!/usr/bin/env Rscript
# table-cast: reshape a thin/long tab-delimited table (ID, sampleID, value)
# into a wide table with one row per ID and one column per sampleID.
# optparse must be attached (not just checked) because its functions are
# called unqualified below.
if(!suppressPackageStartupMessages(require("optparse", quietly=TRUE))) {
  stop("the 'optparse' package is needed in order to run this script")
}
option_list <-
  list(
    make_option("--input", help = "input file name (use '-' for stdin) [default: -]",
                default = '-')
  )
parser <- OptionParser(usage = "%prog --input=inputFileName",
                       description = "cast a thin, long tab-delimited table into a fat, large table",
                       option_list = option_list)
opts <- parse_args(parser, positional_arguments = FALSE)
inputFileName <- opts$input
# Open the input as a raw text connection (named file or stdin)
if(inputFileName != "-") {
  stopifnot(file.exists(inputFileName))
  fin <- file(inputFileName, open = "r", raw = TRUE)
} else {
  fin <- file("stdin", open = "r", raw = TRUE)
}
# Peek at the first line to count columns, then un-read it
temp <- readLines(fin, n = 1)
if(length(temp) == 0) {
  # robustness fix: empty input used to crash on strsplit(...)[[1]]
  message("empty input: terminating")
  quit(save = "no", status = 0)
}
pushBack(temp, fin)
numInputColumns <- length(strsplit(temp, "\t")[[1]])
message(numInputColumns, " columns detected in input")
if(numInputColumns > 3) {
  stop("not implemented yet")
}
if(numInputColumns < 3) {
  # typo fix in user-facing message: "noting" -> "nothing"
  message("nothing to possibly expand to: terminating")
  quit(save = "no", status = 0)
}
# One ID column per input column beyond (sampleID, value)
outputIDs <- numInputColumns - 2
idVarNames <- paste0("ID", if(outputIDs > 1) seq_len(outputIDs) else "")
##
## read all columns data from the 1st output row
##
# Scan forward until the regressor ID (1st column) changes: this discovers the
# full set of sample IDs that make up one output row. All consumed lines are
# pushed back afterwards so the casting pass re-reads them.
message("\nscanning input data for IDs...")
firstRegressorID <- c()
samplesIDs <- c()
lns <- c()
repeat {
line <- readLines(fin, n = 1)
if(length(line) != 1) {
stop("error while scanning input data: expecting to read in 1 row, got ",
length(line), " instead")
}
lns <- c(lns, line)
fields <- strsplit(line, "\t")[[1]]
if(length(fields) != 3) {
stop("invalid input data format: was expecting 3 tab-delimited columns")
}
if(length(firstRegressorID) > 0) {
# stop as soon as a second regressor ID shows up
if(fields[1] != firstRegressorID) {
break
}
} else {
firstRegressorID <- fields[1]
}
if(fields[2] %in% samplesIDs) {
stop("samples ids (2nd column) must be unique")
}
samplesIDs <- c(samplesIDs, fields[2])
}
# un-read the scanned lines so the casting pass starts at the beginning
pushBack(lns, fin)
samplesIDs <- sort(samplesIDs)
message("OK. detected ", length(samplesIDs), " samples:")
sink(stderr())
str(samplesIDs)
sink()
message("")
# Named slots: one per ID column plus one per sample; reused for every row
outputTemplate <- character(length(samplesIDs) + length(idVarNames))
names(outputTemplate) <- c(idVarNames, samplesIDs)
outputHeader <- paste(names(outputTemplate), collapse = "\t")
writeLines(outputHeader)
# Number of input lines read per readLines() call in readRow()
CHUNK_SIZE <- 100L
# Read from `con` all consecutive rows sharing the next regressor ID.
#
# Lines are consumed in chunks of `chunk_size` (defaults to the script-level
# CHUNK_SIZE); lines belonging to a different ID are pushed back onto the
# connection so the next call starts exactly at the following regressor.
# Returns a character data.frame with columns ID / sampleID / value
# (zero rows at end of input).
#
# Bug fix: the original kept rows of *intermediate* IDs whenever one chunk
# spanned three or more distinct IDs (it only discarded the chunk's last ID).
# Rows are now filtered against the first ID seen.
readRow <- function(con, chunk_size = CHUNK_SIZE) {
  ans <- data.frame(ID = character(0), sampleID = character(0), value = character(0),
                    stringsAsFactors = FALSE)
  firstID <- NULL
  repeat {
    txt <- readLines(con, chunk_size, warn = FALSE)
    if(length(txt) == 0) {
      break
    }
    txtCon <- textConnection(txt)
    chunk <- read.table(txtCon,
                        sep = "\t",
                        header = FALSE,
                        quote = "",
                        comment.char = "",
                        colClasses = c("character", "character", "character"),
                        col.names = c("ID", "sampleID", "value"))
    close(txtCon)
    if(is.null(firstID)) {
      firstID <- chunk$ID[1]
    }
    # keep only rows of the current regressor; push everything else back
    keep.flag <- chunk$ID == firstID
    ans <- rbind(ans, chunk[keep.flag, , drop = FALSE])
    if(!all(keep.flag)) {
      pushBack(txt[!keep.flag], con)
      break
    }
  }
  return(ans)
}
# Casting pass: read one regressor's rows at a time and emit a single wide
# output line, reporting progress roughly once per minute.
rowsCounter <- 0
TIME_INTERVAL <- as.difftime(1, units = "mins")
TIME_ORIG <- TIME_START <- Sys.time()
repeat {
row <- readRow(fin)
if(nrow(row) == 0) {
break
}
rowsCounter <- rowsCounter + 1L
outputTemplate['ID'] <- row$ID[1]
# fill sample slots by name -- NOTE(review): the template is not reset per
# row, so samples missing for this ID silently keep the previous row's
# values; confirm inputs always cover every sample
outputTemplate[row$sampleID] <- row$value
writeLines(paste(outputTemplate, collapse = "\t"))
TIME_CUR <- Sys.time()
TIME_ELAPSED <- TIME_CUR - TIME_START
if(TIME_ELAPSED > TIME_INTERVAL) {
TIME_START <- TIME_CUR
message(rowsCounter, " rows processed in ", format(TIME_CUR - TIME_ORIG))
gc()
}
}
TIME_CUR <- Sys.time()
message(rowsCounter, " rows processed in ", format(TIME_CUR - TIME_ORIG))
message("conversion completed.")
|
|
#Cryptocurrency Data
# Build a daily volume-weighted BTC/USD price series from the Bitstamp trade
# dump published by bitcoincharts, then wrap it as a ts object.
###data download
download.file(file.path("http://api.bitcoincharts.com/v1/csv", "bitstampUSD.csv.gz"),
destfile = file.path("Bitcoincopy/dataset", "bitstampUSD.csv.gz"))
# NOTE(review): bitcoincharts dumps appear to ship without a header row, so
# header=T may discard the first trade -- confirm
bitcoin <- read.csv(gzfile(file.path("Bitcoincopy/dataset", "bitstampUSD.csv.gz")), header=T)
names(bitcoin) <- c("date","price","amount")
# first column is a Unix timestamp; keep only the calendar date
bitcoin$date <- as.Date(as.POSIXct(bitcoin$date, origin="1970-01-01"))
###select trades from the last 365 days
bitcoin <- bitcoin[bitcoin$date >= Sys.Date()-365, ]
row.names(bitcoin) <- 1:nrow(bitcoin)
###calculate trade volume
# USD value of each individual trade
bitcoin$volume <- round(bitcoin$price*bitcoin$amount, 3)
###aggregate by date
bitcoin <- aggregate(x = bitcoin[c("amount", "volume")], by=list(date=bitcoin$date), FUN=sum)
###calculate average weighted price
# daily volume-weighted price = total USD traded / total BTC traded
bitcoin$wprice <- bitcoin$volume/bitcoin$amount
View(bitcoin)
###create final time serie
library(forecast)
# start = c(year, month, day) split from the earliest date; daily frequency
y <- ts(bitcoin$wprice, start=as.numeric(strsplit(as.character(min(bitcoin$date)), '-')[[1]]), frequency=365.25)
|
/bitcoin.R
|
no_license
|
molodnyak/bitcoin
|
R
| false
| false
| 1,017
|
r
|
#Cryptocurrency Data
###data download
# Pull the full Bitstamp USD trade history (unix timestamp, price, amount)
# from bitcoincharts.com as a gzipped CSV.
download.file(file.path("http://api.bitcoincharts.com/v1/csv", "bitstampUSD.csv.gz"),
destfile = file.path("Bitcoincopy/dataset", "bitstampUSD.csv.gz"))
# NOTE(review): the bitcoincharts dump has no header row — header=T likely
# consumes the first trade as column names; confirm and switch to FALSE.
bitcoin <- read.csv(gzfile(file.path("Bitcoincopy/dataset", "bitstampUSD.csv.gz")), header=T)
names(bitcoin) <- c("date","price","amount")
# Epoch seconds -> calendar date (time of day discarded).
bitcoin$date <- as.Date(as.POSIXct(bitcoin$date, origin="1970-01-01"))
###select last 365 values
bitcoin <- bitcoin[bitcoin$date >= Sys.Date()-365, ]
row.names(bitcoin) <- 1:nrow(bitcoin)
###calculate trade volume
# Trade volume in USD = price * amount, rounded to 3 decimals.
bitcoin$volume <- round(bitcoin$price*bitcoin$amount, 3)
###aggregate by date
bitcoin <- aggregate(x = bitcoin[c("amount", "volume")], by=list(date=bitcoin$date), FUN=sum)
###calculate average weighted price
# Daily volume-weighted average price (VWAP) = sum(volume) / sum(amount).
bitcoin$wprice <- bitcoin$volume/bitcoin$amount
View(bitcoin)
###create final time serie
library(forecast)
# NOTE(review): ts() only uses the first two elements of `start`; the
# year-month-day vector passed here is effectively truncated.
y <- ts(bitcoin$wprice, start=as.numeric(strsplit(as.character(min(bitcoin$date)), '-')[[1]]), frequency=365.25)
|
# Getting & Cleaning Data course project: download the UCI HAR dataset,
# merge the train/test splits into one data set, keep only the mean/std
# measurements, and attach descriptive activity names.
setwd("C:/Users/malombardi/Desktop/Coursera/")
if(!file.exists("project2")) {
  dir.create("project2")
}
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl, destfile="./project2/project2.zip")
dateDownloaded <- date()
dateDownloaded
unzip(zipfile="./project2/project2.zip", exdir="./project2")
path <- file.path("./project2", "UCI HAR Dataset")
files <- list.files(path, recursive=TRUE)
files
#Merge the training and the test sets
dataActivityTest  <- read.table(file.path(path, "test" , "Y_test.txt" ), header = FALSE)
dataActivityTrain <- read.table(file.path(path, "train", "Y_train.txt"), header = FALSE)
dataSubjectTrain  <- read.table(file.path(path, "train", "subject_train.txt"), header = FALSE)
dataSubjectTest   <- read.table(file.path(path, "test" , "subject_test.txt"), header = FALSE)
# BUG FIX: the original read X_*.txt from an undefined `path_rf`; use `path`.
dataFeaturesTest  <- read.table(file.path(path, "test" , "X_test.txt" ), header = FALSE)
dataFeaturesTrain <- read.table(file.path(path, "train", "X_train.txt"), header = FALSE)
#Merge
dataSubject  <- rbind(dataSubjectTrain, dataSubjectTest)
dataActivity <- rbind(dataActivityTrain, dataActivityTest)
dataFeatures <- rbind(dataFeaturesTrain, dataFeaturesTest)
# BUG FIX: `setAllInOne` was used below but never built. Name the two ID
# columns so the grepl() filters can find them, label the feature columns
# from features.txt, and bind everything into a single data frame.
names(dataSubject)  <- "subjectId"
names(dataActivity) <- "activityId"
featureNames        <- read.table(file.path(path, "features.txt"), header = FALSE)
names(dataFeatures) <- featureNames$V2
setAllInOne <- cbind(dataSubject, dataActivity, dataFeatures)
#Extracting Mean and STD
colNames <- colnames(setAllInOne)
# Keep the two ID columns plus every mean/std measurement. (The dots in the
# patterns are regex wildcards, so meanFreq() columns match too — this
# preserves the behaviour the original patterns expressed.)
mean_and_std <- (grepl("activityId" , colNames) |
                 grepl("subjectId" , colNames) |
                 grepl("mean.." , colNames) |
                 grepl("std.." , colNames)
                )
setForMeanAndStd <- setAllInOne[ , mean_and_std == TRUE]
#Descriptive Names
# BUG FIX: `activityLabels` was referenced but never read; load the
# activityId -> activity-name lookup shipped with the dataset.
activityLabels <- read.table(file.path(path, "activity_labels.txt"), header = FALSE,
                             col.names = c("activityId", "activityType"))
setWithActivityNames <- merge(setForMeanAndStd, activityLabels,
                              by='activityId',
                              all.x=TRUE)
|
/run_analysis.R
|
no_license
|
mmlombardi/Getting-and-Cleaning-Data-Course-Project
|
R
| false
| false
| 1,736
|
r
|
# Getting & Cleaning Data course project: download the UCI HAR dataset,
# merge the train/test splits, and keep the mean/std measurements.
setwd("C:/Users/malombardi/Desktop/Coursera/")
if(!file.exists("project2")) {
dir.create("project2")
}
fileUrl<-"https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl,destfile="./project2/project2.zip")
dateDownloaded<-date()
dateDownloaded
unzip(zipfile="./project2/project2.zip",exdir="./project2")
path<-file.path("./project2", "UCI HAR Dataset")
files<-list.files(path,recursive=TRUE)
files
#Merge the training and the test sets
dataActivityTest <- read.table(file.path(path, "test" , "Y_test.txt" ),header = FALSE)
dataActivityTrain <- read.table(file.path(path, "train", "Y_train.txt"),header = FALSE)
dataSubjectTrain <- read.table(file.path(path, "train", "subject_train.txt"),header = FALSE)
dataSubjectTest <- read.table(file.path(path, "test" , "subject_test.txt"),header = FALSE)
# NOTE(review): `path_rf` is never defined in this script — these two reads
# will fail; it almost certainly should be `path`.
dataFeaturesTest <- read.table(file.path(path_rf, "test" , "X_test.txt" ),header = FALSE)
dataFeaturesTrain <- read.table(file.path(path_rf, "train", "X_train.txt"),header = FALSE)
#Merge
dataSubject <- rbind(dataSubjectTrain, dataSubjectTest)
dataActivity<- rbind(dataActivityTrain, dataActivityTest)
dataFeatures<- rbind(dataFeaturesTrain, dataFeaturesTest)
#Extracting Mean and STD
# NOTE(review): `setAllInOne` is referenced here but never created — a
# cbind of subject/activity/features (with real column names) is missing.
colNames <- colnames(setAllInOne)
mean_and_std <- (grepl("activityId" , colNames) |
grepl("subjectId" , colNames) |
grepl("mean.." , colNames) |
grepl("std.." , colNames)
)
setForMeanAndStd <- setAllInOne[ , mean_and_std == TRUE]
#Descriptive Names
# NOTE(review): `activityLabels` is also undefined; it should be read from
# activity_labels.txt before this merge.
setWithActivityNames <- merge(setForMeanAndStd, activityLabels,
by='activityId',
all.x=TRUE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Path-class.R
\name{Path$show}
\alias{Path$show}
\title{Show the entire path as a string}
\value{
the path as a string
}
\description{
Returns the path as a string
}
\seealso{
Other Path:
\code{\link{Path$..}},
\code{\link{Path$.}},
\code{\link{Path$J}},
\code{\link{Path$dir}},
\code{\link{Path$join}},
\code{\link{Path$name}},
\code{\link{Path$new}},
\code{\link{Path$parent}},
\code{\link{Path}},
\code{\link{\%//\%}()}
}
\concept{Path}
|
/man/Path-cash-show.Rd
|
permissive
|
strazto/pathlibr
|
R
| false
| true
| 518
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Path-class.R
\name{Path$show}
\alias{Path$show}
\title{Show the entire path as a string}
\value{
the path as a string
}
\description{
Returns the path as a string
}
\seealso{
Other Path:
\code{\link{Path$..}},
\code{\link{Path$.}},
\code{\link{Path$J}},
\code{\link{Path$dir}},
\code{\link{Path$join}},
\code{\link{Path$name}},
\code{\link{Path$new}},
\code{\link{Path$parent}},
\code{\link{Path}},
\code{\link{\%//\%}()}
}
\concept{Path}
|
##########################################################
#
# HANJO ODENDAAL
# hanjo.oden@gmail.com
# www.daeconomist.com
# @UbuntR314
# https://github.com/HanjoStudy
#
#
# ██████╗ ███████╗███████╗██╗ ███████╗███╗ ██╗██╗██╗ ██╗███╗ ███╗
# ██╔══██╗██╔════╝██╔════╝██║ ██╔════╝████╗ ██║██║██║ ██║████╗ ████║
# ██████╔╝███████╗█████╗ ██║ █████╗ ██╔██╗ ██║██║██║ ██║██╔████╔██║
# ██╔══██╗╚════██║██╔══╝ ██║ ██╔══╝ ██║╚██╗██║██║██║ ██║██║╚██╔╝██║
# ██║ ██║███████║███████╗███████╗███████╗██║ ╚████║██║╚██████╔╝██║ ╚═╝ ██║
# ╚═╝ ╚═╝╚══════╝╚══════╝╚══════╝╚══════╝╚═╝ ╚═══╝╚═╝ ╚═════╝ ╚═╝ ╚═╝
#
# Last update: July 2018
#
##########################################################
# By the end of the session I want you to be comfortable with
#
# * Connecting to RSelenium
# - Understand basic docker commands
# * Be able to construct a scraper that
# - navigates
# - scrolls
# - interacts with DOM
# - build a scraper framework snippet
# * Use screenshots
# -------------------------------------
# Why we use RSelenium
# -------------------------------------
# RSelenium allows you to carry out unit testing and regression testing on your webapps and webpages across a range of browser/OS combinations
# > Selenium makes our task easy as it can scrape complicated webpages with dynamic content
# > "Human-like" behaviour such as clicking and scrolling
# > FINALLY a stable server instance through docker!
# > The joy when you finally get it working!
# Getting the old boy started
# CRAN recently removed `RSelenium` from the repo, thus it is even more difficult to get the your `Selenium` instance up and running in `R`
# We will be using `devtools` to install the necessary dependencies from `github`
devtools::install_github("johndharrison/binman")
devtools::install_github("johndharrison/wdman")
devtools::install_github("ropensci/RSelenium")
# Once you have installed all the packages, remember to load `RSelenium` into your workspace
library(RSelenium)
library(rvest)
library(tidyverse)
# -------------------------------------
# Turning the ignition (docker style)
# -------------------------------------
# RSelenium is notorius for instability and compatibility issues. It is thus amazing that they now have a docker image for headless webdrivers. Running a docker container standardises the build across OS’s and removes many of the issues user may have relating to JAVA/browser version/selenium version
# > Offers improved stability
# > Greater ease in setting up the Selenium server
# > Quick up and down
# Get your environment setup
# sudo groupadd docker
# sudo usermod -aG docker $USER
# sudo docker pull selenium/standalone-chrome-debug
# Starting your Selenium Server i debug
# docker run --name chrome -v /dev/shm:/dev/shm -d -p 4445:4444 -p 5901:5900 selenium/standalone-chrome-debug:latest
# add swap if needed:
# sudo fallocate -l 3G /swapfile
# sudo chmod 600 /swapfile
# sudo mkswap /swapfile
# sudo swapon /swapfile
# sudo cp /etc/fstab /etc/fstab.bak
# sudo docker ps
# * `-name` name your container, otherwise docker will ;-)
# * `-v` mount volume
# * `-d` detached mode
# * `-p` port mapping (external:internal)
# * if on external server: `127.0.0.1:port:port`
# Attach your viewport (TightVNC & Vinagre)
# We can use Virtual Network Computing (VNC) viewers to view what is happening
# Finally - RSelenium is operational
# * Quick overview of the tools you will be using
# * Useful functions written in javascript that I find useful
# * Obsure and fun functions
# * Combine it all into a case study
# -------------------------------------
# Open and navigate
# -------------------------------------
library(RSelenium)
# This command sets up a list of the parameters we are going to send to selenium to kick off
remDr <- remoteDriver(remoteServerAddr = "192.168.99.100",
port = 4445L,
browser = "chrome")
# Notice the strange notation? Thats because of Java object.method
remDr$open()
# Use method navigate to drive your browser around
remDr$navigate("http://www.google.com")
remDr$navigate("http://www.bing.com")
pg <- remDr$navigate("https://en.wikipedia.org/wiki/Rugby_World_Cup")
# Use methods back and forward to jump between pages
remDr$goBack()
remDr$goForward()
# -------------------------------------
# Using keys and Scrolling
# -------------------------------------
# We can send various keys to the Selenium
rs_keynames <- RSelenium:::selKeys %>% names()
remDr$navigate("https://en.wikipedia.org/wiki/Rugby_World_Cup")
webElem <- remDr$findElement("css", "body")
webElem$sendKeysToElement( list
(key = 'page_down' ) )
# Note the notation of the command object$method(list = "command)
webElem$sendKeysToElement(list(key = "page_down"))
webElem$sendKeysToElement(list(key = "page_up"))
webElem$sendKeysToElement(list(key = "home"))
# We also send Javascript to the page - this becomes important if you want to know how far down you have scrolled...
remDr$executeScript("return window.scrollY", args = list(1))
remDr$executeScript("return document.body.scrollHeight", args = list(1))
remDr$executeScript("return window.innerHeight", args = list(1))
remDr$executeScript("return window.innerWidth", args = list(1))
webElem$sendKeysToElement(list(key = "home"))
webElem$sendKeysToElement(list(key = "end"))
# -------------------------------------
# Interacting with the DOM
# -------------------------------------
# The DOM stands for the Document Object Model. It is a cross-platform and language-independent convention for representing and interacting with objects in HTML, XHTML and XML documents. To get the whole DOM:
remDr$getPageSource() %>% .[[1]] %>% read_html()
# To interact with the DOM, we will use the `findElement` method:
# > Search by id, class, selector, xpath
remDr$navigate("http://www.google.com/")
# This is equivalent to html_nodes
webElem <- remDr$findElement(using = 'class', "gsfi")
webElem$highlightElement()
# Having identified the element we want to interact with, we have a couple of methods that we can apply to the object:
webElem$clickElement()
webElem$click(2)
# Cannot interact with objects not on screen
remDr$mouseMoveToLocation(webElement = webElem)
webElem$sendKeysToActiveElement(list(key = 'down_arrow', key = 'down_arrow', key = 'enter'))
webElem$sendKeysToActiveElement(list("Hallo World", key = 'enter'))
# -------------------------------------
# Nice to have functions
# -------------------------------------
remDr$maxWindowSize()
remDr$getTitle()
remDr$screenshot(display = TRUE)
b64out<- remDr$screenshot()
writeBin(RCurl::base64Decode(b64out, "raw"), 'screenshot.png')
# Scroll into view
remDr$executeScript("arguments[0].scrollIntoView(true);", args = list(webElem))
# Building a RSelenium pipe function
# RSelenium has 2 types of commands:
#
# * Those with side-effects (action)
# * Those that returns information we want to push into `rvest`
#
# For the 1st case, we would want to return the driver object as the state of it has changed
# Pipe-friendly wrapper around the navigate action: drives the browser to
# `site` (a side effect on the driver) and hands the driver object back so
# calls can be chained with %>% or |>.
navi <- function(remDr, site = "www.google.com") {
  remDr$navigate(site)
  remDr
}
remDr %>% navi(., "www.google.com")
# -------------------------------------
# Case Study: A Tour of the winelands!
# -------------------------------------
## Extending your wine knowledge
# Australia is famous for its wines! Lets find out a little bit more about the wine region
#
# > * Go to vivino.com
# > * Collect 2 pages worth of information
# > - Name of wine farm, name of wine, star rating, count of ratings
# Display all the wine
library(RSelenium)
remDr <- remoteDriver(remoteServerAddr = "192.168.99.100",
port = 4445L,
browser = "chrome")
remDr$open()
remDr$navigate("https://www.vivino.com/")
# This piece isolates the button we need to click on to explore wines
webElem <- remDr$findElement("css", '.explore-widget__main__submit__button')
webElem$highlightElement()
webElem$clickElement()
# Bring `webElem` into the browser viewport with a JavaScript scrollIntoView
# call, then flash it so a human watching the session (e.g. over VNC) can
# see which node was targeted.
scrollTo <- function(remDr, webElem) {
  scroll_js <- "arguments[0].scrollIntoView(true);"
  remDr$executeScript(scroll_js, args = list(webElem))
  webElem$highlightElement()
}
# I use xpath here, just because I want to illustrates the handy function: starts with
# I am trying to isolate where I can fill in the name of the region I am looking to search
webElem <- remDr$findElements("xpath", '//input[starts-with(@class, "filterPills")]')
scrollTo(remDr, webElem[[2]])
webElem[[2]]$clickElement()
webElem[[2]]$sendKeysToActiveElement(list("Australia"))
webElem <- remDr$findElements("css", '.pill__inner--7gfKn')
# How I identify the correct webelem to click on
country_elem <- webElem %>%
sapply(., function(x) x$getElementText()) %>%
reduce(c) %>%
grepl("Australia", .) %>%
which
scrollTo(remDr, webElem[[country_elem]])
webElem[[country_elem]]$clickElement()
# Some pages need you to scroll to the bottom in order for more content to load. Vivino is one of them
remDr$executeScript("return window.scrollY", args = list(1))
remDr$executeScript("return document.body.scrollHeight", args = list(1))
remDr$sendKeysToActiveElement(list(key = "end"))
remDr$executeScript("return window.scrollY", args = list(1))
# Now we done with RSelenium, on to rvest!
pg <- remDr$getPageSource() %>% .[[1]] %>%
read_html()
# Scrape one vivino results page (an xml2/rvest document) into a data frame
# with one row per wine card: farm name, wine name, star rating, and the
# number of ratings.
collect_info <- function(pg){
  grab_text <- function(selector) {
    pg %>% html_nodes(selector) %>% html_text()
  }
  farm <- grab_text(".vintageTitle__winery--2YoIr")
  wine <- grab_text(".vintageTitle__wine--U7t9G")
  rating <- as.numeric(grab_text("span.vivinoRating__rating--4Oti3"))
  # Rating counts render like "1,234 ratings": drop every non-digit first.
  rating_count <- as.numeric(gsub("[^0-9]", "",
                                  grab_text("span.vivinoRating__ratingCount--NmiVg")))
  data.frame(farm, wine, rating, rating_count)
}
collect_info(pg)
collect_info(pg)
# ------------------------------#
# ███████╗███╗ ██╗██████╗ #
# ██╔════╝████╗ ██║██╔══██╗ #
# █████╗ ██╔██╗ ██║██║ ██║ #
# ██╔══╝ ██║╚██╗██║██║ ██║ #
# ███████╗██║ ╚████║██████╔╝ #
# ╚══════╝╚═╝ ╚═══╝╚═════╝ #
# ------------------------------#
|
/RSelenium.R
|
no_license
|
lin-learns/RSelenium_UseR
|
R
| false
| false
| 11,349
|
r
|
##########################################################
#
# HANJO ODENDAAL
# hanjo.oden@gmail.com
# www.daeconomist.com
# @UbuntR314
# https://github.com/HanjoStudy
#
#
# ██████╗ ███████╗███████╗██╗ ███████╗███╗ ██╗██╗██╗ ██╗███╗ ███╗
# ██╔══██╗██╔════╝██╔════╝██║ ██╔════╝████╗ ██║██║██║ ██║████╗ ████║
# ██████╔╝███████╗█████╗ ██║ █████╗ ██╔██╗ ██║██║██║ ██║██╔████╔██║
# ██╔══██╗╚════██║██╔══╝ ██║ ██╔══╝ ██║╚██╗██║██║██║ ██║██║╚██╔╝██║
# ██║ ██║███████║███████╗███████╗███████╗██║ ╚████║██║╚██████╔╝██║ ╚═╝ ██║
# ╚═╝ ╚═╝╚══════╝╚══════╝╚══════╝╚══════╝╚═╝ ╚═══╝╚═╝ ╚═════╝ ╚═╝ ╚═╝
#
# Last update: July 2018
#
##########################################################
# By the end of the session I want you to be comfortable with
#
# * Connecting to RSelenium
# - Understand basic docker commands
# * Be able to construct a scraper that
# - navigates
# - scrolls
# - interacts with DOM
# - build a scraper framework snippet
# * Use screenshots
# -------------------------------------
# Why we use RSelenium
# -------------------------------------
# RSelenium allows you to carry out unit testing and regression testing on your webapps and webpages across a range of browser/OS combinations
# > Selenium makes our task easy as it can scrape complicated webpages with dynamic content
# > "Human-like" behaviour such as clicking and scrolling
# > FINALLY a stable server instance through docker!
# > The joy when you finally get it working!
# Getting the old boy started
# CRAN recently removed `RSelenium` from the repo, thus it is even more difficult to get the your `Selenium` instance up and running in `R`
# We will be using `devtools` to install the necessary dependencies from `github`
devtools::install_github("johndharrison/binman")
devtools::install_github("johndharrison/wdman")
devtools::install_github("ropensci/RSelenium")
# Once you have installed all the packages, remember to load `RSelenium` into your workspace
library(RSelenium)
library(rvest)
library(tidyverse)
# -------------------------------------
# Turning the ignition (docker style)
# -------------------------------------
# RSelenium is notorius for instability and compatibility issues. It is thus amazing that they now have a docker image for headless webdrivers. Running a docker container standardises the build across OS’s and removes many of the issues user may have relating to JAVA/browser version/selenium version
# > Offers improved stability
# > Greater ease in setting up the Selenium server
# > Quick up and down
# Get your environment setup
# sudo groupadd docker
# sudo usermod -aG docker $USER
# sudo docker pull selenium/standalone-chrome-debug
# Starting your Selenium Server i debug
# docker run --name chrome -v /dev/shm:/dev/shm -d -p 4445:4444 -p 5901:5900 selenium/standalone-chrome-debug:latest
# add swap if needed:
# sudo fallocate -l 3G /swapfile
# sudo chmod 600 /swapfile
# sudo mkswap /swapfile
# sudo swapon /swapfile
# sudo cp /etc/fstab /etc/fstab.bak
# sudo docker ps
# * `-name` name your container, otherwise docker will ;-)
# * `-v` mount volume
# * `-d` detached mode
# * `-p` port mapping (external:internal)
# * if on external server: `127.0.0.1:port:port`
# Attach your viewport (TightVNC & Vinagre)
# We can use Virtual Network Computing (VNC) viewers to view what is happening
# Finally - RSelenium is operational
# * Quick overview of the tools you will be using
# * Useful functions written in javascript that I find useful
# * Obsure and fun functions
# * Combine it all into a case study
# -------------------------------------
# Open and navigate
# -------------------------------------
library(RSelenium)
# This command sets up a list of the parameters we are going to send to selenium to kick off
remDr <- remoteDriver(remoteServerAddr = "192.168.99.100",
port = 4445L,
browser = "chrome")
# Notice the strange notation? Thats because of Java object.method
remDr$open()
# Use method navigate to drive your browser around
remDr$navigate("http://www.google.com")
remDr$navigate("http://www.bing.com")
pg <- remDr$navigate("https://en.wikipedia.org/wiki/Rugby_World_Cup")
# Use methods back and forward to jump between pages
remDr$goBack()
remDr$goForward()
# -------------------------------------
# Using keys and Scrolling
# -------------------------------------
# We can send various keys to the Selenium
rs_keynames <- RSelenium:::selKeys %>% names()
remDr$navigate("https://en.wikipedia.org/wiki/Rugby_World_Cup")
webElem <- remDr$findElement("css", "body")
webElem$sendKeysToElement( list
(key = 'page_down' ) )
# Note the notation of the command object$method(list = "command)
webElem$sendKeysToElement(list(key = "page_down"))
webElem$sendKeysToElement(list(key = "page_up"))
webElem$sendKeysToElement(list(key = "home"))
# We also send Javascript to the page - this becomes important if you want to know how far down you have scrolled...
remDr$executeScript("return window.scrollY", args = list(1))
remDr$executeScript("return document.body.scrollHeight", args = list(1))
remDr$executeScript("return window.innerHeight", args = list(1))
remDr$executeScript("return window.innerWidth", args = list(1))
webElem$sendKeysToElement(list(key = "home"))
webElem$sendKeysToElement(list(key = "end"))
# -------------------------------------
# Interacting with the DOM
# -------------------------------------
# The DOM stands for the Document Object Model. It is a cross-platform and language-independent convention for representing and interacting with objects in HTML, XHTML and XML documents. To get the whole DOM:
remDr$getPageSource() %>% .[[1]] %>% read_html()
# To interact with the DOM, we will use the `findElement` method:
# > Search by id, class, selector, xpath
remDr$navigate("http://www.google.com/")
# This is equivalent to html_nodes
webElem <- remDr$findElement(using = 'class', "gsfi")
webElem$highlightElement()
# Having identified the element we want to interact with, we have a couple of methods that we can apply to the object:
webElem$clickElement()
webElem$click(2)
# Cannot interact with objects not on screen
remDr$mouseMoveToLocation(webElement = webElem)
webElem$sendKeysToActiveElement(list(key = 'down_arrow', key = 'down_arrow', key = 'enter'))
webElem$sendKeysToActiveElement(list("Hallo World", key = 'enter'))
# -------------------------------------
# Nice to have functions
# -------------------------------------
remDr$maxWindowSize()
remDr$getTitle()
remDr$screenshot(display = TRUE)
b64out<- remDr$screenshot()
writeBin(RCurl::base64Decode(b64out, "raw"), 'screenshot.png')
# Scroll into view
remDr$executeScript("arguments[0].scrollIntoView(true);", args = list(webElem))
# Building a RSelenium pipe function
# RSelenium has 2 types of commands:
#
# * Those with side-effects (action)
# * Those that returns information we want to push into `rvest`
#
# For the 1st case, we would want to return the driver object as the state of it has changed
# Pipe-friendly navigation helper: drives the browser to `site` (a side
# effect on the driver) and returns the driver so calls can be chained.
navi <- function(remDr, site = "www.google.com"){
remDr$navigate(site)
return(remDr)
}
remDr %>% navi(., "www.google.com")
# -------------------------------------
# Case Study: A Tour of the winelands!
# -------------------------------------
## Extending your wine knowledge
# Australia is famous for its wines! Lets find out a little bit more about the wine region
#
# > * Go to vivino.com
# > * Collect 2 pages worth of information
# > - Name of wine farm, name of wine, star rating, count of ratings
# Display all the wine
library(RSelenium)
remDr <- remoteDriver(remoteServerAddr = "192.168.99.100",
port = 4445L,
browser = "chrome")
remDr$open()
remDr$navigate("https://www.vivino.com/")
# This piece isolates the button we need to click on to explore wines
webElem <- remDr$findElement("css", '.explore-widget__main__submit__button')
webElem$highlightElement()
webElem$clickElement()
# Scroll the browser so `webElem` is in view (JS scrollIntoView), then
# highlight it — useful when watching the session through a VNC viewer.
scrollTo <- function(remDr, webElem){
remDr$executeScript("arguments[0].scrollIntoView(true);", args = list(webElem))
webElem$highlightElement()
}
# I use xpath here, just because I want to illustrates the handy function: starts with
# I am trying to isolate where I can fill in the name of the region I am looking to search
webElem <- remDr$findElements("xpath", '//input[starts-with(@class, "filterPills")]')
scrollTo(remDr, webElem[[2]])
webElem[[2]]$clickElement()
webElem[[2]]$sendKeysToActiveElement(list("Australia"))
webElem <- remDr$findElements("css", '.pill__inner--7gfKn')
# How I identify the correct webelem to click on
country_elem <- webElem %>%
sapply(., function(x) x$getElementText()) %>%
reduce(c) %>%
grepl("Australia", .) %>%
which
scrollTo(remDr, webElem[[country_elem]])
webElem[[country_elem]]$clickElement()
# Some pages need you to scroll to the bottom in order for more content to load. Vivino is one of them
remDr$executeScript("return window.scrollY", args = list(1))
remDr$executeScript("return document.body.scrollHeight", args = list(1))
remDr$sendKeysToActiveElement(list(key = "end"))
remDr$executeScript("return window.scrollY", args = list(1))
# Now we done with RSelenium, on to rvest!
pg <- remDr$getPageSource() %>% .[[1]] %>%
read_html()
# Parse one vivino results page (an xml2/rvest document) into a data frame
# with one row per wine card: farm, wine, star rating, and rating count.
collect_info <- function(pg){
farm <- pg %>% html_nodes(".vintageTitle__winery--2YoIr") %>%
html_text()
wine <- pg %>% html_nodes(".vintageTitle__wine--U7t9G") %>%
html_text()
rating <- pg %>% html_nodes("span.vivinoRating__rating--4Oti3") %>%
html_text() %>%
as.numeric
# Rating counts render like "1,234 ratings": strip non-digits, then coerce.
rating_count <- pg %>% html_nodes("span.vivinoRating__ratingCount--NmiVg") %>%
html_text() %>%
gsub("[^0-9]", "",.) %>%
as.numeric
data.frame(farm, wine, rating, rating_count)
}
collect_info(pg)
collect_info(pg)
# ------------------------------#
# ███████╗███╗ ██╗██████╗ #
# ██╔════╝████╗ ██║██╔══██╗ #
# █████╗ ██╔██╗ ██║██║ ██║ #
# ██╔══╝ ██║╚██╗██║██║ ██║ #
# ███████╗██║ ╚████║██████╔╝ #
# ╚══════╝╚═╝ ╚═══╝╚═════╝ #
# ------------------------------#
|
# Hand-rolled two-sample t test: is mean MonthlyIncome different between the
# two Gender groups in the HR data? (Unequal-variance statistic with a
# conservative df = min(n1, n2) - 1.)
rm(list=ls())
dat <- read.table("HR.txt", header = TRUE, sep = "\t", row.names = 1)
f <- table(dat$Gender)
n1 <- f[1] # number in first Gender level (male)
n2 <- f[2] # number in second Gender level (female)
M <- tapply(dat$MonthlyIncome, dat$Gender, mean)
S <- tapply(dat$MonthlyIncome, dat$Gender, sd)
xbar1 <- M[1]
xbar2 <- M[2]
s1 <- S[1]
s2 <- S[2]
# Conservative degrees of freedom (cruder than Welch-Satterthwaite).
dfs <- min(n1 - 1, n2 - 1)
#calculate test-statistic
tdata <- (xbar1 - xbar2)/sqrt((s1^2/n1) + (s2^2/n2))
#calculate the p-value (2-tailed)
# BUG FIX: the original used pt(tdata, ...) directly, which produces a
# p-value greater than 1 whenever tdata is negative (i.e. whenever the
# second group's mean is larger). A two-tailed p-value must use |t|.
pvalue <- 2*pt(abs(tdata), df = dfs, lower.tail = FALSE)
tdata; pvalue
#high p-value indicates, fail to reject null hyp ie., there is no
#significant difference in means
|
/day2_t_Test_Male_Female_Income.R
|
no_license
|
PaulSaikat/R-Programming
|
R
| false
| false
| 591
|
r
|
# Hand-rolled two-sample t test: is mean MonthlyIncome different between the
# two Gender groups in the HR data? Uses an unequal-variance statistic with
# a conservative df = min(n1, n2) - 1.
rm(list=ls())
dat<-read.table("HR.txt",header = TRUE,sep = "\t",row.names = 1)
f<-table(dat$Gender)
n1<-f[1]#number of male
n2<-f[2]#number of female
M<-tapply(dat$MonthlyIncome,dat$Gender,mean)
S<-tapply(dat$MonthlyIncome,dat$Gender,sd)
xbar1<-M[1]
xbar2<-M[2]
s1<-S[1]
s2<-S[2]
dfs<-min(n1-1,n2-1)
#calculate test-statistic
tdata<-(xbar1-xbar2)/sqrt((s1^2/n1)+(s2^2/n2))
#calculate the p-value (2-tailed)
# NOTE(review): pt(tdata, ...) without abs() yields a p-value > 1 whenever
# tdata is negative — should be 2*pt(abs(tdata), df=dfs, lower.tail=FALSE).
pvalue<-2*pt(tdata,df=dfs,lower.tail = FALSE)
tdata;pvalue
#high p-value indicates, fail to reject null hyp ie., there is no
#significant difference in means
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggplot2_themes.R
\name{doc_new}
\alias{doc_new}
\title{Create New Document}
\usage{
doc_new(path, type = c("doc", "slides"))
}
\arguments{
\item{path}{Path to the location of the new document}
\item{type}{Type of document to create}
}
\description{
Creates a new R Markdown document of the requested type. There are currently
three templates, one for reports where the default is HTML based on the HTML
vignette template, and another with a Moffitt-styled \pkg{xaringan} theme. In
all cases, the document and supporting files are added to a directory with
the name given by the file.
}
\examples{
\dontrun{
doc_new("my_report.Rmd", "doc")
doc_new("my_slides.Rmd", "slides")
}
}
|
/man/doc_new.Rd
|
permissive
|
GerkeLab/grkmisc
|
R
| false
| true
| 758
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggplot2_themes.R
\name{doc_new}
\alias{doc_new}
\title{Create New Document}
\usage{
doc_new(path, type = c("doc", "slides"))
}
\arguments{
\item{path}{Path to the location of the new document}
\item{type}{Type of document to create}
}
\description{
Creates a new R Markdown document of the requested type. There are currently
three templates, one for reports where the default is HTML based on the HTML
vignette template, and another with a Moffitt-styled \pkg{xaringan} theme. In
all cases, the document and supporting files are added to a directory with
the name given by the file.
}
\examples{
\dontrun{
doc_new("my_report.Rmd", "doc")
doc_new("my_slides.Rmd", "slides")
}
}
|
\name{removepoints}
\alias{removepoints}
\title{
Remove a given number of patches from the landscape
}
\description{
Randomly removes a given number of patches from the landscape.
}
\usage{
removepoints(rl, nr)
}
\arguments{
\item{rl}{
Object of class 'landscape'.
}
\item{nr}{
Number of patches to remove.
}
}
\value{
Returns an object of class 'landscape'.
}
\author{
Frederico Mestre and Fernando Canovas
}
\seealso{
\code{\link{rland.graph}}, \code{\link{addpoints}}
}
\examples{
data(rland)
#Checking the number of patches in the starting landscape:
rland$number.patches
#60
#Removing 10 patches from the landscape:
rl1 <- removepoints(rl=rland, nr=10)
#Checking the number of patches in the output landscape:
rl1$number.patches
#50
}
|
/MetaLandSim/man/removepoints.Rd
|
no_license
|
albrizre/spatstat.revdep
|
R
| false
| false
| 754
|
rd
|
\name{removepoints}
\alias{removepoints}
\title{
Remove a given number of patches from the landscape
}
\description{
Randomly removes a given number of patches from the landscape.
}
\usage{
removepoints(rl, nr)
}
\arguments{
\item{rl}{
Object of class 'landscape'.
}
\item{nr}{
Number of patches to remove.
}
}
\value{
Returns an object of class 'landscape'.
}
\author{
Frederico Mestre and Fernando Canovas
}
\seealso{
\code{\link{rland.graph}}, \code{\link{addpoints}}
}
\examples{
data(rland)
#Checking the number of patches in the starting landscape:
rland$number.patches
#60
#Removing 10 patches from the landscape:
rl1 <- removepoints(rl=rland, nr=10)
#Checking the number of patches in the output landscape:
rl1$number.patches
#50
}
|
# Pick the platform-specific location of the plotting helper (crearGrafico)
# and source it. NOTE(review): both paths are user-specific absolute paths --
# this script only runs on the original author's machines.
switch(Sys.info()[['sysname']],
Windows= {#WINDOWS
source("C:/Users/papa/Dropbox/Desarrollo/BasicReinforcementLearningAlgorithm/src/Analysis/RLGraficar.R")
},
Darwin = {# MAC
source("/Users/pedro/Google Drive/Desarrollo/BasicReinforcementLearningAlgorithm/src/Analysis/RLGraficar.R")
}
)
# Generate the four standard evaluation plots for the SARSA epsilon-greedy
# 5x5 GameWorld experiment: average reward, average steps on wins, action
# percentage, and win probability -- all as a function of episode number.
crearGrafico(file.path("/Users","pedro","Google Drive","Desarrollo","BasicReinforcementLearningAlgorithm","Experimentos","GameProblemWithMap","newRewardFunction_5x5"), c("GameWorldSimpleMap_SARSA_E_GREEDY_CHANGING_TEMPORALLY_t100.0_ep150.0_lRate1.0_dFactor0.7_eG0.5_0.05_5x5","QValues_GameWorldSimpleMap_SARSA_E_GREEDY_CHANGING_TEMPORALLY_t100.0_ep150.0_lRate1.0_dFactor0.7_eG0.5_0.05_5x5"),"xEpisode_yAverageReward")
crearGrafico(file.path("/Users","pedro","Google Drive","Desarrollo","BasicReinforcementLearningAlgorithm","Experimentos","GameProblemWithMap","newRewardFunction_5x5"), c("GameWorldSimpleMap_SARSA_E_GREEDY_CHANGING_TEMPORALLY_t100.0_ep150.0_lRate1.0_dFactor0.7_eG0.5_0.05_5x5","QValues_GameWorldSimpleMap_SARSA_E_GREEDY_CHANGING_TEMPORALLY_t100.0_ep150.0_lRate1.0_dFactor0.7_eG0.5_0.05_5x5"),"xEpisode_yAverageStepsWhenWin")
crearGrafico(file.path("/Users","pedro","Google Drive","Desarrollo","BasicReinforcementLearningAlgorithm","Experimentos","GameProblemWithMap","newRewardFunction_5x5"), c("GameWorldSimpleMap_SARSA_E_GREEDY_CHANGING_TEMPORALLY_t100.0_ep150.0_lRate1.0_dFactor0.7_eG0.5_0.05_5x5","QValues_GameWorldSimpleMap_SARSA_E_GREEDY_CHANGING_TEMPORALLY_t100.0_ep150.0_lRate1.0_dFactor0.7_eG0.5_0.05_5x5"),"xEpisode_yPercentageOfActions")
crearGrafico(file.path("/Users","pedro","Google Drive","Desarrollo","BasicReinforcementLearningAlgorithm","Experimentos","GameProblemWithMap","newRewardFunction_5x5"), c("GameWorldSimpleMap_SARSA_E_GREEDY_CHANGING_TEMPORALLY_t100.0_ep150.0_lRate1.0_dFactor0.7_eG0.5_0.05_5x5","QValues_GameWorldSimpleMap_SARSA_E_GREEDY_CHANGING_TEMPORALLY_t100.0_ep150.0_lRate1.0_dFactor0.7_eG0.5_0.05_5x5"),"xEpisode_yWinProbability")
|
/ConnectingRToJavaTest/src/Analysis/RLMain.R
|
no_license
|
PedroReyes/ReinforcementLearning
|
R
| false
| false
| 2,020
|
r
|
# Source the plotting helper (crearGrafico) from a platform-dependent,
# user-specific absolute path, then plot the four evaluation metrics of the
# SARSA epsilon-greedy 5x5 experiment per episode.
switch(Sys.info()[['sysname']],
Windows= {#WINDOWS
source("C:/Users/papa/Dropbox/Desarrollo/BasicReinforcementLearningAlgorithm/src/Analysis/RLGraficar.R")
},
Darwin = {# MAC
source("/Users/pedro/Google Drive/Desarrollo/BasicReinforcementLearningAlgorithm/src/Analysis/RLGraficar.R")
}
)
crearGrafico(file.path("/Users","pedro","Google Drive","Desarrollo","BasicReinforcementLearningAlgorithm","Experimentos","GameProblemWithMap","newRewardFunction_5x5"), c("GameWorldSimpleMap_SARSA_E_GREEDY_CHANGING_TEMPORALLY_t100.0_ep150.0_lRate1.0_dFactor0.7_eG0.5_0.05_5x5","QValues_GameWorldSimpleMap_SARSA_E_GREEDY_CHANGING_TEMPORALLY_t100.0_ep150.0_lRate1.0_dFactor0.7_eG0.5_0.05_5x5"),"xEpisode_yAverageReward")
crearGrafico(file.path("/Users","pedro","Google Drive","Desarrollo","BasicReinforcementLearningAlgorithm","Experimentos","GameProblemWithMap","newRewardFunction_5x5"), c("GameWorldSimpleMap_SARSA_E_GREEDY_CHANGING_TEMPORALLY_t100.0_ep150.0_lRate1.0_dFactor0.7_eG0.5_0.05_5x5","QValues_GameWorldSimpleMap_SARSA_E_GREEDY_CHANGING_TEMPORALLY_t100.0_ep150.0_lRate1.0_dFactor0.7_eG0.5_0.05_5x5"),"xEpisode_yAverageStepsWhenWin")
crearGrafico(file.path("/Users","pedro","Google Drive","Desarrollo","BasicReinforcementLearningAlgorithm","Experimentos","GameProblemWithMap","newRewardFunction_5x5"), c("GameWorldSimpleMap_SARSA_E_GREEDY_CHANGING_TEMPORALLY_t100.0_ep150.0_lRate1.0_dFactor0.7_eG0.5_0.05_5x5","QValues_GameWorldSimpleMap_SARSA_E_GREEDY_CHANGING_TEMPORALLY_t100.0_ep150.0_lRate1.0_dFactor0.7_eG0.5_0.05_5x5"),"xEpisode_yPercentageOfActions")
crearGrafico(file.path("/Users","pedro","Google Drive","Desarrollo","BasicReinforcementLearningAlgorithm","Experimentos","GameProblemWithMap","newRewardFunction_5x5"), c("GameWorldSimpleMap_SARSA_E_GREEDY_CHANGING_TEMPORALLY_t100.0_ep150.0_lRate1.0_dFactor0.7_eG0.5_0.05_5x5","QValues_GameWorldSimpleMap_SARSA_E_GREEDY_CHANGING_TEMPORALLY_t100.0_ep150.0_lRate1.0_dFactor0.7_eG0.5_0.05_5x5"),"xEpisode_yWinProbability")
|
# Run and deploy the Hilsa fish research Shiny dashboard.
# NOTE(review): absolute user-specific paths -- adjust before running
# elsewhere; deployApp() publishes to shinyapps.io using stored credentials.
library(shiny)
library(rsconnect)
runApp("~/Google Drive/Xunyang_Fall16Spring17/Hilsa fish research/3.Analysis/Rscript/shinyapp")
deployApp("~/Google Drive/Xunyang_Fall16Spring17/Hilsa fish research/3.Analysis/Rscript/shinyapp")
runApp("~/Google Drive/Xunyang_Fall16Spring17/Hilsa fish research/3.Analysis/Rscript/Interactive Map")
|
/HilsaFish/Dashboard/Shiny.R
|
no_license
|
yc3207/CU_Research
|
R
| false
| false
| 332
|
r
|
# Launch the main Shiny app locally, publish it via rsconnect, then launch
# the interactive-map app (user-specific absolute paths).
library(shiny)
library(rsconnect)
runApp("~/Google Drive/Xunyang_Fall16Spring17/Hilsa fish research/3.Analysis/Rscript/shinyapp")
deployApp("~/Google Drive/Xunyang_Fall16Spring17/Hilsa fish research/3.Analysis/Rscript/shinyapp")
runApp("~/Google Drive/Xunyang_Fall16Spring17/Hilsa fish research/3.Analysis/Rscript/Interactive Map")
|
# =============================================================================
# Description: Assignment 1
#
#
#
# Author: Bruno Hunkeler
# Date: 04.11.2015
# =============================================================================
# references to functions
source("corr.R")
source("complete.R")
# library references
# library(miscTools)
# =============================================================================
# Assignment 1/3 - correlation
# =============================================================================
directory <- "specdata"
# =============================================================================
# Assignment 1/2 - correlation
# =============================================================================
# Test case 1: correlations for monitors with more than 150 complete cases.
correlation_TC1 <- corr(directory, 150);
head(correlation_TC1)
summary(correlation_TC1)
length(correlation_TC1)
# Test case 2: threshold of 400 complete cases.
correlation_TC2 <- corr(directory, 400);
head(correlation_TC2)
summary(correlation_TC2)
length(correlation_TC2)
# Test case 3: threshold of 5000 -- expected to return an empty vector.
correlation_TC3 <- corr(directory, 5000);
head(correlation_TC3)
summary(correlation_TC3)
length(correlation_TC3)
# Test case 4: default threshold (all monitors).
correlation_TC4 <- corr(directory);
head(correlation_TC4)
summary(correlation_TC4)
length(correlation_TC4)
# =============================================================================
# Coursera Test Cases (expected reference output, kept for comparison)
# =============================================================================
# cr <- corr("specdata", 150)
# head(cr)
## [1] -0.01896 -0.14051 -0.04390 -0.06816 -0.12351 -0.07589
# summary(cr)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## -0.2110 -0.0500 0.0946 0.1250 0.2680 0.7630
# cr <- corr("specdata", 400)
# head(cr)
## [1] -0.01896 -0.04390 -0.06816 -0.07589 0.76313 -0.15783
# summary(cr)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## -0.1760 -0.0311 0.1000 0.1400 0.2680 0.7630
# cr <- corr("specdata", 5000)
# summary(cr)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
##
# length(cr)
## [1] 0
# cr <- corr("specdata")
# summary(cr)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## -1.0000 -0.0528 0.1070 0.1370 0.2780 1.0000
# length(cr)
## [1] 323
|
/Data_Science - Johns Hopkins University/002_R_Programming/week 1/Assignment_1/main_correlation.R
|
no_license
|
bhunkeler/DataScienceCoursera
|
R
| false
| false
| 2,147
|
r
|
# =============================================================================
# Description: Assignment 1
#
#
#
# Author: Bruno Hunkeler
# Date: 04.11.2015
# =============================================================================
# references to functions
source("corr.R")
source("complete.R")
# library references
# library(miscTools)
# =============================================================================
# Assignment 1/3 - correlation
# =============================================================================
directory <- "specdata"
# =============================================================================
# Assignment 1/2 - correlation
# =============================================================================
# Run corr() at several complete-case thresholds and inspect each result.
correlation_TC1 <- corr(directory, 150);
head(correlation_TC1)
summary(correlation_TC1)
length(correlation_TC1)
correlation_TC2 <- corr(directory, 400);
head(correlation_TC2)
summary(correlation_TC2)
length(correlation_TC2)
correlation_TC3 <- corr(directory, 5000);
head(correlation_TC3)
summary(correlation_TC3)
length(correlation_TC3)
correlation_TC4 <- corr(directory);
head(correlation_TC4)
summary(correlation_TC4)
length(correlation_TC4)
# =============================================================================
# Coursera Test Cases (expected reference output)
# =============================================================================
# cr <- corr("specdata", 150)
# head(cr)
## [1] -0.01896 -0.14051 -0.04390 -0.06816 -0.12351 -0.07589
# summary(cr)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## -0.2110 -0.0500 0.0946 0.1250 0.2680 0.7630
# cr <- corr("specdata", 400)
# head(cr)
## [1] -0.01896 -0.04390 -0.06816 -0.07589 0.76313 -0.15783
# summary(cr)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## -0.1760 -0.0311 0.1000 0.1400 0.2680 0.7630
# cr <- corr("specdata", 5000)
# summary(cr)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
##
# length(cr)
## [1] 0
# cr <- corr("specdata")
# summary(cr)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## -1.0000 -0.0528 0.1070 0.1370 0.2780 1.0000
# length(cr)
## [1] 323
|
# Scrape the "Last Week Tonight" episode tables from Wikipedia (tables 2-5
# map to seasons 1-4) and fuzzy-match episode titles against YouTube videos.
# NOTE(review): dplyr/tibble/fuzzyjoin verbs are used below but only rvest
# and xml2 are loaded here -- presumably dplyr/tibble are attached by an
# earlier script in the pipeline (see the commented load() below); verify
# before running standalone.
# load("lwt_image.Rdata")
library(rvest)
library(xml2)
url <- "https://en.wikipedia.org/wiki/List_of_Last_Week_Tonight_with_John_Oliver_episodes#Episodes"
# reference! http://blog.corynissen.com/2015/01/using-rvest-to-scrape-html-table.html
# Tables 2..5 on the page are seasons 1..4; each is tagged with its season
# number and assigned to a variable lwt_s01 .. lwt_s04.
for (i in 2:5) {
title <- url %>%
read_html() %>%
html_nodes(xpath = paste0('//*[@id="mw-content-text"]/table[', i, ']')) %>%
html_table()
title <- title[[1]]
title <- title %>%
bind_cols(tibble(season = rep(i - 1, each = nrow(title))))
colnames(title) <- c("abs_episode", "episode_ish","main_segment", "air_date", "viewers", "season")
assign(paste0("lwt_s0", i - 1), title)
}
# Combine seasons; rows whose episode_ish is long are segment rows, so the
# episode number is carried down from the preceding row.
episodes_wiki <- bind_rows(lwt_s01, lwt_s02, lwt_s03, lwt_s04) %>%
filter(main_segment != "TBA") %>%
mutate(episode = ifelse(nchar(episode_ish) > 3, lag(episode_ish), episode_ish))
# Self-join on (season, episode) to pair each episode row with its segment
# listing, keeping episode metadata (.y) plus the segments string (.x).
episodes_wiki <- episodes_wiki %>%
left_join(episodes_wiki, by = c("season" = "season", "episode" = "episode")) %>%
filter(episode_ish.x != episode_ish.y,
episode_ish.x != episode) %>%
select(season, episode,
abs_episode = abs_episode.y,
main_segment = main_segment.y,
air_date = air_date.y,
viewers = viewers.y,
segments = main_segment.x)
rm(lwt_s01, lwt_s02, lwt_s03, lwt_s04)
library(fuzzyjoin)
# Soundex-based fuzzy match of YouTube titles/descriptions against the
# Wikipedia main_segment names.
youtube_names <- videos %>%
select(short_title, short_desc) %>%
stringdist_left_join(episodes_wiki, by = c("short_title" = "main_segment"), method = "soundex") %>%
select(short_title, short_desc, main_segment) %>%
stringdist_left_join(episodes_wiki, by = c("short_desc" = "main_segment"), method = "soundex") %>%
select(short_title, short_desc, main_segment.x, main_segment.y) #%>%
# stringdist_left_join(episodes_wiki, by = c("short_desc" = "segments"), method = "soundex") %>%
# select(short_title, short_desc, main_segment.x, main_segment.y, segments)
# NOTE(review): the tbl_df pipeline and the bare left_join() below assign
# nothing and `tbl_df` is not defined in this file -- this looks like leftover
# exploratory code; confirm whether it should be removed.
tbl_df %>%
# stringdist_full_join(episodes_wiki, by = c("value" = "main_segment"), max_dist = 3) %>%
stringdist_left_join(episodes_wiki, by = c("value" = "main_segment"), method = "soundex") %>%
select(value, main_segment) %>%
stringdist_left_join(episodes_wiki, by = c("value" = "main_segment"), max_dist = 3) %>%
select(value, main_segment.x, main_segment.y)
left_join(episodes_wiki, by = c("value" = "main_segment"))
save.image("lwt_wiki")
|
/jo_08_wiki_scrape.R
|
no_license
|
olaTechie/John-Oliver-sentiment-analysis
|
R
| false
| false
| 2,365
|
r
|
# Duplicate copy of the Last Week Tonight Wikipedia scrape: pulls the season
# tables, normalizes episode/segment rows, and soundex-matches them against
# YouTube video titles. NOTE(review): dplyr/tibble are used but not loaded
# here, and the trailing tbl_df/left_join lines assign nothing -- verify.
# load("lwt_image.Rdata")
library(rvest)
library(xml2)
url <- "https://en.wikipedia.org/wiki/List_of_Last_Week_Tonight_with_John_Oliver_episodes#Episodes"
# reference! http://blog.corynissen.com/2015/01/using-rvest-to-scrape-html-table.html
for (i in 2:5) {
title <- url %>%
read_html() %>%
html_nodes(xpath = paste0('//*[@id="mw-content-text"]/table[', i, ']')) %>%
html_table()
title <- title[[1]]
title <- title %>%
bind_cols(tibble(season = rep(i - 1, each = nrow(title))))
colnames(title) <- c("abs_episode", "episode_ish","main_segment", "air_date", "viewers", "season")
assign(paste0("lwt_s0", i - 1), title)
}
episodes_wiki <- bind_rows(lwt_s01, lwt_s02, lwt_s03, lwt_s04) %>%
filter(main_segment != "TBA") %>%
mutate(episode = ifelse(nchar(episode_ish) > 3, lag(episode_ish), episode_ish))
episodes_wiki <- episodes_wiki %>%
left_join(episodes_wiki, by = c("season" = "season", "episode" = "episode")) %>%
filter(episode_ish.x != episode_ish.y,
episode_ish.x != episode) %>%
select(season, episode,
abs_episode = abs_episode.y,
main_segment = main_segment.y,
air_date = air_date.y,
viewers = viewers.y,
segments = main_segment.x)
rm(lwt_s01, lwt_s02, lwt_s03, lwt_s04)
library(fuzzyjoin)
youtube_names <- videos %>%
select(short_title, short_desc) %>%
stringdist_left_join(episodes_wiki, by = c("short_title" = "main_segment"), method = "soundex") %>%
select(short_title, short_desc, main_segment) %>%
stringdist_left_join(episodes_wiki, by = c("short_desc" = "main_segment"), method = "soundex") %>%
select(short_title, short_desc, main_segment.x, main_segment.y) #%>%
# stringdist_left_join(episodes_wiki, by = c("short_desc" = "segments"), method = "soundex") %>%
# select(short_title, short_desc, main_segment.x, main_segment.y, segments)
tbl_df %>%
# stringdist_full_join(episodes_wiki, by = c("value" = "main_segment"), max_dist = 3) %>%
stringdist_left_join(episodes_wiki, by = c("value" = "main_segment"), method = "soundex") %>%
select(value, main_segment) %>%
stringdist_left_join(episodes_wiki, by = c("value" = "main_segment"), max_dist = 3) %>%
select(value, main_segment.x, main_segment.y)
left_join(episodes_wiki, by = c("value" = "main_segment"))
save.image("lwt_wiki")
|
# CVFolds creates the list of row numbers in each of the V folds. Special
# cases are: shuffling of row numbers, cluster identification via `id`
# (observations sharing an id are kept in the same fold), and stratification
# by a binary outcome Y to maintain (near) balance of Y within each fold.
#
# Args:
#   V:          number of cross-validation folds.
#   N.all:      total number of observations.
#   shuffle:    logical; randomly permute observations (or clusters) before
#               assigning them to folds.
#   id:         optional cluster identifier vector of length N.all, or NULL.
#   stratifyCV: logical; stratify fold assignment on a binary Y.
#   Y:          outcome vector; only used when stratifyCV is TRUE.
#
# Returns (invisibly): a list of length V, each element holding the row
# indices of the observations in that fold.
CVFolds <- function(V, N.all, shuffle, id, stratifyCV, Y) {
  if(!stratifyCV) {
    if(shuffle) {
      if(is.null(id)) {
        DATA.split <- split(sample(1:N.all), rep(1:V, length=N.all))
      } else {
        # Shuffle whole clusters so all rows of a cluster share a fold.
        n.id <- length(unique(id))
        id.split <- split(sample(1:n.id), rep(1:V, length=n.id))
        DATA.split <- vector("list", V)
        for(v in seq(V)) {
          DATA.split[[v]] <- which(id %in% unique(id)[id.split[[v]]])
        }
      }
    } else {
      if(is.null(id)) {
        DATA.split <- split(1:N.all, rep(1:V, length=N.all))
      } else {
        n.id <- length(unique(id))
        id.split <- split(1:n.id, rep(1:V, length=n.id))
        DATA.split <- vector("list", V)
        for(v in seq(V)) {
          DATA.split[[v]] <- which(id %in% unique(id)[id.split[[v]]])
        }
      }
    }
  } else {
    if(length(unique(Y)) != 2) {
      stop("stratifyCV only implemented for binary Y")
    }
    if(sum(Y) < V | sum(!Y) < V) {
      stop("number of (Y=1) or (Y=0) is less than the number of folds")
    }
    if(shuffle) {
      if(is.null(id)) {
        # BUG FIX: the original used 1:N.all here (identical to the
        # non-shuffle branch), so shuffle = TRUE had no effect in the
        # stratified case. Permute the row indices before splitting
        # round-robin within each level of Y.
        within.split <- suppressWarnings(tapply(sample(1:N.all), INDEX = Y, FUN = split, rep(1:V)))
        DATA.split <- vector("list", length = V)
        names(DATA.split) <- paste(seq(V))
        for(vv in seq(V)) {
          DATA.split[[vv]] <- c(within.split[[1]][[vv]], within.split[[2]][[vv]])
        }
      } else {
        stop("stratified sampling with id not currently implemented")
      }
    } else {
      if(is.null(id)) {
        # Split the indices of each Y level round-robin over the V folds,
        # then combine the two levels per fold.
        within.split <- suppressWarnings(tapply(1:N.all, INDEX = Y, FUN = split, rep(1:V)))
        DATA.split <- vector("list", length = V)
        names(DATA.split) <- paste(seq(V))
        for(vv in seq(V)) {
          DATA.split[[vv]] <- c(within.split[[1]][[vv]], within.split[[2]][[vv]])
        }
      } else {
        stop("stratified sampling with id not currently implemented")
      }
    }
  }
  invisible(DATA.split)
}
# # testing
# N.all <- 200
# Y <- rbinom(N.all, 1, 0.2)
# V <- 10
# shuffle <- TRUE
# id <- NULL
# stratifyCV <- TRUE
#
# within.split <- suppressWarnings(tapply(1:N.all, INDEX = Y, FUN = split, rep(1:V)))
# DATA.split <- vector("list", length = V)
#
# for(vv in seq(V)) {
# DATA.split[[vv]] <- c(within.split[[1]][[vv]], within.split[[2]][[vv]])
# }
#
# # check
# for(i in seq(V)) {
# print(mean(Y[DATA.split[[i]]]))
# }
#
# fooS <- CVFolds(V=V, N.all = N.all, shuffle = FALSE, id = NULL, stratifyCV = TRUE, Y = Y)
# fooN <- CVFolds(V=V, N.all = N.all, shuffle = FALSE, id = NULL, stratifyCV = FALSE, Y = Y)
#
# for(i in seq(V)) {
# print(mean(Y[fooS[[i]]]))
# }
# for(i in seq(V)) {
# print(mean(Y[fooN[[i]]]))
# }
|
/R/CVFolds.R
|
no_license
|
tedwestling/SuperLearner_Old
|
R
| false
| false
| 2,714
|
r
|
# CVFolds creates the list of row number in each of the V folds. special cases are when shuffling row number, id cluster identification and if stratify by the outcome to maintain (near) balance in each fold.
# Returns (invisibly) a list of length V with the row indices of each fold.
# NOTE(review): in the stratified branch below, the shuffle = TRUE case uses
# tapply(1:N.all, ...) exactly like the non-shuffle case, so shuffling has no
# effect when stratifyCV is TRUE -- it likely should use sample(1:N.all).
CVFolds <- function(V, N.all, shuffle, id, stratifyCV, Y) {
  if(!stratifyCV) {
    if(shuffle) {
      if(is.null(id)) {
        DATA.split <- split(sample(1:N.all), rep(1:V, length=N.all))
      } else {
        # Shuffle whole clusters so all rows of a cluster share a fold.
        n.id <- length(unique(id))
        id.split <- split(sample(1:n.id), rep(1:V, length=n.id))
        DATA.split <- vector("list", V)
        for(v in seq(V)) {
          DATA.split[[v]] <- which(id %in% unique(id)[id.split[[v]]])
        }
      }
    } else {
      if(is.null(id)) {
        DATA.split <- split(1:N.all, rep(1:V, length=N.all))
      } else {
        n.id <- length(unique(id))
        id.split <- split(1:n.id, rep(1:V, length=n.id))
        DATA.split <- vector("list", V)
        for(v in seq(V)) {
          DATA.split[[v]] <- which(id %in% unique(id)[id.split[[v]]])
        }
      }
    }
  } else {
    if(length(unique(Y)) != 2) {
      stop("stratifyCV only implemented for binary Y")
    }
    if(sum(Y) < V | sum(!Y) < V) {
      stop("number of (Y=1) or (Y=0) is less than the number of folds")
    }
    if(shuffle) {
      if(is.null(id)) {
        within.split <- suppressWarnings(tapply(1:N.all, INDEX = Y, FUN = split, rep(1:V)))
        DATA.split <- vector("list", length = V)
        names(DATA.split) <- paste(seq(V))
        for(vv in seq(V)) {
          DATA.split[[vv]] <- c(within.split[[1]][[vv]], within.split[[2]][[vv]])
        }
      } else {
        stop("stratified sampling with id not currently implemented")
      }
    } else {
      if(is.null(id)) {
        # Split the indices of each Y level round-robin over the V folds,
        # then combine the two levels per fold.
        within.split <- suppressWarnings(tapply(1:N.all, INDEX = Y, FUN = split, rep(1:V)))
        DATA.split <- vector("list", length = V)
        names(DATA.split) <- paste(seq(V))
        for(vv in seq(V)) {
          DATA.split[[vv]] <- c(within.split[[1]][[vv]], within.split[[2]][[vv]])
        }
      } else {
        stop("stratified sampling with id not currently implemented")
      }
    }
  }
  invisible(DATA.split)
}
# # testing
# N.all <- 200
# Y <- rbinom(N.all, 1, 0.2)
# V <- 10
# shuffle <- TRUE
# id <- NULL
# stratifyCV <- TRUE
#
# within.split <- suppressWarnings(tapply(1:N.all, INDEX = Y, FUN = split, rep(1:V)))
# DATA.split <- vector("list", length = V)
#
# for(vv in seq(V)) {
# DATA.split[[vv]] <- c(within.split[[1]][[vv]], within.split[[2]][[vv]])
# }
#
# # check
# for(i in seq(V)) {
# print(mean(Y[DATA.split[[i]]]))
# }
#
# fooS <- CVFolds(V=V, N.all = N.all, shuffle = FALSE, id = NULL, stratifyCV = TRUE, Y = Y)
# fooN <- CVFolds(V=V, N.all = N.all, shuffle = FALSE, id = NULL, stratifyCV = FALSE, Y = Y)
#
# for(i in seq(V)) {
# print(mean(Y[fooS[[i]]]))
# }
# for(i in seq(V)) {
# print(mean(Y[fooN[[i]]]))
# }
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_ts.R
\name{get_ts}
\alias{get_ts}
\title{Return CBS timeseries}
\usage{
get_ts(id, ts_code, refresh = FALSE, raw_cbs_dir = "raw_cbs_data",
include_meta = TRUE, min_year = NULL, frequencies = NULL, download,
base_url = NULL, download_all_keys = FALSE)
}
\arguments{
\item{id}{table id}
\item{ts_code}{a \code{ts_code} object. This object can be created
and modified with function \code{\link{edit_ts_code}}, which starts a Shiny
app.}
\item{refresh}{should the data in directory \code{raw_cbs_dir} be refreshed?
If \code{TRUE}, the data are always downloaded from the
CBS website. Otherwise the data will only be downloaded if the
corresponding files in directory \code{raw_cbs_dir} are missing or not
complete (missing dimension keys). The default is \code{FALSE}.
Note that data may also be downloaded when new keys are selected
in the timeseries coding.}
\item{raw_cbs_dir}{directory where the raw downloaded data are stored.}
\item{include_meta}{include meta data (the default is \code{TRUE})}
\item{min_year}{the minimum year of the returned timeseries. Data
for years before \code{min_year} are disregarded. Specify \code{NULL}
or \code{NA} to not impose a minimum year}
\item{frequencies}{a character string specifying the frequencies of the
returned timeseries. Specify \code{"Y"}, \code{"H"}, \code{"Q"} or \code{"M"} for annual,
semi-annual, quarterly or monthly series, respectively. It is possible to specify a
combination of these characters, e.g. \code{"YQ"} for annual and quarterly series.
Another example: to retrieve annual, quarterly and monthly series simultaneously,
specify \code{"YQM"}. The function returns a list with a component for each
specified frequency.}
\item{download}{This argument overrules argument \code{refresh}. If \code{FALSE},
then data all never downloaded again. You will get an error if the files in
directory \code{raw_cbs_dir} are missing or not
complete (missing dimension keys). If \code{TRUE} then data are always
downloaded.}
\item{base_url}{optionally specify a different server. Useful for third party
data services implementing the same protocol.}
\item{download_all_keys}{This option specifies how to download data. By default,
for each table dimension (excluding the topic) only the selected keys in the
timeseries coding are downloaded. Although this can significantly reduce
downloading time, this approach has the disadvantage that it is necessary to
download the data again when a new dimension key is selected in the
timeseries coding. To prevent that, use argument \code{download_all_keys = TRUE},
then all keys are downloaded for each dimension.}
}
\value{
a list with class \code{table_ts}, with the following components
\item{Y}{Annual timeseries (if present)}
\item{H}{Semi-annual timeseries (if present)}
\item{Q}{Quarterly timeseries (if present)}
\item{M}{Monthly timeseries (if present)}
\item{ts_names}{A data frame with an overview of the timeseries names}
\item{meta}{Meta data, only if argument \code{include_meta} is \code{TRUE}}
}
\description{
Return CBS timeseries
}
|
/cbsots/man/get_ts.Rd
|
no_license
|
timemod/cbsots
|
R
| false
| true
| 3,141
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_ts.R
\name{get_ts}
\alias{get_ts}
\title{Return CBS timeseries}
\usage{
get_ts(id, ts_code, refresh = FALSE, raw_cbs_dir = "raw_cbs_data",
include_meta = TRUE, min_year = NULL, frequencies = NULL, download,
base_url = NULL, download_all_keys = FALSE)
}
\arguments{
\item{id}{table id}
\item{ts_code}{a \code{ts_code} object. This object can be created
and modified with function \code{\link{edit_ts_code}}, which starts a Shiny
app.}
\item{refresh}{should the data in directory \code{raw_cbs_dir} be refreshed?
If \code{TRUE}, the data are always downloaded from the
CBS website. Otherwise the data will only be downloaded if the
corresponding files in directory \code{raw_cbs_dir} are missing or not
complete (missing dimension keys). The default is \code{FALSE}.
Note that data may also be downloaded when new keys are selected
in the timeseries coding.}
\item{raw_cbs_dir}{directory where the raw downloaded data are stored.}
\item{include_meta}{include meta data (the default is \code{TRUE})}
\item{min_year}{the minimum year of the returned timeseries. Data
for years before \code{min_year} are disregarded. Specify \code{NULL}
or \code{NA} to not impose a minimum year}
\item{frequencies}{a character string specifying the frequencies of the
returned timeseries. Specify \code{"Y"}, \code{"H"}, \code{"Q"} or \code{"M"} for annual,
semi-annual, quarterly or monthly series, respectively. It is possible to specify a
combination of these characters, e.g. \code{"YQ"} for annual and quarterly series.
Another example: to retrieve annual, quarterly and monthly series simultaneously,
specify \code{"YQM"}. The function returns a list with a component for each
specified frequency.}
\item{download}{This argument overrules argument \code{refresh}. If \code{FALSE},
then data are never downloaded again. You will get an error if the files in
directory \code{raw_cbs_dir} are missing or not
complete (missing dimension keys). If \code{TRUE} then data are always
downloaded.}
\item{base_url}{optionally specify a different server. Useful for third party
data services implementing the same protocol.}
\item{download_all_keys}{This option specifies how to download data. By default,
for each table dimension (excluding the topic) only the selected keys in the
timeseries coding are downloaded. Although this can significantly reduce
downloading time, this approach has the disadvantage that it is necessary to
download the data again when a new dimension key is selected in the
timeseries coding. To prevent that, use argument \code{download_all_keys = TRUE},
then all keys are downloaded for each dimension.}
}
\value{
a list with class \code{table_ts}, with the following components
\item{Y}{Annual timeseries (if present)}
\item{H}{Semi-annual timeseries (if present)}
\item{Q}{Quarterly timeseries (if present)}
\item{M}{Monthly timeseries (if present)}
\item{ts_names}{A data frame with an overview of the timeseries names}
\item{meta}{Meta data, only if argument \code{include_meta} is \code{TRUE}}
}
\description{
Return CBS timeseries
}
|
# Generate all permutations of x. If x is a single positive integer n, the
# permutations of 1:n are produced instead. Returns a list of length
# length(x)! holding each permutation, or fun(permutation, ...) when a
# function is supplied. Successive permutations appear to differ by one
# adjacent transposition (Steinhaus-Johnson-Trotter style) -- TODO confirm.
"permn" <-function(x, fun = NULL, ...) {
	 if(is.numeric(x) && length(x) == 1 && x > 0 && trunc(x) == x) x <- seq(x)
	n <- length(x)
	nofun <- is.null(fun)
	# gamma(n + 1) == factorial(n): preallocate the output list.
	out <- vector("list", gamma(n + 1))
	# p holds the working permutation (with sentinels m at both ends),
	# ip the inverse permutation, d the direction of each element.
	p <- ip <- seqn <- 1:n
	d <- rep(-1, n)
	d[1] <- 0
	m <- n + 1
	p <- c(m, p, m)
	i <- 1
	use <-  - c(1, n + 2)  # drop the two sentinel positions when emitting
	while(m != 1) {
		out[[i]] <- if(nofun) x[p[use]] else fun(x[p[use]], ...)
		i <- i + 1
		m <- n
		chk <- (p[ip + d + 1] > seqn)
		m <- max(seqn[!chk])
		if(m < n)
			d[(m + 1):n] <-  - d[(m + 1):n]
		# Swap element m with its neighbour in direction d[m], keeping ip
		# (the inverse permutation) in sync.
		index1 <- ip[m] + 1
		index2 <- p[index1] <- p[index1 + d[m]]
		p[index1 + d[m]] <- m
		tmp <- ip[index2]
		ip[index2] <- ip[m]
		ip[m] <- tmp
	}
	out
}
#' @title Asian Option Price
#' @description Returns the price of an asian option using a binomial tree approach
#' @param S the initial stock price
#' @param K the strike price
#' @param r the risk free (continuously compounded interest rate)
#' @param delta the annual dividend rate
#' @param sigma the volatility
#' @param t the expiration time (default one year)
#' @param call TRUE if option is a call, FALSE is option is a put
#' @param arithmetic TRUE if arithmetic average is used, FALSE if geometric average is used
#' @param price TRUE if average price is used, FALSE if average strike is used
#' @param h the number of subdivisions between 0 and t (default 10)
#' @details Uses a forward tree to compute u and d. p is the risk-neutral probability.
#'   The length of one tree period is t/h.
#' @examples asianOption(40, 39, 0.05, 0, 0.3, 3/12, call=FALSE, arithmetic=TRUE, price=TRUE, h=3)
#' @export
asianOption <- function(S, K, r, delta, sigma, t = 1, call=TRUE, arithmetic=TRUE, price=TRUE, h=10) {
  # BUG FIX: the original used h (the *number* of periods) as the period
  # length in u and d while p already used t/h. All three now consistently
  # use the period length dt = t/h (forward-tree parameterization).
  dt = t/h
  u = exp( (r-delta)*dt + sigma*sqrt(dt))
  d = exp( (r-delta)*dt - sigma*sqrt(dt))
  p = (exp((r-delta)*dt) - d)/(u-d)
  paths <-numeric(0) # compute the possible (ordered) paths (1 = up, 0 = down)
  for(i in 0:h) {
    path = c(rep(0,i),rep(1,(h-i)))
    paths = c(paths, unique(permn(path)))  # distinct orderings with i down-moves
  }
  payoffs <- numeric(0) # determine the payoff for each path
  probabilities <- numeric(0) # determine the probability for each path
  averages <- numeric(0) # average stock price along each path
  for(path in paths) {
    previousPrice = S
    probability = 1
    avg <- numeric(0)
    payoff <- numeric(0)
    prices <- numeric(0)
    for(num in path) {
      if(num==1) { # up move
        prices = c(prices, previousPrice*u)
        previousPrice = previousPrice * u
        probability = probability * p
      } else { # down move
        prices = c(prices, previousPrice*d)
        previousPrice = previousPrice * d
        probability = probability * (1-p)
      }
    }
    if(arithmetic==TRUE) {
      avg = mean(prices)
    } else { # geometric average
      avg = exp(mean(log(prices)))
    }
    if(price==TRUE) { # average price option: payoff compares avg to K
      if(call==TRUE) {
        payoff=max(0, avg-K)
      } else { # call == FALSE
        payoff = max(0, K-avg)
      }
    } else { # average strike option (K=avg, S = prices[h])
      if(call==TRUE) {
        payoff=max(0, prices[h]-avg)
      } else {
        payoff=max(0, avg-prices[h])
      }
    }
    payoffs = c(payoffs, payoff)
    probabilities = c(probabilities, probability)
    averages = c(averages, avg)
  }
  # NOTE(review): these global assignments (<<-) are kept for backward
  # compatibility with callers that inspect payoffs/averages/probabilities
  # after the call, but writing to the global environment is an anti-pattern.
  payoffs <<- payoffs
  averages <<- averages
  probabilities <<- probabilities
  # Discounted risk-neutral expected payoff.
  exp(-r*t)*sum(probabilities*payoffs)
}
|
/m4fe/R/asianOption.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 3,402
|
r
|
# Duplicate copy of permn: returns a list of all length(x)! permutations of x
# (or of 1:x when x is a single positive integer), optionally applying fun to
# each. The loop appears to enumerate permutations by adjacent transpositions
# -- statement order is significant; do not reorder.
"permn" <-function(x, fun = NULL, ...) {
	 if(is.numeric(x) && length(x) == 1 && x > 0 && trunc(x) == x) x <- seq(x)
	n <- length(x)
	nofun <- is.null(fun)
	out <- vector("list", gamma(n + 1))  # gamma(n + 1) == n!
	p <- ip <- seqn <- 1:n
	d <- rep(-1, n)
	d[1] <- 0
	m <- n + 1
	p <- c(m, p, m)
	i <- 1
	use <-  - c(1, n + 2)
	while(m != 1) {
		out[[i]] <- if(nofun) x[p[use]] else fun(x[p[use]], ...)
		i <- i + 1
		m <- n
		chk <- (p[ip + d + 1] > seqn)
		m <- max(seqn[!chk])
		if(m < n)
			d[(m + 1):n] <-  - d[(m + 1):n]
		index1 <- ip[m] + 1
		index2 <- p[index1] <- p[index1 + d[m]]
		p[index1 + d[m]] <- m
		tmp <- ip[index2]
		ip[index2] <- ip[m]
		ip[m] <- tmp
	}
	out
}
#' @title Asian Option Price
#' @description Returns the price of an asian option using a binomial tree approach
#' @param S the initial stock price
#' @param K the strike price
#' @param r the risk free (continuously compounded interest rate)
#' @param delta the annual dividend rate
#' @param sigma the volatility
#' @param t the expiration time (default one year)
#' @param call TRUE if option is a call, FALSE is option is a put
#' @param arithmetic TRUE if arithmetic average is used, FALSE if geometric average is used
#' @param price TRUE if average price is used, FALSE if average strike is used
#' @param h the number of subdivisions between 0 and t (default 10)
#' @details Uses a forward tree to compute u and d. p is the risk-neutral probability
#' @examples asianOption(40, 39, 0.05, 0, 0.3, 3/12, call=FALSE, arithmetic=TRUE, price=TRUE, h=3)
#' @export
asianOption <- function(S, K, r, delta, sigma, t = 1, call=TRUE, arithmetic=TRUE, price=TRUE, h=10) {
  # NOTE(review): u and d use h as the period length while p uses t/h; these
  # look inconsistent -- the period length should likely be t/h everywhere.
  # Verify against the forward-tree parameterization before relying on this.
  u = exp( (r-delta)*h + sigma*sqrt(h))
  d = exp( (r-delta)*h - sigma*sqrt(h))
  p = (exp((r-delta)*(t/h)) - d)/(u-d)
  paths <-numeric(0) # compute the possible (ordered) paths
  for(i in 0:h) {
    path = c(rep(0,i),rep(1,(h-i)))
    paths = c(paths, unique(permn(path)))
  }
  payoffs <- numeric(0) # determine the payoff for each path
  probabilities <- numeric(0) # determine the probability for each path
  averages <- numeric(0)
  for(path in paths) {
    previousPrice = S
    probability = 1
    avg <- numeric(0)
    payoff <- numeric(0)
    prices <- numeric(0)
    for(num in path) {
      if(num==1) { # u
        prices = c(prices, previousPrice*u)
        previousPrice = previousPrice * u
        probability = probability * p
      } else { # d
        prices = c(prices, previousPrice*d)
        previousPrice = previousPrice * d
        probability = probability * (1-p)
      }
    }
    if(arithmetic==TRUE) {
      avg = mean(prices)
    } else { # geometric average
      avg = exp(mean(log(prices)))
    }
    if(price==TRUE) {
      if(call==TRUE) {
        payoff=max(0, avg-K)
      } else { # call == FALSE
        payoff = max(0, K-avg)
      }
    } else { # strike option (K=avg, S = prices[h])
      if(call==TRUE) {
        payoff=max(0, prices[h]-avg)
      } else {
        payoff=max(0, avg-prices[h])
      }
    }
    payoffs = c(payoffs, payoff)
    probabilities = c(probabilities, probability)
    averages = c(averages, avg)
  }
  # NOTE(review): <<- writes these vectors into the global environment as a
  # side effect -- an anti-pattern, but callers may depend on it.
  payoffs <<- payoffs
  averages <<- averages
  probabilities <<- probabilities
  # Discounted risk-neutral expected payoff (the return value).
  exp(-r*t)*sum(probabilities*payoffs)
}
|
# Automatically generated by openapi-generator (https://openapi-generator.tech)
# Please update as you see appropriate
# Generated testthat skeleton for the VersioningVersionEnvironmentBlob model.
# All expectations are commented out, so these tests currently only verify
# that the model object can be instantiated without error.
context("Test VersioningVersionEnvironmentBlob")
model.instance <- VersioningVersionEnvironmentBlob$new()
test_that("major", {
  # tests for the property `major` (integer)
  # uncomment below to test the property
  #expect_equal(model.instance$`major`, "EXPECTED_RESULT")
})
test_that("minor", {
  # tests for the property `minor` (integer)
  # uncomment below to test the property
  #expect_equal(model.instance$`minor`, "EXPECTED_RESULT")
})
test_that("patch", {
  # tests for the property `patch` (integer)
  # uncomment below to test the property
  #expect_equal(model.instance$`patch`, "EXPECTED_RESULT")
})
test_that("suffix", {
  # tests for the property `suffix` (character)
  # uncomment below to test the property
  #expect_equal(model.instance$`suffix`, "EXPECTED_RESULT")
})
|
/tests/testthat/test_versioning_version_environment_blob.R
|
no_license
|
botchkoAI/VertaRegistryService
|
R
| false
| false
| 921
|
r
|
# Automatically generated by openapi-generator (https://openapi-generator.tech)
# Please update as you see appropriate
#
# Test scaffold for the VersioningVersionEnvironmentBlob model (version
# fields: major / minor / patch / suffix). The expect_equal() calls are
# generated placeholders and remain commented out until real expected
# values are filled in, so each test currently passes vacuously.
context("Test VersioningVersionEnvironmentBlob")
# Shared model instance used by all property tests below; requires the
# package that defines VersioningVersionEnvironmentBlob to be loaded.
model.instance <- VersioningVersionEnvironmentBlob$new()
test_that("major", {
# tests for the property `major` (integer)
# uncomment below to test the property
#expect_equal(model.instance$`major`, "EXPECTED_RESULT")
})
test_that("minor", {
# tests for the property `minor` (integer)
# uncomment below to test the property
#expect_equal(model.instance$`minor`, "EXPECTED_RESULT")
})
test_that("patch", {
# tests for the property `patch` (integer)
# uncomment below to test the property
#expect_equal(model.instance$`patch`, "EXPECTED_RESULT")
})
test_that("suffix", {
# tests for the property `suffix` (character)
# uncomment below to test the property
#expect_equal(model.instance$`suffix`, "EXPECTED_RESULT")
})
|
# Estimates a VAR model on euro-area macro data and computes the impulse
# responses of unemployment, GDP growth (dlGDP), inflation and underlying
# inflation to a shock in the 3-month EURIBOR rate.
# Input : data/data.RDS (multivariate time series prepared upstream).
# Output: impulse-response plots drawn on the active graphics device.
# clean_eurostat_cache()
# install.packages("package:ggplot2")
# install.packages("ecb")
# install.packages("eurostat")
# install.packages("mFilter")
# install.packages("tseries")
# install.packages("forecast")
# install.packages("tidyverse")
# install.packages("TSstudio")
# installed.packages("urca")
# installed.packages("vars")
#load packages
# library(ecb)
# library(eurostat)
# library(urca)
library(vars)
# library(mFilter)
# library(tseries)
# library(TSstudio)
# library(forecast)
# library(tidyverse)
# NOTE(review): this object masks base::matrix() for the rest of the
# session; a more distinctive name would be safer if later code calls matrix().
matrix <- readRDS("data/data.RDS")
matrix <- na.omit(matrix)
#Select AIC-suggested lag#
lagselect <- VARselect(matrix, lag.max = 12, type = "both")
lagselect$selection
# Lag order retained by hand after inspecting lagselect$selection.
p_retenu = 2
model <- VAR(matrix, p = p_retenu, type = "const")
### Forecast Error Impulse Response (ortho = FALSE: unit, non-orthogonalised shocks) ###
forimp <- irf(model, impulse = "EURIBOR_3M",
response = c("unemployment","dlGDP","inflation","underinf"),
n.ahead = 8, ortho = FALSE, runs = 1000)
plot(forimp, plot.type = "multiple",
mar.multi = c(.5, 4, .5, 4))
#response of Unemployment to EURIBOR#
forimp1 <- irf(model, impulse = "EURIBOR_3M", response = "unemployment",
n.ahead = 8, ortho = FALSE, runs = 1000)
#response of dlGDP to EURIBOR#
forimp2 <- irf(model, impulse = "EURIBOR_3M", response = "dlGDP",
n.ahead = 8, ortho = FALSE, runs = 1000)
#response of inflation to EURIBOR#
forimp3 <- irf(model, impulse = "EURIBOR_3M", response = "inflation",
n.ahead = 8, ortho = FALSE, runs = 1000)
#response of underlying inflation to EURIBOR#
forimp4 <- irf(model, impulse = "EURIBOR_3M", response = "underinf",
n.ahead = 8, ortho = FALSE, runs = 1000)
#draw plots
# NOTE(review): plot.varirf manages its own device layout, so this
# par(mfrow = ...) may have no visible effect -- verify on the target device.
par(mfrow = c(2, 2))
plot(forimp1)
plot(forimp2)
plot(forimp3)
plot(forimp4)
### Orthogonal Impulse Response (ortho = TRUE: Cholesky-orthogonalised shocks) ###
oir <- irf(model, impulse = "EURIBOR_3M",
response = c("unemployment","dlGDP","inflation","underinf"),
n.ahead = 8, ortho = TRUE, runs = 1000)
plot(oir, plot.type = "multiple",
mar.multi = c(.5, 4, .5, 4))
#response of Unemployment to EURIBOR#
oir1 <- irf(model, impulse = "EURIBOR_3M", response = "unemployment",
n.ahead = 8, ortho = TRUE, runs = 1000)
#response of dlGDP to EURIBOR#
oir2 <- irf(model, impulse = "EURIBOR_3M", response = "dlGDP",
n.ahead = 8, ortho = TRUE, runs = 1000)
#response of inflation to EURIBOR#
oir3 <- irf(model, impulse = "EURIBOR_3M", response = "inflation",
n.ahead = 8, ortho = TRUE, runs = 1000)
#response of underlying inflation to EURIBOR#
oir4 <- irf(model, impulse = "EURIBOR_3M", response = "underinf",
n.ahead = 8, ortho = TRUE, runs = 1000)
#draw plots
plot(oir, plot.type = "single")
par(mfrow = c(2, 2))
plot(oir1, plot.type = "single")
plot(oir2, plot.type = "single")
plot(oir3, plot.type = "single")
plot(oir4, plot.type = "single")
# Fix: removed a leftover interactive help lookup that does not belong in a
# batch script (it opened a help.search window):
# ??vars:::plot.varirf
|
/R/2-estimation_modeles.R
|
no_license
|
GautierLENFANT/AppliedMacroEuribor
|
R
| false
| false
| 2,868
|
r
|
# Estimates a VAR model on euro-area macro data and computes the impulse
# responses of unemployment, GDP growth (dlGDP), inflation and underlying
# inflation to a shock in the 3-month EURIBOR rate.
# Input : data/data.RDS (multivariate time series prepared upstream).
# Output: impulse-response plots drawn on the active graphics device.
# clean_eurostat_cache()
# install.packages("package:ggplot2")
# install.packages("ecb")
# install.packages("eurostat")
# install.packages("mFilter")
# install.packages("tseries")
# install.packages("forecast")
# install.packages("tidyverse")
# install.packages("TSstudio")
# installed.packages("urca")
# installed.packages("vars")
#load packages
# library(ecb)
# library(eurostat)
# library(urca)
library(vars)
# library(mFilter)
# library(tseries)
# library(TSstudio)
# library(forecast)
# library(tidyverse)
# NOTE(review): this object masks base::matrix() for the rest of the
# session; a more distinctive name would be safer if later code calls matrix().
matrix <- readRDS("data/data.RDS")
matrix <- na.omit(matrix)
#Select AIC-suggested lag#
lagselect <- VARselect(matrix, lag.max = 12, type = "both")
lagselect$selection
# Lag order retained by hand after inspecting lagselect$selection.
p_retenu = 2
model <- VAR(matrix, p = p_retenu, type = "const")
### Forecast Error Impulse Response (ortho = FALSE: unit, non-orthogonalised shocks) ###
forimp <- irf(model, impulse = "EURIBOR_3M",
response = c("unemployment","dlGDP","inflation","underinf"),
n.ahead = 8, ortho = FALSE, runs = 1000)
plot(forimp, plot.type = "multiple",
mar.multi = c(.5, 4, .5, 4))
#response of Unemployment to EURIBOR#
forimp1 <- irf(model, impulse = "EURIBOR_3M", response = "unemployment",
n.ahead = 8, ortho = FALSE, runs = 1000)
#response of dlGDP to EURIBOR#
forimp2 <- irf(model, impulse = "EURIBOR_3M", response = "dlGDP",
n.ahead = 8, ortho = FALSE, runs = 1000)
#response of inflation to EURIBOR#
forimp3 <- irf(model, impulse = "EURIBOR_3M", response = "inflation",
n.ahead = 8, ortho = FALSE, runs = 1000)
#response of underlying inflation to EURIBOR#
forimp4 <- irf(model, impulse = "EURIBOR_3M", response = "underinf",
n.ahead = 8, ortho = FALSE, runs = 1000)
#draw plots
# NOTE(review): plot.varirf manages its own device layout, so this
# par(mfrow = ...) may have no visible effect -- verify on the target device.
par(mfrow = c(2, 2))
plot(forimp1)
plot(forimp2)
plot(forimp3)
plot(forimp4)
### Orthogonal Impulse Response (ortho = TRUE: Cholesky-orthogonalised shocks) ###
oir <- irf(model, impulse = "EURIBOR_3M",
response = c("unemployment","dlGDP","inflation","underinf"),
n.ahead = 8, ortho = TRUE, runs = 1000)
plot(oir, plot.type = "multiple",
mar.multi = c(.5, 4, .5, 4))
#response of Unemployment to EURIBOR#
oir1 <- irf(model, impulse = "EURIBOR_3M", response = "unemployment",
n.ahead = 8, ortho = TRUE, runs = 1000)
#response of dlGDP to EURIBOR#
oir2 <- irf(model, impulse = "EURIBOR_3M", response = "dlGDP",
n.ahead = 8, ortho = TRUE, runs = 1000)
#response of inflation to EURIBOR#
oir3 <- irf(model, impulse = "EURIBOR_3M", response = "inflation",
n.ahead = 8, ortho = TRUE, runs = 1000)
#response of underlying inflation to EURIBOR#
oir4 <- irf(model, impulse = "EURIBOR_3M", response = "underinf",
n.ahead = 8, ortho = TRUE, runs = 1000)
#draw plots
plot(oir, plot.type = "single")
par(mfrow = c(2, 2))
plot(oir1, plot.type = "single")
plot(oir2, plot.type = "single")
plot(oir3, plot.type = "single")
plot(oir4, plot.type = "single")
# Fix: removed a leftover interactive help lookup that does not belong in a
# batch script (it opened a help.search window):
# ??vars:::plot.varirf
|
### ISLR: Chapter 5 Cross-Validation and Bootstrapping
## Validation Set Approach
# We will split randomly a dataset in a training and test set
library(ISLR)
set.seed(1)
train <- sample(392, 196)
# Perform a linear regression on the Auto dataset
regr <- lm(mpg ~ horsepower, data = Auto, subset = train)
# Now look at the results on the test set (MSE on the held-out half)
mean((Auto$mpg - predict(regr, Auto))[-train]^2)
# Try with different degrees polynomial regressions
regr2 <- lm(mpg ~ poly(horsepower, 2), data = Auto, subset = train)
mean((Auto$mpg - predict(regr2, Auto))[-train]^2)
regr3 <- lm(mpg ~ poly(horsepower, 3), data = Auto, subset = train)
mean((Auto$mpg - predict(regr3, Auto))[-train]^2)
# See what happens with a different random split
set.seed(2)
train <- sample(392, 196)
regr <- lm(mpg ~ horsepower, data = Auto, subset = train)
mean((Auto$mpg - predict(regr, Auto))[-train]^2)
regr2 <- lm(mpg ~ poly(horsepower, 2), data = Auto, subset = train)
mean((Auto$mpg - predict(regr2, Auto))[-train]^2)
regr3 <- lm(mpg ~ poly(horsepower, 3), data = Auto, subset = train)
mean((Auto$mpg - predict(regr3, Auto))[-train]^2)
## Leave-One-Out CV (LOOCV)
# We will use the cv.glm() function to perform LOOCV in the boot package
library(boot)
regr <- glm(mpg ~ horsepower, data = Auto)
cv <- cv.glm(Auto, regr)
cv$delta
# Repeat the process for polynomial degrees 1..10.
# FIX: preallocate length 10 -- the original used rep(0, 5) and relied on R
# silently growing the vector inside the loop (same values, clearer intent).
cv <- rep(0, 10)
for (i in 1:10) {
regr <- glm(mpg ~ poly(horsepower, i), data = Auto)
cv[i] <- cv.glm(Auto, regr)$delta[1]
}
cv <- data.frame(Error = cv, Degree = seq(1, 10, by = 1))
library(ggplot2)
# LOOCV error as a function of polynomial degree.
ggplot(cv, aes(Degree, Error)) +
geom_line(color = "red", size = 1) +
geom_point(aes(Degree, Error), color = "red") +
theme_bw() +
scale_x_continuous(breaks = seq(1, 10, 1))
## K-fold CV
# To perform K-fold we use a similar approach as before (K = 10 folds)
set.seed(17)
cv <- rep(0, 10)
for (i in 1:10) {
regr <- glm(mpg ~ poly(horsepower, i), data = Auto)
cv[i] <- cv.glm(Auto, regr, K = 10)$delta[1]
}
cv
## Bootstrap
# Minimum-variance portfolio weight alpha for two assets X and Y; `index`
# selects the bootstrap sample, as required by boot::boot().
A <- function(data, index) {
X <- data$X[index]
Y <- data$Y[index]
(var(Y) - cov(X, Y)) / (var(X) + var(Y) - 2 * cov(X, Y))
}
# Estimation without bootstrapping (full sample)
A(Portfolio, 1:100)
# Let's say we want to be more confident on the result. We will use bootstrapping. First let's do just one sampling: bootstrapping does the same thing repeated n times
set.seed(1)
A(Portfolio, sample(100, 100, replace = TRUE))
# Now the real bootstrapping with 1000 reps
boot(Portfolio, A, R = 1000)
# If we want to do bootstrapping on the parameters of a lm we can do like this
fnc <- function(data, index) coef(lm(mpg ~ horsepower, data = data, subset = index))
fnc(Auto, 1:392)
boot(Auto, fnc, R = 1000)
|
/Chapter 5 - CV and Booststrapping.R
|
no_license
|
alanmarazzi/ISLR
|
R
| false
| false
| 2,789
|
r
|
### ISLR: Chapter 5 Cross-Validation and Bootstrapping
## Validation Set Approach
# We will split randomly a dataset in a training and test set
library(ISLR)
set.seed(1)
train <- sample(392, 196)
# Perform a linear regression on the Auto dataset
regr <- lm(mpg ~ horsepower, data = Auto, subset = train)
# Now look at the results on the test set (MSE on the held-out half)
mean((Auto$mpg - predict(regr, Auto))[-train]^2)
# Try with different degrees polynomial regressions
regr2 <- lm(mpg ~ poly(horsepower, 2), data = Auto, subset = train)
mean((Auto$mpg - predict(regr2, Auto))[-train]^2)
regr3 <- lm(mpg ~ poly(horsepower, 3), data = Auto, subset = train)
mean((Auto$mpg - predict(regr3, Auto))[-train]^2)
# See what happens with a different random split
set.seed(2)
train <- sample(392, 196)
regr <- lm(mpg ~ horsepower, data = Auto, subset = train)
mean((Auto$mpg - predict(regr, Auto))[-train]^2)
regr2 <- lm(mpg ~ poly(horsepower, 2), data = Auto, subset = train)
mean((Auto$mpg - predict(regr2, Auto))[-train]^2)
regr3 <- lm(mpg ~ poly(horsepower, 3), data = Auto, subset = train)
mean((Auto$mpg - predict(regr3, Auto))[-train]^2)
## Leave-One-Out CV (LOOCV)
# We will use the cv.glm() function to perform LOOCV in the boot package
library(boot)
regr <- glm(mpg ~ horsepower, data = Auto)
cv <- cv.glm(Auto, regr)
cv$delta
# Repeat the process for polynomial degrees 1..10.
# FIX: preallocate length 10 -- the original used rep(0, 5) and relied on R
# silently growing the vector inside the loop (same values, clearer intent).
cv <- rep(0, 10)
for (i in 1:10) {
regr <- glm(mpg ~ poly(horsepower, i), data = Auto)
cv[i] <- cv.glm(Auto, regr)$delta[1]
}
cv <- data.frame(Error = cv, Degree = seq(1, 10, by = 1))
library(ggplot2)
# LOOCV error as a function of polynomial degree.
ggplot(cv, aes(Degree, Error)) +
geom_line(color = "red", size = 1) +
geom_point(aes(Degree, Error), color = "red") +
theme_bw() +
scale_x_continuous(breaks = seq(1, 10, 1))
## K-fold CV
# To perform K-fold we use a similar approach as before (K = 10 folds)
set.seed(17)
cv <- rep(0, 10)
for (i in 1:10) {
regr <- glm(mpg ~ poly(horsepower, i), data = Auto)
cv[i] <- cv.glm(Auto, regr, K = 10)$delta[1]
}
cv
## Bootstrap
# Minimum-variance portfolio weight alpha for two assets X and Y; `index`
# selects the bootstrap sample, as required by boot::boot().
A <- function(data, index) {
X <- data$X[index]
Y <- data$Y[index]
(var(Y) - cov(X, Y)) / (var(X) + var(Y) - 2 * cov(X, Y))
}
# Estimation without bootstrapping (full sample)
A(Portfolio, 1:100)
# Let's say we want to be more confident on the result. We will use bootstrapping. First let's do just one sampling: bootstrapping does the same thing repeated n times
set.seed(1)
A(Portfolio, sample(100, 100, replace = TRUE))
# Now the real bootstrapping with 1000 reps
boot(Portfolio, A, R = 1000)
# If we want to do bootstrapping on the parameters of a lm we can do like this
fnc <- function(data, index) coef(lm(mpg ~ horsepower, data = data, subset = index))
fnc(Auto, 1:392)
boot(Auto, fnc, R = 1000)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fcn_misc.R
\name{LCS}
\alias{LCS}
\title{Compute longest common substring of two strings.}
\usage{
LCS(s1, s2)
}
\arguments{
\item{s1}{String one}
\item{s2}{String two}
}
\value{
String containing the longest common substring
}
\description{
Implementation is very inefficient (dynamic programming in R)
--> use only on small instances
}
|
/man/LCS.Rd
|
no_license
|
Maddocent/PTXQC
|
R
| false
| true
| 418
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fcn_misc.R
\name{LCS}
\alias{LCS}
\title{Compute longest common substring of two strings.}
\usage{
LCS(s1, s2)
}
\arguments{
\item{s1}{String one}
\item{s2}{String two}
}
\value{
String containing the longest common substring
}
\description{
Implementation is very inefficient (dynamic programming in R)
--> use only on small instances
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gh_repo_search.R
\name{gh_repo_search}
\alias{gh_repo_search}
\title{Get files in a repo}
\usage{
gh_repo_search(
code = "RocheData",
organisation = "PHC",
full_name = NULL,
custom = "in:file",
...
)
}
\arguments{
\item{code}{Output from \code{gh_commits_get()}}
\item{organisation}{Shortcut to add a filter onto a specific organisation}
\item{full_name}{Shortcut to add a filter on to a specific repo}
\item{custom}{Add your own query}
\item{...}{Pass down options to \code{gh::gh()}}
}
\description{
Get files in a repo
}
\details{
\strong{New Columns}
\describe{
\item{full_name}{org/repo}
\item{name}{repo name}
\item{file_name}{File name}
\item{path}{Path within repo to file including filename}
\item{url}{URL to the file and commit on github}
\item{score}{Search relevance score as returned by the GitHub search API (exact semantics unverified)}
\item{lang}{Language guessed via \code{GithubMetrics:::gh_filetype()}}
}
}
|
/man/gh_repo_search.Rd
|
permissive
|
epijim/GithubMetrics
|
R
| false
| true
| 995
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gh_repo_search.R
\name{gh_repo_search}
\alias{gh_repo_search}
\title{Get files in a repo}
\usage{
gh_repo_search(
code = "RocheData",
organisation = "PHC",
full_name = NULL,
custom = "in:file",
...
)
}
\arguments{
\item{code}{Output from \code{gh_commits_get()}}
\item{organisation}{Shortcut to add a filter onto a specific organisation}
\item{full_name}{Shortcut to add a filter on to a specific repo}
\item{custom}{Add your own query}
\item{...}{Pass down options to \code{gh::gh()}}
}
\description{
Get files in a repo
}
\details{
\strong{New Columns}
\describe{
\item{full_name}{org/repo}
\item{name}{repo name}
\item{file_name}{File name}
\item{path}{Path within repo to file including filename}
\item{url}{URL to the file and commit on github}
\item{score}{Search relevance score as returned by the GitHub search API (exact semantics unverified)}
\item{lang}{Language guessed via \code{GithubMetrics:::gh_filetype()}}
}
}
|
# Shiny server for an HDX-MS (hydrogen/deuterium exchange) analysis app:
# reads an HDExaminer-style CSV, lets the user discard peptides, renders a
# peptide map and kinetic uptake curves, and converts Byonic output.
# Helper functions (HDXdata.combo, HDXpepmap.combo, subdata, WHICH, Format,
# hdx.curve, Byonic.HDX.format) are sourced from sibling files below.
library(shiny)
library(ggplot2)
source("HDXdata.combo.R")
source("HDXpepmap.combo.R")
source("subdata.R")
source("WHICH.R")
source("Format.R")
source("hdx.curve.R")
source("Byonic.HDX.format.R")
shinyServer(function(input, output) {
# Raw uploaded table; validate()/need() keeps downstream outputs blank
# until a file has been uploaded. skip=1 drops the file's first line.
origin<-reactive({
validate(
need(input$fileinput$datapath != "", "")
)
read.csv(input$fileinput[['datapath']], skip=1) })
# Multi-select dropdown of all "Start-End, +Charge" peptide labels the
# user may discard from the analysis.
output$bdppt<-renderUI({
all.ppt<-unique(paste(origin()$Start,"-",origin()$End,", +",origin()$Charge,sep=""))
selectInput("bdppt",label=div(h4("Select peptide(s) you'd like to discard"),style="font-family:'marker felt';color:purple"), choices = all.ppt, multiple = TRUE)
})
# Combined/cleaned dataset; recomputed only when the action button fires.
dataIn<-eventReactive(input$act,{HDXdata.combo(origin(),bad.peptides=input$bdppt,time.points = input$timepoints, rep=input$replicates)})
# Peptide-map table derived from the combined data.
dataF<-reactive({HDXpepmap.combo(dataIn(), time.points = input$timepoints)})
# Plot height in pixels scales with the requested number of panel rows.
height<-reactive({180*input$nrow})
# Time points parsed from a comma-separated input string.
# NOTE(review): `%>%` is not loaded here (no magrittr/dplyr library call);
# presumably one of the sourced helper files provides it -- confirm.
TM<-reactive({(gsub(" ","",input$timepoints) %>% strsplit(",", fixed=TRUE))[[1]] })
# Download handler for the formatted full dataset as CSV.
output$fulldt<-downloadHandler(filename=function(){paste("formatted_",input$fileinput$name,sep="")},
content=function(file){write.csv(Format(dataIn(),length(TM())), file)})
# Parse the user's "which peptides to plot" selection.
WH<-reactive({WHICH(input$selplot)})
# Subset of the data for the selected peptides.
# NOTE(review): `sub` masks base::sub() inside this server function.
sub<-reactive({
validate(
need(input$selplot !="", "Reminder: Please specify WHICH PEPTIDE(S) TO PLOT and make sure you have ENOUGH panels, turn up Slider Bars of Row/Column number if necessary.")
)
subdata(origin(), input$bdppt, input$replicates, WH(),input$timepoints)})
# Alias of `sub`; used below so the table view triggers on its own button.
sub1<-sub
viewDT<-eventReactive(input$vw,{sub1()})
# Interactive table of the currently selected subset.
output$tbvw<-DT::renderDataTable({
return(viewDT())}, options=list(lengthMenu=list(c(10,25,50,-1),c("10","25","50","ALL")), pageLength=10))
## end of prep.
# Interactive peptide-map table.
output$pepmap<-DT::renderDataTable({
return(dataF())}, options=list(lengthMenu=list(c(10,25,50,-1), c("10","25","50","ALL")), pageLength=25))
## the theme(aspect.ratio=1) set the y/x of each panel to be 1, so comes out a square-looking plot for each kinetic curve panel.
# Kinetic-curve plot redrawn only when the refresh button fires; isolate()
# prevents the styling inputs from re-triggering the render on their own.
observeEvent(input$refresh,{
output$hdxcurves<-renderPlot({
isolate({ hdx.curve(sub(), input$ptsize, input$transparent, input$S1, input$S2, input$apo.col, input$holo.col, input$nrow, input$ncol)
})
}, height = height)
})
# PNG export of the kinetic-curve figure at 400 dpi; panel grid sized from
# the requested row/column counts.
output$spec.output<-downloadHandler(filename = function(){paste("HDX-",Sys.Date(),".png", sep="")}, content = function(file){
device<-function(...,width=width, height=height) grDevices::png(..., width = width, height = height, res=300, units = "in")
ggsave(file, plot = isolate(hdx.curve(sub(), input$ptsize, input$transparent, input$S1, input$S2, input$apo.col, input$holo.col, input$nrow, input$ncol)
), device = "png", height = 2*input$nrow, width = 2.2*input$ncol,dpi = 400) }, contentType = "image/png")
# Byonic-format conversion and its CSV download handler.
cvtd<-reactive({Byonic.HDX.format(input$from.byonic[['datapath']])})
output$output.byonic<-downloadHandler(filename= function(){paste("HDX-Byonic-", Sys.Date(),".csv", sep="")}, content = function(file){
write.csv(cvtd(), file, row.names = FALSE)}
)
})
|
/server.R
|
permissive
|
benniu720/HDExaminer-Assistant
|
R
| false
| false
| 3,058
|
r
|
# Shiny server for an HDX-MS (hydrogen/deuterium exchange) analysis app:
# reads an HDExaminer-style CSV, lets the user discard peptides, renders a
# peptide map and kinetic uptake curves, and converts Byonic output.
# Helper functions (HDXdata.combo, HDXpepmap.combo, subdata, WHICH, Format,
# hdx.curve, Byonic.HDX.format) are sourced from sibling files below.
library(shiny)
library(ggplot2)
source("HDXdata.combo.R")
source("HDXpepmap.combo.R")
source("subdata.R")
source("WHICH.R")
source("Format.R")
source("hdx.curve.R")
source("Byonic.HDX.format.R")
shinyServer(function(input, output) {
# Raw uploaded table; validate()/need() keeps downstream outputs blank
# until a file has been uploaded. skip=1 drops the file's first line.
origin<-reactive({
validate(
need(input$fileinput$datapath != "", "")
)
read.csv(input$fileinput[['datapath']], skip=1) })
# Multi-select dropdown of all "Start-End, +Charge" peptide labels the
# user may discard from the analysis.
output$bdppt<-renderUI({
all.ppt<-unique(paste(origin()$Start,"-",origin()$End,", +",origin()$Charge,sep=""))
selectInput("bdppt",label=div(h4("Select peptide(s) you'd like to discard"),style="font-family:'marker felt';color:purple"), choices = all.ppt, multiple = TRUE)
})
# Combined/cleaned dataset; recomputed only when the action button fires.
dataIn<-eventReactive(input$act,{HDXdata.combo(origin(),bad.peptides=input$bdppt,time.points = input$timepoints, rep=input$replicates)})
# Peptide-map table derived from the combined data.
dataF<-reactive({HDXpepmap.combo(dataIn(), time.points = input$timepoints)})
# Plot height in pixels scales with the requested number of panel rows.
height<-reactive({180*input$nrow})
# Time points parsed from a comma-separated input string.
# NOTE(review): `%>%` is not loaded here (no magrittr/dplyr library call);
# presumably one of the sourced helper files provides it -- confirm.
TM<-reactive({(gsub(" ","",input$timepoints) %>% strsplit(",", fixed=TRUE))[[1]] })
# Download handler for the formatted full dataset as CSV.
output$fulldt<-downloadHandler(filename=function(){paste("formatted_",input$fileinput$name,sep="")},
content=function(file){write.csv(Format(dataIn(),length(TM())), file)})
# Parse the user's "which peptides to plot" selection.
WH<-reactive({WHICH(input$selplot)})
# Subset of the data for the selected peptides.
# NOTE(review): `sub` masks base::sub() inside this server function.
sub<-reactive({
validate(
need(input$selplot !="", "Reminder: Please specify WHICH PEPTIDE(S) TO PLOT and make sure you have ENOUGH panels, turn up Slider Bars of Row/Column number if necessary.")
)
subdata(origin(), input$bdppt, input$replicates, WH(),input$timepoints)})
# Alias of `sub`; used below so the table view triggers on its own button.
sub1<-sub
viewDT<-eventReactive(input$vw,{sub1()})
# Interactive table of the currently selected subset.
output$tbvw<-DT::renderDataTable({
return(viewDT())}, options=list(lengthMenu=list(c(10,25,50,-1),c("10","25","50","ALL")), pageLength=10))
## end of prep.
# Interactive peptide-map table.
output$pepmap<-DT::renderDataTable({
return(dataF())}, options=list(lengthMenu=list(c(10,25,50,-1), c("10","25","50","ALL")), pageLength=25))
## the theme(aspect.ratio=1) set the y/x of each panel to be 1, so comes out a square-looking plot for each kinetic curve panel.
# Kinetic-curve plot redrawn only when the refresh button fires; isolate()
# prevents the styling inputs from re-triggering the render on their own.
observeEvent(input$refresh,{
output$hdxcurves<-renderPlot({
isolate({ hdx.curve(sub(), input$ptsize, input$transparent, input$S1, input$S2, input$apo.col, input$holo.col, input$nrow, input$ncol)
})
}, height = height)
})
# PNG export of the kinetic-curve figure at 400 dpi; panel grid sized from
# the requested row/column counts.
output$spec.output<-downloadHandler(filename = function(){paste("HDX-",Sys.Date(),".png", sep="")}, content = function(file){
device<-function(...,width=width, height=height) grDevices::png(..., width = width, height = height, res=300, units = "in")
ggsave(file, plot = isolate(hdx.curve(sub(), input$ptsize, input$transparent, input$S1, input$S2, input$apo.col, input$holo.col, input$nrow, input$ncol)
), device = "png", height = 2*input$nrow, width = 2.2*input$ncol,dpi = 400) }, contentType = "image/png")
# Byonic-format conversion and its CSV download handler.
cvtd<-reactive({Byonic.HDX.format(input$from.byonic[['datapath']])})
output$output.byonic<-downloadHandler(filename= function(){paste("HDX-Byonic-", Sys.Date(),".csv", sep="")}, content = function(file){
write.csv(cvtd(), file, row.names = FALSE)}
)
})
|
#' Plot projected daily incident cases per 100,000 for several intervention
#' scenarios (figure 5b of the COV-IND-19 report).
#'
#' Relies on globals set by the calling pipeline: `data_repo` and `today`
#' (used to locate the input CSV) -- they are not parameters here.
#'
#' @param forecast   Prefix of the "<forecast>_figure_5_inc_data.csv" input.
#' @param start.date First date shown on the x axis.
#' @param end.date   Last date shown on the x axis.
#'   Fix: the default was written as `end.date = end.date <- as.Date(...)`,
#'   a redundant self-assignment inside the default expression; same value,
#'   now expressed plainly.
#' @return A plotly object (htmlwidget).
plot_fig_5b <- function(forecast, start.date = as.Date("2020-05-15"),
                        end.date = as.Date("2020-07-15"))
{
  # Load the scenario projections, drop scenario "mod_3", format hover
  # labels, and smooth each colored series with loess (span = 0.2).
  data <- vroom(paste0(data_repo, today, "/1wk/", forecast,
                       "_figure_5_inc_data.csv")
  ) %>%
    mutate(Dates = as.Date(Dates)) %>%
    filter(Dates >= start.date & Dates <= end.date & variable != "mod_3") %>%
    mutate(date.fmt = paste0(format(Dates, "%b %d")),
           val.fmt = format(round(value), big.mark = ",", scientific = FALSE,
                            trim = T)
    ) %>%
    mutate(
      text = paste0(paste0(date.fmt, ": ", val.fmt,
                           " projected cases per day")
      )
    ) %>%
    group_by(color) %>%
    mutate(value = predict(loess(value ~ as.numeric(Dates), span = .2)))
  cap <- paste0("© COV-IND-19 Study Group. Last updated: ",
                format(today, format = "%b %d"), sep = ' ')
  # Axis styling shared by both axes.
  axis.title.font <- list(size = 16)
  tickfont <- list(size = 16)
  xaxis <- list(title = "", titlefont = axis.title.font, showticklabels = TRUE,
                tickangle = -30, zeroline = F)
  yaxis <- list(title = "Number of new infected cases per 100,000 per day",
                titlefont = axis.title.font, zeroline = T)
  # Annotation positions at monthly checkpoints; dates outside the plotted
  # window (e.g. Aug 15 with the default end.date) are simply absent after
  # the filter above. 1.34e9 is the population used for per-100k scaling.
  anno.data <- filter(data, as.character(Dates) %in% c("2020-05-15", "2020-06-15",
                                                       "2020-07-15", "2020-08-15")
  ) %>%
    group_by(Dates) %>% summarise(diff = (max(value) - min(value)),
                                  value = max(value) * 1e5 / 1.34e9
    ) %>%
    mutate(y = ifelse(1 + value > 1.2 * value, 1.2 * value, 1 + value))
  # Vertical guide-line template, instantiated once per annotation date.
  line <- list(
    type = "line",
    xref = "x",
    yref = "y",
    y0 = 0,
    layer = "below",
    line = list(color = "#eee", width = 3)
  )
  lines <- list()
  for (i in seq(nrow(anno.data))) {
    line$x0 <- anno.data$Dates[i]
    line$x1 <- anno.data$Dates[i]
    line$y1 <- anno.data$y[i] - 0.1
    lines[[i]] <- line
  }
  colors <- c("#173F5F", "#0472CF", "#3CAEA3", "#f2c82e")
  # The guide lines / annotations are currently disabled (kept commented
  # out below), leaving only the smoothed scenario curves.
  p <- plot_ly(data, x = ~Dates, y = ~ value * 1e5 / 1.34e9, text = ~text,
               color = ~ color, colors = colors, type = "scatter",
               mode = "lines", hoverinfo = "text", line = list(width = 4),
               hoverlabel = list(align = "left")
  ) %>%
    layout(xaxis = xaxis, yaxis = yaxis,
           title = list(text = cap, xanchor = "left", x = 0),
           legend = list(orientation = "h", font = list(size = 16))
           # shapes = lines
    ) %>%
    # add_annotations(
    #     x = anno.data$Dates,
    #     y = anno.data$y,
    #     text = paste0("Difference between social distancing and <br>cautious return on ",
    #                   format(anno.data$Dates, "%B %e"), ": ",
    #                   format(anno.data$diff, big.mark = ",", trim = T, sci = F),
    #                   " cases<br>"
    #     ),
    #     align = "left",
    #     font = list(size = 16),
    #     xref = "x",
    #     yref = "y",
    #     showarrow = F
    # ) %>%
    plotly::config(toImageButtonOptions = list(width = NULL, height = NULL))
  # vroom_write(data, path = paste0(data_repo, today, "/plot5b.csv"),
  #             delim = ","
  # )
  p
}
|
/model/r_scripts/plots/plot_fig_5b.R
|
no_license
|
kravi2018/cov-ind-19
|
R
| false
| false
| 3,226
|
r
|
#' Plot projected daily incident cases per 100,000 for several intervention
#' scenarios (figure 5b of the COV-IND-19 report).
#'
#' Relies on globals set by the calling pipeline: `data_repo` and `today`
#' (used to locate the input CSV) -- they are not parameters here.
#'
#' @param forecast   Prefix of the "<forecast>_figure_5_inc_data.csv" input.
#' @param start.date First date shown on the x axis.
#' @param end.date   Last date shown on the x axis.
#'   Fix: the default was written as `end.date = end.date <- as.Date(...)`,
#'   a redundant self-assignment inside the default expression; same value,
#'   now expressed plainly.
#' @return A plotly object (htmlwidget).
plot_fig_5b <- function(forecast, start.date = as.Date("2020-05-15"),
                        end.date = as.Date("2020-07-15"))
{
  # Load the scenario projections, drop scenario "mod_3", format hover
  # labels, and smooth each colored series with loess (span = 0.2).
  data <- vroom(paste0(data_repo, today, "/1wk/", forecast,
                       "_figure_5_inc_data.csv")
  ) %>%
    mutate(Dates = as.Date(Dates)) %>%
    filter(Dates >= start.date & Dates <= end.date & variable != "mod_3") %>%
    mutate(date.fmt = paste0(format(Dates, "%b %d")),
           val.fmt = format(round(value), big.mark = ",", scientific = FALSE,
                            trim = T)
    ) %>%
    mutate(
      text = paste0(paste0(date.fmt, ": ", val.fmt,
                           " projected cases per day")
      )
    ) %>%
    group_by(color) %>%
    mutate(value = predict(loess(value ~ as.numeric(Dates), span = .2)))
  cap <- paste0("© COV-IND-19 Study Group. Last updated: ",
                format(today, format = "%b %d"), sep = ' ')
  # Axis styling shared by both axes.
  axis.title.font <- list(size = 16)
  tickfont <- list(size = 16)
  xaxis <- list(title = "", titlefont = axis.title.font, showticklabels = TRUE,
                tickangle = -30, zeroline = F)
  yaxis <- list(title = "Number of new infected cases per 100,000 per day",
                titlefont = axis.title.font, zeroline = T)
  # Annotation positions at monthly checkpoints; dates outside the plotted
  # window (e.g. Aug 15 with the default end.date) are simply absent after
  # the filter above. 1.34e9 is the population used for per-100k scaling.
  anno.data <- filter(data, as.character(Dates) %in% c("2020-05-15", "2020-06-15",
                                                       "2020-07-15", "2020-08-15")
  ) %>%
    group_by(Dates) %>% summarise(diff = (max(value) - min(value)),
                                  value = max(value) * 1e5 / 1.34e9
    ) %>%
    mutate(y = ifelse(1 + value > 1.2 * value, 1.2 * value, 1 + value))
  # Vertical guide-line template, instantiated once per annotation date.
  line <- list(
    type = "line",
    xref = "x",
    yref = "y",
    y0 = 0,
    layer = "below",
    line = list(color = "#eee", width = 3)
  )
  lines <- list()
  for (i in seq(nrow(anno.data))) {
    line$x0 <- anno.data$Dates[i]
    line$x1 <- anno.data$Dates[i]
    line$y1 <- anno.data$y[i] - 0.1
    lines[[i]] <- line
  }
  colors <- c("#173F5F", "#0472CF", "#3CAEA3", "#f2c82e")
  # The guide lines / annotations are currently disabled (kept commented
  # out below), leaving only the smoothed scenario curves.
  p <- plot_ly(data, x = ~Dates, y = ~ value * 1e5 / 1.34e9, text = ~text,
               color = ~ color, colors = colors, type = "scatter",
               mode = "lines", hoverinfo = "text", line = list(width = 4),
               hoverlabel = list(align = "left")
  ) %>%
    layout(xaxis = xaxis, yaxis = yaxis,
           title = list(text = cap, xanchor = "left", x = 0),
           legend = list(orientation = "h", font = list(size = 16))
           # shapes = lines
    ) %>%
    # add_annotations(
    #     x = anno.data$Dates,
    #     y = anno.data$y,
    #     text = paste0("Difference between social distancing and <br>cautious return on ",
    #                   format(anno.data$Dates, "%B %e"), ": ",
    #                   format(anno.data$diff, big.mark = ",", trim = T, sci = F),
    #                   " cases<br>"
    #     ),
    #     align = "left",
    #     font = list(size = 16),
    #     xref = "x",
    #     yref = "y",
    #     showarrow = F
    # ) %>%
    plotly::config(toImageButtonOptions = list(width = NULL, height = NULL))
  # vroom_write(data, path = paste0(data_repo, today, "/plot5b.csv"),
  #             delim = ","
  # )
  p
}
|
# Compares diagnoses between the discovery and validation cohorts using a
# chi-squared goodness-of-fit test: validation diagnosis counts are tested
# against the discovery cohort's diagnosis proportions, with a Monte-Carlo
# simulated p-value. Results are written to a plain-text report.
library(argparse)
library(data.table)
# NOTE(review): rm(list = ls()) in a script is discouraged; kept for
# compatibility with how this pipeline was run.
rm(list = ls())
# Get arguments.
parser <- ArgumentParser()
parser$add_argument('--discovery-input', required = TRUE)
parser$add_argument('--validation-input', required = TRUE)
parser$add_argument('--output', required = TRUE)
parser$add_argument('--seed', type = 'integer', default = 283971)
args <- parser$parse_args()
# Fix: seed the RNG so the simulated p-value below is reproducible.
# --seed was previously parsed but never used, while
# chisq.test(simulate.p.value = TRUE) consumes random numbers.
set.seed(args$seed)
# Load the data.
message('Loading data')
dt.discovery <- fread(args$discovery_input)
dt.validation <- fread(args$validation_input)
# Calculate statistics.
message('Calculating statistics')
# Discovery diagnosis distribution, normalised to proportions.
p.discovery <- table(dt.discovery$diagnosis)
p.discovery <- p.discovery / sum(p.discovery)
tab.validation <- table(dt.validation$diagnosis)
# Goodness-of-fit test with B = 20000 Monte-Carlo replicates.
chisq.res <- chisq.test(tab.validation, p = p.discovery, simulate.p.value = TRUE, B = 20000)
# Write results (test summary plus standardised residuals per diagnosis).
message('Writing results')
sink(args$output)
cat('# Overall\n\n')
print(chisq.res)
cat('\n\n')
cat('# Std. residuals\n\n')
print(chisq.res$stdres)
sink()
|
/scripts/diagnoses/compare_diagnoses.R
|
permissive
|
morrislab/plos-medicine-joint-patterns
|
R
| false
| false
| 1,055
|
r
|
# Compares diagnoses between the discovery and validation cohorts using a
# chi-squared goodness-of-fit test: validation diagnosis counts are tested
# against the discovery cohort's diagnosis proportions, with a Monte-Carlo
# simulated p-value. Results are written to a plain-text report.
library(argparse)
library(data.table)
# NOTE(review): rm(list = ls()) in a script is discouraged; kept for
# compatibility with how this pipeline was run.
rm(list = ls())
# Get arguments.
parser <- ArgumentParser()
parser$add_argument('--discovery-input', required = TRUE)
parser$add_argument('--validation-input', required = TRUE)
parser$add_argument('--output', required = TRUE)
parser$add_argument('--seed', type = 'integer', default = 283971)
args <- parser$parse_args()
# Fix: seed the RNG so the simulated p-value below is reproducible.
# --seed was previously parsed but never used, while
# chisq.test(simulate.p.value = TRUE) consumes random numbers.
set.seed(args$seed)
# Load the data.
message('Loading data')
dt.discovery <- fread(args$discovery_input)
dt.validation <- fread(args$validation_input)
# Calculate statistics.
message('Calculating statistics')
# Discovery diagnosis distribution, normalised to proportions.
p.discovery <- table(dt.discovery$diagnosis)
p.discovery <- p.discovery / sum(p.discovery)
tab.validation <- table(dt.validation$diagnosis)
# Goodness-of-fit test with B = 20000 Monte-Carlo replicates.
chisq.res <- chisq.test(tab.validation, p = p.discovery, simulate.p.value = TRUE, B = 20000)
# Write results (test summary plus standardised residuals per diagnosis).
message('Writing results')
sink(args$output)
cat('# Overall\n\n')
print(chisq.res)
cat('\n\n')
cat('# Std. residuals\n\n')
print(chisq.res$stdres)
sink()
|
# Setup and configuration for the pairwise DEG-comparison upset plots:
# package loading, analysis paths, DEG cutoffs and annotation-column names.
suppressPackageStartupMessages(library(ComplexHeatmap))
suppressPackageStartupMessages(library(circlize))
suppressPackageStartupMessages(library(tidyverse))
suppressPackageStartupMessages(library(cowplot))
suppressPackageStartupMessages(library(RColorBrewer))
suppressPackageStartupMessages(library(data.table))
suppressPackageStartupMessages(library(here))
suppressPackageStartupMessages(library(org.HSapiens.gencodev30.eg.db))
suppressPackageStartupMessages(library(argparse))
##
## This script plots the combined upset plots for pairwise DEG comparisons
##
# NOTE(review): rm(list = ls()) and the absolute Windows paths below tie
# this script to one machine; consider relative paths for portability.
rm(list = ls())
source("E:/Chris_UM/GitHub/omics_util/02_RNAseq_scripts/s02_DESeq2_functions.R")
source(file = "E:/Chris_UM/GitHub/omics_util/04_GO_enrichment/s01_topGO_functions.R")
###########################################################################
# Output location: analysis/04_DEG_compare/DEG_pair_comparison.*
analysisName <- "DEG_pair_comparison"
outDir <- here::here("analysis", "04_DEG_compare")
outPrefix <- paste(outDir, analysisName, sep = "/")
file_RNAseq_info <- here::here("data", "reference_data", "DESeq2_DEG_info.txt")
diffDataPath <- here::here("analysis", "02_DESeq2_diff")
# DEG significance cutoffs: FDR 5%, |log2FC| >= 0.585 (~1.5-fold).
cutoff_fdr <- 0.05
cutoff_lfc <- 0.585
cutoff_up <- cutoff_lfc
cutoff_down <- -1 * cutoff_lfc
col_lfc <- "log2FoldChange"
# Annotation database and ID-column names used by downstream enrichment
# steps (some of these are not referenced again in the visible code).
orgDb <- org.HSapiens.gencodev30.eg.db
keggOrg <- 'hsa'
col_degOrgdbKey <- "ENSEMBL_VERSION"
col_kegg <- "NCBI_ID"
col_gsea <- "NCBI_ID"
col_topGO <- "ENSEMBL"
col_geneName <- "GENE_NAME"
file_topGO <- "E:/Chris_UM/Database/Human/GRCh38p12.gencode30/annotation_resources/geneid2go.HSapiens.GRCh38p12.topGO.map"
file_msigDesc <- "E:/Chris_UM/Database/Human/GRCh38p12.gencode30/annotation_resources/msigDB_geneset_desc.tab"
file_config <- here::here("analysis", "04_DEG_compare", "DEG_pair_compare.conf.tab")
###########################################################################
# Configuration table listing the DEG-result pairs to compare.
paircompConf <- suppressMessages(readr::read_tsv(file = file_config))
degResults <- union(paircompConf$deg1, paircompConf$deg2)
# Metadata for each DEG result; get_diff_info() comes from the sourced
# DESeq2 helper file.
rnaseqInfo <- get_diff_info(degInfoFile = file_RNAseq_info, dataPath = diffDataPath) %>%
dplyr::filter(comparison %in% degResults)
# One list element per comparison, named by comparison id.
rnaseqInfoList <- purrr::transpose(rnaseqInfo) %>%
purrr::set_names(nm = map(., "comparison"))
## function to extract the log2FoldChange, padj and diff coulumns for each DEG result file
# Read one DESeq2 result table and classify every gene as "up", "down" or
# "noDEG" using the global cutoffs (cutoff_up, cutoff_down, cutoff_fdr).
# Returns geneId, the fold-change column, padj, the classification, a
# `contrast` label column, plus any extra columns requested via otherCols.
get_foldchange <- function(degFile, name, lfcCol = "log2FoldChange", otherCols = NULL){
  resTable <- suppressMessages(readr::read_tsv(file = degFile))
  resTable <- dplyr::mutate(
    resTable,
    diff = dplyr::case_when(
      !!sym(lfcCol) >= cutoff_up & padj <= cutoff_fdr ~ "up",
      !!sym(lfcCol) <= cutoff_down & padj <= cutoff_fdr ~ "down",
      TRUE ~ "noDEG"
    ),
    contrast = !!name
  )
  # The noDEG filter is intentionally left disabled here:
  # dplyr::filter(resTable, diff != "noDEG")
  dplyr::select(resTable, geneId, !!lfcCol, padj, diff, contrast, !!!otherCols)
}
i <- 1
degData <- NULL
degLists <- purrr::map(
.x = rnaseqInfoList,
.f = function(x){
dt <- get_foldchange(degFile = x$deg, name = x$comparison,
lfcCol = col_lfc, otherCols = c("ENSEMBL")) %>%
dplyr::filter(diff != "noDEG") %>%
dplyr::mutate(
drug = x$drug,
concentration = x$concentration,
time = x$time
) %>%
tidyr::unite(col = "group", sep = ".", drug, diff, remove = FALSE)
split(x = dt$geneId, f = dt$group)
}
)
cmList <- purrr::transpose(paircompConf) %>%
purrr::set_names(nm = map(., "degPairId")) %>%
purrr::map(
.f = function(x){
cm <- make_comb_mat(c(degLists[[x$deg1]], degLists[[x$deg2]]), mode = "distinct")
}
)
sapply(cmList, comb_size)
cmNormList <- normalize_comb_mat(cmList)
sapply(cmNormList, comb_size)
sapply(cmNormList, set_name)
sapply(cmNormList, set_size)
sapply(cmNormList, comb_name)
sapply(cmNormList, comb_degree)
tmpCm <- cmNormList[[1]]
set_name(tmpCm)
set_size(tmpCm)
comb_name(tmpCm)
comb_size(tmpCm)
comb_degree(tmpCm)
## identify the up-up and down-down combinations and use different color
## Build the binary combination codes (one digit per set, in set_name order)
## for "all *.down sets" and "all *.up sets".
## NOTE(review): "." in the pattern is a regex wildcard, so any character
## followed by "down"/"up" matches; consider fixed = TRUE or "\\.down" —
## confirm against the actual set names.
grpsDD <- grepl(pattern = ".down", set_name(tmpCm))
combDD <- paste(as.numeric(grpsDD), collapse = "")
grpsUU <- grepl(pattern = ".up", set_name(tmpCm))
combUU <- paste(as.numeric(grpsUU), collapse = "")
## default every combination to black, then color down-down blue and up-up red
colorComb <- structure(rep("black", times = length(comb_name(tmpCm))), names = comb_name(tmpCm))
colorComb[c(combDD, combUU)] <- c("blue", "red")
## show the up-up and down-down combination first
combOrder <- forcats::as_factor(comb_name(tmpCm)) %>%
forcats::fct_relevel(combDD, combUU) %>%
order()
ht_list <- NULL
for (i in seq_along(cmNormList)) {
## generate Upset plot
pt <- UpSet(
m = cmNormList[[i]],
pt_size = unit(7, "mm"), lwd = 3,
set_order = set_name(tmpCm),
comb_order = combOrder,
comb_col = colorComb,
row_title = names(cmNormList)[i],
top_annotation = HeatmapAnnotation(
foo = anno_empty(border = FALSE),
"combSize" = anno_text(
x = paste("(", comb_size(cmNormList[[i]]), ")", sep = ""),
just = "center", rot = 0
),
"Intersection\nsize" = anno_barplot(
x = comb_size(cmNormList[[i]]),
border = FALSE,
gp = gpar(fill = colorComb),
height = unit(4, "cm")
),
annotation_name_side = "left",
annotation_name_rot = 0
),
right_annotation = NULL,
# right_annotation = upset_right_annotation(
# m = cmNormList[[i]], bar_width = 0.5
# ),
row_names_max_width = max_text_width(
set_name(cmNormList[[i]]), gp = gpar(fontsize = 12)
),
width = unit(12, "cm"), height = unit(3, "cm")
)
ht_list <- ht_list %v% pt
}
pdf(file = paste(outPrefix, ".combined_upset.pdf", sep = ""), width = 8, height = 14)
# png(filename = paste(outPrefix, ".overlap_upset.png", sep = ""), height = 1500, width = 4000, res = 200)
draw(ht_list)
dev.off()
|
/scripts/06_RNAseq_DEG_pairs_comp.upset.R
|
no_license
|
lakhanp1/39_Ben_RNAseq2_Tet
|
R
| false
| false
| 6,008
|
r
|
suppressPackageStartupMessages(library(ComplexHeatmap))
suppressPackageStartupMessages(library(circlize))
suppressPackageStartupMessages(library(tidyverse))
suppressPackageStartupMessages(library(cowplot))
suppressPackageStartupMessages(library(RColorBrewer))
suppressPackageStartupMessages(library(data.table))
suppressPackageStartupMessages(library(here))
suppressPackageStartupMessages(library(org.HSapiens.gencodev30.eg.db))
suppressPackageStartupMessages(library(argparse))
##
## This script plots the combined upset plots for pairwise DEG comparisons
##
rm(list = ls())
source("E:/Chris_UM/GitHub/omics_util/02_RNAseq_scripts/s02_DESeq2_functions.R")
source(file = "E:/Chris_UM/GitHub/omics_util/04_GO_enrichment/s01_topGO_functions.R")
###########################################################################
analysisName <- "DEG_pair_comparison"
outDir <- here::here("analysis", "04_DEG_compare")
outPrefix <- paste(outDir, analysisName, sep = "/")
file_RNAseq_info <- here::here("data", "reference_data", "DESeq2_DEG_info.txt")
diffDataPath <- here::here("analysis", "02_DESeq2_diff")
cutoff_fdr <- 0.05
cutoff_lfc <- 0.585
cutoff_up <- cutoff_lfc
cutoff_down <- -1 * cutoff_lfc
col_lfc <- "log2FoldChange"
orgDb <- org.HSapiens.gencodev30.eg.db
keggOrg <- 'hsa'
col_degOrgdbKey <- "ENSEMBL_VERSION"
col_kegg <- "NCBI_ID"
col_gsea <- "NCBI_ID"
col_topGO <- "ENSEMBL"
col_geneName <- "GENE_NAME"
file_topGO <- "E:/Chris_UM/Database/Human/GRCh38p12.gencode30/annotation_resources/geneid2go.HSapiens.GRCh38p12.topGO.map"
file_msigDesc <- "E:/Chris_UM/Database/Human/GRCh38p12.gencode30/annotation_resources/msigDB_geneset_desc.tab"
file_config <- here::here("analysis", "04_DEG_compare", "DEG_pair_compare.conf.tab")
###########################################################################
paircompConf <- suppressMessages(readr::read_tsv(file = file_config))
degResults <- union(paircompConf$deg1, paircompConf$deg2)
rnaseqInfo <- get_diff_info(degInfoFile = file_RNAseq_info, dataPath = diffDataPath) %>%
dplyr::filter(comparison %in% degResults)
rnaseqInfoList <- purrr::transpose(rnaseqInfo) %>%
purrr::set_names(nm = map(., "comparison"))
## function to extract the log2FoldChange, padj and diff columns for each DEG result file
## Reads a single DEG (differential expression) result TSV and adds:
##   diff     - "up" / "down" / "noDEG" classification of each gene
##   contrast - the supplied `name` label
## Only geneId, the LFC column, padj, diff, contrast and `otherCols` are kept.
## NOTE(review): cutoff_up, cutoff_down and cutoff_fdr are read from the
## script's global environment rather than passed as arguments.
get_foldchange <- function(degFile, name, lfcCol = "log2FoldChange", otherCols = NULL){
degs <- suppressMessages(readr::read_tsv(file = degFile)) %>%
dplyr::mutate(
diff = dplyr::case_when(
## significant, up-regulated
!!sym(lfcCol) >= cutoff_up & padj <= cutoff_fdr ~ "up",
## significant, down-regulated
!!sym(lfcCol) <= cutoff_down & padj <= cutoff_fdr ~ "down",
TRUE ~ "noDEG"
),
contrast = !!name
) %>%
# dplyr::filter(diff != "noDEG") %>%
dplyr::select(geneId, !!lfcCol, padj, diff, contrast, !!!otherCols)
return(degs)
}
i <- 1
degData <- NULL
degLists <- purrr::map(
.x = rnaseqInfoList,
.f = function(x){
dt <- get_foldchange(degFile = x$deg, name = x$comparison,
lfcCol = col_lfc, otherCols = c("ENSEMBL")) %>%
dplyr::filter(diff != "noDEG") %>%
dplyr::mutate(
drug = x$drug,
concentration = x$concentration,
time = x$time
) %>%
tidyr::unite(col = "group", sep = ".", drug, diff, remove = FALSE)
split(x = dt$geneId, f = dt$group)
}
)
cmList <- purrr::transpose(paircompConf) %>%
purrr::set_names(nm = map(., "degPairId")) %>%
purrr::map(
.f = function(x){
cm <- make_comb_mat(c(degLists[[x$deg1]], degLists[[x$deg2]]), mode = "distinct")
}
)
sapply(cmList, comb_size)
cmNormList <- normalize_comb_mat(cmList)
sapply(cmNormList, comb_size)
sapply(cmNormList, set_name)
sapply(cmNormList, set_size)
sapply(cmNormList, comb_name)
sapply(cmNormList, comb_degree)
tmpCm <- cmNormList[[1]]
set_name(tmpCm)
set_size(tmpCm)
comb_name(tmpCm)
comb_size(tmpCm)
comb_degree(tmpCm)
## identify the up-up and down-down combinations and use different color
grpsDD <- grepl(pattern = ".down", set_name(tmpCm))
combDD <- paste(as.numeric(grpsDD), collapse = "")
grpsUU <- grepl(pattern = ".up", set_name(tmpCm))
combUU <- paste(as.numeric(grpsUU), collapse = "")
colorComb <- structure(rep("black", times = length(comb_name(tmpCm))), names = comb_name(tmpCm))
colorComb[c(combDD, combUU)] <- c("blue", "red")
## show the up-up and down-down combination first
combOrder <- forcats::as_factor(comb_name(tmpCm)) %>%
forcats::fct_relevel(combDD, combUU) %>%
order()
ht_list <- NULL
for (i in seq_along(cmNormList)) {
## generate Upset plot
pt <- UpSet(
m = cmNormList[[i]],
pt_size = unit(7, "mm"), lwd = 3,
set_order = set_name(tmpCm),
comb_order = combOrder,
comb_col = colorComb,
row_title = names(cmNormList)[i],
top_annotation = HeatmapAnnotation(
foo = anno_empty(border = FALSE),
"combSize" = anno_text(
x = paste("(", comb_size(cmNormList[[i]]), ")", sep = ""),
just = "center", rot = 0
),
"Intersection\nsize" = anno_barplot(
x = comb_size(cmNormList[[i]]),
border = FALSE,
gp = gpar(fill = colorComb),
height = unit(4, "cm")
),
annotation_name_side = "left",
annotation_name_rot = 0
),
right_annotation = NULL,
# right_annotation = upset_right_annotation(
# m = cmNormList[[i]], bar_width = 0.5
# ),
row_names_max_width = max_text_width(
set_name(cmNormList[[i]]), gp = gpar(fontsize = 12)
),
width = unit(12, "cm"), height = unit(3, "cm")
)
ht_list <- ht_list %v% pt
}
pdf(file = paste(outPrefix, ".combined_upset.pdf", sep = ""), width = 8, height = 14)
# png(filename = paste(outPrefix, ".overlap_upset.png", sep = ""), height = 1500, width = 4000, res = 200)
draw(ht_list)
dev.off()
|
#' Color the Leaves of a Dendrogram Based on a Spectra Object
#'
#' *Internal function.* Applied iteratively via `dendrapply`; sets the label
#' color of each leaf node to the color recorded for the matching sample in
#' the `Spectra` object. Non-leaf nodes pass through unchanged. The code was
#' taken from the help files.
#'
#' @param n A node in a dendrogram object.
#'
#' @param spectra `r .writeDoc_Spectra1()`
#'
#' @return Returns a node with the label color properties set.
#'
#' @author `r .writeDoc_Authors("BH")`
#'
#' @keywords internal
#'
#' @export
#' @importFrom stats is.leaf
#'
.colLeaf <- function(n, spectra) {
  # A little trick to color leaves properly, derived from the archives
  # Part of the ChemoSpec package
  # Bryan Hanson, DePauw University, June 2008
  if (!is.leaf(n)) {
    return(n)
  }
  node_attr <- attributes(n)
  # Look up this leaf's sample by label to find its assigned color.
  pos <- match(node_attr$label, spectra$names)
  attr(n, "nodePar") <- c(
    node_attr$nodePar,
    list(lab.col = spectra$colors[pos], pch = NA)
  )
  n
}
|
/R/colLeaf.R
|
no_license
|
cran/ChemoSpecUtils
|
R
| false
| false
| 887
|
r
|
#' Color the Leaves of a Dendrogram Based on a Spectra Object
#'
#' *Internal function.* This function colors the leaves of a dendrogram object. The code was taken
#' from the help files.
#'
#' @param n A node in a dendrogram object.
#'
#' @param spectra `r .writeDoc_Spectra1()`
#'
#' @return Returns a node with the label color properties set.
#'
#' @author `r .writeDoc_Authors("BH")`
#'
#' @keywords internal
#'
#' @export
#' @importFrom stats is.leaf
#'
.colLeaf <- function(n, spectra) { # this is called iteratively by dendrapply
# A little trick to color leaves properly, derived from the archives
# Part of the ChemoSpec package
# Bryan Hanson, DePauw University, June 2008
# Only leaf nodes carry labels; internal nodes are returned unchanged.
if (is.leaf(n)) {
a <- attributes(n)
# Find the sample whose name matches this leaf's label ...
i <- match(a$label, spectra$names)
# ... and use its color for the label; pch = NA suppresses the point glyph.
attr(n, "nodePar") <- c(a$nodePar, list(
lab.col = spectra$colors[i],
pch = NA
))
}
n
}
|
## ----setup, include = FALSE----------------------------------------------
library(SSP)
library(ggplot2)
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
fig.retina=2,
fig.align='center',
fig.width = 7,
fig.height = 5,
warning = FALSE,
message = FALSE
)
## ----eval=FALSE----------------------------------------------------------
# library(SSP)
# data(micromollusk)
#
# #Estimation of parameters
# par.mic <- assempar(data = micromollusk, type = "P/A")
#
# #Simulation of data
# sim.mic <- simdata(Par = par.mic, cases = 20, N = 100, site = 1)
#
# # Quality of simulated data
# qua.mic <- datquality(data = micromollusk, dat.sim = sim.mic, Par = par.mic, transformation = "none", method = "jaccard")
#
# #Sampling and estimation of MultSE
# samp.mic <- sampsd(sim.mic, par.mic, transformation = "P/A", method = "jaccard", n = 50, m = 1, k = 10)
#
# #Summarizing results
# sum.mic <- summary_ssp(results = samp.mic, multi.site = FALSE)
#
# #Identification of optimal effort
# opt.mic <- ioptimum(xx = sum.mic, multi.site = FALSE)
#
# #plot
# fig.1 <- plot_ssp(xx = sum.mic, opt = opt.mic, multi.site = FALSE)
# fig.1
## ---- echo = FALSE, out.width='100%', fig.align='center', fig.cap='Fig. 1. MultSE and sampling effort relationship using micromollusk simulated data'----
knitr::include_graphics('fig1.png')
## ----eval=FALSE----------------------------------------------------------
# data(sponges)
#
# #Estimation of parameters
# par.spo <- assempar(data = sponges, type = "counts")
#
# #Simulation of data
# sim.spo <- simdata(Par = par.spo, cases = 10, N = 20, sites = 20)
#
# # Quality of simulated data
# qua.spo <- datquality(data = sponges, dat.sim = sim.spo, Par = par.spo, transformation = "square root", method = "bray")
#
# #Sampling and estimation of MultSE
# samp.spo <- sampsd(sim.spo, par.spo, transformation = "square root",
# method = "bray", n = 20, m = 20, k = 10)
#
# #Summarizing results
# sum.spo <- summary_ssp(results = samp.spo, multi.site = TRUE)
#
# #Identification of optimal effort
#
# opt.spo <- ioptimum(xx = sum.spo, multi.site = TRUE)
#
# #plot
# fig.2 <- plot_ssp(xx = sum.spo, opt = opt.spo, multi.site = TRUE)
# fig.2
#
## ---- echo = FALSE, out.width='100%', fig.align='center', fig.cap='Fig. 2. MultSE and sampling effort relationship using sponge simulated data'----
knitr::include_graphics('fig2.png')
## ------------------------------------------------------------------------
dat<-sponges[,2:length(sponges)]
#Square root transformation of abundances
dat.t<-sqrt(dat)
# Bray-Curtis dissimilarity
library(vegan)
bc<-vegdist(dat.t, method = "bray")
# Estimate the square roots of the PERMANOVA components of variation
# (among-site and residual) from a distance object/matrix `D` and a table
# `y` whose first column is the site factor of each sample.
# Returns c(sqrt(CV_sites), sqrt(CV_samples)); a negative component
# estimate yields NaN, as in the original formulation.
cv.permanova <- function(D, y) {
  D <- as.matrix(D)
  n_samp <- dim(D)[1]
  n_sites <- length(levels(y[, 1]))
  design <- model.matrix(~ y[, 1])
  # Projection ("hat") matrix of the one-way site model.
  hat_mat <- design %*% solve(t(design) %*% design) %*% t(design)
  eye <- diag(n_samp)
  # Gower-centered matrix of -0.5 * squared distances.
  a_mat <- -0.5 * D^2
  gower <- a_mat -
    apply(a_mat, 1, mean) %o% rep(1, n_samp) -
    rep(1, n_samp) %o% apply(a_mat, 2, mean) +
    mean(a_mat)
  ms_sites <- sum(gower * t(hat_mat)) / (n_sites - 1)             # mean square: sites
  ms_resid <- sum(gower * t(eye - hat_mat)) / (n_samp - n_sites)  # mean square: residual
  cv_sites <- (ms_sites - ms_resid) / (n_samp / n_sites)          # variation component: sites
  cv_resid <- ms_resid                                            # variation component: samples
  # Square roots put the components back on the dissimilarity scale.
  sqrt(c(cv_sites, cv_resid))
}
cv<-cv.permanova(D = bc, y = sponges)
cv
|
/vignettes/SSP-guide.R
|
no_license
|
edlinguerra/SSP
|
R
| false
| false
| 3,449
|
r
|
## ----setup, include = FALSE----------------------------------------------
library(SSP)
library(ggplot2)
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
fig.retina=2,
fig.align='center',
fig.width = 7,
fig.height = 5,
warning = FALSE,
message = FALSE
)
## ----eval=FALSE----------------------------------------------------------
# library(SSP)
# data(micromollusk)
#
# #Estimation of parameters
# par.mic <- assempar(data = micromollusk, type = "P/A")
#
# #Simulation of data
# sim.mic <- simdata(Par = par.mic, cases = 20, N = 100, site = 1)
#
# # Quality of simulated data
# qua.mic <- datquality(data = micromollusk, dat.sim = sim.mic, Par = par.mic, transformation = "none", method = "jaccard")
#
# #Sampling and estimation of MultSE
# samp.mic <- sampsd(sim.mic, par.mic, transformation = "P/A", method = "jaccard", n = 50, m = 1, k = 10)
#
# #Summarizing results
# sum.mic <- summary_ssp(results = samp.mic, multi.site = FALSE)
#
# #Identification of optimal effort
# opt.mic <- ioptimum(xx = sum.mic, multi.site = FALSE)
#
# #plot
# fig.1 <- plot_ssp(xx = sum.mic, opt = opt.mic, multi.site = FALSE)
# fig.1
## ---- echo = FALSE, out.width='100%', fig.align='center', fig.cap='Fig. 1. MultSE and sampling effort relationship using micromollusk simulated data'----
knitr::include_graphics('fig1.png')
## ----eval=FALSE----------------------------------------------------------
# data(sponges)
#
# #Estimation of parameters
# par.spo <- assempar(data = sponges, type = "counts")
#
# #Simulation of data
# sim.spo <- simdata(Par = par.spo, cases = 10, N = 20, sites = 20)
#
# # Quality of simulated data
# qua.spo <- datquality(data = sponges, dat.sim = sim.spo, Par = par.spo, transformation = "square root", method = "bray")
#
# #Sampling and estimation of MultSE
# samp.spo <- sampsd(sim.spo, par.spo, transformation = "square root",
# method = "bray", n = 20, m = 20, k = 10)
#
# #Summarizing results
# sum.spo <- summary_ssp(results = samp.spo, multi.site = TRUE)
#
# #Identification of optimal effort
#
# opt.spo <- ioptimum(xx = sum.spo, multi.site = TRUE)
#
# #plot
# fig.2 <- plot_ssp(xx = sum.spo, opt = opt.spo, multi.site = TRUE)
# fig.2
#
## ---- echo = FALSE, out.width='100%', fig.align='center', fig.cap='Fig. 2. MultSE and sampling effort relationship using sponge simulated data'----
knitr::include_graphics('fig2.png')
## ------------------------------------------------------------------------
dat<-sponges[,2:length(sponges)]
#Square root transformation of abundances
dat.t<-sqrt(dat)
# Bray-Curtis dissimilarity
library(vegan)
bc<-vegdist(dat.t, method = "bray")
#function to estimate components of variation in PERMANOVA
# D: distance object/matrix between samples; y: table whose first column
# is the site factor. Returns the square roots of the site and residual
# components of variation (NaN if a component estimate is negative).
cv.permanova <- function(D, y) {
D = as.matrix(D)
N = dim(D)[1]
g = length(levels(y[,1]))
X = model.matrix(~y[,1]) #model matrix
H = X %*% solve(t(X) %*% X) %*% t(X) #Hat matrix
I = diag(N) #Identity matrix
A = -0.5 * D^2
# Gower double-centering of A
G = A - apply(A, 1, mean) %o% rep(1, N) - rep(1, N) %o% apply(A, 2, mean) + mean(A)
MS1 = sum(G * t(H))/(g - 1) #Mean square of sites
MS2 = sum(G * t(I - H))/(N - g) #Mean square of residuals
CV1 = (MS1 - MS2)/(N/g)# Components of variation of sites
CV2 = MS2 # Components of variation of samples
CV = c(CV1, CV2)
sqrtCV = sqrt(CV)
return(sqrtCV) #square root of components of variation
}
cv<-cv.permanova(D = bc, y = sponges)
cv
|
# this is needed to make tensorflow happy
Sys.setenv(MKL_THREADING_LAYER = "GNU")
log <- file(snakemake@log[[1]], open="wt")
sink(log)
sink(log, type="message")
library(SingleCellExperiment)
library(tensorflow)
library(cellassign)
library(ComplexHeatmap)
library(viridis)
library(ggsci)
# TRUE when x is a double with a non-zero fractional part, i.e. a "real"
# float rather than an integer or a whole number stored as double.
is.float <- function(x) {
  is.double(x) && (x %% 1 != 0)
}
sce <- readRDS(snakemake@input[["sce"]])
parent <- snakemake@wildcards[["parent"]]
# get parent fit and filter sce to those cells
parent_fit <- snakemake@input[["fit"]]
if(length(parent_fit) > 0) {
parent_fit <- readRDS(parent_fit)$cell_type
is_parent_type <- rownames(parent_fit)[parent_fit$cell_type == parent]
sce <- sce[, is_parent_type]
}
markers <- read.table(snakemake@input[["markers"]], row.names = NULL, header = TRUE, sep="\t", stringsAsFactors = FALSE, na.strings = "")
markers[is.na(markers$parent), "parent"] <- "root"
markers <- markers[markers$parent == parent, ]
# convert markers into something cellAssign understands
# Strip leading/trailing whitespace from each element of a character vector.
trim <- function(x) gsub("^\\s+|\\s+$", "", x)

# Split one comma-separated marker string into trimmed gene names;
# an NA entry yields an empty vector.
get_genes <- function(x) {
  if (is.na(x)) {
    return(vector())
  }
  sapply(strsplit(x, ","), trim)
}
genes <- vector()
for(g in markers$genes) {
genes <- c(genes, get_genes(g))
}
genes <- sort(unique(genes))
if(length(genes) < 1) {
stop("Markers have to contain at least two different genes in union.")
}
marker_mat <- matrix(0, nrow = nrow(markers), ncol = length(genes))
colnames(marker_mat) <- genes
rownames(marker_mat) <- markers$name
for(i in 1:nrow(markers)) {
cell_type <- markers[i, "name"]
marker_mat[cell_type, ] <- genes %in% get_genes(markers[i, "genes"])
}
marker_mat <- t(marker_mat)
marker_mat <- marker_mat[rownames(marker_mat) %in% rownames(sce),, drop=FALSE]
# apply cellAssign
sce <- sce[rownames(marker_mat), ]
# remove genes with 0 counts in all cells and cells with 0 counts in all genes
sce <- sce[rowSums(counts(sce)) != 0, colSums(counts(sce)) != 0]
# obtain batch effect model
model <- readRDS(snakemake@input[["design_matrix"]])
# constrain to selected cells and remove intercept (not allowed for cellassign)
model <- model[colnames(sce), colnames(model) != "(Intercept)"]
# normalize float columns (as recommended in cellassign manual)
float_cols <- apply(model, 2, is.float)
model[, float_cols] <- apply(model[, float_cols], 2, scale)
if(nrow(sce) == 0) {
stop("Markers do not match any gene names in the count matrix.")
}
# fit
fit <- cellassign(exprs_obj = sce, marker_gene_info = marker_mat, s = sizeFactors(sce), learning_rate = 1e-2, B = 20, shrinkage = TRUE, X = model)
# add cell names to results
cells <- colnames(sce)
rownames(fit$mle_params$gamma) <- cells
fit$cell_type <- data.frame(cell_type = fit$cell_type)
rownames(fit$cell_type) <- cells
saveRDS(fit, file = snakemake@output[["fit"]])
save.image()
# plot heatmap
source(file.path(snakemake@scriptdir, "common.R"))
sce <- assign_celltypes(fit, sce, snakemake@params[["min_gamma"]])
pdf(file = snakemake@output[["heatmap"]])
pal <- pal_d3("category20")(ncol(marker_mat))
names(pal) <- colnames(marker_mat)
celltype <- HeatmapAnnotation(df = data.frame(celltype = colData(sce)$celltype), col = list(celltype = pal))
Heatmap(logcounts(sce), col = viridis(100), clustering_distance_columns = "canberra", clustering_distance_rows = "canberra", use_raster = TRUE, show_row_dend = FALSE, show_column_dend = FALSE, show_column_names = FALSE, top_annotation = celltype, name = "logcounts")
dev.off()
|
/scripts/cellassign.R
|
permissive
|
koesterlab/single-cell-rna-seq
|
R
| false
| false
| 3,501
|
r
|
# this is needed to make tensorflow happy
Sys.setenv(MKL_THREADING_LAYER = "GNU")
log <- file(snakemake@log[[1]], open="wt")
sink(log)
sink(log, type="message")
library(SingleCellExperiment)
library(tensorflow)
library(cellassign)
library(ComplexHeatmap)
library(viridis)
library(ggsci)
# TRUE for a double with a non-zero fractional part; used below to pick
# model-matrix columns that need scaling.
# NOTE(review): `&&` assumes a scalar x (errors on length > 1 in R >= 4.3).
is.float <- function(x) {
(typeof(x) == "double") && (x %% 1 != 0)
}
sce <- readRDS(snakemake@input[["sce"]])
parent <- snakemake@wildcards[["parent"]]
# get parent fit and filter sce to those cells
parent_fit <- snakemake@input[["fit"]]
if(length(parent_fit) > 0) {
parent_fit <- readRDS(parent_fit)$cell_type
is_parent_type <- rownames(parent_fit)[parent_fit$cell_type == parent]
sce <- sce[, is_parent_type]
}
markers <- read.table(snakemake@input[["markers"]], row.names = NULL, header = TRUE, sep="\t", stringsAsFactors = FALSE, na.strings = "")
markers[is.na(markers$parent), "parent"] <- "root"
markers <- markers[markers$parent == parent, ]
# convert markers into something cellAssign understands
trim <- function (x) gsub("^\\s+|\\s+$", "", x)  # strip surrounding whitespace
# Split one comma-separated marker string into trimmed gene names;
# an NA entry yields an empty vector.
get_genes <- function (x) {
if(is.na(x)) {
vector()
} else {
sapply(strsplit(x, ","), trim)
}
}
genes <- vector()
for(g in markers$genes) {
genes <- c(genes, get_genes(g))
}
genes <- sort(unique(genes))
if(length(genes) < 1) {
stop("Markers have to contain at least two different genes in union.")
}
marker_mat <- matrix(0, nrow = nrow(markers), ncol = length(genes))
colnames(marker_mat) <- genes
rownames(marker_mat) <- markers$name
for(i in 1:nrow(markers)) {
cell_type <- markers[i, "name"]
marker_mat[cell_type, ] <- genes %in% get_genes(markers[i, "genes"])
}
marker_mat <- t(marker_mat)
marker_mat <- marker_mat[rownames(marker_mat) %in% rownames(sce),, drop=FALSE]
# apply cellAssign
sce <- sce[rownames(marker_mat), ]
# remove genes with 0 counts in all cells and cells with 0 counts in all genes
sce <- sce[rowSums(counts(sce)) != 0, colSums(counts(sce)) != 0]
# obtain batch effect model
model <- readRDS(snakemake@input[["design_matrix"]])
# constrain to selected cells and remove intercept (not allowed for cellassign)
model <- model[colnames(sce), colnames(model) != "(Intercept)"]
# normalize float columns (as recommended in cellassign manual)
float_cols <- apply(model, 2, is.float)
model[, float_cols] <- apply(model[, float_cols], 2, scale)
if(nrow(sce) == 0) {
stop("Markers do not match any gene names in the count matrix.")
}
# fit
fit <- cellassign(exprs_obj = sce, marker_gene_info = marker_mat, s = sizeFactors(sce), learning_rate = 1e-2, B = 20, shrinkage = TRUE, X = model)
# add cell names to results
cells <- colnames(sce)
rownames(fit$mle_params$gamma) <- cells
fit$cell_type <- data.frame(cell_type = fit$cell_type)
rownames(fit$cell_type) <- cells
saveRDS(fit, file = snakemake@output[["fit"]])
save.image()
# plot heatmap
source(file.path(snakemake@scriptdir, "common.R"))
sce <- assign_celltypes(fit, sce, snakemake@params[["min_gamma"]])
pdf(file = snakemake@output[["heatmap"]])
pal <- pal_d3("category20")(ncol(marker_mat))
names(pal) <- colnames(marker_mat)
celltype <- HeatmapAnnotation(df = data.frame(celltype = colData(sce)$celltype), col = list(celltype = pal))
Heatmap(logcounts(sce), col = viridis(100), clustering_distance_columns = "canberra", clustering_distance_rows = "canberra", use_raster = TRUE, show_row_dend = FALSE, show_column_dend = FALSE, show_column_names = FALSE, top_annotation = celltype, name = "logcounts")
dev.off()
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/erp_preprocess.R
\name{artrejOptions}
\alias{artrejOptions}
\title{Options for artifact rejection}
\usage{
artrejOptions(sampling_freq = 1000, channels = "all",
apply_maxgrad = TRUE, maxgrad_limit = 50, maxgrad_mark = c(-200, 200),
apply_diffrange = TRUE, diffrange_limit = c(0.5, 100),
diffrange_mark = c(-200, 200), diffrange_interval = 200,
apply_amplrange = TRUE, amplrange_limit = c(-200, 200),
amplrange_mark = c(-200, 200))
}
\arguments{
\item{sampling_freq}{numeric value, the sampling frequency of the EEG-data}
\item{channels}{character vector containing the name or index of channels
which are subject to artifact rejection. If set to "all" (default), all
channels are included.}
\item{apply_maxgrad}{logical value, if set to TRUE (default), the maximum
gradient criterion is applied.}
\item{maxgrad_limit}{numeric value, the maximum gradient / millisecond
(default: 50)}
\item{maxgrad_mark}{numeric vector of length 2; the placement of the Bad
Interval mark in milliseconds before and after the occurrence of maxgrad_limit
violation (default: c(-200, 200))}
\item{apply_diffrange}{logical value, if set to TRUE (default), the difference
range criterion is applied.}
\item{diffrange_limit}{numeric vector of length 2, the minimum and maximum
voltage difference in a given interval (default: 200)}
\item{diffrange_mark}{numeric vector of length 2; the placement of the Bad
Interval mark in milliseconds before and after the occurrence of diffrange_limit
violation (default: c(-200, 200))}
\item{diffrange_interval}{numeric value, the length of interval for the
difference range criterion in milliseconds (default: 200)}
\item{apply_amplrange}{logical value, if set to TRUE (default), the amplitude
range criterion is applied.}
\item{amplrange_limit}{numeric vector of length 2, the minimum and maximum
voltage in the whole segment (default: c(-200, 200))}
\item{amplrange_mark}{numeric vector of length 2; the placement of the Bad
Interval mark in milliseconds before and after the occurrence of
amplrange_limit violation (default: c(-200, 200))}
}
\value{
A list object with all parameters.
}
\description{
\code{artrejOptions} allows to set the parameters of the artifact rejection
methods.
}
\details{
The short definitions of the possible artifact rejection criteria
are as follows:
\itemize{
\item{Maximum gradient:}{The absolute difference between the voltages
measured at successive milliseconds.}
\item{Difference range:}{The minimum and maximum difference between the
maximum and minimum voltages in a given sampling interval.}
\item{Amplitude range:}{The minimum and maximum voltages in the segments.}
}
}
\note{
The algorithm takes care of the sampling frequency for all parameters
which are provided in milliseconds (or /ms) and makes adjustments if needed.
However, *_mark parameters are not used since only segmented data can be
analyzed in the present version of artifactRejection().
}
|
/man/artrejOptions.Rd
|
no_license
|
kapilsaxena33/eegR
|
R
| false
| false
| 3,016
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/erp_preprocess.R
\name{artrejOptions}
\alias{artrejOptions}
\title{Options for artifact rejection}
\usage{
artrejOptions(sampling_freq = 1000, channels = "all",
apply_maxgrad = TRUE, maxgrad_limit = 50, maxgrad_mark = c(-200, 200),
apply_diffrange = TRUE, diffrange_limit = c(0.5, 100),
diffrange_mark = c(-200, 200), diffrange_interval = 200,
apply_amplrange = TRUE, amplrange_limit = c(-200, 200),
amplrange_mark = c(-200, 200))
}
\arguments{
\item{sampling_freq}{numeric value, the sampling frequency of the EEG-data}
\item{channels}{character vector containing the name or index of channels
which are subject to artifact rejection. If set to "all" (default), all
channels are included.}
\item{apply_maxgrad}{logical value, if set to TRUE (default), the maximum
gradient criterion is applied.}
\item{maxgrad_limit}{numeric value, the maximum gradient / millisecond
(default: 50)}
\item{maxgrad_mark}{numeric vector of length 2; the placement of the Bad
Interval mark in milliseconds before and after the occurence of maxgrad_limit
violation (default: c(-200, 200))}
\item{apply_diffrange}{logical value, if set to TRUE (default), the difference
range criterion is applied.}
\item{diffrange_limit}{numeric vector of length 2, the minimum and maximum
voltage difference in a given interval (default: 200)}
\item{diffrange_mark}{numeric vector of length 2; the placement of the Bad
Interval mark in milliseconds before and after the occurence of diffrange_limit
violation (default: c(-200, 200))}
\item{diffrange_interval}{numeric value, the length of interval for the
difference range criterion in milliseconds (default: 200)}
\item{apply_amplrange}{logical value, if set to TRUE (default), the amplitude
range criterion is applied.}
\item{amplrange_limit}{numeric vector of length 2, the minimum and maximum
voltage in the whole segment (default: c(-200, 200))}
\item{amplrange_mark}{numeric vector of length 2; the placement of the Bad
Interval mark in milliseconds before and after the occurence of
amplrange_limit violation (default: c(-200, 200))}
}
\value{
A list object with all parameters.
}
\description{
\code{artrejOptions} allows to set the parameters of the artifact rejection
methods.
}
\details{
The short definitions of the possible artifact rejection criteria
are as follows:
\itemize{
\item{Maximum gradient:}{The absolute difference between the voltages
measured at successive milliseconds.}
\item{Difference range:}{The minimum and maximum difference between the
maximum and minimum voltages in a given sampling interval.}
\item{Amplitude range:}{The minimum and maximum voltages in the segments.}
}
}
\note{
The algorithm takes care of the sampling frequency for all parameters
which are provided in milliseconds (or /ms) and makes adjustments if needed.
However, *_mark parameters are not used since only segmented data can be
analyzed in the present version of artifactRejection().
}
|
#' Detection of outliers of zero-inflated data
#'
#' Detects outliers in compositional zero-inflated data. Missing/zero parts
#' are imputed, observations are grouped by their missingness pattern, and
#' for each pattern a robust (MCD-based) Mahalanobis-type distance is
#' computed in ilr coordinates restricted to the originally observed parts.
#'
#' @param x a data frame
#' @param impute imputation method internally used: "knn" (VIM::kNN, the
#'   default), "impKNNa", or "irmi" (impCoda)
#' @details XXX
#' @return the input data with an `ID` column plus `mah` (robust distance),
#'   `pval`, `mahcorr` (distance scaled by the 97.5\% quantile) and a logical
#'   `outlier` flag (`mahcorr > 1`)
#' @export
#' @author Matthias Templ
#' @examples
#' ### Installing and loading required packages
#' data(expenditures)
zeroOut <- function(x, impute = "knn") {
  ## @Matthias Templ, TU WIEN, 2012
  rownames(x) <- seq_len(nrow(x))
  ind <- seq_len(ncol(x))
  D <- ncol(x)
  ## 1. impute the missing parts with the requested method
  if (impute %in% c("impKNNa", "knna", "KNNa")) {
    xi <- impKNNa(x)$xImp
  } else if (impute %in% c("knn", "KNN", "kNN")) {
    xi <- kNN(x, imp_var = FALSE)
    # } else if (impute %in% c("fry", "Fry", "FRY")){
    #   xi <- rmzero(x, minval=0.01, delta=0.01)
  } else if (impute %in% c("IRMI", "irmi", "Irmi")) {
    xi <- impCoda(x, init = "geometricmean")$xImp
  } else {
    stop("wrong method for imputation specified")
  }
  x <- cbind(x, ID = seq_len(nrow(x)))
  xi <- cbind(xi, ID = seq_len(nrow(x)))
  ## make sure that xi is a data.frame.
  ## Fixed: `class(xi) == "matrix"` has length 2 for matrices since R 4.0
  ## (class is c("matrix", "array")), which errors inside if() in R >= 4.2.
  if (is.matrix(xi)) xi <- data.frame(xi)
  w <- is.na(x[, ind])
  ## encode each row's missingness pattern as a key, e.g. "TRUE:FALSE:..."
  s <- apply(w, 1, paste, collapse = ":")
  xs <- split(xi, s)
  ## For each missingness pattern, report which variables were missing.
  getSortIndex <- function(x, s) {
    xs <- split(x, s)
    ## TRUE when zero
    lapply(xs, function(x) {
      is.na(x[1, ])
    })
  }
  si <- getSortIndex(x[, ind], s)
  zneworder <- xs
  mah <- pval <- mahcorr <- IDlist <- list()
  for (i in seq_along(xs)) {
    ## decode the pattern key back into a logical "was missing" vector
    index <- names(xs[i])
    index <- as.logical(strsplit(index, ":")[[1]])
    sortedxs <- xs[[i]]
    wt <- which(index)   # originally missing (now imputed) parts
    wf <- which(!index)  # originally observed parts
    sortedxs <- sortedxs[, c(wt, wf)]
    ## ilr coordinates with the imputed parts placed first
    zneworder <- isomLR(sortedxs)
    zcovs <- robustbase::covMcd(zneworder)
    ## distances use only the coordinates driven by observed parts
    if (length(wf) == 2) {
      ## a single informative coordinate -> robust univariate z-score
      p <- ncol(zneworder)
      zscore <- (zneworder[, p] - zcovs$center[p]) / sqrt(zcovs$cov[p, p])
      mah[[i]] <- abs(zscore)
      pval[[i]] <- pnorm(mah[[i]])
      mahcorr[[i]] <- mah[[i]] / qnorm(0.975)
      names(mahcorr[[i]]) <- names(pval[[i]]) <- names(mah[[i]]) <- rownames(xs[[i]])
    } else if (length(wf) > 2) {
      ## several informative coordinates -> robust Mahalanobis distance
      noneff <- c((length(wt) + 1):(D - 1))
      mah[[i]] <- sqrt(as.numeric(mahalanobis(zneworder[, noneff], center = zcovs$center[noneff], cov = zcovs$cov[noneff, noneff])))
      pval[[i]] <- pchisq((mah[[i]])^2, length(wf))
      mahcorr[[i]] <- mah[[i]] / sqrt(qchisq(0.975, ncol(zneworder[, noneff])))
      names(mahcorr[[i]]) <- names(pval[[i]]) <- names(mah[[i]]) <- rownames(xs[[i]])
    } else {
      ## fewer than two observed parts: distance undefined for this pattern
      mah[[i]] <- NA
      pval[[i]] <- NA
      mahcorr[[i]] <- NA
    }
    IDlist[[i]] <- xs[[i]][ncol(xs[[i]])]
  }
  ## list --> data.frame
  nam <- names(unlist(mahcorr))
  df <- data.frame("mah" = as.numeric(unlist(mah)), "pval" = as.numeric(unlist(pval)), "mahcorr" = as.numeric(unlist(mahcorr)),
                   "ID" = nam)
  df <- merge(x, df, by = "ID")
  df <- cbind(df, "outlier" = df$mahcorr > 1)
  return(df)
}
|
/robCompositions/R/zeroOut.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 3,006
|
r
|
#' Detection of outliers of zero-inflated data
#'
#' Detects outliers in compositional zero-inflated data. Zeros/missings are
#' imputed first, observations are grouped by their pattern of zeros, and a
#' robust (MCD-based) Mahalanobis-type distance is computed within each
#' pattern in ilr (isometric log-ratio) coordinates.
#'
#' @param x a data frame of compositional data with zeros coded as \code{NA}
#' @param impute imputation method internally used; one of \code{"knn"}
#'   (default, via \code{kNN}), \code{"impKNNa"} or \code{"irmi"}
#'   (via \code{impCoda})
#' @details Within each zero pattern the zero parts are moved to the front so
#'   that the trailing ilr coordinates describe only the observed parts; the
#'   robust distance is computed on those trailing coordinates.
#' @return the input data frame augmented with columns \code{mah} (robust
#'   distance), \code{pval}, \code{mahcorr} (distance scaled by the 97.5\%
#'   quantile of its reference distribution) and a logical \code{outlier}
#'   flag (\code{mahcorr > 1})
#' @export
#' @author Matthias Templ
#' @examples
#' ### Installing and loading required packages
#' data(expenditures)
zeroOut <- function(x, impute="knn"){
  ## @Matthias Templ, TU WIEN, 2012
  ## Label rows so per-pattern results can be merged back onto the input.
  rownames(x) <- 1:nrow(x)
  ind <- 1:ncol(x)
  D <- ncol(x)
  ## 1. impute zeros/missings before moving to ilr coordinates
  if(impute %in% c("impKNNa","knna","KNNa")){
    xi <- impKNNa(x)$xImp
  } else if (impute %in% c("knn", "KNN", "kNN")){
    xi <- kNN(x, imp_var = FALSE)
  # } else if (impute %in% c("fry", "Fry", "FRY")){
  #   xi <- rmzero(x, minval=0.01, delta=0.01)
  } else if (impute %in% c("IRMI","irmi","Irmi")){
    xi <- impCoda(x, init="geometricmean")$xImp
  } else {
    stop("wrong method for imputation specified")
  }
  x <- cbind(x, ID=1:nrow(x))
  xi <- cbind(xi, ID=1:nrow(x))
  ## make sure that xi is a data.frame:
  ## FIX: use inherits() -- since R 4.0 a matrix has class
  ## c("matrix", "array"), so class(xi)=="matrix" yields a length-2 logical
  ## and fails inside if().
  if(inherits(xi, "matrix")) xi <- data.frame(xi)
  w <- is.na(x[, ind])
  ## Encode each observation's zero pattern as a string key, e.g.
  ## "TRUE:FALSE:FALSE", and split the imputed data by that pattern.
  s <- apply(w, 1, paste, collapse=":")
  xs <- split(xi, s)
  mah <- pval <- mahcorr <- IDlist <- list()
  for(i in 1:length(xs)){
    ## Recover the logical zero pattern from the split key.
    index <- names(xs[i])
    index <- as.logical(strsplit(index, ":")[[1]])
    sortedxs <- xs[[i]]
    wt <- which(index)   # parts that were zero/missing in this pattern
    wf <- which(!index)  # observed parts
    ## Put the zero parts first so the trailing ilr coordinates carry the
    ## information on the observed parts only.
    sortedxs <- sortedxs[, c(wt,wf)]
    zneworder <- isomLR(sortedxs)
    zcovs <- robustbase::covMcd(zneworder)
    ## took only last columns of xs
    if(length(wf) == 2){
      ## Exactly one relevant coordinate: use a robust z-score.
      p <- ncol(zneworder)
      zscore <- (zneworder[, p] - zcovs$center[p]) / sqrt(zcovs$cov[p,p])
      mah[[i]] <- abs(zscore)
      pval[[i]] <- pnorm(mah[[i]])
      ## Scale so that values > 1 flag outlyingness at the 97.5% level.
      mahcorr[[i]] <- mah[[i]] / qnorm(0.975)
      names(mahcorr[[i]]) <- names(pval[[i]]) <- names(mah[[i]]) <- rownames(xs[[i]])
    } else if(length(wf) > 2){
      ## Trailing coordinates belonging to the observed parts.
      noneff <- c((length(wt) + 1):(D-1))
      mah[[i]] <- sqrt(as.numeric(mahalanobis(zneworder[, noneff], center=zcovs$center[noneff], cov=zcovs$cov[noneff, noneff])))
      ## NOTE(review): pval uses df = length(wf) while mahcorr uses
      ## df = length(noneff) = length(wf) - 1 -- confirm which is intended.
      pval[[i]] <- pchisq((mah[[i]])^2, length(wf))
      mahcorr[[i]] <- mah[[i]] / sqrt(qchisq(0.975, ncol(zneworder[, noneff])))
      names(mahcorr[[i]]) <- names(pval[[i]]) <- names(mah[[i]]) <- rownames(xs[[i]])
    } else{
      ## Fewer than two observed parts: distance not defined.
      mah[[i]] <- NA
      pval[[i]] <- NA
      mahcorr[[i]] <- NA
    }
    IDlist[[i]] <- xs[[i]][ncol(xs[[i]])]
  }
  ## list --> data.frame, keyed by ID so results line up with the input rows
  nam <- names(unlist(mahcorr))
  df <- data.frame("mah"=as.numeric(unlist(mah)), "pval"=as.numeric(unlist(pval)), "mahcorr"=as.numeric(unlist(mahcorr)),
                   "ID"=nam)
  df <- merge(x, df, by="ID")
  df <- cbind(df, "outlier"=df$mahcorr > 1)
  return(df)
}
|
# make SCALED rasters
### this code only needs to be run to regenerate the raster stack.
predTemplate <- raster('E:/NASData/Eddy/RefRaster.tif')

# Min-max scale one covariate raster onto the prediction grid, using the
# TRAINING-data range so predictions are on the same scale the models saw:
#   (x - min_train) / (max_train - min_train)
# path: file path of the raw raster layer
# var:  covariate name used to index covars_Joint_min.train / _max.train
# prep: transform applied BEFORE resampling (e.g. unit conversion)
# post: transform applied AFTER resampling (e.g. log10)
scale_to_train <- function(path, var, prep = identity, post = identity) {
  r <- post(resample(prep(raster(path)), predTemplate))
  scale(r,
        center = covars_Joint_min.train[var],
        scale = covars_Joint_max.train[var] - covars_Joint_min.train[var])
}

SST_jan_2009 <- scale_to_train('E:/NASData/AcoustoVisualDE/SST/SST_jan_2009.tif', "SST")
SST_july_2009 <- scale_to_train('E:/NASData/AcoustoVisualDE/SST/SST_july_2009.tif', "SST")
SSH_jan_2009 <- scale_to_train('E:/NASData/AcoustoVisualDE/SSH/SSH_jan2009.tif', "SSH")
SSH_july_2009 <- scale_to_train('E:/NASData/AcoustoVisualDE/SSH/SSH_july2009.tif', "SSH")
# CHL is log10-transformed after resampling (same order as the original code)
log10_CHL_jan_2009 <- scale_to_train('E:/NASData/AcoustoVisualDE/CHL/CHL_jan2009.tif', "log10_CHL", post = log10)
log10_CHL_july_2009 <- scale_to_train('E:/NASData/AcoustoVisualDE/CHL/CHL_july2009.tif', "log10_CHL", post = log10)
# MLD rasters are already log10-transformed on disk
log10_HYCOM_MLD_jan2009 <- scale_to_train('E:/NASData/AcoustoVisualDE/MLD/log10_mld_jan2009.tif', "log10_HYCOM_MLD")
log10_HYCOM_MLD_july2009 <- scale_to_train('E:/NASData/AcoustoVisualDE/MLD/log10_mld_july2009.tif', "log10_HYCOM_MLD")
SAL0_jan_2009 <- scale_to_train('E:/NASData/AcoustoVisualDE/Salinity/sal0_jan2009.tif', "HYCOM_SALIN_0")
SAL0_july_2009 <- scale_to_train('E:/NASData/AcoustoVisualDE/Salinity/sal0_july2009.tif', "HYCOM_SALIN_0")
SAL100_jan_2009 <- scale_to_train('E:/NASData/AcoustoVisualDE/Salinity/sal100_jan2009_albers.tif', "HYCOM_SALIN_100")
SAL100_july_2009 <- scale_to_train('E:/NASData/AcoustoVisualDE/Salinity/sal100_july2009_albers.tif', "HYCOM_SALIN_100")
log10_HYCOM_MAG_0_jan2009 <- scale_to_train('E:/NASData/AcoustoVisualDE/HYCOM_Mag/log10_mag_jan2009.tif', 'log10_HYCOM_MAG_0')
log10_HYCOM_MAG_0_july2009 <- scale_to_train('E:/NASData/AcoustoVisualDE/HYCOM_Mag/log10_mag_july2009.tif', 'log10_HYCOM_MAG_0')
HYCOM_UPVEL_jan2009 <- scale_to_train('E:/NASData/AcoustoVisualDE/HYCOM_UpVel/proj_HYCOM_upvel_50_jan09.tif', 'HYCOM_UPVEL_50')
HYCOM_UPVEL_july2009 <- scale_to_train('E:/NASData/AcoustoVisualDE/HYCOM_UpVel/proj_HYCOM_upvel_50_july09.tif', 'HYCOM_UPVEL_50')
log10_Cayula_jan_2009 <- scale_to_train('E:/NASData/AcoustoVisualDE/Cayula/log10_CayulaFront_jan2009.tif', "log10_FrontDist_Cayula")
log10_Cayula_july_2009 <- scale_to_train('E:/NASData/AcoustoVisualDE/Cayula/log10_CayulaFront_july2009.tif', "log10_FrontDist_Cayula")
Eddy_jan_2009 <- scale_to_train('E:/NASData/Eddy/JPL_ManualEddyDist/Climatologies/Projected/eddyDist_Jan_2009_proj.tif', "EddyDist")
Eddy_july_2009 <- scale_to_train('E:/NASData/Eddy/JPL_ManualEddyDist/Climatologies/Projected/eddyDist_July_2009_proj.tif', "EddyDist")
# Negative-eddy distances are divided by 1000 before resampling
# (presumably m -> km to match the training covariate units -- confirm)
Neg_Eddy_jan_2009 <- scale_to_train('E:/NASData/AcoustoVisualDE/EddyDist/DistNegMSLA_eddy_polarities_2009015.img', "Neg_EddyDist", prep = function(r) r / 1000)
Neg_Eddy_july_2009 <- scale_to_train('E:/NASData/AcoustoVisualDE/EddyDist/Neg_EddyDist_july2009.tif', "Neg_EddyDist", prep = function(r) r / 1000)
Pos_Eddy_jan_2009 <- scale_to_train('E:/NASData/AcoustoVisualDE/EddyDist/PosEddyDist_Jan_2009.tif', "Pos_EddyDist")
Pos_Eddy_july_2009 <- scale_to_train('E:/NASData/AcoustoVisualDE/EddyDist/PosEddyDist_Jul_2009.tif', "Pos_EddyDist")

# Stack the layers used for prediction (Cayula, SAL100 and the combined
# EddyDist layers are computed above but deliberately excluded from the
# bricks, matching the original commented-out entries).
jan2009_rasters <- brick(log10_CHL_jan_2009, SST_jan_2009, SSH_jan_2009, #log10_Cayula_jan_2009,
                         SAL0_jan_2009, #SAL100_jan_2009, Eddy_jan_2009,
                         Neg_Eddy_jan_2009, Pos_Eddy_jan_2009,
                         log10_HYCOM_MAG_0_jan2009, log10_HYCOM_MLD_jan2009, HYCOM_UPVEL_jan2009)
july2009_rasters <- brick(log10_CHL_july_2009, SST_july_2009, SSH_july_2009, #log10_Cayula_july_2009,
                          SAL0_july_2009, #SAL100_july_2009, Eddy_july_2009,
                          Neg_Eddy_july_2009, Pos_Eddy_july_2009,
                          log10_HYCOM_MAG_0_july2009, log10_HYCOM_MLD_july2009, HYCOM_UPVEL_july2009)
# Layer names must match the covariate names used when fitting the models;
# define once so the two bricks cannot drift apart.
layer_names <- c('log10_CHL', 'SST', 'SSH',
                 'HYCOM_SALIN_0', #'HYCOM_SALIN_100', 'EddyDist',
                 'Neg_EddyDist', "Pos_EddyDist",
                 'log10_HYCOM_MAG_0', 'log10_HYCOM_MLD', 'HYCOM_UPVEL_50') #'log10_FrontDist_Cayula',
names(jan2009_rasters) <- layer_names
names(july2009_rasters) <- layer_names
save(jan2009_rasters, july2009_rasters,
     file = 'E:/NASData/AcoustoVisualDE/AcoustoVisualDE/2009_prediction_rasters_scaled.Rdata')
|
/compute_scaled_2009_prediction_rasters.R
|
no_license
|
kfrasier/AcoustoVisualDE
|
R
| false
| false
| 8,568
|
r
|
# make SCALED rasters
### this code only needs to be run to regenerate the raster stack.
predTemplate <- raster('E:/NASData/Eddy/RefRaster.tif')

# Min-max scale one covariate raster onto the prediction grid, using the
# TRAINING-data range so predictions are on the same scale the models saw:
#   (x - min_train) / (max_train - min_train)
# path: file path of the raw raster layer
# var:  covariate name used to index covars_Joint_min.train / _max.train
# prep: transform applied BEFORE resampling (e.g. unit conversion)
# post: transform applied AFTER resampling (e.g. log10)
scale_to_train <- function(path, var, prep = identity, post = identity) {
  r <- post(resample(prep(raster(path)), predTemplate))
  scale(r,
        center = covars_Joint_min.train[var],
        scale = covars_Joint_max.train[var] - covars_Joint_min.train[var])
}

SST_jan_2009 <- scale_to_train('E:/NASData/AcoustoVisualDE/SST/SST_jan_2009.tif', "SST")
SST_july_2009 <- scale_to_train('E:/NASData/AcoustoVisualDE/SST/SST_july_2009.tif', "SST")
SSH_jan_2009 <- scale_to_train('E:/NASData/AcoustoVisualDE/SSH/SSH_jan2009.tif', "SSH")
SSH_july_2009 <- scale_to_train('E:/NASData/AcoustoVisualDE/SSH/SSH_july2009.tif', "SSH")
# CHL is log10-transformed after resampling (same order as the original code)
log10_CHL_jan_2009 <- scale_to_train('E:/NASData/AcoustoVisualDE/CHL/CHL_jan2009.tif', "log10_CHL", post = log10)
log10_CHL_july_2009 <- scale_to_train('E:/NASData/AcoustoVisualDE/CHL/CHL_july2009.tif', "log10_CHL", post = log10)
# MLD rasters are already log10-transformed on disk
log10_HYCOM_MLD_jan2009 <- scale_to_train('E:/NASData/AcoustoVisualDE/MLD/log10_mld_jan2009.tif', "log10_HYCOM_MLD")
log10_HYCOM_MLD_july2009 <- scale_to_train('E:/NASData/AcoustoVisualDE/MLD/log10_mld_july2009.tif', "log10_HYCOM_MLD")
SAL0_jan_2009 <- scale_to_train('E:/NASData/AcoustoVisualDE/Salinity/sal0_jan2009.tif', "HYCOM_SALIN_0")
SAL0_july_2009 <- scale_to_train('E:/NASData/AcoustoVisualDE/Salinity/sal0_july2009.tif', "HYCOM_SALIN_0")
SAL100_jan_2009 <- scale_to_train('E:/NASData/AcoustoVisualDE/Salinity/sal100_jan2009_albers.tif', "HYCOM_SALIN_100")
SAL100_july_2009 <- scale_to_train('E:/NASData/AcoustoVisualDE/Salinity/sal100_july2009_albers.tif', "HYCOM_SALIN_100")
log10_HYCOM_MAG_0_jan2009 <- scale_to_train('E:/NASData/AcoustoVisualDE/HYCOM_Mag/log10_mag_jan2009.tif', 'log10_HYCOM_MAG_0')
log10_HYCOM_MAG_0_july2009 <- scale_to_train('E:/NASData/AcoustoVisualDE/HYCOM_Mag/log10_mag_july2009.tif', 'log10_HYCOM_MAG_0')
HYCOM_UPVEL_jan2009 <- scale_to_train('E:/NASData/AcoustoVisualDE/HYCOM_UpVel/proj_HYCOM_upvel_50_jan09.tif', 'HYCOM_UPVEL_50')
HYCOM_UPVEL_july2009 <- scale_to_train('E:/NASData/AcoustoVisualDE/HYCOM_UpVel/proj_HYCOM_upvel_50_july09.tif', 'HYCOM_UPVEL_50')
log10_Cayula_jan_2009 <- scale_to_train('E:/NASData/AcoustoVisualDE/Cayula/log10_CayulaFront_jan2009.tif', "log10_FrontDist_Cayula")
log10_Cayula_july_2009 <- scale_to_train('E:/NASData/AcoustoVisualDE/Cayula/log10_CayulaFront_july2009.tif', "log10_FrontDist_Cayula")
Eddy_jan_2009 <- scale_to_train('E:/NASData/Eddy/JPL_ManualEddyDist/Climatologies/Projected/eddyDist_Jan_2009_proj.tif', "EddyDist")
Eddy_july_2009 <- scale_to_train('E:/NASData/Eddy/JPL_ManualEddyDist/Climatologies/Projected/eddyDist_July_2009_proj.tif', "EddyDist")
# Negative-eddy distances are divided by 1000 before resampling
# (presumably m -> km to match the training covariate units -- confirm)
Neg_Eddy_jan_2009 <- scale_to_train('E:/NASData/AcoustoVisualDE/EddyDist/DistNegMSLA_eddy_polarities_2009015.img', "Neg_EddyDist", prep = function(r) r / 1000)
Neg_Eddy_july_2009 <- scale_to_train('E:/NASData/AcoustoVisualDE/EddyDist/Neg_EddyDist_july2009.tif', "Neg_EddyDist", prep = function(r) r / 1000)
Pos_Eddy_jan_2009 <- scale_to_train('E:/NASData/AcoustoVisualDE/EddyDist/PosEddyDist_Jan_2009.tif', "Pos_EddyDist")
Pos_Eddy_july_2009 <- scale_to_train('E:/NASData/AcoustoVisualDE/EddyDist/PosEddyDist_Jul_2009.tif', "Pos_EddyDist")

# Stack the layers used for prediction (Cayula, SAL100 and the combined
# EddyDist layers are computed above but deliberately excluded from the
# bricks, matching the original commented-out entries).
jan2009_rasters <- brick(log10_CHL_jan_2009, SST_jan_2009, SSH_jan_2009, #log10_Cayula_jan_2009,
                         SAL0_jan_2009, #SAL100_jan_2009, Eddy_jan_2009,
                         Neg_Eddy_jan_2009, Pos_Eddy_jan_2009,
                         log10_HYCOM_MAG_0_jan2009, log10_HYCOM_MLD_jan2009, HYCOM_UPVEL_jan2009)
july2009_rasters <- brick(log10_CHL_july_2009, SST_july_2009, SSH_july_2009, #log10_Cayula_july_2009,
                          SAL0_july_2009, #SAL100_july_2009, Eddy_july_2009,
                          Neg_Eddy_july_2009, Pos_Eddy_july_2009,
                          log10_HYCOM_MAG_0_july2009, log10_HYCOM_MLD_july2009, HYCOM_UPVEL_july2009)
# Layer names must match the covariate names used when fitting the models;
# define once so the two bricks cannot drift apart.
layer_names <- c('log10_CHL', 'SST', 'SSH',
                 'HYCOM_SALIN_0', #'HYCOM_SALIN_100', 'EddyDist',
                 'Neg_EddyDist', "Pos_EddyDist",
                 'log10_HYCOM_MAG_0', 'log10_HYCOM_MLD', 'HYCOM_UPVEL_50') #'log10_FrontDist_Cayula',
names(jan2009_rasters) <- layer_names
names(july2009_rasters) <- layer_names
save(jan2009_rasters, july2009_rasters,
     file = 'E:/NASData/AcoustoVisualDE/AcoustoVisualDE/2009_prediction_rasters_scaled.Rdata')
|
# Calculate the accumulated growing degree days (GDD) and days to flowering
# (DTF) for the C. f. phenology project. This script uses Julian dates of
# earliest germinant (EG) and earliest flower/pod (DF) and daily temperatures
# from PRISM for 2013 and 2014 at Shakopee, MN to calculate GDD. The base
# temperature is 10C, which is a value used to model soybean growth in MN.
# TK and AN
# Input: PhenMod_G.rda and PRISM daily min/max temps
# Output: Per-loc GDD and DTF values and distributions of GDD and DTF
# Requires: PhenMod_G
# NOTE(review): hard-coded per-user working directory; a relative path or
# environment variable would make this portable.
setwd("/Users/tomkono/Dropbox/GitHub/Nashoba_Kono_Phenology")
#setwd("/Users/amber-nashoba/Dropbox/GitHubRepositories/Nashoba_Kono_Phenology")
# Loads the cohort data frames used below (g1y13_final, g2y14_final,
# g1y14_final) -- presumably defined in this .rda; confirm.
load("Results/RDA/PhenMod_G.rda")
################################################################################
################################################################################
################################################################################
#
# Section 1: Calculation of GDD and DTF for each Loc in each cohort
#
################################################################################
################################################################################
################################################################################
# Read the daily temperatures at SCG
# (expected columns: Date in m/d/yy format, T_Max, T_Min)
temp_daily <- read.csv("Data/SCG_PRISM_Temp_Daily_MinMax.csv", header=TRUE)
# Then, define a function that will calculate growing degree days. We use a
# base temperature of 10C (50F), as is done for soybean.
# Accumulate growing degree days (GDD) from earliest germination to earliest
# flower/pod for one Loc (one row of a cohort data frame passed via apply()).
#
# Loc:  a named character vector with elements "EarliestGerm" and
#       "EarliestFlowPod" holding Julian day numbers (apply() coerces
#       data-frame rows to character, hence the as.numeric() below).
# Year: the calendar year anchoring the Julian day numbers.
# Returns: accumulated GDD (numeric scalar) over base temperature 10C.
# Depends on the global data frame `temp_daily` with columns Date (m/d/yy),
# T_Max and T_Min, assumed ordered from early to late.
gdd <- function(Loc, Year) {
    t_base <- 10
    # Julian day 0 is Dec 31 of the previous year.
    origin <- paste(Year-1, '12-31', sep="-")
    # Convert Julian day -> c(year, month, day) numeric vector:
    # as.Date gives "Y-M-D", strsplit separates the pieces, as.numeric
    # strips leading zeroes so the lookup strings match temp_daily$Date.
    germ <- as.numeric(unlist(strsplit(as.character(as.Date(as.numeric(Loc["EarliestGerm"]),origin=origin)), split="-")))
    flowpod <- as.numeric(unlist(strsplit(as.character(as.Date(as.numeric(Loc["EarliestFlowPod"]),origin=origin)), split="-")))
    # Two-digit year for the m/d/yy lookup format.
    y <- substr(germ[1], 3, 4)
    germ <- paste(germ[2], germ[3], y, sep="/")
    flowpod <- paste(flowpod[2], flowpod[3], y, sep="/")
    # Row indices of the germination and flowering dates in temp_daily.
    germ_day <- which(temp_daily$Date == germ)
    fp_day <- which(temp_daily$Date == flowpod)
    # Accumulate daily GDD = mean(daily max, daily min) - base.
    # BUG FIX: the original used mean(T_Max[i], T_Min[i]), but mean() only
    # averages its FIRST argument (the second is taken as `trim`), so the
    # daily minimum was silently ignored. Average the two values explicitly.
    acc_gdds <- 0
    for(i in germ_day:fp_day) {
        mean_temp <- (temp_daily$T_Max[i] + temp_daily$T_Min[i]) / 2
        gdd_day <- mean_temp - t_base
        acc_gdds <- acc_gdds + gdd_day
    }
    return(acc_gdds)
}
# Per-Loc GDD for each cohort. apply() passes each row as a named character
# vector, which gdd() converts back to numeric internally.
gdd_g1y13 <- apply(g1y13_final, 1, gdd, Year=2013)
gdd_g2y14 <- apply(g2y14_final, 1, gdd, Year=2014)
gdd_g1y14 <- apply(g1y14_final, 1, gdd, Year=2014)
# Append the GDDs accumulated from germ to flowering to the cohort data frames
g1y13_final <- data.frame(g1y13_final, GDD=gdd_g1y13)
g2y14_final <- data.frame(g2y14_final, GDD=gdd_g2y14)
g1y14_final <- data.frame(g1y14_final, GDD=gdd_g1y14)
# Make expressions for the G_Y_ names
# (plotmath expressions so cohort labels get subscripts in plot legends)
name2013 <- c(expression(paste("G"[1], "Y"[13])))
name2013p <- c(expression(paste("G"[2], "Y"[14])))
name2014 <- c(expression(paste("G"[1], "Y"[14])))
# DTF per Loc: days from earliest germination to earliest flower/pod
# (variable name says "maturity" but this is days to flowering)
days_to_maturity_g1y13 <- apply(g1y13_final, 1, function(x) { return(as.numeric(x["EarliestFlowPod"]) - as.numeric(x["EarliestGerm"]))})
days_to_maturity_g2y14 <- apply(g2y14_final, 1, function(x) { return(as.numeric(x["EarliestFlowPod"]) - as.numeric(x["EarliestGerm"]))})
days_to_maturity_g1y14 <- apply(g1y14_final, 1, function(x) { return(as.numeric(x["EarliestFlowPod"]) - as.numeric(x["EarliestGerm"]))})
# Save the data frames with accumulated GDD to flowering and DTF
g1y13_final <- data.frame(g1y13_final, DTF=days_to_maturity_g1y13)
g2y14_final <- data.frame(g2y14_final, DTF=days_to_maturity_g2y14)
g1y14_final <- data.frame(g1y14_final, DTF=days_to_maturity_g1y14)
save(g1y13_final, g2y14_final, g1y14_final, file="Results/RDA/Cohorts_with_GDDtoFlower.rda")
# Density plot of accumulated GDD per cohort
pdf(file="Results/PhenFigures/GDD_Distributions.pdf", height=6, width=6)
plot(
density(gdd_g1y13),
main="Accumulated GDD to Flowering",
xlab="Total Accumulated GDD",
ylab="Density",
xlim=c(500, 1700),
lwd=2,
lty=1)
lines(density(gdd_g2y14), col="red", lwd=2, lty=2)
lines(density(gdd_g1y14), col="red", lwd=2, lty=1)
legend(
"topright",
c(name2013, name2013p, name2014),
col=c("black", "red", "red"),
lwd=2,
lty=c(1, 2, 1))
dev.off()
# Density plot of days to flowering per cohort
pdf(file="Results/PhenFigures/DTF_Distributions.pdf", height=6, width=6)
plot(
density(days_to_maturity_g1y13),
main="Days From Germination to Flowering",
xlab="Days to Flowering",
ylab="Density",
xlim=c(20, 110),
lwd=2,
lty=1)
lines(density(days_to_maturity_g2y14), col="red", lwd=2, lty=2)
lines(density(days_to_maturity_g1y14), col="red", lwd=2, lty=1)
legend(
"topright",
c(name2013, name2013p, name2014),
col=c("black", "red", "red"),
lwd=2,
lty=c(1, 2, 1))
dev.off()
################################################################################
################################################################################
################################################################################
#
# Section 2: Mean GDD and DTF by maternal family
#
################################################################################
################################################################################
################################################################################
# ---- Maternal family means for G1Y13 ----
# Group Locs by maternal family: g1y13_mat[[mat]] holds the Loc IDs
# belonging to maternal family `mat`, with NAs removed.
g1y13_mat <- list()
for (mat in unique(g1y13_final$Mat)) {
    # Find where each Mat occurs in the data
    matloc <- g1y13_final$Mat == mat
    # Get the corresponding Locs, as characters
    matloc <- as.character(g1y13_final$Loc[matloc])
    # Remove NAs, append to the list
    g1y13_mat[[mat]] <- matloc[!is.na(matloc)]
}
# Mean GDD across the Locs of one maternal family (NA if none of the Locs
# appear in the cohort data).
mean_gdd_g1y13 <- function(Locs) {
    mat_GDD_locs <- g1y13_final$Loc %in% Locs
    if (all(!mat_GDD_locs)) {
        return(NA)
    }
    mean(g1y13_final$GDD[mat_GDD_locs])
}
# Mean DTF across the Locs of one maternal family (NA if none present).
mean_dtf_g1y13 <- function(Locs) {
    mat_dtf_locs <- g1y13_final$Loc %in% Locs
    if (all(!mat_dtf_locs)) {
        return(NA)
    }
    mean(g1y13_final$DTF[mat_dtf_locs])
}
mat_GDD_g1y13 <- lapply(g1y13_mat, mean_gdd_g1y13)
mat_DTF_g1y13 <- lapply(g1y13_mat, mean_dtf_g1y13)
# Stick 2013 into a data frame
matfam_GDD_DTF_g1y13 <- data.frame(
    Mat=names(mat_GDD_g1y13),
    GDD_G1Y13=as.numeric(mat_GDD_g1y13),
    DTF_G1Y13=as.numeric(mat_DTF_g1y13)
)
# Remove NA rows. FIX: the original filtered on $GDD and $DTF, which only
# worked through $'s partial name matching (the columns are GDD_G1Y13 and
# DTF_G1Y13); use the full column names so this survives renames/tibbles.
matfam_GDD_DTF_g1y13 <- matfam_GDD_DTF_g1y13[!is.na(matfam_GDD_DTF_g1y13$Mat) &
    !is.na(matfam_GDD_DTF_g1y13$GDD_G1Y13) &
    !is.na(matfam_GDD_DTF_g1y13$DTF_G1Y13), ]
# ---- Maternal family means for G2Y14 ----
# Calculate G2Y14 maternal GDD/DTF means in the same way as for G1Y13.
mean_gdd_g2y14 <- function(Locs) {
    mat_GDD_locs <- g2y14_final$Loc %in% Locs
    if (all(!mat_GDD_locs)) {
        return(NA)
    }
    mean(g2y14_final$GDD[mat_GDD_locs])
}
mean_dtf_g2y14 <- function(Locs) {
    mat_dtf_locs <- g2y14_final$Loc %in% Locs
    if (all(!mat_dtf_locs)) {
        return(NA)
    }
    mean(g2y14_final$DTF[mat_dtf_locs])
}
# NOTE(review): the groupings come from the G1Y13 maternal families
# (g1y13_mat), presumably so families line up across cohorts -- confirm.
mat_GDD_g2y14 <- lapply(g1y13_mat, mean_gdd_g2y14)
mat_DTF_g2y14 <- lapply(g1y13_mat, mean_dtf_g2y14)
matfam_GDD_DTF_g2y14 <- data.frame(
    Mat=names(mat_GDD_g2y14),
    GDD_G2Y14=as.numeric(mat_GDD_g2y14),
    DTF_G2Y14=as.numeric(mat_DTF_g2y14)
)
# Remove NA rows. FIX: use full column names (GDD_G2Y14, DTF_G2Y14) instead
# of relying on $'s partial matching of $GDD / $DTF.
matfam_GDD_DTF_g2y14 <- matfam_GDD_DTF_g2y14[!is.na(matfam_GDD_DTF_g2y14$Mat) &
    !is.na(matfam_GDD_DTF_g2y14$GDD_G2Y14) &
    !is.na(matfam_GDD_DTF_g2y14$DTF_G2Y14), ]
# ---- Maternal family means for G1Y14 ----
# Group Locs by maternal family for the G1Y14 cohort.
g1y14_mat <- list()
for (mat in unique(g1y14_final$Mat)) {
    # Find where each Mat occurs in the data
    matloc <- g1y14_final$Mat == mat
    # Get the corresponding Locs, as characters
    matloc <- as.character(g1y14_final$Loc[matloc])
    # Remove NAs, append to the list
    g1y14_mat[[mat]] <- matloc[!is.na(matloc)]
}
# Mean GDD across the Locs of one maternal family (NA if none present).
mean_gdd_g1y14 <- function(Locs) {
    mat_GDD_locs <- g1y14_final$Loc %in% Locs
    if (all(!mat_GDD_locs)) {
        return(NA)
    }
    mean(g1y14_final$GDD[mat_GDD_locs])
}
# Mean DTF across the Locs of one maternal family (NA if none present).
mean_dtf_g1y14 <- function(Locs) {
    mat_dtf_locs <- g1y14_final$Loc %in% Locs
    if (all(!mat_dtf_locs)) {
        return(NA)
    }
    mean(g1y14_final$DTF[mat_dtf_locs])
}
# NOTE(review): these iterate over the G1Y13 groupings (g1y13_mat), not the
# g1y14_mat built just above -- presumably intentional so maternal families
# align across cohorts; confirm, otherwise g1y14_mat is unused here.
mat_GDD_g1y14 <- lapply(g1y13_mat, mean_gdd_g1y14)
mat_DTF_g1y14 <- lapply(g1y13_mat, mean_dtf_g1y14)
matfam_GDD_DTF_g1y14 <- data.frame(
    Mat=names(mat_GDD_g1y14),
    GDD_G1Y14=as.numeric(mat_GDD_g1y14),
    DTF_G1Y14=as.numeric(mat_DTF_g1y14)
)
# Remove NA rows. FIX: use full column names (GDD_G1Y14, DTF_G1Y14) instead
# of relying on $'s partial matching of $GDD / $DTF.
matfam_GDD_DTF_g1y14 <- matfam_GDD_DTF_g1y14[!is.na(matfam_GDD_DTF_g1y14$Mat) &
    !is.na(matfam_GDD_DTF_g1y14$GDD_G1Y14) &
    !is.na(matfam_GDD_DTF_g1y14$DTF_G1Y14), ]
################################################################################
################################################################################
################################################################################
#
# Section 3: Mean GDD and DTF by paternal family. Note that there is no
# paternal family information for G2Y14
#
################################################################################
################################################################################
################################################################################
# Group Locs by paternal family for the G1Y13 cohort.
g1y13_pat <- list()
for (pat in unique(g1y13_final$Pat)) {
    # Find where each Pat occurs in the data
    patloc <- g1y13_final$Pat == pat
    # Get the corresponding Locs, as characters
    patloc <- as.character(g1y13_final$Loc[patloc])
    # Remove NAs, append to the list
    g1y13_pat[[pat]] <- patloc[!is.na(patloc)]
}
# Mean GDD across the Locs of one paternal family (NA if none present).
# NOTE(review): this redefines mean_gdd_g1y13 / mean_dtf_g1y13 from Section 2
# (already consumed by then); distinct names would be clearer.
mean_gdd_g1y13 <- function(Locs) {
    pat_GDD_locs <- g1y13_final$Loc %in% Locs
    if (all(!pat_GDD_locs)) {
        return(NA)
    }
    mean(g1y13_final$GDD[pat_GDD_locs])
}
# Mean DTF across the Locs of one paternal family (NA if none present).
mean_dtf_g1y13 <- function(Locs) {
    pat_dtf_locs <- g1y13_final$Loc %in% Locs
    if (all(!pat_dtf_locs)) {
        return(NA)
    }
    mean(g1y13_final$DTF[pat_dtf_locs])
}
pat_GDD_g1y13 <- lapply(g1y13_pat, mean_gdd_g1y13)
pat_DTF_g1y13 <- lapply(g1y13_pat, mean_dtf_g1y13)
# Stick 2013 into a data frame
patfam_GDD_DTF_g1y13 <- data.frame(
    Pat=names(pat_GDD_g1y13),
    GDD_G1Y13=as.numeric(pat_GDD_g1y13),
    DTF_G1Y13=as.numeric(pat_DTF_g1y13)
)
# Remove NA rows. FIX: use full column names (GDD_G1Y13, DTF_G1Y13) instead
# of relying on $'s partial matching of $GDD / $DTF.
patfam_GDD_DTF_g1y13 <- patfam_GDD_DTF_g1y13[!is.na(patfam_GDD_DTF_g1y13$Pat) &
    !is.na(patfam_GDD_DTF_g1y13$GDD_G1Y13) &
    !is.na(patfam_GDD_DTF_g1y13$DTF_G1Y13), ]
# ---- Paternal family means for G1Y14 ----
# Group Locs by paternal family for the G1Y14 cohort.
g1y14_pat <- list()
for (pat in unique(g1y14_final$Pat)) {
    # Find where each Pat occurs in the data
    patloc <- g1y14_final$Pat == pat
    # Get the corresponding Locs, as characters
    patloc <- as.character(g1y14_final$Loc[patloc])
    # Remove NAs, append to the list
    g1y14_pat[[pat]] <- patloc[!is.na(patloc)]
}
# Mean GDD across the Locs of one paternal family (NA if none present).
mean_gdd_g1y14 <- function(Locs) {
    pat_GDD_locs <- g1y14_final$Loc %in% Locs
    if (all(!pat_GDD_locs)) {
        return(NA)
    }
    mean(g1y14_final$GDD[pat_GDD_locs])
}
# Mean DTF across the Locs of one paternal family (NA if none present).
mean_dtf_g1y14 <- function(Locs) {
    pat_dtf_locs <- g1y14_final$Loc %in% Locs
    if (all(!pat_dtf_locs)) {
        return(NA)
    }
    mean(g1y14_final$DTF[pat_dtf_locs])
}
# NOTE(review): iterates over the G1Y13 paternal groupings (g1y13_pat), not
# g1y14_pat -- presumably intentional so families align across cohorts;
# confirm, otherwise g1y14_pat is unused here.
pat_GDD_g1y14 <- lapply(g1y13_pat, mean_gdd_g1y14)
pat_DTF_g1y14 <- lapply(g1y13_pat, mean_dtf_g1y14)
patfam_GDD_DTF_g1y14 <- data.frame(
    Pat=names(pat_GDD_g1y14),
    GDD_G1Y14=as.numeric(pat_GDD_g1y14),
    DTF_G1Y14=as.numeric(pat_DTF_g1y14)
)
# Remove NA rows. FIX: use full column names (GDD_G1Y14, DTF_G1Y14) instead
# of relying on $'s partial matching of $GDD / $DTF.
patfam_GDD_DTF_g1y14 <- patfam_GDD_DTF_g1y14[!is.na(patfam_GDD_DTF_g1y14$Pat) &
    !is.na(patfam_GDD_DTF_g1y14$GDD_G1Y14) &
    !is.na(patfam_GDD_DTF_g1y14$DTF_G1Y14), ]
################################################################################
################################################################################
################################################################################
#
# Section 4: Print the numbers for the table
#
################################################################################
################################################################################
################################################################################
# Make a data frame so that the table prints nicely
# Rows (see rownames below): population mean/SD, maternal-family mean/SD and
# paternal-family mean/SD, first for GDD then for DTF, one column per cohort.
# NOTE(review): the literal "NA" strings for G2Y14 (no paternal info) coerce
# that whole column to character -- presumably acceptable since the table is
# only printed; confirm if it is ever used numerically.
tabledat <- data.frame(
G1Y13=c(
round(mean(g1y13_final$GDD, na.rm=TRUE), 1),
round(sd(g1y13_final$GDD, na.rm=TRUE), 1),
round(mean(matfam_GDD_DTF_g1y13$GDD_G1Y13, na.rm=TRUE), 1),
round(sd(matfam_GDD_DTF_g1y13$GDD_G1Y13, na.rm=TRUE), 1),
round(mean(patfam_GDD_DTF_g1y13$GDD_G1Y13, na.rm=TRUE), 1),
round(sd(patfam_GDD_DTF_g1y13$GDD_G1Y13, na.rm=TRUE), 1),
round(mean(g1y13_final$DTF, na.rm=TRUE), 1),
round(sd(g1y13_final$DTF, na.rm=TRUE), 1),
round(mean(matfam_GDD_DTF_g1y13$DTF_G1Y13, na.rm=TRUE), 1),
round(sd(matfam_GDD_DTF_g1y13$DTF_G1Y13, na.rm=TRUE), 1),
round(mean(patfam_GDD_DTF_g1y13$DTF_G1Y13, na.rm=TRUE), 1),
round(sd(patfam_GDD_DTF_g1y13$DTF_G1Y13, na.rm=TRUE), 1)
),
G2Y14=c(
round(mean(g2y14_final$GDD, na.rm=TRUE), 1),
round(sd(g2y14_final$GDD, na.rm=TRUE), 1),
round(mean(matfam_GDD_DTF_g2y14$GDD_G2Y14, na.rm=TRUE), 1),
round(sd(matfam_GDD_DTF_g2y14$GDD_G2Y14, na.rm=TRUE), 1),
"NA",
"NA",
round(mean(g2y14_final$DTF, na.rm=TRUE), 1),
round(sd(g2y14_final$DTF, na.rm=TRUE), 1),
round(mean(matfam_GDD_DTF_g2y14$DTF_G2Y14, na.rm=TRUE), 1),
round(sd(matfam_GDD_DTF_g2y14$DTF_G2Y14, na.rm=TRUE), 1),
"NA",
"NA"
),
G1Y14=c(
round(mean(g1y14_final$GDD, na.rm=TRUE), 1),
round(sd(g1y14_final$GDD, na.rm=TRUE), 1),
round(mean(matfam_GDD_DTF_g1y14$GDD_G1Y14, na.rm=TRUE), 1),
round(sd(matfam_GDD_DTF_g1y14$GDD_G1Y14, na.rm=TRUE), 1),
round(mean(patfam_GDD_DTF_g1y14$GDD_G1Y14, na.rm=TRUE), 1),
round(sd(patfam_GDD_DTF_g1y14$GDD_G1Y14, na.rm=TRUE), 1),
round(mean(g1y14_final$DTF, na.rm=TRUE), 1),
round(sd(g1y14_final$DTF, na.rm=TRUE), 1),
round(mean(matfam_GDD_DTF_g1y14$DTF_G1Y14, na.rm=TRUE), 1),
round(sd(matfam_GDD_DTF_g1y14$DTF_G1Y14, na.rm=TRUE), 1),
round(mean(patfam_GDD_DTF_g1y14$DTF_G1Y14, na.rm=TRUE), 1),
round(sd(patfam_GDD_DTF_g1y14$DTF_G1Y14, na.rm=TRUE), 1)
)
)
rownames(tabledat) <- c(
"Pop Mean GDD",
"Pop SD GDD",
"Mean Mat GDD",
"SD Mat GDD",
"Mean Pat GDD",
"SD Pat GDD",
"Pop Mean DTF",
"Pop SD DTF",
"Mean Mat DTF",
"SD Mat DTF",
"Mean Pat DTF",
"SD Pat DTF"
)
print(tabledat)
|
/Scripts/Calculate_DTF_GDD.R
|
no_license
|
Nashobahomma/Nashoba_Kono_Phenology
|
R
| false
| false
| 16,794
|
r
|
# Calculate the accumulated growing degree days (GDD) and days to flowering
# (DTF) for the C. f. phenology project. This script uses Julian dates of
# earliest germinant (EG) and earliest flower/pod (DF) and daily temperatures
# from PRISM for 2013 and 2014 at Shakopee, MN to calculate GDD. The base
# temperature is 10C, which is a value used to model soybean growth in MN.
# TK and AN
# Input: PhenMod_G.rda and PRISM daily min/max temps
# Output: Per-loc GDD and DTF values and distributions of GDD and DTF
# Requires: PhenMod_G
# NOTE(review): hard-coded per-user working directory; a relative path or
# environment variable would make this portable.
setwd("/Users/tomkono/Dropbox/GitHub/Nashoba_Kono_Phenology")
#setwd("/Users/amber-nashoba/Dropbox/GitHubRepositories/Nashoba_Kono_Phenology")
# Loads the cohort data frames used below (g1y13_final, g2y14_final,
# g1y14_final) -- presumably defined in this .rda; confirm.
load("Results/RDA/PhenMod_G.rda")
################################################################################
################################################################################
################################################################################
#
# Section 1: Calculation of GDD and DTF for each Loc in each cohort
#
################################################################################
################################################################################
################################################################################
# Read the daily temperatures at SCG
# (expected columns: Date in m/d/yy format, T_Max, T_Min)
temp_daily <- read.csv("Data/SCG_PRISM_Temp_Daily_MinMax.csv", header=TRUE)
# Then, define a function that will calculate growing degree days. We use a
# base temperature of 10C (50F), as is done for soybean.
# Accumulate growing degree days (GDD) between a Loc's earliest germination
# and its earliest flower/pod date.
#
# Loc:  one row of a cohort data frame as supplied by apply(), i.e. a named
#       character vector with "EarliestGerm" and "EarliestFlowPod" Julian days.
# Year: the calendar year the Julian days refer to.
#
# Returns sum over the interval of (daily mean temperature - 10C). Reads the
# global `temp_daily` PRISM table; assumes its rows run from early to late.
gdd <- function(Loc, Year) {
    t_base <- 10
    # Julian day 0 is Dec 31 of the previous year.
    origin <- paste(Year - 1, "12-31", sep="-")
    # Convert a Julian day number into the "m/d/yy" string used as the key in
    # temp_daily$Date. as.numeric() strips the leading zeroes that as.Date()
    # puts on the month and day.
    mdy <- function(julian_day) {
        parts <- as.numeric(unlist(strsplit(
            as.character(as.Date(as.numeric(julian_day), origin=origin)),
            split="-")))
        # parts is c(year, month, day); take the 4-digit year down to 2.
        paste(parts[2], parts[3], substr(parts[1], 3, 4), sep="/")
    }
    germ_day <- which(temp_daily$Date == mdy(Loc["EarliestGerm"]))
    fp_day <- which(temp_daily$Date == mdy(Loc["EarliestFlowPod"]))
    # Accumulate (daily mean temperature - base) from germination to flowering.
    # BUGFIX: the original computed mean(T_Max[i], T_Min[i]), which passes
    # T_Min as mean()'s `trim` argument and silently returns T_Max alone.
    # Average the two explicitly instead.
    # NOTE(review): negative daily values are not truncated at 0 here, which
    # matches the original accumulation -- confirm that is intended for GDD.
    day_range <- germ_day:fp_day
    mean_temp <- (temp_daily$T_Max[day_range] + temp_daily$T_Min[day_range]) / 2
    sum(mean_temp - t_base)
}
# Compute per-Loc GDD for each cohort. Note apply() over a data frame hands
# each row to gdd() as a character vector, which is why gdd() coerces its
# fields with as.numeric().
gdd_g1y13 <- apply(g1y13_final, 1, gdd, Year=2013)
gdd_g2y14 <- apply(g2y14_final, 1, gdd, Year=2014)
gdd_g1y14 <- apply(g1y14_final, 1, gdd, Year=2014)
# Append the GDDs accumulated from germ to flowering to the cohort data frames
g1y13_final <- data.frame(g1y13_final, GDD=gdd_g1y13)
g2y14_final <- data.frame(g2y14_final, GDD=gdd_g2y14)
g1y14_final <- data.frame(g1y14_final, GDD=gdd_g1y14)
# Make plotmath expressions for the G_Y_ cohort names used in plot legends
name2013 <- c(expression(paste("G"[1], "Y"[13])))
name2013p <- c(expression(paste("G"[2], "Y"[14])))
name2014 <- c(expression(paste("G"[1], "Y"[14])))
# Days to flowering = difference of the two Julian dates (rows arrive as
# character, hence the as.numeric coercions).
days_to_maturity_g1y13 <- apply(g1y13_final, 1, function(x) { return(as.numeric(x["EarliestFlowPod"]) - as.numeric(x["EarliestGerm"]))})
days_to_maturity_g2y14 <- apply(g2y14_final, 1, function(x) { return(as.numeric(x["EarliestFlowPod"]) - as.numeric(x["EarliestGerm"]))})
days_to_maturity_g1y14 <- apply(g1y14_final, 1, function(x) { return(as.numeric(x["EarliestFlowPod"]) - as.numeric(x["EarliestGerm"]))})
# Append DTF and save the augmented cohort data frames to disk for use by
# downstream scripts (side effect: writes Results/RDA/Cohorts_with_GDDtoFlower.rda).
g1y13_final <- data.frame(g1y13_final, DTF=days_to_maturity_g1y13)
g2y14_final <- data.frame(g2y14_final, DTF=days_to_maturity_g2y14)
g1y14_final <- data.frame(g1y14_final, DTF=days_to_maturity_g1y14)
save(g1y13_final, g2y14_final, g1y14_final, file="Results/RDA/Cohorts_with_GDDtoFlower.rda")
# Density plot of accumulated GDD by cohort. Line mapping (matching the
# legend order): black solid = G1Y13, red dashed = G2Y14, red solid = G1Y14.
pdf(file="Results/PhenFigures/GDD_Distributions.pdf", height=6, width=6)
plot(
    density(gdd_g1y13),
    main="Accumulated GDD to Flowering",
    xlab="Total Accumulated GDD",
    ylab="Density",
    xlim=c(500, 1700),
    lwd=2,
    lty=1)
lines(density(gdd_g2y14), col="red", lwd=2, lty=2)
lines(density(gdd_g1y14), col="red", lwd=2, lty=1)
legend(
    "topright",
    c(name2013, name2013p, name2014),
    col=c("black", "red", "red"),
    lwd=2,
    lty=c(1, 2, 1))
dev.off()
# Density plot of days-to-flowering by cohort, styled identically to the GDD
# plot above: black solid = G1Y13, red dashed = G2Y14, red solid = G1Y14.
pdf(file="Results/PhenFigures/DTF_Distributions.pdf", height=6, width=6)
plot(
    density(days_to_maturity_g1y13),
    main="Days From Germination to Flowering",
    xlab="Days to Flowering",
    ylab="Density",
    xlim=c(20, 110),
    lwd=2,
    lty=1)
lines(density(days_to_maturity_g2y14), col="red", lwd=2, lty=2)
lines(density(days_to_maturity_g1y14), col="red", lwd=2, lty=1)
legend(
    "topright",
    c(name2013, name2013p, name2014),
    col=c("black", "red", "red"),
    lwd=2,
    lty=c(1, 2, 1))
dev.off()
################################################################################
################################################################################
################################################################################
#
# Section 2: Mean GDD and DTF by maternal family
#
################################################################################
################################################################################
################################################################################
# Build a named list mapping each G1Y13 maternal family to the character
# vector of Loc identifiers belonging to it (NA Locs dropped).
g1y13_mat <- list()
for(mat in unique(g1y13_final$Mat)) {
    # Find where each Mat occurs in the data
    matloc <- g1y13_final$Mat == mat
    # Get the corresponding Locs, as characters
    matloc <- as.character(g1y13_final$Loc[matloc])
    # Remove NAs, append to the list (list element is named by the family)
    g1y13_mat[[mat]] <- matloc[!is.na(matloc)]
}
# Then, for each maternal family, get the mean GDD of the Locs
# Mean accumulated GDD across the G1Y13 Locs belonging to one maternal
# family. Returns NA when none of the supplied Locs occur in the cohort.
# Reads the global g1y13_final data frame.
mean_gdd_g1y13 <- function(Locs) {
    in_family <- g1y13_final$Loc %in% Locs
    if (!any(in_family)) {
        return(NA)
    }
    mean(g1y13_final$GDD[in_family])
}
# Mean days-to-flowering across the G1Y13 Locs belonging to one maternal
# family; NA when no supplied Loc occurs in the cohort. Reads the global
# g1y13_final data frame.
mean_dtf_g1y13 <- function(Locs) {
    in_family <- g1y13_final$Loc %in% Locs
    if (!any(in_family)) {
        return(NA)
    }
    mean(g1y13_final$DTF[in_family])
}
# Per-maternal-family mean GDD and DTF for G1Y13.
mat_GDD_g1y13 <- lapply(g1y13_mat, mean_gdd_g1y13)
mat_DTF_g1y13 <- lapply(g1y13_mat, mean_dtf_g1y13)
# Stick 2013 into a data frame
matfam_GDD_DTF_g1y13 <- data.frame(
    Mat=names(mat_GDD_g1y13),
    GDD_G1Y13=as.numeric(mat_GDD_g1y13),
    DTF_G1Y13=as.numeric(mat_DTF_g1y13)
)
# Remove rows with any missing value. Use the full column names: the
# original `$GDD`/`$DTF` only resolved through data.frame partial matching.
matfam_GDD_DTF_g1y13 <- matfam_GDD_DTF_g1y13[!is.na(matfam_GDD_DTF_g1y13$Mat) & !is.na(matfam_GDD_DTF_g1y13$GDD_G1Y13) & !is.na(matfam_GDD_DTF_g1y13$DTF_G1Y13), ]
# Calculate G2Y14 maternal GDD means in the same way
# Mean accumulated GDD across the G2Y14 Locs in one family's Loc list;
# NA when no supplied Loc occurs in the cohort (global g2y14_final).
mean_gdd_g2y14 <- function(Locs) {
    hits <- g2y14_final$Loc %in% Locs
    if (!any(hits)) {
        return(NA)
    }
    mean(g2y14_final$GDD[hits])
}
# Mean days-to-flowering across the G2Y14 Locs in one family's Loc list;
# NA when no supplied Loc occurs in the cohort (global g2y14_final).
mean_dtf_g2y14 <- function(Locs) {
    hits <- g2y14_final$Loc %in% Locs
    if (!any(hits)) {
        return(NA)
    }
    mean(g2y14_final$DTF[hits])
}
# Per-family mean GDD and DTF for G2Y14.
# NOTE(review): no g2y14_mat list is ever built, so this reuses the 2013
# maternal-family Loc lists (g1y13_mat). G2Y14 rows are only picked up where
# Loc identifiers are shared with the 2013 lists -- confirm this is intended.
mat_GDD_g2y14 <- lapply(g1y13_mat, mean_gdd_g2y14)
mat_DTF_g2y14 <- lapply(g1y13_mat, mean_dtf_g2y14)
matfam_GDD_DTF_g2y14 <- data.frame(
    Mat=names(mat_GDD_g2y14),
    GDD_G2Y14=as.numeric(mat_GDD_g2y14),
    DTF_G2Y14=as.numeric(mat_DTF_g2y14)
)
# Remove rows with any missing value. Use the full column names: the
# original `$GDD`/`$DTF` only resolved through data.frame partial matching.
matfam_GDD_DTF_g2y14 <- matfam_GDD_DTF_g2y14[!is.na(matfam_GDD_DTF_g2y14$Mat) & !is.na(matfam_GDD_DTF_g2y14$GDD_G2Y14) & !is.na(matfam_GDD_DTF_g2y14$DTF_G2Y14), ]
# Calculate the same for G1Y14: a named list mapping each maternal family to
# its character vector of Loc identifiers (NA Locs dropped).
g1y14_mat <- list()
for(mat in unique(g1y14_final$Mat)) {
    # Find where each Mat occurs in the data
    matloc <- g1y14_final$Mat == mat
    # Get the corresponding Locs, as characters
    matloc <- as.character(g1y14_final$Loc[matloc])
    # Remove NAs, append to the list
    g1y14_mat[[mat]] <- matloc[!is.na(matloc)]
}
# Mean accumulated GDD across the G1Y14 Locs in one family's Loc list;
# NA when no supplied Loc occurs in the cohort (global g1y14_final).
mean_gdd_g1y14 <- function(Locs) {
    selected <- g1y14_final$Loc %in% Locs
    if (!any(selected)) {
        return(NA)
    }
    mean(g1y14_final$GDD[selected])
}
# Mean days-to-flowering across the G1Y14 Locs in one family's Loc list;
# NA when no supplied Loc occurs in the cohort (global g1y14_final).
mean_dtf_g1y14 <- function(Locs) {
    selected <- g1y14_final$Loc %in% Locs
    if (!any(selected)) {
        return(NA)
    }
    mean(g1y14_final$DTF[selected])
}
# Per-maternal-family mean GDD and DTF for G1Y14.
# BUGFIX: the original iterated over g1y13_mat even though g1y14_mat was
# just built above for this cohort (and was otherwise never used); apply
# the G1Y14 family list instead.
mat_GDD_g1y14 <- lapply(g1y14_mat, mean_gdd_g1y14)
mat_DTF_g1y14 <- lapply(g1y14_mat, mean_dtf_g1y14)
matfam_GDD_DTF_g1y14 <- data.frame(
    Mat=names(mat_GDD_g1y14),
    GDD_G1Y14=as.numeric(mat_GDD_g1y14),
    DTF_G1Y14=as.numeric(mat_DTF_g1y14)
)
# Remove rows with any missing value (full column names; the original relied
# on `$GDD`/`$DTF` partial matching).
matfam_GDD_DTF_g1y14 <- matfam_GDD_DTF_g1y14[!is.na(matfam_GDD_DTF_g1y14$Mat) & !is.na(matfam_GDD_DTF_g1y14$GDD_G1Y14) & !is.na(matfam_GDD_DTF_g1y14$DTF_G1Y14), ]
################################################################################
################################################################################
################################################################################
#
# Section 3: Mean GDD and DTF by paternal family. Note that there is no
# paternal family information for G2Y14
#
################################################################################
################################################################################
################################################################################
# Build a named list mapping each G1Y13 paternal family to the character
# vector of Loc identifiers belonging to it (NA Locs dropped).
g1y13_pat <- list()
for(pat in unique(g1y13_final$Pat)) {
    # Find where each pat occurs in the data
    patloc <- g1y13_final$Pat == pat
    # Get the corresponding Locs, as characters
    patloc <- as.character(g1y13_final$Loc[patloc])
    # Remove NAs, append to the list
    g1y13_pat[[pat]] <- patloc[!is.na(patloc)]
}
# Then, for each paternal family, get the mean GDD of the Locs
# Mean accumulated GDD across the G1Y13 Locs in one paternal family's Loc
# list; NA when no supplied Loc occurs in the cohort (global g1y13_final).
# NOTE(review): this redefines the Section 2 function of the same name with
# an identical contract (only the internal variable name differs).
mean_gdd_g1y13 <- function(Locs) {
    chosen <- g1y13_final$Loc %in% Locs
    if (!any(chosen)) {
        return(NA)
    }
    mean(g1y13_final$GDD[chosen])
}
# Mean days-to-flowering across the G1Y13 Locs in one paternal family's Loc
# list; NA when no supplied Loc occurs in the cohort (global g1y13_final).
# NOTE(review): redefines the Section 2 function of the same name with an
# identical contract.
mean_dtf_g1y13 <- function(Locs) {
    chosen <- g1y13_final$Loc %in% Locs
    if (!any(chosen)) {
        return(NA)
    }
    mean(g1y13_final$DTF[chosen])
}
# Per-paternal-family mean GDD and DTF for G1Y13.
pat_GDD_g1y13 <- lapply(g1y13_pat, mean_gdd_g1y13)
pat_DTF_g1y13 <- lapply(g1y13_pat, mean_dtf_g1y13)
# Stick 2013 into a data frame
patfam_GDD_DTF_g1y13 <- data.frame(
    Pat=names(pat_GDD_g1y13),
    GDD_G1Y13=as.numeric(pat_GDD_g1y13),
    DTF_G1Y13=as.numeric(pat_DTF_g1y13)
)
# Remove rows with any missing value. Use the full column names: the
# original `$GDD`/`$DTF` only resolved through data.frame partial matching.
patfam_GDD_DTF_g1y13 <- patfam_GDD_DTF_g1y13[!is.na(patfam_GDD_DTF_g1y13$Pat) & !is.na(patfam_GDD_DTF_g1y13$GDD_G1Y13) & !is.na(patfam_GDD_DTF_g1y13$DTF_G1Y13), ]
# Calculate the same for G1Y14: a named list mapping each paternal family to
# its character vector of Loc identifiers (NA Locs dropped).
g1y14_pat <- list()
for(pat in unique(g1y14_final$Pat)) {
    # Find where each pat occurs in the data
    patloc <- g1y14_final$Pat == pat
    # Get the corresponding Locs, as characters
    patloc <- as.character(g1y14_final$Loc[patloc])
    # Remove NAs, append to the list
    g1y14_pat[[pat]] <- patloc[!is.na(patloc)]
}
# Mean accumulated GDD across the G1Y14 Locs in one paternal family's Loc
# list; NA when no supplied Loc occurs in the cohort (global g1y14_final).
# NOTE(review): redefines the Section 2 function of the same name with an
# identical contract.
mean_gdd_g1y14 <- function(Locs) {
    present <- g1y14_final$Loc %in% Locs
    if (!any(present)) {
        return(NA)
    }
    mean(g1y14_final$GDD[present])
}
# Mean days-to-flowering across the G1Y14 Locs in one paternal family's Loc
# list; NA when no supplied Loc occurs in the cohort (global g1y14_final).
# NOTE(review): redefines the Section 2 function of the same name with an
# identical contract.
mean_dtf_g1y14 <- function(Locs) {
    present <- g1y14_final$Loc %in% Locs
    if (!any(present)) {
        return(NA)
    }
    mean(g1y14_final$DTF[present])
}
# Per-paternal-family mean GDD and DTF for G1Y14.
# BUGFIX: the original iterated over g1y13_pat even though g1y14_pat was
# just built above for this cohort (and was otherwise never used); apply
# the G1Y14 family list instead.
pat_GDD_g1y14 <- lapply(g1y14_pat, mean_gdd_g1y14)
pat_DTF_g1y14 <- lapply(g1y14_pat, mean_dtf_g1y14)
patfam_GDD_DTF_g1y14 <- data.frame(
    Pat=names(pat_GDD_g1y14),
    GDD_G1Y14=as.numeric(pat_GDD_g1y14),
    DTF_G1Y14=as.numeric(pat_DTF_g1y14)
)
# Remove rows with any missing value (full column names; the original relied
# on `$GDD`/`$DTF` partial matching).
patfam_GDD_DTF_g1y14 <- patfam_GDD_DTF_g1y14[!is.na(patfam_GDD_DTF_g1y14$Pat) & !is.na(patfam_GDD_DTF_g1y14$GDD_G1Y14) & !is.na(patfam_GDD_DTF_g1y14$DTF_G1Y14), ]
################################################################################
################################################################################
################################################################################
#
# Section 4: Print the numbers for the table
#
################################################################################
################################################################################
################################################################################
# Make a data frame so that the table prints nicely. Rows alternate the
# population-level, maternal-family, and paternal-family mean/SD for GDD,
# then the same six rows for DTF; one column per cohort.
# NOTE(review): the literal "NA" strings (no paternal data for G2Y14) coerce
# the whole G2Y14 column to character -- using NA_real_ would keep it numeric.
tabledat <- data.frame(
    G1Y13=c(
        round(mean(g1y13_final$GDD, na.rm=TRUE), 1),
        round(sd(g1y13_final$GDD, na.rm=TRUE), 1),
        round(mean(matfam_GDD_DTF_g1y13$GDD_G1Y13, na.rm=TRUE), 1),
        round(sd(matfam_GDD_DTF_g1y13$GDD_G1Y13, na.rm=TRUE), 1),
        round(mean(patfam_GDD_DTF_g1y13$GDD_G1Y13, na.rm=TRUE), 1),
        round(sd(patfam_GDD_DTF_g1y13$GDD_G1Y13, na.rm=TRUE), 1),
        round(mean(g1y13_final$DTF, na.rm=TRUE), 1),
        round(sd(g1y13_final$DTF, na.rm=TRUE), 1),
        round(mean(matfam_GDD_DTF_g1y13$DTF_G1Y13, na.rm=TRUE), 1),
        round(sd(matfam_GDD_DTF_g1y13$DTF_G1Y13, na.rm=TRUE), 1),
        round(mean(patfam_GDD_DTF_g1y13$DTF_G1Y13, na.rm=TRUE), 1),
        round(sd(patfam_GDD_DTF_g1y13$DTF_G1Y13, na.rm=TRUE), 1)
    ),
    G2Y14=c(
        round(mean(g2y14_final$GDD, na.rm=TRUE), 1),
        round(sd(g2y14_final$GDD, na.rm=TRUE), 1),
        round(mean(matfam_GDD_DTF_g2y14$GDD_G2Y14, na.rm=TRUE), 1),
        round(sd(matfam_GDD_DTF_g2y14$GDD_G2Y14, na.rm=TRUE), 1),
        "NA",
        "NA",
        round(mean(g2y14_final$DTF, na.rm=TRUE), 1),
        round(sd(g2y14_final$DTF, na.rm=TRUE), 1),
        round(mean(matfam_GDD_DTF_g2y14$DTF_G2Y14, na.rm=TRUE), 1),
        round(sd(matfam_GDD_DTF_g2y14$DTF_G2Y14, na.rm=TRUE), 1),
        "NA",
        "NA"
    ),
    G1Y14=c(
        round(mean(g1y14_final$GDD, na.rm=TRUE), 1),
        round(sd(g1y14_final$GDD, na.rm=TRUE), 1),
        round(mean(matfam_GDD_DTF_g1y14$GDD_G1Y14, na.rm=TRUE), 1),
        round(sd(matfam_GDD_DTF_g1y14$GDD_G1Y14, na.rm=TRUE), 1),
        round(mean(patfam_GDD_DTF_g1y14$GDD_G1Y14, na.rm=TRUE), 1),
        round(sd(patfam_GDD_DTF_g1y14$GDD_G1Y14, na.rm=TRUE), 1),
        round(mean(g1y14_final$DTF, na.rm=TRUE), 1),
        round(sd(g1y14_final$DTF, na.rm=TRUE), 1),
        round(mean(matfam_GDD_DTF_g1y14$DTF_G1Y14, na.rm=TRUE), 1),
        round(sd(matfam_GDD_DTF_g1y14$DTF_G1Y14, na.rm=TRUE), 1),
        round(mean(patfam_GDD_DTF_g1y14$DTF_G1Y14, na.rm=TRUE), 1),
        round(sd(patfam_GDD_DTF_g1y14$DTF_G1Y14, na.rm=TRUE), 1)
    )
)
# Row labels in the same order the values were assembled above.
rownames(tabledat) <- c(
    "Pop Mean GDD",
    "Pop SD GDD",
    "Mean Mat GDD",
    "SD Mat GDD",
    "Mean Pat GDD",
    "SD Pat GDD",
    "Pop Mean DTF",
    "Pop SD DTF",
    "Mean Mat DTF",
    "SD Mat DTF",
    "Mean Pat DTF",
    "SD Pat DTF"
)
# Print the finished summary table to the console.
print(tabledat)
|
#' Create a condition object
#'
#' These constructors make it easy to create subclassed conditions.
#' Conditions are objects that power the error system in R. They can
#' also be used for passing messages to pre-established handlers.
#'
#' `cnd()` creates objects inheriting from `condition`. Conditions
#' created with `error_cnd()`, `warning_cnd()` and `message_cnd()`
#' inherit from `error`, `warning` or `message`.
#'
#' @section Lifecycle:
#'
#' The `.type` and `.msg` arguments have been renamed to `.subclass`
#' and `message`. They are deprecated as of rlang 0.3.0.
#'
#' @param .subclass The condition subclass.
#' @param ... Named data fields stored inside the condition
#' object. These dots are evaluated with [explicit
#' splicing][tidy-dots].
#' @param message A default message to inform the user about the
#' condition when it is signalled.
#' @param trace A `trace` object created by [trace_back()].
#' @param parent A parent condition object created by [abort()].
#' @seealso [cnd_signal()], [with_handlers()].
#' @export
#' @examples
#' # Create a condition inheriting from the s3 type "foo":
#' cnd <- cnd("foo")
#'
#' # Signal the condition to potential handlers. Since this is a bare
#' # condition the signal has no effect if no handlers are set up:
#' cnd_signal(cnd)
#'
#' # When a relevant handler is set up, the signal causes the handler
#' # to be called:
#' with_handlers(cnd_signal(cnd), foo = exiting(function(c) "caught!"))
#'
#' # Handlers can be thrown or executed inplace. See with_handlers()
#' # documentation for more on this.
#'
#' # Signalling an error condition aborts the current computation:
#' err <- error_cnd("foo", message = "I am an error")
#' try(cnd_signal(err))
cnd <- function(.subclass, ..., message = "") {
  # A bare condition with no subclass carries no dispatchable class, so
  # refuse to create one.
  if (missing(.subclass)) {
    abort("Bare conditions must be subclassed")
  }
  # Construction happens in C; the named `...` become fields on the object.
  .Call(rlang_new_condition, .subclass, message, dots_list(...))
}
#' @rdname cnd
#' @export
# Build an error condition classed c(.subclass, "rlang_error", "error"),
# optionally carrying a backtrace (`trace`) and a causal `parent` condition.
# Both optional arguments are validated before construction.
error_cnd <- function(.subclass = NULL,
                      ...,
                      message = "",
                      trace = NULL,
                      parent = NULL) {
  bad_trace <- !is_null(trace) && !inherits(trace, "rlang_trace")
  if (bad_trace) {
    abort("`trace` must be NULL or an rlang backtrace")
  }
  bad_parent <- !is_null(parent) && !inherits(parent, "condition")
  if (bad_parent) {
    abort("`parent` must be NULL or a condition object")
  }
  data <- dots_list(trace = trace, parent = parent, ...)
  classes <- c(.subclass, "rlang_error", "error")
  .Call(rlang_new_condition, classes, message, data)
}
#' @rdname cnd
#' @export
warning_cnd <- function(.subclass = NULL, ..., message = "") {
  # Class vector always ends in "warning"; named `...` are stored as fields.
  .Call(rlang_new_condition, c(.subclass, "warning"), message, dots_list(...))
}
#' @rdname cnd
#' @export
message_cnd <- function(.subclass = NULL, ..., message = "") {
  # Class vector always ends in "message"; named `...` are stored as fields.
  .Call(rlang_new_condition, c(.subclass, "message"), message, dots_list(...))
}
#' Is object a condition?
#' @param x An object to test.
#' @export
is_condition <- function(x) {
  # TRUE for any object whose class vector contains "condition",
  # however deeply subclassed.
  inherits(x, "condition")
}
#' What type is a condition?
#'
#' Use `cnd_type()` to check what type a condition is.
#'
#' @param cnd A condition object.
#' @return A string, either `"condition"`, `"message"`, `"warning"`,
#' `"error"` or `"interrupt"`.
#' @export
#' @examples
#' cnd_type(catch_cnd(abort("Abort!")))
#' cnd_type(catch_cnd(interrupt()))
cnd_type <- function(cnd) {
  # Classification is done in C; per the roxygen above, the result is one of
  # "condition", "message", "warning", "error" or "interrupt".
  .Call(rlang_cnd_type, cnd)
}
#' Signal a condition object
#'
#' @description
#'
#' The type of signal depends on the class of the condition:
#'
#' * A message is signalled if the condition inherits from
#' `"message"`. This is equivalent to signalling with [inform()] or
#' [base::message()].
#'
#' * A warning is signalled if the condition inherits from
#' `"warning"`. This is equivalent to signalling with [warn()] or
#' [base::warning()].
#'
#' * An error is signalled if the condition inherits from
#' `"error"`. This is equivalent to signalling with [abort()] or
#' [base::stop()].
#'
#' * An interrupt is signalled if the condition inherits from
#' `"interrupt"`. This is equivalent to signalling with
#' [interrupt()].
#'
#' Use [cnd_type()] to determine the type of a condition.
#'
#'
#' @section Lifecycle:
#'
#' * `.cnd` has been renamed to `cnd` and is deprecated as of rlang 0.3.0.
#'
#' * The `.mufflable` argument is deprecated as of rlang 0.3.0 and no
#' longer has any effect. Non-critical conditions are always
#' signalled with a muffle restart.
#'
#' * Creating a condition object with [cnd_signal()] is deprecated as
#' of rlang 0.3.0. Please use [signal()] instead.
#'
#' @param cnd A condition object (see [cnd()]).
#' @param .cnd,.mufflable These arguments are deprecated.
#' @seealso [abort()], [warn()] and [inform()] for creating and
#' signalling structured R conditions. See [with_handlers()] for
#' establishing condition handlers.
#' @export
#' @examples
#' # The type of signal depends on the class. If the condition
#' # inherits from "warning", a warning is issued:
#' cnd <- warning_cnd("my_warning_class", message = "This is a warning")
#' cnd_signal(cnd)
#'
#' # If it inherits from "error", an error is raised:
#' cnd <- error_cnd("my_error_class", message = "This is an error")
#' try(cnd_signal(cnd))
cnd_signal <- function(cnd, .cnd, .mufflable) {
  # Handles the deprecated `.cnd`/`.mufflable` calling conventions; may
  # rewrite `cnd` in this frame before signalling (see the helper below).
  validate_cnd_signal_args(cnd, .cnd, .mufflable)
  # The C routine chooses how to signal based on the condition's class.
  invisible(.Call(rlang_cnd_signal, cnd))
}
# Back-compat shim for cnd_signal(): translates the deprecated calling
# conventions (string `cnd`, `.cnd`, `.mufflable`) and writes the resolved
# condition back into the CALLER's frame via `env$cnd` -- this function is
# side-effectful by design.
validate_cnd_signal_args <- function(cnd, .cnd, .mufflable,
                                     env = parent.frame()) {
  # Deprecated: passing a class string instead of a condition object.
  if (is_character(cnd)) {
    warn_deprecated(paste_line(
      "Creating a condition with `cnd_signal()` is deprecated as of rlang 0.3.0.",
      "Please use `signal()` instead."
    ))
    env$cnd <- cnd(cnd)
  }
  # Deprecated `.cnd` argument; it may itself be a string (doubly deprecated).
  if (!missing(.cnd)) {
    warn_deprecated(paste_line(
      "The `.cnd` argument is deprecated as of rlang 0.3.0.",
      "Please use `cnd` instead."
    ))
    if (is_character(.cnd)) {
      warn_deprecated(paste_line(
        "Creating a condition with `cnd_signal()` is deprecated as of rlang 0.3.0.",
        "Please use `signal()` instead."
      ))
      .cnd <- cnd(.cnd)
    }
    env$cnd <- .cnd
  }
  # `.mufflable` is accepted but ignored (see the Lifecycle roxygen section).
  if (!missing(.mufflable)) {
    warn_deprecated(
      "`.mufflable` is deprecated as of rlang 0.3.0 and no longer has any effect"
    )
  }
}
#' Signal an error, warning, or message
#'
#' @description
#'
#' These functions are equivalent to base functions [base::stop()],
#' [base::warning()] and [base::message()], but make it easy to supply
#' condition metadata:
#'
#' * Supply `.subclass` to create a classed condition. Typed
#' conditions can be captured or handled selectively, allowing for
#' finer-grained error handling.
#'
#' * Supply metadata with named `...` arguments. This data will be
#' stored in the condition object and can be examined by handlers.
#'
#' `interrupt()` allows R code to simulate a user interrupt of the
#' kind that is signalled with `Ctrl-C`. It is currently not possible
#' to create custom interrupt condition objects.
#'
#'
#' @section Backtrace:
#'
#' Unlike `stop()` and `warning()`, these functions don't include call
#' information by default. This saves you from typing `call. = FALSE`
#' and produces cleaner error messages.
#'
#' A backtrace is always saved into error objects. You can print a
#' simplified backtrace of the last error by calling [last_error()]
#' and a full backtrace with `summary(last_error())`.
#'
#' You can also display a backtrace with the error message by setting
#' the option `rlang_backtrace_on_error`. It supports the following
#' values:
#'
#' * `"reminder"`: Invite users to call `rlang::last_error()` to see a
#' backtrace.
#' * `"branch"`: Display a simplified backtrace.
#' * `"collapse"`: Display a collapsed backtrace tree.
#' * `"full"`: Display a full backtrace tree.
#' * `"none"`: Display nothing.
#'
#' @section Mufflable conditions:
#'
#' Signalling a condition with `inform()` or `warn()` causes a message
#' to be displayed in the console. These messages can be muffled with
#' [base::suppressMessages()] or [base::suppressWarnings()].
#'
#' On recent R versions (>= R 3.5.0), interrupts are typically
#' signalled with a `"resume"` restart. This is however not
#' guaranteed.
#'
#'
#' @section Lifecycle:
#'
#' These functions were changed in rlang 0.3.0 to take condition
#' metadata with `...`. Consequently:
#'
#' * All arguments were renamed to be prefixed with a dot, except for
#' `type` which was renamed to `.subclass`.
#' * `.call` (previously `call`) can no longer be passed positionally.
#'
#' @inheritParams cnd
#' @param message The message to display.
#' @param .subclass Subclass of the condition. This allows your users
#' to selectively handle the conditions signalled by your functions.
#' @param ... Additional data to be stored in the condition object.
#' @param call Defunct as of rlang 0.4.0. Storing the full
#' backtrace is now preferred to storing a simple call.
#' @param msg,type These arguments were renamed to `message` and
#' `.subclass` and are defunct as of rlang 0.4.0.
#'
#' @seealso [with_abort()] to convert all errors to rlang errors.
#' @examples
#' # These examples are guarded to avoid throwing errors
#' if (FALSE) {
#'
#' # Signal an error with a message just like stop():
#' abort("Something bad happened")
#'
#' # Give a class to the error:
#' abort("Something bad happened", "somepkg_bad_error")
#'
#' # This will allow your users to handle the error selectively
#' tryCatch(
#' somepkg_function(),
#' somepkg_bad_error = function(err) {
#' warn(err$message) # Demote the error to a warning
#' NA # Return an alternative value
#' }
#' )
#'
#' # You can also specify metadata that will be stored in the condition:
#' abort("Something bad happened", "somepkg_bad_error", data = 1:10)
#'
#' # This data can then be consulted by user handlers:
#' tryCatch(
#' somepkg_function(),
#' somepkg_bad_error = function(err) {
#' # Compute an alternative return value with the data:
#' recover_error(err$data)
#' }
#' )
#'
#' # If you call low-level APIs it is good practice to catch technical
#' # errors and rethrow them with a more meaningful message. Pass on
#' # the caught error as `parent` to get a nice decomposition of
#' # errors and backtraces:
#' file <- "http://foo.bar/baz"
#' tryCatch(
#' download(file),
#' error = function(err) {
#' msg <- sprintf("Can't download `%s`", file)
#' abort(msg, parent = err)
#' })
#'
#' # Unhandled errors are saved automatically by `abort()` and can be
#' # retrieved with `last_error()`. The error prints with a simplified
#' # backtrace:
#' abort("Saved error?")
#' last_error()
#'
#' # Use `summary()` to print the full backtrace and the condition fields:
#' summary(last_error())
#'
#' }
#' @export
abort <- function(message, .subclass = NULL,
                  ...,
                  trace = NULL,
                  call = NULL,
                  parent = NULL,
                  msg, type) {
  # Reject the defunct `msg`/`type`/`call` arguments up front.
  validate_signal_args(msg, type, call)
  # Capture a backtrace unless the caller supplied one or capture is
  # disabled via the option below.
  if (is_null(trace) && is_null(peek_option("rlang__disable_trace_capture"))) {
    # Prevents infloops when rlang throws during trace capture
    scoped_options("rlang__disable_trace_capture" = TRUE)
    trace <- trace_back()
    # With a parent error the trace is trimmed at the catching context
    # (e.g. the tryCatch() that caught the parent); otherwise keep it whole.
    if (is_null(parent)) {
      context <- trace_length(trace)
    } else {
      context <- find_capture_context()
    }
    trace <- trace_trim_context(trace, context)
  }
  # Only collapse lengthy vectors because `paste0()` removes the class
  # of glue strings
  if (length(message) > 1) {
    message <- paste0(message, collapse = "\n")
  }
  # Build the classed error condition and throw it through base stop().
  cnd <- error_cnd(.subclass,
    ...,
    message = message,
    parent = parent,
    trace = trace
  )
  stop(cnd)
}
# Drop everything from `frame` onward in a backtrace. `frame` may be a frame
# environment (located by label in trace$envs) or a positional index; any
# other value is an error.
trace_trim_context <- function(trace, frame = caller_env()) {
  # Resolve `frame` to a positional index into the trace.
  if (is_environment(frame)) {
    idx <- detect_index(trace$envs, identical, env_label(frame))
  } else if (is_scalar_integerish(frame)) {
    idx <- frame
  } else {
    abort("`frame` must be a frame environment or index")
  }
  # Remove the frames from that index through the end, if any.
  drop <- seq2(idx, trace_length(trace))
  if (length(drop) > 0) {
    trace <- trace_subset(trace, -drop)
  }
  trace
}
# FIXME: Find more robust strategy of stripping catching context
# Walk the call stack to locate the frame of the tryCatch()/with_handlers()
# that caught the parent error, so abort() can trim the backtrace there.
# The stack-walking below depends on exact sys.* frame offsets; treat the
# statement order as load-bearing.
find_capture_context <- function(n = 3L) {
  sys_parent <- sys.parent(n)
  thrower_frame <- sys.frame(sys_parent)
  call <- sys.call(sys_parent)
  frame <- sys.frame(sys_parent)
  # If we are not inside base::tryCatchOne(), the thrower's own frame is the
  # best available context.
  if (!is_call(call, "tryCatchOne") || !env_inherits(frame, ns_env("base"))) {
    return(thrower_frame)
  }
  # Climb parents until the enclosing tryCatch() call is found.
  sys_parents <- sys.parents()
  while (!is_call(call, "tryCatch")) {
    sys_parent <- sys_parents[sys_parent]
    call <- sys.call(sys_parent)
  }
  # If tryCatch() was itself invoked by with_handlers(), prefer that frame.
  next_parent <- sys_parents[sys_parent]
  call <- sys.call(next_parent)
  if (is_call(call, "with_handlers")) {
    sys_parent <- next_parent
  }
  sys.frame(sys_parent)
}
#' @rdname abort
#' @export
# Signal a warning with optional subclass and condition fields; the defunct
# `msg`/`type`/`call` arguments are rejected by the shared validator.
warn <- function(message, .subclass = NULL, ..., call = NULL, msg, type) {
  validate_signal_args(msg, type, call)
  cnd <- warning_cnd(
    .subclass,
    ...,
    message = paste0(message, collapse = "\n")
  )
  warning(cnd)
}
#' @rdname abort
#' @export
# Signal a message with optional subclass and condition fields; the defunct
# `msg`/`type`/`call` arguments are rejected by the shared validator.
inform <- function(message, .subclass = NULL, ..., call = NULL, msg, type) {
  validate_signal_args(msg, type, call)
  # Collapse vector input to one string, then add the trailing newline that
  # console messages end with.
  text <- paste0(paste0(message, collapse = "\n"), "\n")
  message(message_cnd(.subclass, ..., message = text))
}
#' @rdname abort
#' @export
# Signal a bare condition of the given subclass carrying `...` as fields.
signal <- function(message, .subclass, ...) {
  text <- paste0(message, collapse = "\n")
  cnd_signal(cnd(.subclass, ..., message = text))
}
# Shared validator for abort()/warn()/inform(): rejects the renamed or
# removed arguments with defunct errors. `msg` and `type` are checked via
# missing() (they have no defaults); `call` is checked against NULL.
validate_signal_args <- function(msg, type, call) {
  if (!missing(msg)) {
    stop_defunct("`msg` has been renamed to `message` and is deprecated as of rlang 0.3.0")
  }
  if (!missing(type)) {
    stop_defunct("`type` has been renamed to `.subclass` and is deprecated as of rlang 0.3.0")
  }
  if (!is_null(call)) {
    stop_defunct("`call` is deprecated as of rlang 0.3.0")
  }
}
#' @rdname abort
#' @export
interrupt <- function() {
  # Simulates a user interrupt (Ctrl-C) from R code; implemented in C.
  .Call(rlang_interrupt)
}
#' Muffle a condition
#'
#' Unlike [exiting()] handlers, [calling()] handlers must be explicit
#' that they have handled a condition to stop it from propagating to
#' other handlers. Use `cnd_muffle()` within a calling handler (or as
#' a calling handler, see examples) to prevent any other handlers from
#' being called for that condition.
#'
#'
#' @section Mufflable conditions:
#'
#' Most conditions signalled by base R are mufflable, although the name
#' of the restart varies. cnd_muffle() will automatically call the
#' correct restart for you. It is compatible with the following
#' conditions:
#'
#' * `warning` and `message` conditions. In this case `cnd_muffle()`
#' is equivalent to [base::suppressMessages()] and
#' [base::suppressWarnings()].
#'
#' * Bare conditions signalled with `signal()` or [cnd_signal()]. Note
#' that conditions signalled with [base::signalCondition()] are not
#' mufflable.
#'
#' * Interrupts are sometimes signalled with a `resume` restart on
#' recent R versions. When this is the case, you can muffle the
#' interrupt with `cnd_muffle()`. Check if a restart is available
#' with `base::findRestart("resume")`.
#'
#' If you call `cnd_muffle()` with a condition that is not mufflable
#' you will cause a new error to be signalled.
#'
#' * Errors are not mufflable since they are signalled in critical
#' situations where execution cannot continue safely.
#'
#' * Conditions captured with [base::tryCatch()], [with_handlers()] or
#' [catch_cnd()] are no longer mufflable. Muffling restarts _must_
#' be called from a [calling] handler.
#'
#' @param cnd A condition to muffle.
#'
#' @export
#' @examples
#' fn <- function() {
#' inform("Beware!", "my_particular_msg")
#' inform("On your guard!")
#' "foobar"
#' }
#'
#' # Let's install a muffling handler for the condition thrown by `fn()`.
#' # This will suppress all `my_particular_msg` messages but let other
#' # types of messages go through:
#' with_handlers(fn(),
#' my_particular_msg = calling(function(cnd) {
#' inform("Dealt with this particular message")
#' cnd_muffle(cnd)
#' })
#' )
#'
#' # Note how execution of `fn()` continued normally after dealing
#' # with that particular message.
#'
#' # cnd_muffle() can also be passed to with_handlers() as a calling
#' # handler:
#' with_handlers(fn(),
#' my_particular_msg = calling(cnd_muffle)
#' )
# Invoke the muffling restart appropriate for `cnd`'s type, preventing other
# handlers from seeing the condition. Errors if no such restart is
# established (errors are never mufflable; see the roxygen above).
cnd_muffle <- function(cnd) {
  # Map the condition type to the restart name base R (or rlang, for bare
  # conditions) establishes for it.
  type <- cnd_type(cnd)
  if (type == "message") {
    restart <- "muffleMessage"
  } else if (type == "warning") {
    restart <- "muffleWarning"
  } else if (type == "interrupt") {
    restart <- "resume"
  } else {
    restart <- "rlang_muffle"
  }
  # invokeRestart() does not return, so falling through means no restart
  # was available.
  if (!is_null(findRestart(restart))) {
    invokeRestart(restart)
  }
  abort("Can't find a muffling restart")
}
#' Catch a condition
#'
#' This is a small wrapper around `tryCatch()` that captures any
#' condition signalled while evaluating its argument. It is useful for
#' situations where you expect a specific condition to be signalled,
#' for debugging, and for unit testing.
#'
#' @param expr Expression to be evaluated with a catching condition
#' handler.
#' @param classes A character vector of condition classes to catch. By
#' default, catches all conditions.
#' @return A condition if any was signalled, `NULL` otherwise.
#' @export
#' @examples
#' catch_cnd(10)
#' catch_cnd(abort("an error"))
#' catch_cnd(cnd_signal("my_condition", .msg = "a condition"))
catch_cnd <- function(expr, classes = "condition") {
  stopifnot(is_character(classes))
  # One identity handler per requested class: the first matching condition
  # signalled becomes the value of the tryCatch() call.
  handlers <- rep_named(classes, list(identity))
  # Splice the handlers into a literal tryCatch() call. If `expr` completes
  # without signalling anything, return NULL instead.
  eval_bare(rlang::expr(
    tryCatch(!!!handlers, {
      force(expr)
      return(NULL)
    })
  ))
}
#' @export
# Print method for rlang errors: header with the class, the message, an
# optionally simplified backtrace, then the chain of parent errors (printed
# recursively with `child` set so common trace frames can be trimmed).
print.rlang_error <- function(x,
                              ...,
                              child = NULL,
                              simplify = c("branch", "collapse", "none"),
                              fields = FALSE) {
  # Show "error/<subclass>" unless the condition is a plain "error".
  class <- class(x)[[1]]
  if (class != "error") {
    class <- paste0("error/", class)
  }
  # Parent errors (recursive calls) are labelled as such in the header.
  if (is_null(child)) {
    header <- bold(sprintf("<%s>", class))
  } else {
    header <- bold(sprintf("<parent: %s>", class))
  }
  if (is_string(x$message) && nzchar(x$message)) {
    message <- x$message
  } else {
    message <- NULL
  }
  cat_line(
    header,
    message
  )
  trace <- x$trace
  simplify <- arg_match(simplify, c("collapse", "branch", "none"))
  if (!is_null(trace)) {
    cat_line(bold("Backtrace:"))
    if (!is_null(child)) {
      # Trim common portions of backtrace
      child_trace <- child$trace
      common <- map_lgl(trace$envs, `%in%`, child_trace$envs)
      trace <- trace_subset(trace, which(!common))
      # Trim catching context if any
      calls <- trace$calls
      if (length(calls) && is_call(calls[[1]], c("tryCatch", "with_handlers", "catch_cnd"))) {
        trace <- trace_subset_across(trace, -1, 1)
      }
    }
    trace_lines <- format(trace, ..., simplify = simplify)
    cat_line(trace_lines)
  }
  # Recurse into the causal chain, passing ourselves as `child`.
  if (!is_null(x$parent)) {
    print.rlang_error(x$parent, ..., child = x, simplify = simplify, fields = fields)
  }
  # Recommend printing the full backtrace. Only do it after having
  # printed all parent errors first.
  if (simplify == "branch" && is_null(x$parent) && !is_null(trace)) {
    cat_line(silver("Call `rlang::last_trace()` to see the full backtrace"))
  }
  invisible(x)
}
# Last error to be returned in last_error()
# Package-level mutable state: updated by conditionMessage.rlang_error()
# and entrace_handle_top() whenever an error is displayed/captured.
last_error_env <- new.env(parent = emptyenv())
last_error_env$cnd <- NULL
#' @export
summary.rlang_error <- function(object, ...) {
  # Full view of the error: unsimplified backtrace plus condition fields
  print(object, simplify = "none", fields = TRUE)
}
#' @export
conditionMessage.rlang_error <- function(c) {
  # Record the displayed error so `last_error()` can return it later
  last_error_env$cnd <- c
  out <- c$message
  # Walk the chain of parent errors, collecting their messages. `cursor`
  # ends on the innermost (root) condition, whose trace is shown below.
  parent_msgs <- chr()
  cursor <- c
  while (is_condition(cursor$parent)) {
    cursor <- cursor$parent
    parent_msgs <- chr(parent_msgs, cursor$message)
  }
  if (length(parent_msgs)) {
    out <- paste_line(
      out,
      "Parents:",
      cli_branch(parent_msgs)
    )
  }
  # Append the backtrace (if any) of the root condition, formatted
  # according to the `rlang_backtrace_on_error` option
  paste_line(out, format_onerror_backtrace(cursor$trace))
}
#' @export
as.character.rlang_error <- function(x, ...) {
  # Programmatic uses (e.g. knitr) should only see the message itself,
  # without any backtrace or reminder attached.
  x[["message"]]
}
#' Display backtrace on error
#'
#' @description
#'
#' Errors thrown with [abort()] automatically save a backtrace that
#' can be inspected by calling [last_error()]. Optionally, you can
#' also display the backtrace alongside the error message by setting
#' the option `rlang_backtrace_on_error` to one of the following
#' values:
#'
#' * `"reminder"`: Display a reminder that the backtrace can be
#' inspected by calling [rlang::last_error()].
#' * `"branch"`: Display a simplified backtrace.
#' * `"collapse"`: Display a collapsed backtrace tree.
#' * `"full"`: Display the full backtrace tree.
#'
#'
#' @section Promote base errors to rlang errors:
#'
#' Call `options(error = rlang::enframe)` to instrument base
#' errors with rlang features. This handler does two things:
#'
#' * It saves the base error as an rlang object. This allows you to
#' call [last_error()] to print the backtrace or inspect its data.
#'
#' * It prints the backtrace for the current error according to the
#' [`rlang_backtrace_on_error`] option.
#'
#' @name rlang_backtrace_on_error
#' @aliases add_backtrace
#'
#' @examples
#' # Display a simplified backtrace on error for both base and rlang
#' # errors:
#'
#' # options(
#' # rlang_backtrace_on_error = "branch",
#' # error = rlang::enframe
#' # )
#' # stop("foo")
NULL
format_onerror_backtrace <- function(trace) {
  # Format a backtrace for display alongside an error message, according
  # to the `rlang_backtrace_on_error` option (see `show_trace_p()`).
  # Returns NULL when nothing should be displayed.
  display <- show_trace_p()
  valid_opts <- c("none", "reminder", "branch", "collapse", "full")
  if (!is_string(display) || !display %in% valid_opts) {
    options(rlang_backtrace_on_error = NULL)
    warn("Invalid `rlang_backtrace_on_error` option (resetting to `NULL`)")
    return(NULL)
  }
  if (display == "none") {
    return(NULL)
  }
  # Only the simplified branch display is truncated
  max_frames <- if (display == "branch") 10L else NULL
  simplify <- switch(display,
    full = "none",
    reminder = "branch", # Check size of backtrace branch
    display
  )
  backtrace_lines <- format(trace, simplify = simplify, max_frames = max_frames)
  # Backtraces of size 0 and 1 are uninteresting
  if (length(backtrace_lines) <= 1L) {
    return(NULL)
  }
  if (display == "reminder") {
    if (!is_interactive()) {
      return(NULL)
    }
    return(silver("Call `rlang::last_error()` to see a backtrace"))
  }
  paste_line(
    "Backtrace:",
    backtrace_lines
  )
}
show_trace_p <- function() {
  # Resolve the backtrace display mode. The double-underscore option is
  # the old experimental spelling and takes precedence, with a
  # deprecation warning.
  legacy_opt <- peek_option("rlang__backtrace_on_error")
  if (!is_null(legacy_opt)) {
    warn_deprecated(paste_line(
      "`rlang__backtrace_on_error` is no longer experimental.",
      "It has been renamed to `rlang_backtrace_on_error`. Please update your RProfile."
    ))
    return(legacy_opt)
  }
  opt <- peek_option("rlang_backtrace_on_error")
  if (!is_null(opt)) {
    return(opt)
  }
  # FIXME: parameterise `is_interactive()`?
  # Knitr and notebook evaluation should be treated as non-interactive,
  # so mask those options while probing interactivity.
  interactive <- with_options(
    knitr.in.progress = NULL,
    rstudio.notebook.executing = NULL,
    is_interactive()
  )
  # Default: a gentle reminder interactively, the full tree otherwise
  if (interactive) "reminder" else "full"
}
#' Add backtrace from error handler
#'
#' @description
#'
#' `entrace()` interrupts an error throw to add an [rlang
#' backtrace][trace_back()] to the error. The error throw is
#' immediately resumed. `cnd_entrace()` adds a backtrace to a
#' condition object, without any other effect. Both functions should
#' be called directly from an error handler.
#'
#' Set the `error` global option to `quote(rlang::entrace())` to
#' transform base errors to rlang errors. These enriched errors
#' include a backtrace. The RProfile is a good place to set the
#' handler.
#'
#' `entrace()` also works as a [calling][calling] handler, though it
#' is often more practical to use the higher-level function
#' [with_abort()].
#'
#' @inheritParams trace_back
#' @param cnd When `entrace()` is used as a calling handler, `cnd` is
#' the condition to handle.
#' @param ... Unused. These dots are for future extensions.
#'
#' @seealso [with_abort()] to promote conditions to rlang errors.
#' [cnd_entrace()] to manually add a backtrace to a condition.
#' @examples
#' if (FALSE) { # Not run
#'
#' # Set the error handler in your RProfile like this:
#' if (requireNamespace("rlang", quietly = TRUE)) {
#' options(error = rlang::entrace)
#' }
#'
#' }
#' @export
entrace <- function(cnd, ..., top = NULL, bottom = NULL) {
  # Adds an rlang backtrace to a base error and resumes the throw.
  # NOTE(review): the frame arithmetic below assumes entrace() is called
  # directly as a condition handler or as the `error` option handler —
  # do not add wrapper frames around this function.
  check_dots_empty(...)
  # rlang errors already carry a backtrace; nothing to do
  if (!missing(cnd) && inherits(cnd, "rlang_error")) {
    return()
  }
  if (is_null(bottom)) {
    # Inspect the signalling context to find where the trace should stop
    nframe <- sys.nframe() - 1
    info <- signal_context_info(nframe)
    bottom <- sys.frame(info[[2]])
  }
  trace <- trace_back(top = top, bottom = bottom)
  if (missing(cnd)) {
    # Invoked via `options(error = entrace)`: save/display the trace
    entrace_handle_top(trace)
  } else {
    # Invoked as a calling handler: rethrow as an rlang error carrying
    # the original condition and the captured backtrace
    abort(cnd$message %||% "", error = cnd, trace = trace)
  }
}
#' @rdname entrace
#' @export
cnd_entrace <- function(cnd, ..., top = NULL, bottom = NULL) {
  # Attach a backtrace to `cnd` without any other side effect. Must be
  # called directly from a condition handler for the frame arithmetic
  # below to find the signalling context correctly.
  check_dots_empty(...)
  # Don't overwrite an existing backtrace
  if (!is_null(cnd$trace)) {
    return(cnd)
  }
  if (is_null(bottom)) {
    nframe <- sys.parent() - 1
    info <- signal_context_info(nframe)
    bottom <- sys.frame(info[[2]])
  }
  cnd$trace <- trace_back(top = top, bottom = bottom)
  cnd
}
#' Return information about signalling context
#'
#' @param nframe The depth of the frame to inspect. In a condition
#' handler, this would typically be `sys.nframe() - 1L`.
#'
#' @return A named list of two elements `type` and `depth`. The depth
#' is the call frame number of the signalling context. The type is
#' one of:
#'
#' * `"unknown"`
#' * `"stop_message"` for errors thrown with `base::stop("message")"
#' * `"stop_condition"` for errors thrown with `base::stop(cnd_object)`
#' * `"stop_native"` for errors thrown from C
#' * `"stop_rlang"` for errors thrown with `rlang::abort()`
#' * `"warning_message"` for warnings signalled with `base::warning("message")"
#' * `"warning_condition"` for warnings signalled with `base::warning(cnd_object)`
#' * `"warning_native"` for warnings signalled from C
#' * `"warning_promoted"` for warnings promoted to errors with `getOption("warn")`
#' * `"warning_rlang"` for warnings signalled with `rlang::warn()`
#' * `"message"` for messages signalled with `base::message()`
#' * `"message_rlang"` for messages signalled with `rlang::inform()`
#' * `"condition"` for conditions signalled with `base::signalCondition()`
#'
#' @keywords internal
#' @noRd
signal_context_info <- function(nframe) {
  # Identify how the condition at frame `nframe` was signalled by
  # comparing function bodies up the call stack against known base
  # signalling entry points. Returns list(type = <string>, depth = <int>).
  # The hard-coded frame offsets mirror base R's internal call chains
  # (e.g. warning() -> .signalSimpleWarning() -> withRestarts() -> ...).
  first <- sys_body(nframe)
  # Errors: thrown via the internal simple-error handler
  if (is_same_body(first, body(.handleSimpleError))) {
    if (is_same_body(sys_body(nframe - 1), body(stop))) {
      return(list(type = "stop_message", depth = nframe - 2))
    } else if (is_same_body(sys_body(nframe - 4), body(.signalSimpleWarning))) {
      return(list(type = "warning_promoted", depth = nframe - 6))
    } else {
      return(list(type = "stop_native", depth = nframe - 1))
    }
  }
  # Errors: thrown via stop(cnd) with a condition object
  if (is_same_body(first, body(stop))) {
    if (is_same_body(sys_body(nframe - 1), body(abort))) {
      return(list(type = "stop_rlang", depth = nframe - 2))
    } else {
      return(list(type = "stop_condition", depth = nframe - 1))
    }
  }
  # Messages and bare conditions: signalled via signalCondition()
  if (is_same_body(first, body(signalCondition))) {
    if (from_withrestarts(nframe - 1) && is_same_body(sys_body(nframe - 4), body(message))) {
      if (is_same_body(sys_body(nframe - 5), body(inform))) {
        return(list(type = "message_rlang", depth = nframe - 6))
      } else {
        return(list(type = "message", depth = nframe - 5))
      }
    } else {
      return(list(type = "condition", depth = nframe - 1))
    }
  }
  # Warnings: signalled inside a withRestarts() muffle context
  if (from_withrestarts(nframe)) {
    withrestarts_caller <- sys_body(nframe - 3)
    if (is_same_body(withrestarts_caller, body(.signalSimpleWarning))) {
      if (is_same_body(sys_body(nframe - 4), body(warning))) {
        return(list(type = "warning_message", depth = nframe - 5))
      } else {
        return(list(type = "warning_native", depth = nframe - 4))
      }
    } else if (is_same_body(withrestarts_caller, body(warning))) {
      if (is_same_body(sys_body(nframe - 4), body(warn))) {
        return(list(type = "warning_rlang", depth = nframe - 5))
      } else {
        return(list(type = "warning_condition", depth = nframe - 4))
      }
    }
  }
  # Fallback when the signalling pattern isn't recognised
  list(type = "unknown", depth = nframe)
}
from_withrestarts <- function(nframe) {
  # Is frame `nframe` a restart invocation set up by withRestarts()?
  # Guard form preserves the original short-circuit: the grandparent
  # body is only inspected when the call matches doWithOneRestart().
  if (!is_call(sys.call(nframe), "doWithOneRestart")) {
    return(FALSE)
  }
  is_same_body(sys_body(nframe - 2), body(withRestarts))
}
sys_body <- function(n) {
  # Body of the function executing in call-stack frame `n`
  fn <- sys.function(n)
  body(fn)
}
entrace_handle_top <- function(trace) {
  # Called when entrace() is used as the `options(error = )` handler.
  # Saves a synthetic rlang error (for last_error()) and prints the
  # backtrace. The relative frame lookups (-2) assume this is called
  # directly from entrace() — do not add intermediate frames.
  # Happens with ctrl-c at top-level
  if (!trace_length(trace)) {
    return()
  }
  stop_call <- sys.call(-2)
  stop_frame <- sys.frame(-2)
  cnd <- stop_frame$cond
  # False for errors thrown from the C side
  from_stop <- is_call(stop_call, "stop", ns = c("", "base"))
  # No need to do anything for rlang errors
  if (from_stop && inherits(cnd, "rlang_error")) {
    return(NULL)
  }
  # Recover the error message: rebuild it from stop()'s dots, take it
  # from the condition object, or fall back to geterrmessage()
  if (from_stop) {
    if (is_null(cnd)) {
      msg_call <- quote(.makeMessage(..., domain = domain))
      msg <- eval_bare(msg_call, stop_frame)
    } else {
      msg <- cnd$message
    }
  } else {
    msg <- geterrmessage()
  }
  # Save a fake rlang error containing the backtrace
  err <- error_cnd(message = msg, error = cnd, trace = trace, parent = cnd)
  last_error_env$cnd <- err
  # Print backtrace for current error
  backtrace_lines <- format_onerror_backtrace(trace)
  if (length(backtrace_lines)) {
    cat_line(backtrace_lines)
  }
  NULL
}
add_backtrace <- function() {
  # Deprecated alias kept for backward compatibility; forwards to
  # entrace() with this function's caller frame as the trace bottom.
  # Warnings don't go through when error is being handled
  msg <- "Warning: `add_backtrace()` is now exported as `entrace()` as of rlang 0.3.1"
  cat_line(msg, file = stderr())
  entrace(bottom = sys.frame(-1))
}
#' Promote all errors to rlang errors
#'
#' @description
#'
#' `with_abort()` promotes conditions as if they were thrown with
#' [abort()]. These errors embed a [backtrace][trace_back]. They are
#' particularly suitable to be set as *parent errors* (see `parent`
#' argument of [abort()]).
#'
#' @param expr An expression run in a context where errors are
#' promoted to rlang errors.
#' @param classes Character vector of condition classes that should be
#' promoted to rlang errors.
#'
#' @details
#'
#' `with_abort()` installs a [calling handler][calling] for errors and
#' rethrows non-rlang errors with [abort()]. However, error handlers
#' installed *within* `with_abort()` have priority. For this reason,
#' you should use [tryCatch()] and [exiting] handlers outside
#' `with_abort()` rather than inside.
#'
#' @examples
#' # For cleaner backtraces:
#' options(rlang_trace_top_env = current_env())
#'
#' # with_abort() automatically casts simple errors thrown by stop()
#' # to rlang errors:
#' fn <- function() stop("Base error")
#' try(with_abort(fn()))
#' last_error()
#'
#' # with_abort() is handy for rethrowing low level errors. The
#' # backtraces are then segmented between the low level and high
#' # level contexts.
#' low_level1 <- function() low_level2()
#' low_level2 <- function() stop("Low level error")
#'
#' high_level <- function() {
#' with_handlers(
#' with_abort(low_level1()),
#' error = ~ abort("High level error", parent = .x)
#' )
#' }
#'
#' try(high_level())
#' last_error()
#' summary(last_error())
#'
#' # Reset to default
#' options(rlang_trace_top_env = NULL)
#' @export
with_abort <- function(expr, classes = "error") {
  # Install entrace() as a calling handler for each class, then evaluate
  # `expr`. Evaluation goes through the C-level eval entry point —
  # presumably to keep extra R eval frames out of captured backtraces
  # (NOTE(review): confirm against rlang_eval's C implementation).
  handlers <- rep_named(classes, list(entrace))
  handle_call <- rlang::expr(withCallingHandlers(expr, !!!handlers))
  .Call(rlang_eval, handle_call, current_env())
}
|
/R/cnd.R
|
no_license
|
DocEd/rlang
|
R
| false
| false
| 31,640
|
r
|
#' Create a condition object
#'
#' These constructors make it easy to create subclassed conditions.
#' Conditions are objects that power the error system in R. They can
#' also be used for passing messages to pre-established handlers.
#'
#' `cnd()` creates objects inheriting from `condition`. Conditions
#' created with `error_cnd()`, `warning_cnd()` and `message_cnd()`
#' inherit from `error`, `warning` or `message`.
#'
#' @section Lifecycle:
#'
#' The `.type` and `.msg` arguments have been renamed to `.subclass`
#' and `message`. They are deprecated as of rlang 0.3.0.
#'
#' @param .subclass The condition subclass.
#' @param ... Named data fields stored inside the condition
#' object. These dots are evaluated with [explicit
#' splicing][tidy-dots].
#' @param message A default message to inform the user about the
#' condition when it is signalled.
#' @param trace A `trace` object created by [trace_back()].
#' @param parent A parent condition object created by [abort()].
#' @seealso [cnd_signal()], [with_handlers()].
#' @export
#' @examples
#' # Create a condition inheriting from the s3 type "foo":
#' cnd <- cnd("foo")
#'
#' # Signal the condition to potential handlers. Since this is a bare
#' # condition the signal has no effect if no handlers are set up:
#' cnd_signal(cnd)
#'
#' # When a relevant handler is set up, the signal causes the handler
#' # to be called:
#' with_handlers(cnd_signal(cnd), foo = exiting(function(c) "caught!"))
#'
#' # Handlers can be thrown or executed inplace. See with_handlers()
#' # documentation for more on this.
#'
#' # Signalling an error condition aborts the current computation:
#' err <- error_cnd("foo", message = "I am an error")
#' try(cnd_signal(err))
cnd <- function(.subclass, ..., message = "") {
  # Construct a bare condition object. A subclass is mandatory: an
  # unclassed condition could not be handled selectively.
  if (missing(.subclass)) {
    abort("Bare conditions must be subclassed")
  }
  fields <- dots_list(...)
  .Call(rlang_new_condition, .subclass, message, fields)
}
#' @rdname cnd
#' @export
error_cnd <- function(.subclass = NULL,
                      ...,
                      message = "",
                      trace = NULL,
                      parent = NULL) {
  # Construct an error condition inheriting from "rlang_error" and
  # "error", optionally carrying a backtrace and a parent condition.
  if (!is_null(trace) && !inherits(trace, "rlang_trace")) {
    abort("`trace` must be NULL or an rlang backtrace")
  }
  if (!is_null(parent) && !inherits(parent, "condition")) {
    abort("`parent` must be NULL or a condition object")
  }
  # `trace` and `parent` are stored as ordinary condition fields
  fields <- dots_list(trace = trace, parent = parent, ...)
  classes <- c(.subclass, "rlang_error", "error")
  .Call(rlang_new_condition, classes, message, fields)
}
#' @rdname cnd
#' @export
warning_cnd <- function(.subclass = NULL, ..., message = "") {
  # Construct a warning condition, optionally with extra data fields
  classes <- c(.subclass, "warning")
  .Call(rlang_new_condition, classes, message, dots_list(...))
}
#' @rdname cnd
#' @export
message_cnd <- function(.subclass = NULL, ..., message = "") {
  # Construct a message condition, optionally with extra data fields
  classes <- c(.subclass, "message")
  .Call(rlang_new_condition, classes, message, dots_list(...))
}
#' Is object a condition?
#' @param x An object to test.
#' @export
is_condition <- function(x) {
  # Conditions are S3 objects whose class vector contains "condition"
  inherits(x, what = "condition")
}
#' What type is a condition?
#'
#' Use `cnd_type()` to check what type a condition is.
#'
#' @param cnd A condition object.
#' @return A string, either `"condition"`, `"message"`, `"warning"`,
#' `"error"` or `"interrupt"`.
#' @export
#' @examples
#' cnd_type(catch_cnd(abort("Abort!")))
#' cnd_type(catch_cnd(interrupt()))
cnd_type <- function(cnd) {
  # Classify `cnd` at the C level; returns one of "condition",
  # "message", "warning", "error" or "interrupt".
  .Call(rlang_cnd_type, cnd)
}
#' Signal a condition object
#'
#' @description
#'
#' The type of signal depends on the class of the condition:
#'
#' * A message is signalled if the condition inherits from
#' `"message"`. This is equivalent to signalling with [inform()] or
#' [base::message()].
#'
#' * A warning is signalled if the condition inherits from
#' `"warning"`. This is equivalent to signalling with [warn()] or
#' [base::warning()].
#'
#' * An error is signalled if the condition inherits from
#' `"error"`. This is equivalent to signalling with [abort()] or
#' [base::stop()].
#'
#' * An interrupt is signalled if the condition inherits from
#' `"interrupt"`. This is equivalent to signalling with
#' [interrupt()].
#'
#' Use [cnd_type()] to determine the type of a condition.
#'
#'
#' @section Lifecycle:
#'
#' * `.cnd` has been renamed to `cnd` and is deprecated as of rlang 0.3.0.
#'
#' * The `.mufflable` argument is deprecated as of rlang 0.3.0 and no
#' longer has any effect. Non-critical conditions are always
#' signalled with a muffle restart.
#'
#' * Creating a condition object with [cnd_signal()] is deprecated as
#' of rlang 0.3.0. Please use [signal()] instead.
#'
#' @param cnd A condition object (see [cnd()]).
#' @param .cnd,.mufflable These arguments are deprecated.
#' @seealso [abort()], [warn()] and [inform()] for creating and
#' signalling structured R conditions. See [with_handlers()] for
#' establishing condition handlers.
#' @export
#' @examples
#' # The type of signal depends on the class. If the condition
#' # inherits from "warning", a warning is issued:
#' cnd <- warning_cnd("my_warning_class", message = "This is a warning")
#' cnd_signal(cnd)
#'
#' # If it inherits from "error", an error is raised:
#' cnd <- error_cnd("my_error_class", message = "This is an error")
#' try(cnd_signal(cnd))
cnd_signal <- function(cnd, .cnd, .mufflable) {
  # Handle the deprecated `.cnd`/`.mufflable` arguments. Note that the
  # validator may rebind `cnd` in this frame (it writes into the caller
  # environment), so it must run before the signal below.
  validate_cnd_signal_args(cnd, .cnd, .mufflable)
  invisible(.Call(rlang_cnd_signal, cnd))
}
validate_cnd_signal_args <- function(cnd, .cnd, .mufflable,
                                     env = parent.frame()) {
  # Soft-deprecation shims for cnd_signal(). Deliberately mutates the
  # caller frame (`env$cnd <- ...`) so cnd_signal() picks up the
  # normalised condition — keep this side effect in mind when editing.
  # A string `cnd` is upgraded to a condition object (deprecated path)
  if (is_character(cnd)) {
    warn_deprecated(paste_line(
      "Creating a condition with `cnd_signal()` is deprecated as of rlang 0.3.0.",
      "Please use `signal()` instead."
    ))
    env$cnd <- cnd(cnd)
  }
  # `.cnd` was renamed to `cnd`; honour it with a deprecation warning
  if (!missing(.cnd)) {
    warn_deprecated(paste_line(
      "The `.cnd` argument is deprecated as of rlang 0.3.0.",
      "Please use `cnd` instead."
    ))
    if (is_character(.cnd)) {
      warn_deprecated(paste_line(
        "Creating a condition with `cnd_signal()` is deprecated as of rlang 0.3.0.",
        "Please use `signal()` instead."
      ))
      .cnd <- cnd(.cnd)
    }
    env$cnd <- .cnd
  }
  # `.mufflable` no longer does anything
  if (!missing(.mufflable)) {
    warn_deprecated(
      "`.mufflable` is deprecated as of rlang 0.3.0 and no longer has any effect"
    )
  }
}
#' Signal an error, warning, or message
#'
#' @description
#'
#' These functions are equivalent to base functions [base::stop()],
#' [base::warning()] and [base::message()], but make it easy to supply
#' condition metadata:
#'
#' * Supply `.subclass` to create a classed condition. Typed
#' conditions can be captured or handled selectively, allowing for
#' finer-grained error handling.
#'
#' * Supply metadata with named `...` arguments. This data will be
#' stored in the condition object and can be examined by handlers.
#'
#' `interrupt()` allows R code to simulate a user interrupt of the
#' kind that is signalled with `Ctrl-C`. It is currently not possible
#' to create custom interrupt condition objects.
#'
#'
#' @section Backtrace:
#'
#' Unlike `stop()` and `warning()`, these functions don't include call
#' information by default. This saves you from typing `call. = FALSE`
#' and produces cleaner error messages.
#'
#' A backtrace is always saved into error objects. You can print a
#' simplified backtrace of the last error by calling [last_error()]
#' and a full backtrace with `summary(last_error())`.
#'
#' You can also display a backtrace with the error message by setting
#' the option `rlang_backtrace_on_error`. It supports the following
#' values:
#'
#' * `"reminder"`: Invite users to call `rlang::last_error()` to see a
#' backtrace.
#' * `"branch"`: Display a simplified backtrace.
#' * `"collapse"`: Display a collapsed backtrace tree.
#' * `"full"`: Display a full backtrace tree.
#' * `"none"`: Display nothing.
#'
#' @section Mufflable conditions:
#'
#' Signalling a condition with `inform()` or `warn()` causes a message
#' to be displayed in the console. These messages can be muffled with
#' [base::suppressMessages()] or [base::suppressWarnings()].
#'
#' On recent R versions (>= R 3.5.0), interrupts are typically
#' signalled with a `"resume"` restart. This is however not
#' guaranteed.
#'
#'
#' @section Lifecycle:
#'
#' These functions were changed in rlang 0.3.0 to take condition
#' metadata with `...`. Consequently:
#'
#' * All arguments were renamed to be prefixed with a dot, except for
#' `type` which was renamed to `.subclass`.
#' * `.call` (previously `call`) can no longer be passed positionally.
#'
#' @inheritParams cnd
#' @param message The message to display.
#' @param .subclass Subclass of the condition. This allows your users
#' to selectively handle the conditions signalled by your functions.
#' @param ... Additional data to be stored in the condition object.
#' @param call Defunct as of rlang 0.4.0. Storing the full
#' backtrace is now preferred to storing a simple call.
#' @param msg,type These arguments were renamed to `message` and
#' `.subclass` and are defunct as of rlang 0.4.0.
#'
#' @seealso [with_abort()] to convert all errors to rlang errors.
#' @examples
#' # These examples are guarded to avoid throwing errors
#' if (FALSE) {
#'
#' # Signal an error with a message just like stop():
#' abort("Something bad happened")
#'
#' # Give a class to the error:
#' abort("Something bad happened", "somepkg_bad_error")
#'
#' # This will allow your users to handle the error selectively
#' tryCatch(
#' somepkg_function(),
#' somepkg_bad_error = function(err) {
#' warn(err$message) # Demote the error to a warning
#' NA # Return an alternative value
#' }
#' )
#'
#' # You can also specify metadata that will be stored in the condition:
#' abort("Something bad happened", "somepkg_bad_error", data = 1:10)
#'
#' # This data can then be consulted by user handlers:
#' tryCatch(
#' somepkg_function(),
#' somepkg_bad_error = function(err) {
#' # Compute an alternative return value with the data:
#' recover_error(err$data)
#' }
#' )
#'
#' # If you call low-level APIs it is good practice to catch technical
#' # errors and rethrow them with a more meaningful message. Pass on
#' # the caught error as `parent` to get a nice decomposition of
#' # errors and backtraces:
#' file <- "http://foo.bar/baz"
#' tryCatch(
#' download(file),
#' error = function(err) {
#' msg <- sprintf("Can't download `%s`", file)
#' abort(msg, parent = err)
#' })
#'
#' # Unhandled errors are saved automatically by `abort()` and can be
#' # retrieved with `last_error()`. The error prints with a simplified
#' # backtrace:
#' abort("Saved error?")
#' last_error()
#'
#' # Use `summary()` to print the full backtrace and the condition fields:
#' summary(last_error())
#'
#' }
#' @export
abort <- function(message, .subclass = NULL,
                  ...,
                  trace = NULL,
                  call = NULL,
                  parent = NULL,
                  msg, type) {
  # Throw a classed rlang error carrying a backtrace. `msg`, `type` and
  # `call` are defunct arguments kept only to signal informative errors.
  validate_signal_args(msg, type, call)
  if (is_null(trace) && is_null(peek_option("rlang__disable_trace_capture"))) {
    # Prevents infloops when rlang throws during trace capture
    scoped_options("rlang__disable_trace_capture" = TRUE)
    trace <- trace_back()
    if (is_null(parent)) {
      context <- trace_length(trace)
    } else {
      # Rethrow with a parent: trim the catching frames (tryCatch etc.)
      context <- find_capture_context()
    }
    trace <- trace_trim_context(trace, context)
  }
  # Only collapse lengthy vectors because `paste0()` removes the class
  # of glue strings
  if (length(message) > 1) {
    message <- paste0(message, collapse = "\n")
  }
  cnd <- error_cnd(.subclass,
    ...,
    message = message,
    parent = parent,
    trace = trace
  )
  stop(cnd)
}
trace_trim_context <- function(trace, frame = caller_env()) {
  # Remove from `trace` every frame at or below the given context.
  # `frame` may be a frame environment or a numeric index into the trace.
  if (is_environment(frame)) {
    idx <- detect_index(trace$envs, identical, env_label(frame))
  } else if (is_scalar_integerish(frame)) {
    idx <- frame
  } else {
    abort("`frame` must be a frame environment or index")
  }
  # seq2() yields an empty sequence when idx exceeds the trace length
  trim <- seq2(idx, trace_length(trace))
  if (length(trim) > 0) {
    trace <- trace_subset(trace, -trim)
  }
  trace
}
# FIXME: Find more robust strategy of stripping catching context
find_capture_context <- function(n = 3L) {
  # Locate the frame of the context that caught the parent error (a
  # tryCatch() or with_handlers() call), so abort() can trim the
  # catching frames from the backtrace. `n = 3L` accounts for the
  # abort() -> find_capture_context() call depth.
  sys_parent <- sys.parent(n)
  thrower_frame <- sys.frame(sys_parent)
  call <- sys.call(sys_parent)
  frame <- sys.frame(sys_parent)
  # Not inside base::tryCatchOne(): the thrower frame is the context
  if (!is_call(call, "tryCatchOne") || !env_inherits(frame, ns_env("base"))) {
    return(thrower_frame)
  }
  # Walk up the parent chain to the enclosing tryCatch() call
  sys_parents <- sys.parents()
  while (!is_call(call, "tryCatch")) {
    sys_parent <- sys_parents[sys_parent]
    call <- sys.call(sys_parent)
  }
  # If tryCatch() was itself called by with_handlers(), use that frame
  next_parent <- sys_parents[sys_parent]
  call <- sys.call(next_parent)
  if (is_call(call, "with_handlers")) {
    sys_parent <- next_parent
  }
  sys.frame(sys_parent)
}
#' @rdname abort
#' @export
warn <- function(message, .subclass = NULL, ..., call = NULL, msg, type) {
  # Signal a classed warning. `msg`, `type` and `call` are defunct and
  # only checked so they fail with an informative error.
  validate_signal_args(msg, type, call)
  cnd <- warning_cnd(
    .subclass,
    ...,
    message = paste0(message, collapse = "\n")
  )
  warning(cnd)
}
#' @rdname abort
#' @export
inform <- function(message, .subclass = NULL, ..., call = NULL, msg, type) {
  # Signal a classed message. `msg`, `type` and `call` are defunct and
  # only checked so they fail with an informative error.
  validate_signal_args(msg, type, call)
  # Collapse multi-line input, then add the conventional trailing newline
  text <- paste0(paste0(message, collapse = "\n"), "\n")
  cnd <- message_cnd(.subclass, ..., message = text)
  message(cnd)
}
#' @rdname abort
#' @export
signal <- function(message, .subclass, ...) {
  # Signal a bare classed condition. This has no visible effect unless a
  # handler for `.subclass` is established on the stack.
  condition <- cnd(
    .subclass,
    ...,
    message = paste0(message, collapse = "\n")
  )
  cnd_signal(condition)
}
validate_signal_args <- function(msg, type, call) {
  # Reject arguments retired in rlang 0.3.0/0.4.0 with informative
  # errors. The check order (msg, then type, then call) determines which
  # error fires first when several are supplied — preserve it.
  if (!missing(msg)) {
    stop_defunct("`msg` has been renamed to `message` and is deprecated as of rlang 0.3.0")
  }
  if (!missing(type)) {
    stop_defunct("`type` has been renamed to `.subclass` and is deprecated as of rlang 0.3.0")
  }
  if (!is_null(call)) {
    stop_defunct("`call` is deprecated as of rlang 0.3.0")
  }
}
#' @rdname abort
#' @export
interrupt <- function() {
  # Simulate a user interrupt (as with Ctrl-C) from the C side
  .Call(rlang_interrupt)
}
#' Muffle a condition
#'
#' Unlike [exiting()] handlers, [calling()] handlers must be explicit
#' that they have handled a condition to stop it from propagating to
#' other handlers. Use `cnd_muffle()` within a calling handler (or as
#' a calling handler, see examples) to prevent any other handlers from
#' being called for that condition.
#'
#'
#' @section Mufflable conditions:
#'
#' Most conditions signalled by base R are muffable, although the name
#' of the restart varies. cnd_muffle() will automatically call the
#' correct restart for you. It is compatible with the following
#' conditions:
#'
#' * `warning` and `message` conditions. In this case `cnd_muffle()`
#' is equivalent to [base::suppressMessages()] and
#' [base::suppressWarnings()].
#'
#' * Bare conditions signalled with `signal()` or [cnd_signal()]. Note
#' that conditions signalled with [base::signalCondition()] are not
#' mufflable.
#'
#' * Interrupts are sometimes signalled with a `resume` restart on
#' recent R versions. When this is the case, you can muffle the
#' interrupt with `cnd_muffle()`. Check if a restart is available
#' with `base::findRestart("resume")`.
#'
#' If you call `cnd_muffle()` with a condition that is not mufflable
#' you will cause a new error to be signalled.
#'
#' * Errors are not mufflable since they are signalled in critical
#' situations where execution cannot continue safely.
#'
#' * Conditions captured with [base::tryCatch()], [with_handlers()] or
#' [catch_cnd()] are no longer mufflable. Muffling restarts _must_
#' be called from a [calling] handler.
#'
#' @param cnd A condition to muffle.
#'
#' @export
#' @examples
#' fn <- function() {
#' inform("Beware!", "my_particular_msg")
#' inform("On your guard!")
#' "foobar"
#' }
#'
#' # Let's install a muffling handler for the condition thrown by `fn()`.
#' # This will suppress all `my_particular_wng` warnings but let other
#' # types of warnings go through:
#' with_handlers(fn(),
#' my_particular_msg = calling(function(cnd) {
#' inform("Dealt with this particular message")
#' cnd_muffle(cnd)
#' })
#' )
#'
#' # Note how execution of `fn()` continued normally after dealing
#' # with that particular message.
#'
#' # cnd_muffle() can also be passed to with_handlers() as a calling
#' # handler:
#' with_handlers(fn(),
#' my_particular_msg = calling(cnd_muffle)
#' )
cnd_muffle <- function(cnd) {
  # Resolve the muffling restart name from the condition type. Bare
  # conditions signalled by rlang use the "rlang_muffle" restart.
  restart <- switch(cnd_type(cnd),
    message = "muffleMessage",
    warning = "muffleWarning",
    interrupt = "resume",
    "rlang_muffle"
  )
  # invokeRestart() performs a non-local jump, so abort() below only
  # runs when no muffling restart is established (e.g. for errors or
  # already-caught conditions).
  if (!is_null(findRestart(restart))) {
    invokeRestart(restart)
  }
  abort("Can't find a muffling restart")
}
#' Catch a condition
#'
#' This is a small wrapper around `tryCatch()` that captures any
#' condition signalled while evaluating its argument. It is useful for
#' situations where you expect a specific condition to be signalled,
#' for debugging, and for unit testing.
#'
#' @param expr Expression to be evaluated with a catching condition
#' handler.
#' @param classes A character vector of condition classes to catch. By
#' default, catches all conditions.
#' @return A condition if any was signalled, `NULL` otherwise.
#' @export
#' @examples
#' catch_cnd(10)
#' catch_cnd(abort("an error"))
#' catch_cnd(cnd_signal("my_condition", .msg = "a condition"))
catch_cnd <- function(expr, classes = "condition") {
  # Return the first condition signalled while evaluating `expr`, or
  # NULL if evaluation completes without signalling one.
  stopifnot(is_character(classes))
  # One identity handler per class: the caught condition is returned as-is
  handlers <- rep_named(classes, list(identity))
  # Splice handlers into tryCatch(); fall through to return(NULL) when
  # no condition is signalled
  eval_bare(rlang::expr(
    tryCatch(!!!handlers, {
      force(expr)
      return(NULL)
    })
  ))
}
#' @export
print.rlang_error <- function(x,
                              ...,
                              child = NULL,
                              simplify = c("branch", "collapse", "none"),
                              fields = FALSE) {
  # Prints an rlang error and, recursively, its chain of parent errors.
  # `child` is the error one level down the chain (NULL for the
  # outermost call); it is used below to trim backtrace frames shared
  # with the child so each parent only shows its own portion.
  class <- class(x)[[1]]
  if (class != "error") {
    class <- paste0("error/", class)
  }
  # Parent errors get a distinct "<parent: ...>" header
  if (is_null(child)) {
    header <- bold(sprintf("<%s>", class))
  } else {
    header <- bold(sprintf("<parent: %s>", class))
  }
  if (is_string(x$message) && nzchar(x$message)) {
    message <- x$message
  } else {
    message <- NULL
  }
  cat_line(
    header,
    message
  )
  trace <- x$trace
  simplify <- arg_match(simplify, c("collapse", "branch", "none"))
  if (!is_null(trace)) {
    cat_line(bold("Backtrace:"))
    if (!is_null(child)) {
      # Trim common portions of backtrace
      child_trace <- child$trace
      common <- map_lgl(trace$envs, `%in%`, child_trace$envs)
      trace <- trace_subset(trace, which(!common))
      # Trim catching context if any
      calls <- trace$calls
      if (length(calls) && is_call(calls[[1]], c("tryCatch", "with_handlers", "catch_cnd"))) {
        trace <- trace_subset_across(trace, -1, 1)
      }
    }
    trace_lines <- format(trace, ..., simplify = simplify)
    cat_line(trace_lines)
  }
  # Recurse into the parent error, passing `x` as the child
  if (!is_null(x$parent)) {
    print.rlang_error(x$parent, ..., child = x, simplify = simplify, fields = fields)
  }
  # Recommend printing the full backtrace. Only do it after having
  # printed all parent errors first.
  if (simplify == "branch" && is_null(x$parent) && !is_null(trace)) {
    cat_line(silver("Call `rlang::last_trace()` to see the full backtrace"))
  }
  invisible(x)
}
# Last error to be returned in last_error().
#
# A package-level environment acts as a mutable singleton store:
# assigning into it does not copy, so handlers can record the most
# recent error from anywhere. `emptyenv()` as parent guarantees that
# lookups in it never fall through to another scope.
last_error_env <- new.env(parent = emptyenv())
last_error_env$cnd <- NULL
#' @export
summary.rlang_error <- function(object, ...) {
  # Summary = the fully expanded print: whole backtrace tree
  # (simplify = "none") plus condition fields.
  print(object, simplify = "none", fields = TRUE)
}
#' @export
conditionMessage.rlang_error <- function(c) {
  # Build the message displayed when an rlang error reaches top level:
  # the error's own message, the messages of chained parent conditions,
  # and (depending on `rlang_backtrace_on_error`) a backtrace.
  #
  # Side effect: records the condition so last_error() can retrieve it.
  last_error_env$cnd <- c
  message <- c$message
  parents <- chr()
  # Walk the chain of parent conditions, collecting their messages.
  while (is_condition(c$parent)) {
    c <- c$parent
    parents <- chr(parents, c$message)
  }
  if (length(parents)) {
    parents <- cli_branch(parents)
    message <- paste_line(
      message,
      "Parents:",
      parents
    )
  }
  # NOTE(review): after the loop `c` is the innermost parent, so the
  # backtrace shown is that parent's trace, not the outermost error's --
  # confirm this is intended.
  backtrace <- format_onerror_backtrace(c$trace)
  paste_line(message, backtrace)
}
#' @export
as.character.rlang_error <- function(x, ...) {
  # Plain-string conversion returns only the message.
  # Don't generate backtrace or reminder in programmatic uses. Fixes
  # backtraces in knitr.
  x$message
}
#' Display backtrace on error
#'
#' @description
#'
#' Errors thrown with [abort()] automatically save a backtrace that
#' can be inspected by calling [last_error()]. Optionally, you can
#' also display the backtrace alongside the error message by setting
#' the option `rlang_backtrace_on_error` to one of the following
#' values:
#'
#' * `"reminder"`: Display a reminder that the backtrace can be
#' inspected by calling [rlang::last_error()].
#' * `"branch"`: Display a simplified backtrace.
#' * `"collapse"`: Display a collapsed backtrace tree.
#' * `"full"`: Display the full backtrace tree.
#'
#'
#' @section Promote base errors to rlang errors:
#'
#' Call `options(error = rlang::entrace)` to instrument base
#' errors with rlang features. This handler does two things:
#'
#' * It saves the base error as an rlang object. This allows you to
#' call [last_error()] to print the backtrace or inspect its data.
#'
#' * It prints the backtrace for the current error according to the
#' [`rlang_backtrace_on_error`] option.
#'
#' @name rlang_backtrace_on_error
#' @aliases add_backtrace
#'
#' @examples
#' # Display a simplified backtrace on error for both base and rlang
#' # errors:
#'
#' # options(
#' # rlang_backtrace_on_error = "branch",
#' #   error = rlang::entrace
#' # )
#' # stop("foo")
NULL
format_onerror_backtrace <- function(trace) {
  # Format `trace` for display at error time, honouring the user's
  # `rlang_backtrace_on_error` preference. Returns NULL when nothing
  # should be shown.
  pref <- show_trace_p()
  valid <- c("none", "reminder", "branch", "collapse", "full")
  # Invalid preference: reset the option, warn, show nothing.
  if (!is_string(pref) || !pref %in% valid) {
    options(rlang_backtrace_on_error = NULL)
    warn("Invalid `rlang_backtrace_on_error` option (resetting to `NULL`)")
    return(NULL)
  }
  if (pref == "none") {
    return(NULL)
  }
  # "branch" backtraces are truncated to 10 frames; others are not.
  max_frames <- if (pref == "branch") 10L else NULL
  # "full" means no simplification; "reminder" formats as a branch so the
  # size check below reflects what last_error() would show.
  simplify <- switch(pref,
    full = "none",
    reminder = "branch",
    pref
  )
  backtrace_lines <- format(trace, simplify = simplify, max_frames = max_frames)
  # Backtraces of size 0 and 1 are uninteresting.
  if (length(backtrace_lines) <= 1L) {
    return(NULL)
  }
  if (pref == "reminder") {
    # Only nudge interactive users towards last_error().
    if (!is_interactive()) {
      return(NULL)
    }
    return(silver("Call `rlang::last_error()` to see a backtrace"))
  }
  paste_line(
    "Backtrace:",
    backtrace_lines
  )
}
show_trace_p <- function() {
  # Resolve the effective `rlang_backtrace_on_error` preference:
  # deprecated option name first, then the current option, then a
  # default derived from interactivity.
  old_opt <- peek_option("rlang__backtrace_on_error")
  if (!is_null(old_opt)) {
    warn_deprecated(paste_line(
      "`rlang__backtrace_on_error` is no longer experimental.",
      "It has been renamed to `rlang_backtrace_on_error`. Please update your RProfile."
    ))
    return(old_opt)
  }
  opt <- peek_option("rlang_backtrace_on_error")
  if (!is_null(opt)) {
    return(opt)
  }
  # FIXME: parameterise `is_interactive()`?
  # Mask knitr / RStudio-notebook markers so those evaluation contexts
  # are treated as non-interactive by is_interactive().
  interactive <- with_options(
    knitr.in.progress = NULL,
    rstudio.notebook.executing = NULL,
    is_interactive()
  )
  # Interactive sessions get a short reminder; batch runs the full tree.
  if (interactive) {
    "reminder"
  } else {
    "full"
  }
}
#' Add backtrace from error handler
#'
#' @description
#'
#' `entrace()` interrupts an error throw to add an [rlang
#' backtrace][trace_back()] to the error. The error throw is
#' immediately resumed. `cnd_entrace()` adds a backtrace to a
#' condition object, without any other effect. Both functions should
#' be called directly from an error handler.
#'
#' Set the `error` global option to `quote(rlang::entrace())` to
#' transform base errors to rlang errors. These enriched errors
#' include a backtrace. The RProfile is a good place to set the
#' handler.
#'
#' `entrace()` also works as a [calling][calling] handler, though it
#' is often more practical to use the higher-level function
#' [with_abort()].
#'
#' @inheritParams trace_back
#' @param cnd When `entrace()` is used as a calling handler, `cnd` is
#' the condition to handle.
#' @param ... Unused. These dots are for future extensions.
#'
#' @seealso [with_abort()] to promote conditions to rlang errors.
#' [cnd_entrace()] to manually add a backtrace to a condition.
#' @examples
#' if (FALSE) { # Not run
#'
#' # Set the error handler in your RProfile like this:
#' if (requireNamespace("rlang", quietly = TRUE)) {
#' options(error = rlang::entrace)
#' }
#'
#' }
#' @export
entrace <- function(cnd, ..., top = NULL, bottom = NULL) {
  # Attach an rlang backtrace to the current error. Works both as an
  # `options(error = )` handler (no `cnd` supplied) and as a calling
  # handler (with `cnd`).
  check_dots_empty(...)
  # rlang errors already carry a backtrace; nothing to do.
  if (!missing(cnd) && inherits(cnd, "rlang_error")) {
    return()
  }
  if (is_null(bottom)) {
    # Locate the frame that actually signalled the condition so the
    # backtrace ends there; `sys.nframe() - 1` is our caller's frame.
    nframe <- sys.nframe() - 1
    info <- signal_context_info(nframe)
    bottom <- sys.frame(info[[2]])
  }
  trace <- trace_back(top = top, bottom = bottom)
  if (missing(cnd)) {
    # `options(error = )` case: record and display the trace.
    entrace_handle_top(trace)
  } else {
    # Calling-handler case: rethrow as an rlang error with the trace.
    abort(cnd$message %||% "", error = cnd, trace = trace)
  }
}
#' @rdname entrace
#' @export
cnd_entrace <- function(cnd, ..., top = NULL, bottom = NULL) {
  # Attach a backtrace to `cnd` without rethrowing it; returns the
  # (possibly modified) condition.
  check_dots_empty(...)
  # An existing backtrace is left untouched.
  if (!is_null(cnd$trace)) {
    return(cnd)
  }
  if (is_null(bottom)) {
    # `sys.parent() - 1` points below our caller, i.e. at the context
    # where the condition was signalled.
    nframe <- sys.parent() - 1
    info <- signal_context_info(nframe)
    bottom <- sys.frame(info[[2]])
  }
  cnd$trace <- trace_back(top = top, bottom = bottom)
  cnd
}
#' Return information about signalling context
#'
#' @param nframe The depth of the frame to inspect. In a condition
#' handler, this would typically be `sys.nframe() - 1L`.
#'
#' @return A named list of two elements `type` and `depth`. The depth
#' is the call frame number of the signalling context. The type is
#' one of:
#'
#' * `"unknown"`
#' * `"stop_message"` for errors thrown with `base::stop("message")"
#' * `"stop_condition"` for errors thrown with `base::stop(cnd_object)`
#' * `"stop_native"` for errors thrown from C
#' * `"stop_rlang"` for errors thrown with `rlang::abort()`
#' * `"warning_message"` for warnings signalled with `base::warning("message")"
#' * `"warning_condition"` for warnings signalled with `base::warning(cnd_object)`
#' * `"warning_native"` for warnings signalled from C
#' * `"warning_promoted"` for warnings promoted to errors with `getOption("warn")`
#' * `"warning_rlang"` for warnings signalled with `rlang::warn()`
#' * `"message"` for messages signalled with `base::message()`
#' * `"message_rlang"` for messages signalled with `rlang::inform()`
#' * `"condition"` for conditions signalled with `base::signalCondition()`
#'
#' @keywords internal
#' @noRd
signal_context_info <- function(nframe) {
  # Classify the signalling context at frame `nframe` by comparing
  # function bodies on the call stack against known base R entry points,
  # and return the frame number of the original signalling call.
  # The numeric offsets encode the fixed depth of each known call chain.
  first <- sys_body(nframe)
  if (is_same_body(first, body(.handleSimpleError))) {
    if (is_same_body(sys_body(nframe - 1), body(stop))) {
      # stop("message")
      return(list(type = "stop_message", depth = nframe - 2))
    } else if (is_same_body(sys_body(nframe - 4), body(.signalSimpleWarning))) {
      # Warning promoted to an error via getOption("warn")
      return(list(type = "warning_promoted", depth = nframe - 6))
    } else {
      # Error thrown from C code
      return(list(type = "stop_native", depth = nframe - 1))
    }
  }
  if (is_same_body(first, body(stop))) {
    if (is_same_body(sys_body(nframe - 1), body(abort))) {
      # rlang::abort()
      return(list(type = "stop_rlang", depth = nframe - 2))
    } else {
      # stop(cnd_object)
      return(list(type = "stop_condition", depth = nframe - 1))
    }
  }
  if (is_same_body(first, body(signalCondition))) {
    if (from_withrestarts(nframe - 1) && is_same_body(sys_body(nframe - 4), body(message))) {
      if (is_same_body(sys_body(nframe - 5), body(inform))) {
        # rlang::inform()
        return(list(type = "message_rlang", depth = nframe - 6))
      } else {
        # base::message()
        return(list(type = "message", depth = nframe - 5))
      }
    } else {
      # base::signalCondition()
      return(list(type = "condition", depth = nframe - 1))
    }
  }
  if (from_withrestarts(nframe)) {
    withrestarts_caller <- sys_body(nframe - 3)
    if (is_same_body(withrestarts_caller, body(.signalSimpleWarning))) {
      if (is_same_body(sys_body(nframe - 4), body(warning))) {
        # warning("message")
        return(list(type = "warning_message", depth = nframe - 5))
      } else {
        # Warning signalled from C code
        return(list(type = "warning_native", depth = nframe - 4))
      }
    } else if (is_same_body(withrestarts_caller, body(warning))) {
      if (is_same_body(sys_body(nframe - 4), body(warn))) {
        # rlang::warn()
        return(list(type = "warning_rlang", depth = nframe - 5))
      } else {
        # warning(cnd_object)
        return(list(type = "warning_condition", depth = nframe - 4))
      }
    }
  }
  # Unrecognized signalling pattern: report the inspected frame itself.
  list(type = "unknown", depth = nframe)
}
from_withrestarts <- function(nframe) {
  # TRUE when frame `nframe` sits inside a withRestarts() invocation:
  # the frame runs doWithOneRestart() and withRestarts() is two frames up.
  is_call(sys.call(nframe), "doWithOneRestart") &&
    is_same_body(sys_body(nframe - 2), body(withRestarts))
}
sys_body <- function(n) {
  # Body of the function running in call-stack frame `n`.
  frame_fn <- sys.function(n)
  body(frame_fn)
}
entrace_handle_top <- function(trace) {
  # Handle an error at top level when entrace() is installed as the
  # `error` option: record a faux rlang error carrying `trace` in
  # last_error_env and display the backtrace per user preference.
  # Happens with ctrl-c at top-level
  if (!trace_length(trace)) {
    return()
  }
  # Inspect the stop() call two frames up that triggered this handler.
  stop_call <- sys.call(-2)
  stop_frame <- sys.frame(-2)
  cnd <- stop_frame$cond
  # False for errors thrown from the C side
  from_stop <- is_call(stop_call, "stop", ns = c("", "base"))
  # No need to do anything for rlang errors
  if (from_stop && inherits(cnd, "rlang_error")) {
    return(NULL)
  }
  if (from_stop) {
    if (is_null(cnd)) {
      # stop() with message parts: rebuild the message the same way
      # stop() itself would, evaluating in stop()'s frame.
      msg_call <- quote(.makeMessage(..., domain = domain))
      msg <- eval_bare(msg_call, stop_frame)
    } else {
      msg <- cnd$message
    }
  } else {
    msg <- geterrmessage()
  }
  # Save a fake rlang error containing the backtrace
  err <- error_cnd(message = msg, error = cnd, trace = trace, parent = cnd)
  last_error_env$cnd <- err
  # Print backtrace for current error
  backtrace_lines <- format_onerror_backtrace(trace)
  if (length(backtrace_lines)) {
    cat_line(backtrace_lines)
  }
  NULL
}
add_backtrace <- function() {
  # Deprecated alias of entrace(), kept for backward compatibility.
  # Warnings don't go through when error is being handled
  msg <- "Warning: `add_backtrace()` is now exported as `entrace()` as of rlang 0.3.1"
  cat_line(msg, file = stderr())
  # Skip this wrapper's own frame when computing the backtrace.
  entrace(bottom = sys.frame(-1))
}
#' Promote all errors to rlang errors
#'
#' @description
#'
#' `with_abort()` promotes conditions as if they were thrown with
#' [abort()]. These errors embed a [backtrace][trace_back]. They are
#' particularly suitable to be set as *parent errors* (see `parent`
#' argument of [abort()]).
#'
#' @param expr An expression run in a context where errors are
#' promoted to rlang errors.
#' @param classes Character vector of condition classes that should be
#' promoted to rlang errors.
#'
#' @details
#'
#' `with_abort()` installs a [calling handler][calling] for errors and
#' rethrows non-rlang errors with [abort()]. However, error handlers
#' installed *within* `with_abort()` have priority. For this reason,
#' you should use [tryCatch()] and [exiting] handlers outside
#' `with_abort()` rather than inside.
#'
#' @examples
#' # For cleaner backtraces:
#' options(rlang_trace_top_env = current_env())
#'
#' # with_abort() automatically casts simple errors thrown by stop()
#' # to rlang errors:
#' fn <- function() stop("Base error")
#' try(with_abort(fn()))
#' last_error()
#'
#' # with_abort() is handy for rethrowing low level errors. The
#' # backtraces are then segmented between the low level and high
#' # level contexts.
#' low_level1 <- function() low_level2()
#' low_level2 <- function() stop("Low level error")
#'
#' high_level <- function() {
#' with_handlers(
#' with_abort(low_level1()),
#' error = ~ abort("High level error", parent = .x)
#' )
#' }
#'
#' try(high_level())
#' last_error()
#' summary(last_error())
#'
#' # Reset to default
#' options(rlang_trace_top_env = NULL)
#' @export
with_abort <- function(expr, classes = "error") {
  # Evaluate `expr` with a calling handler (entrace) installed for each
  # condition class in `classes`, so matching conditions are rethrown
  # as rlang errors carrying a backtrace.
  handlers <- rep_named(classes, list(entrace))
  handle_call <- rlang::expr(withCallingHandlers(expr, !!!handlers))
  # NOTE(review): evaluated via the C-level evaluator rather than
  # eval_bare(), presumably to keep extra R frames out of the captured
  # backtraces -- confirm against rlang internals.
  .Call(rlang_eval, handle_call, current_env())
}
|
plot.ordPen <- function(x, whichlam = NULL, whichx = NULL, type = NULL,
  xlab = NULL, ylab = NULL, main = NULL, xlim = NULL, ylim = NULL, col = NULL, ...)
{
  # Plot the fitted dummy coefficients of an ordPen fit: one panel per
  # covariate, one curve per smoothing parameter (lambda).
  #
  # x        : fitted ordPen object (uses x$xlevels and x$coef).
  # whichlam : indices of the lambda values (columns of x$coef) to plot.
  # whichx   : indices of the covariates to plot.
  # Remaining arguments are standard graphical parameters forwarded to
  # plot()/lines().
  px <- length(x$xlevels)
  # Map each coefficient row to its covariate: covariate j owns
  # x$xlevels[j] consecutive dummy coefficients.
  xgrp <- rep(seq_len(px), x$xlevels)
  tol <- .Machine$double.eps^0.5
  # Validate lambda selection. Scalar `||` (not vectorized `|`) so that
  # max()/round() are only evaluated when whichlam is actually numeric.
  if (is.null(whichlam))
    whichlam <- seq_len(ncol(x$coef))
  else if (!is.numeric(whichlam) || max(whichlam) > ncol(x$coef) ||
    any(abs(whichlam - round(whichlam)) > tol))
    stop("incorrect whichlam")
  # Drop the intercept row (if present) so rows line up with xgrp.
  if (rownames(x$coef)[1] == "intercept")
    xcoefs <- x$coef[2:(length(xgrp) + 1), whichlam, drop = FALSE]
  else
    xcoefs <- x$coef[seq_along(xgrp), whichlam, drop = FALSE]
  if (is.null(whichx))
    whichx <- seq_len(px)
  else if (!is.numeric(whichx) || max(whichx) > px ||
    any(abs(whichx - round(whichx)) > tol))
    stop("incorrect whichx")
  if (is.null(xlab))
    xlab <- "level"
  if (is.null(ylab))
    ylab <- "dummy coefficient"
  if (is.null(type))
    type <- "b"
  noylims <- is.null(ylim)
  nomain <- is.null(main)
  nocol <- is.null(col)
  multcol <- length(col) > 1
  # Default palette: one grey level per lambda, dark to light.
  # (`length.out` spelled out; the original relied on partial matching.)
  if (nocol)
    cols <- grey(seq(0, 0.7, length.out = length(whichlam)))
  else if (multcol)
  {
    if (length(col) != length(whichlam))
      stop("incorrect length(col)")
    else
      cols <- col
  }
  # Ask before advancing the device when several covariates are plotted
  # (only has an effect in interactive sessions).
  devAskNewPage(length(whichx) > 1)
  for (wx in whichx)
  {
    xlam <- xcoefs[xgrp == wx, , drop = FALSE]
    # Per-covariate y-range unless the caller fixed ylim.
    if (noylims)
      ylim <- c(min(xlam), max(xlam))
    if (nomain)
    {
      # Derive the panel title from the first row name by stripping the
      # trailing ":<level>" suffix (assumes single-character levels).
      xname <- rownames(xlam)[1]
      main <- substr(xname, 1, nchar(xname) - 2)
    }
    if (nocol || multcol)
      col <- cols[1]
    plot(seq_len(nrow(xlam)), xlam[, 1], xlim = xlim, ylim = ylim, main = main,
      xlab = xlab, ylab = ylab, type = type, col = col, ...)
    # Overlay the remaining lambda curves.
    if (ncol(xlam) > 1)
    {
      for (wl in 2:ncol(xlam))
      {
        if (nocol || multcol)
          col <- cols[wl]
        lines(seq_len(nrow(xlam)), xlam[, wl], type = type, col = col, ...)
      }
    }
  }
}
|
/ordPens/R/plot.ordPen.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 2,201
|
r
|
plot.ordPen <- function(x, whichlam = NULL, whichx = NULL, type = NULL,
  xlab = NULL, ylab = NULL, main = NULL, xlim = NULL, ylim = NULL, col = NULL, ...)
{
  # Plot the fitted dummy coefficients of an ordPen fit: one panel per
  # covariate, one curve per smoothing parameter (lambda).
  #
  # x        : fitted ordPen object (uses x$xlevels and x$coef).
  # whichlam : indices of the lambda values (columns of x$coef) to plot.
  # whichx   : indices of the covariates to plot.
  # Remaining arguments are standard graphical parameters forwarded to
  # plot()/lines().
  px <- length(x$xlevels)
  # Map each coefficient row to its covariate: covariate j owns
  # x$xlevels[j] consecutive dummy coefficients.
  xgrp <- rep(seq_len(px), x$xlevels)
  tol <- .Machine$double.eps^0.5
  # Validate lambda selection. Scalar `||` (not vectorized `|`) so that
  # max()/round() are only evaluated when whichlam is actually numeric.
  if (is.null(whichlam))
    whichlam <- seq_len(ncol(x$coef))
  else if (!is.numeric(whichlam) || max(whichlam) > ncol(x$coef) ||
    any(abs(whichlam - round(whichlam)) > tol))
    stop("incorrect whichlam")
  # Drop the intercept row (if present) so rows line up with xgrp.
  if (rownames(x$coef)[1] == "intercept")
    xcoefs <- x$coef[2:(length(xgrp) + 1), whichlam, drop = FALSE]
  else
    xcoefs <- x$coef[seq_along(xgrp), whichlam, drop = FALSE]
  if (is.null(whichx))
    whichx <- seq_len(px)
  else if (!is.numeric(whichx) || max(whichx) > px ||
    any(abs(whichx - round(whichx)) > tol))
    stop("incorrect whichx")
  if (is.null(xlab))
    xlab <- "level"
  if (is.null(ylab))
    ylab <- "dummy coefficient"
  if (is.null(type))
    type <- "b"
  noylims <- is.null(ylim)
  nomain <- is.null(main)
  nocol <- is.null(col)
  multcol <- length(col) > 1
  # Default palette: one grey level per lambda, dark to light.
  # (`length.out` spelled out; the original relied on partial matching.)
  if (nocol)
    cols <- grey(seq(0, 0.7, length.out = length(whichlam)))
  else if (multcol)
  {
    if (length(col) != length(whichlam))
      stop("incorrect length(col)")
    else
      cols <- col
  }
  # Ask before advancing the device when several covariates are plotted
  # (only has an effect in interactive sessions).
  devAskNewPage(length(whichx) > 1)
  for (wx in whichx)
  {
    xlam <- xcoefs[xgrp == wx, , drop = FALSE]
    # Per-covariate y-range unless the caller fixed ylim.
    if (noylims)
      ylim <- c(min(xlam), max(xlam))
    if (nomain)
    {
      # Derive the panel title from the first row name by stripping the
      # trailing ":<level>" suffix (assumes single-character levels).
      xname <- rownames(xlam)[1]
      main <- substr(xname, 1, nchar(xname) - 2)
    }
    if (nocol || multcol)
      col <- cols[1]
    plot(seq_len(nrow(xlam)), xlam[, 1], xlim = xlim, ylim = ylim, main = main,
      xlab = xlab, ylab = ylab, type = type, col = col, ...)
    # Overlay the remaining lambda curves.
    if (ncol(xlam) > 1)
    {
      for (wl in 2:ncol(xlam))
      {
        if (nocol || multcol)
          col <- cols[wl]
        lines(seq_len(nrow(xlam)), xlam[, wl], type = type, col = col, ...)
      }
    }
  }
}
|
# Compare single-cell RNA-FISH counts against bulk RNA-seq RPKM for CRL cells
# and write the scatter plot to the supplementary-figure PDF.
library(plyr)
library(ggplot2)
# Hard-coded user paths; assumes the project's Dropbox layout.
fishData <- read.delim('~/Dropbox/densitypaper/ExtractedData/CRL.txt',header=T,stringsAsFactors=F)
seqData <- read.delim('~/Dropbox/NucSeqData/htseq_CRL/CRL_rpkm.txt',header=T,stringsAsFactors=F)
# Average the exonic RPKM of the two replicates per gene.
# NOTE(review): grouping by total1_ex_rpkm in addition to gene looks
# unintended (harmless only if RPKM is constant within gene) -- confirm.
seqReduced <- ddply(seqData,.(gene,total1_ex_rpkm),summarize,exonRPKM=sum(total1_ex_rpkm,total2_ex_rpkm)/2)
# Mean cytoplasmic RNA count per gene from the FISH data.
fishReduced <- ddply(fishData,.(gene),summarize,meanRNA=mean(cytoRNA))
# Inner join on gene: only genes present in both datasets are kept.
tmp <- merge(fishReduced,seqReduced)
pdf('~/Dropbox/densitypaper/SupplementaryFigures/FISH_vs_Seq/rpkm_vs_fish.pdf',width=4.3,height=4)
# Scatter on log-log axes.
ggplot(tmp,aes(x=meanRNA,y=exonRPKM)) +
geom_point(size=1.5) +
scale_x_log10() + scale_y_log10() +
theme_classic() +
xlab('Mean RNA Count (RNA-FISH)') + ylab('RPKM') +
theme(axis.title=element_text(size="12"), axis.text=element_text(size='12'))
dev.off()
# Linear fit of RPKM on mean FISH count (printed to console).
summary(lm(exonRPKM ~ meanRNA, data=tmp))
|
/SupplementaryFigures/FISH_vs_Seq/rpkm_vs_fish.R
|
no_license
|
arjunrajlaboratory/DensityPaperDataAnalysis
|
R
| false
| false
| 855
|
r
|
# Compare single-cell RNA-FISH counts against bulk RNA-seq RPKM for CRL cells
# and write the scatter plot to the supplementary-figure PDF.
library(plyr)
library(ggplot2)
# Hard-coded user paths; assumes the project's Dropbox layout.
fishData <- read.delim('~/Dropbox/densitypaper/ExtractedData/CRL.txt',header=T,stringsAsFactors=F)
seqData <- read.delim('~/Dropbox/NucSeqData/htseq_CRL/CRL_rpkm.txt',header=T,stringsAsFactors=F)
# Average the exonic RPKM of the two replicates per gene.
# NOTE(review): grouping by total1_ex_rpkm in addition to gene looks
# unintended (harmless only if RPKM is constant within gene) -- confirm.
seqReduced <- ddply(seqData,.(gene,total1_ex_rpkm),summarize,exonRPKM=sum(total1_ex_rpkm,total2_ex_rpkm)/2)
# Mean cytoplasmic RNA count per gene from the FISH data.
fishReduced <- ddply(fishData,.(gene),summarize,meanRNA=mean(cytoRNA))
# Inner join on gene: only genes present in both datasets are kept.
tmp <- merge(fishReduced,seqReduced)
pdf('~/Dropbox/densitypaper/SupplementaryFigures/FISH_vs_Seq/rpkm_vs_fish.pdf',width=4.3,height=4)
# Scatter on log-log axes.
ggplot(tmp,aes(x=meanRNA,y=exonRPKM)) +
geom_point(size=1.5) +
scale_x_log10() + scale_y_log10() +
theme_classic() +
xlab('Mean RNA Count (RNA-FISH)') + ylab('RPKM') +
theme(axis.title=element_text(size="12"), axis.text=element_text(size='12'))
dev.off()
# Linear fit of RPKM on mean FISH count (printed to console).
summary(lm(exonRPKM ~ meanRNA, data=tmp))
|
####################
## ObjectLs #####
####################
ObjectLs <- function(theta) {
  # Description :
  #   Compute the value of least-squares object function.
  # Usage :
  #   ObjectLs(theta)
  # Arguments :
  #   theta : A vector of dimension p.
  # Returns :
  #   A real value of the least-squares objective function.
  #
  # NOTE(review): relies on globals `Test.case`, `y`, `X`, `bFunction`,
  # `MeanFunction`, and on `qnorm` -- presumably a project-defined
  # q-norm helper (2-norm here), NOT stats::qnorm (for which
  # qnorm(2, v) would be NaN); confirm against the package sources.
  if(Test.case=="gaussian") {
    # Residual vector of the linear model.
    loss <- as.vector(y - X %*% theta)
    # Gradient of the least-squares loss; computed but unused here.
    derivative <- as.vector(t(X) %*% (y - X %*% theta))
    # Squared 2-norm of the residuals, offset by the norm of y.
    result <- (qnorm(2, loss) ^ 2- qnorm(2, y) ^ 2)
    return(result)
  } else {
    # GLM-type objective: y'(X theta) - sum of the cumulant terms b().
    loss <- sum(y * as.vector(X %*% theta) - bFunction(X, theta))
    # Score-type vector; computed but unused here.
    tune.vector <- as.vector(t(y - MeanFunction(X, theta)) %*% X)
    # Ridge-style penalty term; computed but unused here.
    regularization <- qnorm(2, theta) ^ 2
    # Negative log-likelihood-type value is returned.
    result <- -loss
    return(result)
  }
}
|
/Additional functions/ObjectLs.R
|
permissive
|
LedererLab/tridge
|
R
| false
| false
| 784
|
r
|
####################
## ObjectLs #####
####################
ObjectLs <- function(theta) {
  # Description :
  #   Compute the value of least-squares object function.
  # Usage :
  #   ObjectLs(theta)
  # Arguments :
  #   theta : A vector of dimension p.
  # Returns :
  #   A real value of the least-squares objective function.
  #
  # NOTE(review): relies on globals `Test.case`, `y`, `X`, `bFunction`,
  # `MeanFunction`, and on `qnorm` -- presumably a project-defined
  # q-norm helper (2-norm here), NOT stats::qnorm (for which
  # qnorm(2, v) would be NaN); confirm against the package sources.
  if(Test.case=="gaussian") {
    # Residual vector of the linear model.
    loss <- as.vector(y - X %*% theta)
    # Gradient of the least-squares loss; computed but unused here.
    derivative <- as.vector(t(X) %*% (y - X %*% theta))
    # Squared 2-norm of the residuals, offset by the norm of y.
    result <- (qnorm(2, loss) ^ 2- qnorm(2, y) ^ 2)
    return(result)
  } else {
    # GLM-type objective: y'(X theta) - sum of the cumulant terms b().
    loss <- sum(y * as.vector(X %*% theta) - bFunction(X, theta))
    # Score-type vector; computed but unused here.
    tune.vector <- as.vector(t(y - MeanFunction(X, theta)) %*% X)
    # Ridge-style penalty term; computed but unused here.
    regularization <- qnorm(2, theta) ^ 2
    # Negative log-likelihood-type value is returned.
    result <- -loss
    return(result)
  }
}
|
recurse.rbind <- function(fun, dframe, cols) {
  # Recursively split `dframe` by each column named in the list `cols`,
  # apply `fun` to every finest-level subset, and rbind all results.
  #
  # fun    : function taking a data.frame and returning an rbind-able value.
  # dframe : data.frame to split.
  # cols   : list of column names; splitting nests in the given order.
  #
  # Improvements over the original: `<-` instead of `=`, lapply() instead
  # of a manual index-counter loop, and the two duplicated branch bodies
  # merged (they differed only in the per-subset call).
  key <- cols[[1]]
  pieces <- lapply(unique(dframe[, key]), function(val) {
    piece <- dframe[dframe[, key] == val, ]
    if (length(cols) == 1) {
      fun(piece)
    } else {
      recurse.rbind(fun, piece, cols[2:length(cols)])
    }
  })
  do.call(rbind, pieces)
}
|
/R/tools.R
|
no_license
|
dill/inlabru
|
R
| false
| false
| 505
|
r
|
recurse.rbind <- function(fun, dframe, cols) {
  # Recursively split `dframe` by each column named in the list `cols`,
  # apply `fun` to every finest-level subset, and rbind all results.
  #
  # fun    : function taking a data.frame and returning an rbind-able value.
  # dframe : data.frame to split.
  # cols   : list of column names; splitting nests in the given order.
  #
  # Improvements over the original: `<-` instead of `=`, lapply() instead
  # of a manual index-counter loop, and the two duplicated branch bodies
  # merged (they differed only in the per-subset call).
  key <- cols[[1]]
  pieces <- lapply(unique(dframe[, key]), function(val) {
    piece <- dframe[dframe[, key] == val, ]
    if (length(cols) == 1) {
      fun(piece)
    } else {
      recurse.rbind(fun, piece, cols[2:length(cols)])
    }
  })
  do.call(rbind, pieces)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fnxs_general.R
\name{burns.data}
\alias{burns.data}
\title{A dataset containing the fitness values for recombinant poliovirus viruses.}
\format{A list of 5 elements containing integers (1 or 0) that indicate the absence or presence of mutated alleles at 4 loci and a numerical element with relative fitness values.}
\usage{
data(burns.data)
}
\description{
A dataset containing the fitness values for recombinant poliovirus viruses.
}
\references{
Burns, C.C., Shaw, J., Campagnoli, R., Jorba, J., Vincent, A., Quay, J., and Kew, O. (2006). Modulation of Poliovirus Replicative Fitness in HeLa Cells by Deoptimization of Synonymous Codon Usage in the Capsid Region. J Virol 80, 3259-3272.
}
|
/man/burns.data.Rd
|
no_license
|
jtvanleuven/Stickbreaker
|
R
| false
| true
| 770
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fnxs_general.R
\name{burns.data}
\alias{burns.data}
\title{A dataset containing the fitness values for recombinant poliovirus viruses.}
\format{A list of 5 elements containing integers (1 or 0) that indicate the absence or presence of mutated alleles at 4 loci and a numerical element with relative fitness values.}
\usage{
data(burns.data)
}
\description{
A dataset containing the fitness values for recombinant poliovirus viruses.
}
\references{
Burns, C.C., Shaw, J., Campagnoli, R., Jorba, J., Vincent, A., Quay, J., and Kew, O. (2006). Modulation of Poliovirus Replicative Fitness in HeLa Cells by Deoptimization of Synonymous Codon Usage in the Capsid Region. J Virol 80, 3259-3272.
}
|
\name{subset.fdata}
\Rdversion{1.1}
\alias{subset.fdata}
\title{ Subsetting }
\description{
Return subsets of \code{fdata} which meet conditions.
}
\usage{
\method{subset}{fdata}(x, subset, select, drop = TRUE,\ldots)
%\method{subset}{lfdata}(x, subset, select, drop = TRUE,\ldots)
}
\arguments{
\item{x}{ object to be subsetted (\code{fdata} class).}
\item{subset}{ logical expression indicating elements or rows to keep.}
\item{select}{ logical expression indicating points or columns to keep.}
\item{drop}{ passed on to \code{[} indexing operator.}
\item{\ldots}{further arguments to be passed to or from other methods.}
}
\value{
An object similar to \code{x} containing just the selected elements.
}
\seealso{
See \code{\link{subset}} and \code{\link{fdata}}.
}
%\keyword{multivariate}
|
/man/subset.fdata.Rd
|
no_license
|
dgorbachev/fda.usc
|
R
| false
| false
| 835
|
rd
|
\name{subset.fdata}
\Rdversion{1.1}
\alias{subset.fdata}
\title{ Subsetting }
\description{
Return subsets of \code{fdata} which meet conditions.
}
\usage{
\method{subset}{fdata}(x, subset, select, drop = TRUE,\ldots)
%\method{subset}{lfdata}(x, subset, select, drop = TRUE,\ldots)
}
\arguments{
\item{x}{ object to be subsetted (\code{fdata} class).}
\item{subset}{ logical expression indicating elements or rows to keep.}
\item{select}{ logical expression indicating points or columns to keep.}
\item{drop}{ passed on to \code{[} indexing operator.}
\item{\ldots}{further arguments to be passed to or from other methods.}
}
\value{
An object similar to \code{x} containing just the selected elements.
}
\seealso{
See \code{\link{subset}} and \code{\link{fdata}}.
}
%\keyword{multivariate}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cloudsearch_operations.R
\name{cloudsearch_define_index_field}
\alias{cloudsearch_define_index_field}
\title{Configures an IndexField for the search domain}
\usage{
cloudsearch_define_index_field(DomainName, IndexField)
}
\arguments{
\item{DomainName}{[required]}
\item{IndexField}{[required] The index field and field options you want to configure.}
}
\description{
Configures an \verb{<a>IndexField</a>} for the search domain. Used to create
new fields and modify existing ones. You must specify the name of the
domain you are configuring and an index field configuration. The index
field configuration specifies a unique name, the index field type, and
the options you want to configure for the field. The options you can
specify depend on the \verb{<a>IndexFieldType</a>}. If the field exists, the
new configuration replaces the old one. For more information, see
\href{https://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-index-fields.html}{Configuring Index Fields}
in the \emph{Amazon CloudSearch Developer Guide}.
}
\section{Request syntax}{
\preformatted{svc$define_index_field(
DomainName = "string",
IndexField = list(
IndexFieldName = "string",
IndexFieldType = "int"|"double"|"literal"|"text"|"date"|"latlon"|"int-array"|"double-array"|"literal-array"|"text-array"|"date-array",
IntOptions = list(
DefaultValue = 123,
SourceField = "string",
FacetEnabled = TRUE|FALSE,
SearchEnabled = TRUE|FALSE,
ReturnEnabled = TRUE|FALSE,
SortEnabled = TRUE|FALSE
),
DoubleOptions = list(
DefaultValue = 123.0,
SourceField = "string",
FacetEnabled = TRUE|FALSE,
SearchEnabled = TRUE|FALSE,
ReturnEnabled = TRUE|FALSE,
SortEnabled = TRUE|FALSE
),
LiteralOptions = list(
DefaultValue = "string",
SourceField = "string",
FacetEnabled = TRUE|FALSE,
SearchEnabled = TRUE|FALSE,
ReturnEnabled = TRUE|FALSE,
SortEnabled = TRUE|FALSE
),
TextOptions = list(
DefaultValue = "string",
SourceField = "string",
ReturnEnabled = TRUE|FALSE,
SortEnabled = TRUE|FALSE,
HighlightEnabled = TRUE|FALSE,
AnalysisScheme = "string"
),
DateOptions = list(
DefaultValue = "string",
SourceField = "string",
FacetEnabled = TRUE|FALSE,
SearchEnabled = TRUE|FALSE,
ReturnEnabled = TRUE|FALSE,
SortEnabled = TRUE|FALSE
),
LatLonOptions = list(
DefaultValue = "string",
SourceField = "string",
FacetEnabled = TRUE|FALSE,
SearchEnabled = TRUE|FALSE,
ReturnEnabled = TRUE|FALSE,
SortEnabled = TRUE|FALSE
),
IntArrayOptions = list(
DefaultValue = 123,
SourceFields = "string",
FacetEnabled = TRUE|FALSE,
SearchEnabled = TRUE|FALSE,
ReturnEnabled = TRUE|FALSE
),
DoubleArrayOptions = list(
DefaultValue = 123.0,
SourceFields = "string",
FacetEnabled = TRUE|FALSE,
SearchEnabled = TRUE|FALSE,
ReturnEnabled = TRUE|FALSE
),
LiteralArrayOptions = list(
DefaultValue = "string",
SourceFields = "string",
FacetEnabled = TRUE|FALSE,
SearchEnabled = TRUE|FALSE,
ReturnEnabled = TRUE|FALSE
),
TextArrayOptions = list(
DefaultValue = "string",
SourceFields = "string",
ReturnEnabled = TRUE|FALSE,
HighlightEnabled = TRUE|FALSE,
AnalysisScheme = "string"
),
DateArrayOptions = list(
DefaultValue = "string",
SourceFields = "string",
FacetEnabled = TRUE|FALSE,
SearchEnabled = TRUE|FALSE,
ReturnEnabled = TRUE|FALSE
)
)
)
}
}
\keyword{internal}
|
/cran/paws.analytics/man/cloudsearch_define_index_field.Rd
|
permissive
|
sanchezvivi/paws
|
R
| false
| true
| 3,767
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cloudsearch_operations.R
\name{cloudsearch_define_index_field}
\alias{cloudsearch_define_index_field}
\title{Configures an IndexField for the search domain}
\usage{
cloudsearch_define_index_field(DomainName, IndexField)
}
\arguments{
\item{DomainName}{[required]}
\item{IndexField}{[required] The index field and field options you want to configure.}
}
\description{
Configures an \verb{<a>IndexField</a>} for the search domain. Used to create
new fields and modify existing ones. You must specify the name of the
domain you are configuring and an index field configuration. The index
field configuration specifies a unique name, the index field type, and
the options you want to configure for the field. The options you can
specify depend on the \verb{<a>IndexFieldType</a>}. If the field exists, the
new configuration replaces the old one. For more information, see
\href{https://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-index-fields.html}{Configuring Index Fields}
in the \emph{Amazon CloudSearch Developer Guide}.
}
\section{Request syntax}{
\preformatted{svc$define_index_field(
DomainName = "string",
IndexField = list(
IndexFieldName = "string",
IndexFieldType = "int"|"double"|"literal"|"text"|"date"|"latlon"|"int-array"|"double-array"|"literal-array"|"text-array"|"date-array",
IntOptions = list(
DefaultValue = 123,
SourceField = "string",
FacetEnabled = TRUE|FALSE,
SearchEnabled = TRUE|FALSE,
ReturnEnabled = TRUE|FALSE,
SortEnabled = TRUE|FALSE
),
DoubleOptions = list(
DefaultValue = 123.0,
SourceField = "string",
FacetEnabled = TRUE|FALSE,
SearchEnabled = TRUE|FALSE,
ReturnEnabled = TRUE|FALSE,
SortEnabled = TRUE|FALSE
),
LiteralOptions = list(
DefaultValue = "string",
SourceField = "string",
FacetEnabled = TRUE|FALSE,
SearchEnabled = TRUE|FALSE,
ReturnEnabled = TRUE|FALSE,
SortEnabled = TRUE|FALSE
),
TextOptions = list(
DefaultValue = "string",
SourceField = "string",
ReturnEnabled = TRUE|FALSE,
SortEnabled = TRUE|FALSE,
HighlightEnabled = TRUE|FALSE,
AnalysisScheme = "string"
),
DateOptions = list(
DefaultValue = "string",
SourceField = "string",
FacetEnabled = TRUE|FALSE,
SearchEnabled = TRUE|FALSE,
ReturnEnabled = TRUE|FALSE,
SortEnabled = TRUE|FALSE
),
LatLonOptions = list(
DefaultValue = "string",
SourceField = "string",
FacetEnabled = TRUE|FALSE,
SearchEnabled = TRUE|FALSE,
ReturnEnabled = TRUE|FALSE,
SortEnabled = TRUE|FALSE
),
IntArrayOptions = list(
DefaultValue = 123,
SourceFields = "string",
FacetEnabled = TRUE|FALSE,
SearchEnabled = TRUE|FALSE,
ReturnEnabled = TRUE|FALSE
),
DoubleArrayOptions = list(
DefaultValue = 123.0,
SourceFields = "string",
FacetEnabled = TRUE|FALSE,
SearchEnabled = TRUE|FALSE,
ReturnEnabled = TRUE|FALSE
),
LiteralArrayOptions = list(
DefaultValue = "string",
SourceFields = "string",
FacetEnabled = TRUE|FALSE,
SearchEnabled = TRUE|FALSE,
ReturnEnabled = TRUE|FALSE
),
TextArrayOptions = list(
DefaultValue = "string",
SourceFields = "string",
ReturnEnabled = TRUE|FALSE,
HighlightEnabled = TRUE|FALSE,
AnalysisScheme = "string"
),
DateArrayOptions = list(
DefaultValue = "string",
SourceFields = "string",
FacetEnabled = TRUE|FALSE,
SearchEnabled = TRUE|FALSE,
ReturnEnabled = TRUE|FALSE
)
)
)
}
}
\keyword{internal}
|
\name{exboxes}
\alias{exboxes}
\docType{data}
\title{Example Box Sequence Object}
\description{
An example box sequence for demonstrating \pkg{sdtoolkit} functions that apply to box sequence objects. As of now these other functions are \code{seqinfo} and \code{dimplot}.
}
\usage{data(exboxes)}
\format{
The format is that of a box sequence object as output by \code{sdprim}. Please see the \sQuote{Value} section of the documentation for \code{\link{sdprim}} for additional details.
}
%\details{
% ~~ If necessary, more details than the __description__ above ~~
%}
\source{
The object was generated by running \code{sdprim} on the common dataset \code{quakes}, with the fourth variable (\code{mag}) used as the output variable, thresholded at 5.0.
}
%\references{
% ~~ possibly secondary sources and usages ~~
%}
\examples{
data(exboxes)
}
\keyword{datasets}
|
/man/exboxes.rd
|
no_license
|
cran/sdtoolkit
|
R
| false
| false
| 886
|
rd
|
\name{exboxes}
\alias{exboxes}
\docType{data}
\title{Example Box Sequence Object}
\description{
An example box sequence for demonstrating \pkg{sdtoolkit} functions that apply to box sequence objects. As of now these other functions are \code{seqinfo} and \code{dimplot}.
}
\usage{data(exboxes)}
\format{
The format is that of a box sequence object as output by \code{sdprim}. Please see the \sQuote{Value} section of the documentation for \code{\link{sdprim}} for additional details.
}
%\details{
% ~~ If necessary, more details than the __description__ above ~~
%}
\source{
The object was generated by running \code{sdprim} on the common dataset \code{quakes}, with the fourth variable (\code{mag}) used as the output variable, thresholded at 5.0.
}
%\references{
% ~~ possibly secondary sources and usages ~~
%}
\examples{
data(exboxes)
}
\keyword{datasets}
|
# Example script extracted from the sarima package documentation.
library(sarima)
### Name: arma_Q0Gardner
### Title: Computing the initial state covariance matrix of ARMA
### Aliases: arma_Q0Gardner arma_Q0bis arma_Q0naive arma_Q0gnbR
### Keywords: arma arima htest
### ** Examples
## The calls are kept commented out: `phi` and `theta` (AR/MA coefficient
## vectors) are not defined in this extracted snippet.
## arma_Q0Gardner(phi, theta, tol = .Machine$double.eps)
## arma_Q0bis(phi, theta, tol = .Machine$double.eps)
|
/data/genthat_extracted_code/sarima/examples/arma_Q0Gardner.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 334
|
r
|
# Example script extracted from the sarima package documentation.
library(sarima)
### Name: arma_Q0Gardner
### Title: Computing the initial state covariance matrix of ARMA
### Aliases: arma_Q0Gardner arma_Q0bis arma_Q0naive arma_Q0gnbR
### Keywords: arma arima htest
### ** Examples
## The calls are kept commented out: `phi` and `theta` (AR/MA coefficient
## vectors) are not defined in this extracted snippet.
## arma_Q0Gardner(phi, theta, tol = .Machine$double.eps)
## arma_Q0bis(phi, theta, tol = .Machine$double.eps)
|
# Triplet matching for three treatment arms (HOSP_TRAUMA = 0/1/2) on the NEDS
# trauma data: builds the analytic file, fits a multinomial propensity-score
# model, then runs 3-way constrained optimal matching from each starting edge.
library(sas7bdat)
library(nnet)
library(optmatch)
setwd("U:/")
source("simulation analysis/functionsMatching_6.R")
########
# DATA #
########
#Load data and construction analytic file
datH1 <- read.sas7bdat(file = "trauma_25_75_h1.sas7bdat")
datH2 <- read.sas7bdat(file = "trauma_25_75_h2.sas7bdat")
table(datH1$HOSP_TRAUMA,exclude = NULL)
table(datH2$HOSP_TRAUMA,exclude = NULL)
# Analytic file: all of H1 plus only the control (HOSP_TRAUMA == 0) rows of H2.
dat <- rbind(datH1,datH2[datH2$HOSP_TRAUMA %in% 0,])
#Treatment variable
table(dat$HOSP_TRAUMA,exclude = NULL)
dat$HOSP_TRAUMA <- factor(dat$HOSP_TRAUMA, levels = c(0, 1, 2))
# summary(dat$iss)
# summary(dat$AGE)
# summary(dat$chronic)
# summary(dat$income_2)
# summary(dat$income_3)
# summary(dat$income_4)
# summary(dat$pay1_1)
# summary(dat$pay1_2)
# summary(dat$pay1_4)
# summary(dat$pay1_5)
# summary(dat$pay1_6)
#
# summary(dat$nchs_2)
# summary(dat$nchs_3)
# summary(dat$nchs_4)
# summary(dat$nchs_5)
# summary(dat$nchs_6)
# summary(dat$multiple_injury)
# #summary(dat$MULTINJURY)
# summary(dat$FEMALE)
####################
# Propensity score #
####################
formulaPropScore <- formula(HOSP_TRAUMA ~ iss + AGE +
  income_2 + income_3 + chronic + multiple_injury + income_4 +
  pay1_1 + pay1_2 + pay1_4 + pay1_5 + pay1_6 +
  nchs_2 + nchs_3 + nchs_4 + nchs_5 + nchs_6+
  FEMALE)
# NOTE(review): nnet::multinom() has no 'family' argument; the binomial(...)
# term below is absorbed by '...' and has no effect -- confirm this is intended.
propScoreModel <- multinom(formulaPropScore, family=binomial(link=logit),data=dat)
probabilitiesPS <- predict(propScoreModel, type = "probs")
# Generalized propensity scores on the logit scale: arm 1 vs 0 and arm 2 vs 0.
dat$logit1vs0 <- log(probabilitiesPS[,"1"]/probabilitiesPS[,"0"])
dat$logit2vs0 <- log(probabilitiesPS[,"2"]/probabilitiesPS[,"0"])
dat$prob0 <- probabilitiesPS[,"0"]
dat$prob1 <- probabilitiesPS[,"1"]
dat$prob2 <- probabilitiesPS[,"2"]
# Drop the large intermediate objects before the memory-hungry matching step.
rm(propScoreModel, probabilitiesPS)
rm(datH1, datH2)
gc(reset=T)
#Info about propensity score
#----------------------------
# plot(density(dat$prob0))
# plot(density(dat$prob1))
# plot(density(dat$prob2))
############
# Matching #
############
variableTreatment <- "HOSP_TRAUMA"
variablesMatch <- c("logit1vs0","logit2vs0")
#3-way constrained optimal matching
#----------------------
#3-way optimal matching - change of starting edge
start <- Sys.time()
startingEdges <- c("0-2","1-2","0-1")
options("optmatch_max_problem_size" = Inf)
dat$HOSP_TRAUMA <- as.numeric(as.character(dat$HOSP_TRAUMA))
# Run the constrained 3-way matching once per starting edge (defined in the
# sourced functionsMatching_6.R), timing each run and saving each result to
# its own .Rdata file.
for(iter in 1:3) {
  result3wayConstr <- applyModifiedOptMatching(data = dat, startingEdges[iter],
                                               variablesMatch = variablesMatch,
                                               variableTreatment = variableTreatment)
  endTemp <- Sys.time()
  print(endTemp - start)
  save(result3wayConstr, file = paste("bestResult_3wayconstr_allData_",startingEdges[iter],".Rdata",sep=""))
}
end <- Sys.time()
end - start
|
/codes/analysis NEDS/matching.R
|
permissive
|
jasa-acs/Triplet-Matching-for-Estimating-Causal-Effects-With-Three-Treatment-Arms-A-Comparative-Study-of-M...
|
R
| false
| false
| 2,849
|
r
|
# Triplet matching for three treatment arms (HOSP_TRAUMA = 0/1/2) on the NEDS
# trauma data: builds the analytic file, fits a multinomial propensity-score
# model, then runs 3-way constrained optimal matching from each starting edge.
library(sas7bdat)
library(nnet)
library(optmatch)
setwd("U:/")
source("simulation analysis/functionsMatching_6.R")
########
# DATA #
########
#Load data and construction analytic file
datH1 <- read.sas7bdat(file = "trauma_25_75_h1.sas7bdat")
datH2 <- read.sas7bdat(file = "trauma_25_75_h2.sas7bdat")
table(datH1$HOSP_TRAUMA,exclude = NULL)
table(datH2$HOSP_TRAUMA,exclude = NULL)
# Analytic file: all of H1 plus only the control (HOSP_TRAUMA == 0) rows of H2.
dat <- rbind(datH1,datH2[datH2$HOSP_TRAUMA %in% 0,])
#Treatment variable
table(dat$HOSP_TRAUMA,exclude = NULL)
dat$HOSP_TRAUMA <- factor(dat$HOSP_TRAUMA, levels = c(0, 1, 2))
# summary(dat$iss)
# summary(dat$AGE)
# summary(dat$chronic)
# summary(dat$income_2)
# summary(dat$income_3)
# summary(dat$income_4)
# summary(dat$pay1_1)
# summary(dat$pay1_2)
# summary(dat$pay1_4)
# summary(dat$pay1_5)
# summary(dat$pay1_6)
#
# summary(dat$nchs_2)
# summary(dat$nchs_3)
# summary(dat$nchs_4)
# summary(dat$nchs_5)
# summary(dat$nchs_6)
# summary(dat$multiple_injury)
# #summary(dat$MULTINJURY)
# summary(dat$FEMALE)
####################
# Propensity score #
####################
formulaPropScore <- formula(HOSP_TRAUMA ~ iss + AGE +
  income_2 + income_3 + chronic + multiple_injury + income_4 +
  pay1_1 + pay1_2 + pay1_4 + pay1_5 + pay1_6 +
  nchs_2 + nchs_3 + nchs_4 + nchs_5 + nchs_6+
  FEMALE)
# NOTE(review): nnet::multinom() has no 'family' argument; the binomial(...)
# term below is absorbed by '...' and has no effect -- confirm this is intended.
propScoreModel <- multinom(formulaPropScore, family=binomial(link=logit),data=dat)
probabilitiesPS <- predict(propScoreModel, type = "probs")
# Generalized propensity scores on the logit scale: arm 1 vs 0 and arm 2 vs 0.
dat$logit1vs0 <- log(probabilitiesPS[,"1"]/probabilitiesPS[,"0"])
dat$logit2vs0 <- log(probabilitiesPS[,"2"]/probabilitiesPS[,"0"])
dat$prob0 <- probabilitiesPS[,"0"]
dat$prob1 <- probabilitiesPS[,"1"]
dat$prob2 <- probabilitiesPS[,"2"]
# Drop the large intermediate objects before the memory-hungry matching step.
rm(propScoreModel, probabilitiesPS)
rm(datH1, datH2)
gc(reset=T)
#Info about propensity score
#----------------------------
# plot(density(dat$prob0))
# plot(density(dat$prob1))
# plot(density(dat$prob2))
############
# Matching #
############
variableTreatment <- "HOSP_TRAUMA"
variablesMatch <- c("logit1vs0","logit2vs0")
#3-way constrained optimal matching
#----------------------
#3-way optimal matching - change of starting edge
start <- Sys.time()
startingEdges <- c("0-2","1-2","0-1")
options("optmatch_max_problem_size" = Inf)
dat$HOSP_TRAUMA <- as.numeric(as.character(dat$HOSP_TRAUMA))
# Run the constrained 3-way matching once per starting edge (defined in the
# sourced functionsMatching_6.R), timing each run and saving each result to
# its own .Rdata file.
for(iter in 1:3) {
  result3wayConstr <- applyModifiedOptMatching(data = dat, startingEdges[iter],
                                               variablesMatch = variablesMatch,
                                               variableTreatment = variableTreatment)
  endTemp <- Sys.time()
  print(endTemp - start)
  save(result3wayConstr, file = paste("bestResult_3wayconstr_allData_",startingEdges[iter],".Rdata",sep=""))
}
end <- Sys.time()
end - start
|
# Extracted example code for the help page 'npc' (package 'NPS'): maps
# Likelihood-to-Recommend scores (0-10) to Net Promoter categories and
# summarises a simulated sample of 1000 scores.
library(NPS)
### Name: npc
### Title: Create Net Promoter Categories from Likelihood to Recommend
### Scores
### Aliases: npc
### ** Examples
# The command below will generate Net Promoter categories for each point
# on a standard 0:10 Likelihood to Recommend scale
npc(0:10)
# Here's how scores and categories map out. Notice that scores which are
# 'off the scale' drop out as missing/invalid
data.frame(score = -2:12, category = npc(-2:12))
# When you have lots of data, summaries are useful
rec <- sample(0:10, prob=c(0.02, 0.01, 0.01, 0.01, 0.01, 0.03, 0.03, 0.09,
0.22, 0.22, 0.35), 1000, replace=TRUE)
# A Histrogram of the Likelihood to Recommend scores we just generated
hist(rec, breaks=-1:10)
# A look at the by nps category using summary
summary(npc(rec))
# As above
table(npc(rec))
# As a crosstabulation
table(rec, npc(rec))
nps(rec)
|
/data/genthat_extracted_code/NPS/examples/npc.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 870
|
r
|
# Extracted example code for the help page 'npc' (package 'NPS'): maps
# Likelihood-to-Recommend scores (0-10) to Net Promoter categories and
# summarises a simulated sample of 1000 scores.
library(NPS)
### Name: npc
### Title: Create Net Promoter Categories from Likelihood to Recommend
### Scores
### Aliases: npc
### ** Examples
# The command below will generate Net Promoter categories for each point
# on a standard 0:10 Likelihood to Recommend scale
npc(0:10)
# Here's how scores and categories map out. Notice that scores which are
# 'off the scale' drop out as missing/invalid
data.frame(score = -2:12, category = npc(-2:12))
# When you have lots of data, summaries are useful
rec <- sample(0:10, prob=c(0.02, 0.01, 0.01, 0.01, 0.01, 0.03, 0.03, 0.09,
0.22, 0.22, 0.35), 1000, replace=TRUE)
# A Histrogram of the Likelihood to Recommend scores we just generated
hist(rec, breaks=-1:10)
# A look at the by nps category using summary
summary(npc(rec))
# As above
table(npc(rec))
# As a crosstabulation
table(rec, npc(rec))
nps(rec)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_write.R
\name{Plum_runs}
\alias{Plum_runs}
\title{List the folders present in the current core directory.}
\usage{
Plum_runs(coredir = get("info")$coredir)
}
\arguments{
\item{coredir}{The directory where the Bacon runs reside. Defaults to \code{coredir="Plum_runs"}.}
}
\value{
A list of folders
}
\description{
Lists all folders located within the core's directory.
}
\details{
The directory is either "Plum_runs", "Cores" or a custom-named one.
}
\seealso{
\url{http://www.qub.ac.uk/chrono/blaauw/manualBacon_2.3.pdf}
}
\author{
Maarten Blaauw, J. Andres Christen
}
|
/fuzzedpackages/rplum/man/Plum_runs.Rd
|
no_license
|
akhikolla/testpackages
|
R
| false
| true
| 652
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_write.R
\name{Plum_runs}
\alias{Plum_runs}
\title{List the folders present in the current core directory.}
\usage{
Plum_runs(coredir = get("info")$coredir)
}
\arguments{
\item{coredir}{The directory where the Bacon runs reside. Defaults to \code{coredir="Plum_runs"}.}
}
\value{
A list of folders
}
\description{
Lists all folders located within the core's directory.
}
\details{
The directory is either "Plum_runs", "Cores" or a custom-named one.
}
\seealso{
\url{http://www.qub.ac.uk/chrono/blaauw/manualBacon_2.3.pdf}
}
\author{
Maarten Blaauw, J. Andres Christen
}
|
# libFuzzer/valgrind regression input: calls the internal routine
# diffrprojects:::dist_mat_absolute with NA-laden integer vectors of
# different lengths and prints the structure of the result.
testlist <- list(x = NA_integer_, y = c(NA, -58666L, NA, 30464L))
result <- do.call(diffrprojects:::dist_mat_absolute,testlist)
str(result)
|
/diffrprojects/inst/testfiles/dist_mat_absolute/libFuzzer_dist_mat_absolute/dist_mat_absolute_valgrind_files/1609961300-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 139
|
r
|
# libFuzzer/valgrind regression input: calls the internal routine
# diffrprojects:::dist_mat_absolute with NA-laden integer vectors of
# different lengths and prints the structure of the result.
testlist <- list(x = NA_integer_, y = c(NA, -58666L, NA, 30464L))
result <- do.call(diffrprojects:::dist_mat_absolute,testlist)
str(result)
|
## plot2.R - exploratory plot for the household power consumption data.
## Reads the raw semicolon-delimited file, restricts it to 2007-02-01/02, and
## writes a line plot of Global_active_power vs. time to plot2.png.

# Use forward slashes: backslashes inside a double-quoted R string are escape
# sequences ("\N" and "\e" are parse errors), so "I:\NTL\..." never parsed.
setwd("I:/NTL/files/exdata_data_household_power_consumption")
## Reading data from the file; "?" marks missing values. Spell out
## na.strings/nrows rather than relying on partial argument matching.
data_full <- read.csv("household_power_consumption.txt", header = TRUE,
                      sep = ";", na.strings = "?", nrows = 2075259)
## Subset the data to the two days of interest (dates stored as d/m/Y strings).
data1 <- subset(data_full, Date %in% c("1/2/2007", "2/2/2007"))
## Build a POSIXct date-time column from the Date and Time fields.
data1$Date <- as.Date(data1$Date, format = "%d/%m/%Y")
datetime <- paste(data1$Date, data1$Time)
data1$Datetime <- as.POSIXct(datetime)
## The second plot: draw on screen, then copy to a 480x480 PNG.
with(data1, {
  plot(Global_active_power ~ Datetime, type = "l",
       ylab = "Global Active Power (kilowatts)", xlab = "")})
dev.copy(png, "plot2.png", height = 480, width = 480)
dev.off()
|
/plot2.R
|
no_license
|
hagar912/Exploratory-Data-analysis-Project
|
R
| false
| false
| 650
|
r
|
## plot2.R - exploratory plot for the household power consumption data.
## Reads the raw semicolon-delimited file, restricts it to 2007-02-01/02, and
## writes a line plot of Global_active_power vs. time to plot2.png.

# Use forward slashes: backslashes inside a double-quoted R string are escape
# sequences ("\N" and "\e" are parse errors), so "I:\NTL\..." never parsed.
setwd("I:/NTL/files/exdata_data_household_power_consumption")
## Reading data from the file; "?" marks missing values. Spell out
## na.strings/nrows rather than relying on partial argument matching.
data_full <- read.csv("household_power_consumption.txt", header = TRUE,
                      sep = ";", na.strings = "?", nrows = 2075259)
## Subset the data to the two days of interest (dates stored as d/m/Y strings).
data1 <- subset(data_full, Date %in% c("1/2/2007", "2/2/2007"))
## Build a POSIXct date-time column from the Date and Time fields.
data1$Date <- as.Date(data1$Date, format = "%d/%m/%Y")
datetime <- paste(data1$Date, data1$Time)
data1$Datetime <- as.POSIXct(datetime)
## The second plot: draw on screen, then copy to a 480x480 PNG.
with(data1, {
  plot(Global_active_power ~ Datetime, type = "l",
       ylab = "Global Active Power (kilowatts)", xlab = "")})
dev.copy(png, "plot2.png", height = 480, width = 480)
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vpcstats2.R
\name{plot.vpcstatsobj}
\alias{plot.vpcstatsobj}
\title{Plot a \code{vpcstatsobj}.}
\usage{
\method{plot}{vpcstatsobj}(
x,
...,
show.points = TRUE,
show.boundaries = TRUE,
show.stats = !is.null(x$stats),
show.binning = isFALSE(show.stats),
xlab = NULL,
ylab = NULL,
color = c("red", "blue", "red"),
linetype = c("dotted", "solid", "dashed"),
legend.position = "top",
facet.scales = "free"
)
}
\arguments{
\item{x}{An object.}
\item{...}{Further arguments can be specified but are ignored.}
\item{show.points}{Should the observed data points be plotted?}
\item{show.boundaries}{Should the bin boundary be displayed?}
\item{show.stats}{Should the VPC stats be displayed?}
\item{show.binning}{Should the binning be displayed by coloring the observed data points by bin?}
\item{xlab}{A character label for the x-axis.}
\item{ylab}{A character label for the y-axis.}
\item{color}{A character vector of colors for the percentiles, from low to high.}
\item{linetype}{A character vector of linetypes for the percentiles, from low to high.}
\item{legend.position}{A character string specifying the position of the legend.}
\item{facet.scales}{A character string specifying the `scales` argument to use for facetting.}
}
\value{
A `ggplot` object.
}
\description{
Plot a \code{vpcstatsobj}.
}
\seealso{
\code{ggplot}
}
|
/man/plot.vpcstatsobj.Rd
|
no_license
|
benjaminrich/vpcstats
|
R
| false
| true
| 1,433
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vpcstats2.R
\name{plot.vpcstatsobj}
\alias{plot.vpcstatsobj}
\title{Plot a \code{vpcstatsobj}.}
\usage{
\method{plot}{vpcstatsobj}(
x,
...,
show.points = TRUE,
show.boundaries = TRUE,
show.stats = !is.null(x$stats),
show.binning = isFALSE(show.stats),
xlab = NULL,
ylab = NULL,
color = c("red", "blue", "red"),
linetype = c("dotted", "solid", "dashed"),
legend.position = "top",
facet.scales = "free"
)
}
\arguments{
\item{x}{An object.}
\item{...}{Further arguments can be specified but are ignored.}
\item{show.points}{Should the observed data points be plotted?}
\item{show.boundaries}{Should the bin boundary be displayed?}
\item{show.stats}{Should the VPC stats be displayed?}
\item{show.binning}{Should the binning be displayed by coloring the observed data points by bin?}
\item{xlab}{A character label for the x-axis.}
\item{ylab}{A character label for the y-axis.}
\item{color}{A character vector of colors for the percentiles, from low to high.}
\item{linetype}{A character vector of linetypes for the percentiles, from low to high.}
\item{legend.position}{A character string specifying the position of the legend.}
\item{facet.scales}{A character string specifying the `scales` argument to use for facetting.}
}
\value{
A `ggplot` object.
}
\description{
Plot a \code{vpcstatsobj}.
}
\seealso{
\code{ggplot}
}
|
#' Curve of the BIC for each possible p2 with a fixed Z and truncation of Z
#'
#' Computes the BIC obtained when keeping the 0, 1, ..., p2 best
#' sub-regressions of the structure Z (ranked by adjusted R2) and returns the
#' structure that minimizes the BIC, or a forced truncation when \code{trunc}
#' is supplied.
#'
#' @param X matrix containing the dataset
#' @param Z adjacency matrix (binary) describing the structure between the variables
#' @param Bic_null_vect vector of the BIC for each variable, used when the
#'   variable is independent; estimated via \code{density_estimation} when NULL
#' @param plot boolean to plot or not the curve
#' @param star boolean to use BIC* (hierarchical uniform law on the structure)
#' @param trunc number of sub-regressions to keep (best R2). If NULL the min of
#'   the BIC curve is kept
#' @return a list with \code{curve} (the BIC values, first entry = empty
#'   structure) and \code{Zopt} (the selected structure)
#' @export
BicZcurve <- function(X = X, Z = Z, Bic_null_vect = Bic_null_vect,
                      plot = TRUE, star = FALSE, trunc = NULL) {
  p2 <- sum(colSums(Z) != 0)  # number of active sub-regressions in Z
  if (is.null(Bic_null_vect)) {
    Bic_null_vect <- density_estimation(X = X)$BIC_vect
  }
  # First point of the curve: empty structure (all variables independent).
  curve <- sum(BicZ(X = X, Z = 0 * Z, Bic_null_vect = Bic_null_vect, star = star))
  if (p2 > 0) {
    I2 <- which(colSums(Z) != 0)                      # columns with a sub-regression
    sigmavect <- R2Z(Z = Z, X = X, crit = "R2", adj = TRUE)
    ordre <- order(sigmavect[I2], decreasing = TRUE)  # rank by adjusted R2
    for (i in seq_len(p2)) {
      # Keep only the i best sub-regressions and evaluate the BIC.
      Zloc <- Z
      Zloc[, -I2[ordre[1:i]]] <- 0
      curve <- c(curve, sum(BicZ(X = X, Z = Zloc, Bic_null_vect = Bic_null_vect, star = star)))
    }
    if (plot) {
      plot(curve[-1])
      abline(h = curve[1])  # reference: BIC of the empty structure
    }
  }
  quimin <- which.min(curve)
  if (quimin > 1) {
    quimin <- quimin - 1  # shift: curve[1] corresponds to 0 sub-regressions
    Zopt <- Z
    Zopt[, -I2[ordre[1:quimin]]] <- 0
    if (plot) {
      abline(v = quimin)
    }
  } else {
    Zopt <- 0 * Z
  }
  if (!is.null(trunc)) {
    # Forced truncation: clamp to [0, p2]. Guard the degenerate cases the
    # original indexing got wrong: with trunc == 0, ordre[1:0] is ordre[c(1,0)]
    # and silently kept one sub-regression; with p2 == 0, I2/ordre were
    # undefined and the subscript errored.
    trunc <- max(0, min(trunc, p2))
    if (trunc == 0) {
      Zopt <- 0 * Z
    } else {
      Zopt <- Z
      Zopt[, -I2[ordre[seq_len(trunc)]]] <- 0
    }
    if (plot) {
      abline(v = trunc, col = "red")
    }
  }
  return(list(curve = curve, Zopt = Zopt))
}
|
/CorReg/R/BicZcurve.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 1,641
|
r
|
#' Curve of the BIC for each possible p2 with a fixed Z and truncation of Z
#'
#' Computes the BIC obtained when keeping the 0, 1, ..., p2 best
#' sub-regressions of the structure Z (ranked by adjusted R2) and returns the
#' structure that minimizes the BIC, or a forced truncation when \code{trunc}
#' is supplied.
#'
#' @param X matrix containing the dataset
#' @param Z adjacency matrix (binary) describing the structure between the variables
#' @param Bic_null_vect vector of the BIC for each variable, used when the
#'   variable is independent; estimated via \code{density_estimation} when NULL
#' @param plot boolean to plot or not the curve
#' @param star boolean to use BIC* (hierarchical uniform law on the structure)
#' @param trunc number of sub-regressions to keep (best R2). If NULL the min of
#'   the BIC curve is kept
#' @return a list with \code{curve} (the BIC values, first entry = empty
#'   structure) and \code{Zopt} (the selected structure)
#' @export
BicZcurve <- function(X = X, Z = Z, Bic_null_vect = Bic_null_vect,
                      plot = TRUE, star = FALSE, trunc = NULL) {
  p2 <- sum(colSums(Z) != 0)  # number of active sub-regressions in Z
  if (is.null(Bic_null_vect)) {
    Bic_null_vect <- density_estimation(X = X)$BIC_vect
  }
  # First point of the curve: empty structure (all variables independent).
  curve <- sum(BicZ(X = X, Z = 0 * Z, Bic_null_vect = Bic_null_vect, star = star))
  if (p2 > 0) {
    I2 <- which(colSums(Z) != 0)                      # columns with a sub-regression
    sigmavect <- R2Z(Z = Z, X = X, crit = "R2", adj = TRUE)
    ordre <- order(sigmavect[I2], decreasing = TRUE)  # rank by adjusted R2
    for (i in seq_len(p2)) {
      # Keep only the i best sub-regressions and evaluate the BIC.
      Zloc <- Z
      Zloc[, -I2[ordre[1:i]]] <- 0
      curve <- c(curve, sum(BicZ(X = X, Z = Zloc, Bic_null_vect = Bic_null_vect, star = star)))
    }
    if (plot) {
      plot(curve[-1])
      abline(h = curve[1])  # reference: BIC of the empty structure
    }
  }
  quimin <- which.min(curve)
  if (quimin > 1) {
    quimin <- quimin - 1  # shift: curve[1] corresponds to 0 sub-regressions
    Zopt <- Z
    Zopt[, -I2[ordre[1:quimin]]] <- 0
    if (plot) {
      abline(v = quimin)
    }
  } else {
    Zopt <- 0 * Z
  }
  if (!is.null(trunc)) {
    # Forced truncation: clamp to [0, p2]. Guard the degenerate cases the
    # original indexing got wrong: with trunc == 0, ordre[1:0] is ordre[c(1,0)]
    # and silently kept one sub-regression; with p2 == 0, I2/ordre were
    # undefined and the subscript errored.
    trunc <- max(0, min(trunc, p2))
    if (trunc == 0) {
      Zopt <- 0 * Z
    } else {
      Zopt <- Z
      Zopt[, -I2[ordre[seq_len(trunc)]]] <- 0
    }
    if (plot) {
      abline(v = trunc, col = "red")
    }
  }
  return(list(curve = curve, Zopt = Zopt))
}
|
# R script for running models
# -----------------------------------------------------------------------------
# Ready workspace
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Identify git directory and remove previous objects if not already done
if (!exists("git.dir")) {
  rm(list=ls(all=TRUE))
  graphics.off()
  git.dir <- "E:/Hughes/Git"
  # git.dir <- "C:/Users/Jim Hughes/Documents/GitRepos"
  #git.dir <- "C:/Users/hugjh001/Documents"
  reponame <- "len_pbpk"
  scriptname <- "run_model.R"
}
# Set working directory
master.dir <- paste(git.dir, reponame, sep = "/")
setwd(master.dir)
# Source observed data
# (presumably data_newiv.R defines the subdata / subdata.av objects used
# below -- confirm against that script)
source(paste(git.dir, reponame, "rscripts",
  "data_newiv.R", sep = "/"), verbose = F)
# Load package libraries
library(mrgsolve)
library(reshape2)
library(ggplot2)
# Define a custom ggplot2 theme
theme_bw2 <- theme_set(theme_bw(base_size = 16))
theme_bw2 <- theme_update(axis.text.x = element_text(angle = 35, hjust = 0.8))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Simulation Settings
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Set number of individuals that make up the 95% prediction intervals
n <- 1
# 95% prediction interval functions - calculate the 2.5th and 97.5th percentiles
CI95lo <- function(x) quantile(x, probs = 0.025)
CI95hi <- function(x) quantile(x, probs = 0.975)
# 90% prediction interval functions - calculate the 5th and 95th percentiles
CI90lo <- function(x) quantile(x, probs = 0.05)
CI90hi <- function(x) quantile(x, probs = 0.95)
# Set seed for reproducible numbers
set.seed(123456)
TIME <- seq(from = 0, to = 500, by = 0.2)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Simulation
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Source the model
source(paste(master.dir, "models", "mouse_brown.R", sep = "/"))
# Simulate concentration-time profiles for the population
ID <- 1:n
ID2 <- sort(c(rep(ID, times = length(TIME))))
times <- rep(TIME, times = length(ID))
# Observation records: one row per ID per time point, no dosing (evid = 0).
input.simdata <- data.frame(
  ID = ID2,
  time = times,
  amt = 0,
  evid = 0,
  rate = 0,
  cmt = 1
)
# Add a dosing record at time 0 (amt = 1, infusion rate = 60) to compartment 1.
dose.times <- 0
dosedata <- input.simdata[input.simdata$time %in% dose.times, ]
dosedata$amt <- 1
dosedata$evid <- 1
dosedata$rate <- 60
dosedata$cmt <- 1
input.simdata <- rbind(input.simdata, dosedata)
input.simdata <- input.simdata[with(input.simdata, order(ID, time)), ]
simdata <- as.data.frame(mrgsim(
  data_set(brown.mod, input.simdata)
)) # mrgsim
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Plot Observed Data
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
obsdata <- subdata[c("ID", "TIME", "TADNOM", "DOSEMGKG", "DOSEMG", "WT", "PLA", "BRA",
  "LVR", "MSC", "HRT", "SPL", "LUN", "KID")]
# Rename observed tissues to match the simulation compartment names.
names(obsdata)[names(obsdata) == "PLA"] <- "PA"
names(obsdata)[names(obsdata) == "SPL"] <- "SPR"
names(obsdata)[names(obsdata) == "LUN"] <- "ART"
obsdata.plot <- melt(obsdata,
  c("ID", "TIME", "TADNOM", "DOSEMGKG", "DOSEMG", "WT"),
  variable.name = "COMP", value.name = "C"
)
# NOTE(review): the assignment below is incomplete -- R keeps parsing past the
# comment, so this actually evaluates as `obsdata.plot$CNORM <- p0 <- NULL`,
# leaving CNORM unset and silently consuming the `p0 <- NULL` statement.
# Fill in the intended dose-normalisation expression (cf. avedata.plot$C).
obsdata.plot$CNORM <-
  # dose normalised concentrations
p0 <- NULL
p0 <- ggplot(data = obsdata.plot)
p0 <- p0 + geom_point(aes(x = TIME, y = C), colour = "blue")
p0 <- p0 + facet_wrap(~ COMP, ncol = 4)
p0 <- p0 + scale_y_log10("Concentrations (ug/g)\n", labels = scales::comma)
p0 <- p0 + scale_x_continuous("\nTime (mins)")
p0
avedata <- subdata.av[c("TIME", "TADNOM", "DOSEMGKG", "DOSEMG", "WT", "PLA", "BRA",
  "LVR", "MSC", "HRT", "SPL", "LUN", "KID")]
names(avedata)[names(avedata) == "PLA"] <- "PA"
names(avedata)[names(avedata) == "SPL"] <- "SPR"
names(avedata)[names(avedata) == "LUN"] <- "ART"
avedata$SPS <- avedata$SPR
avedata.plot <- melt(avedata,
  c("TIME", "TADNOM", "DOSEMGKG", "DOSEMG", "WT"),
  variable.name = "COMP"
)
# Dose-normalised concentrations (dose converted from mg via *10^3).
avedata.plot$C <- avedata.plot$value/(avedata.plot$DOSEMG*10^3)
p1 <- NULL
p1 <- ggplot(data = avedata.plot)
p1 <- p1 + geom_point(aes(x = TIME, y = C), colour = "blue")
p1 <- p1 + facet_wrap(~ COMP, ncol = 4)
p1 <- p1 + scale_y_log10("Concentrations (ug/g)\n", labels = scales::comma)
p1 <- p1 + scale_x_continuous("\nTime (mins)")
p1
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Plot Simulated Data
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Determine number of model compartments
init.str <- names(brown.mod@init)
init.n <- length(init.str)
# Combine amounts (first init.n output columns) and concentrations (next
# init.n columns) into one long data frame for plotting.
simdata.plot <- cbind(
  melt(simdata[1:(2+init.n)], c("ID", "time"), variable.name = "tissue"),
  melt(simdata[c(1:2, (init.n+3):(2+init.n*2))], c("ID", "time"))[-(1:3)]
)
names(simdata.plot) <- c("ID", "TIME", "COMP", "A", "C")
simdata.plot$COMP <- as.factor(simdata.plot$COMP)
levels(simdata.plot$COMP) <- c(
  toupper(substr(init.str, 2, nchar(init.str)))
)
simdata.plot <- simdata.plot[simdata.plot$TIME != 0, ]
p2 <- NULL
p2 <- ggplot(data = simdata.plot[!simdata.plot$COMP %in% c("PO", "GUT", "TUBF", "TUBC", "BOD"),])
p2 <- p2 + geom_line(aes(x = TIME, y = C), colour = "blue")
p2 <- p2 + facet_wrap(~ COMP, ncol = 4)
p2 <- p2 + scale_y_log10("Concentrations (mg/L)\n", labels = scales::comma) #scale_y_log10("Concentrations (mg/mL)\n")
p2 <- p2 + scale_x_continuous("\nTime (mins)")
p2
p3 <- NULL
p3 <- ggplot(data = simdata.plot[!simdata.plot$COMP %in% c("PO", "GUT", "TUBF", "TUBC", "BOD"),])
p3 <- p3 + geom_line(aes(x = TIME, y = C), colour = "blue")
p3 <- p3 + geom_point(aes(x = TIME, y = C), data = avedata.plot, colour = "red", alpha = 0.2)
p3 <- p3 + facet_wrap(~ COMP, ncol = 4)
p3 <- p3 + scale_y_log10("Concentrations (mg/L)\n", labels = scales::comma) #scale_y_log10("Concentrations (mg/mL)\n")
p3 <- p3 + scale_x_continuous("\nTime (mins)", lim = c(0, 100))
p3
|
/rscripts/run_model_new.R
|
no_license
|
jhhughes256/len_pbpk
|
R
| false
| false
| 6,025
|
r
|
# R script for running models
# -----------------------------------------------------------------------------
# Ready workspace
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Identify git directory and remove previous objects if not already done
if (!exists("git.dir")) {
  rm(list=ls(all=TRUE))
  graphics.off()
  git.dir <- "E:/Hughes/Git"
  # git.dir <- "C:/Users/Jim Hughes/Documents/GitRepos"
  #git.dir <- "C:/Users/hugjh001/Documents"
  reponame <- "len_pbpk"
  scriptname <- "run_model.R"
}
# Set working directory
master.dir <- paste(git.dir, reponame, sep = "/")
setwd(master.dir)
# Source observed data
# (presumably data_newiv.R defines the subdata / subdata.av objects used
# below -- confirm against that script)
source(paste(git.dir, reponame, "rscripts",
  "data_newiv.R", sep = "/"), verbose = F)
# Load package libraries
library(mrgsolve)
library(reshape2)
library(ggplot2)
# Define a custom ggplot2 theme
theme_bw2 <- theme_set(theme_bw(base_size = 16))
theme_bw2 <- theme_update(axis.text.x = element_text(angle = 35, hjust = 0.8))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Simulation Settings
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Set number of individuals that make up the 95% prediction intervals
n <- 1
# 95% prediction interval functions - calculate the 2.5th and 97.5th percentiles
CI95lo <- function(x) quantile(x, probs = 0.025)
CI95hi <- function(x) quantile(x, probs = 0.975)
# 90% prediction interval functions - calculate the 5th and 95th percentiles
CI90lo <- function(x) quantile(x, probs = 0.05)
CI90hi <- function(x) quantile(x, probs = 0.95)
# Set seed for reproducible numbers
set.seed(123456)
TIME <- seq(from = 0, to = 500, by = 0.2)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Simulation
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Source the model
source(paste(master.dir, "models", "mouse_brown.R", sep = "/"))
# Simulate concentration-time profiles for the population
ID <- 1:n
ID2 <- sort(c(rep(ID, times = length(TIME))))
times <- rep(TIME, times = length(ID))
# Observation records: one row per ID per time point, no dosing (evid = 0).
input.simdata <- data.frame(
  ID = ID2,
  time = times,
  amt = 0,
  evid = 0,
  rate = 0,
  cmt = 1
)
# Add a dosing record at time 0 (amt = 1, infusion rate = 60) to compartment 1.
dose.times <- 0
dosedata <- input.simdata[input.simdata$time %in% dose.times, ]
dosedata$amt <- 1
dosedata$evid <- 1
dosedata$rate <- 60
dosedata$cmt <- 1
input.simdata <- rbind(input.simdata, dosedata)
input.simdata <- input.simdata[with(input.simdata, order(ID, time)), ]
simdata <- as.data.frame(mrgsim(
  data_set(brown.mod, input.simdata)
)) # mrgsim
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Plot Observed Data
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
obsdata <- subdata[c("ID", "TIME", "TADNOM", "DOSEMGKG", "DOSEMG", "WT", "PLA", "BRA",
  "LVR", "MSC", "HRT", "SPL", "LUN", "KID")]
# Rename observed tissues to match the simulation compartment names.
names(obsdata)[names(obsdata) == "PLA"] <- "PA"
names(obsdata)[names(obsdata) == "SPL"] <- "SPR"
names(obsdata)[names(obsdata) == "LUN"] <- "ART"
obsdata.plot <- melt(obsdata,
  c("ID", "TIME", "TADNOM", "DOSEMGKG", "DOSEMG", "WT"),
  variable.name = "COMP", value.name = "C"
)
# NOTE(review): the assignment below is incomplete -- R keeps parsing past the
# comment, so this actually evaluates as `obsdata.plot$CNORM <- p0 <- NULL`,
# leaving CNORM unset and silently consuming the `p0 <- NULL` statement.
# Fill in the intended dose-normalisation expression (cf. avedata.plot$C).
obsdata.plot$CNORM <-
  # dose normalised concentrations
p0 <- NULL
p0 <- ggplot(data = obsdata.plot)
p0 <- p0 + geom_point(aes(x = TIME, y = C), colour = "blue")
p0 <- p0 + facet_wrap(~ COMP, ncol = 4)
p0 <- p0 + scale_y_log10("Concentrations (ug/g)\n", labels = scales::comma)
p0 <- p0 + scale_x_continuous("\nTime (mins)")
p0
avedata <- subdata.av[c("TIME", "TADNOM", "DOSEMGKG", "DOSEMG", "WT", "PLA", "BRA",
  "LVR", "MSC", "HRT", "SPL", "LUN", "KID")]
names(avedata)[names(avedata) == "PLA"] <- "PA"
names(avedata)[names(avedata) == "SPL"] <- "SPR"
names(avedata)[names(avedata) == "LUN"] <- "ART"
avedata$SPS <- avedata$SPR
avedata.plot <- melt(avedata,
  c("TIME", "TADNOM", "DOSEMGKG", "DOSEMG", "WT"),
  variable.name = "COMP"
)
# Dose-normalised concentrations (dose converted from mg via *10^3).
avedata.plot$C <- avedata.plot$value/(avedata.plot$DOSEMG*10^3)
p1 <- NULL
p1 <- ggplot(data = avedata.plot)
p1 <- p1 + geom_point(aes(x = TIME, y = C), colour = "blue")
p1 <- p1 + facet_wrap(~ COMP, ncol = 4)
p1 <- p1 + scale_y_log10("Concentrations (ug/g)\n", labels = scales::comma)
p1 <- p1 + scale_x_continuous("\nTime (mins)")
p1
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Plot Simulated Data
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Determine number of model compartments
init.str <- names(brown.mod@init)
init.n <- length(init.str)
# Combine amounts (first init.n output columns) and concentrations (next
# init.n columns) into one long data frame for plotting.
simdata.plot <- cbind(
  melt(simdata[1:(2+init.n)], c("ID", "time"), variable.name = "tissue"),
  melt(simdata[c(1:2, (init.n+3):(2+init.n*2))], c("ID", "time"))[-(1:3)]
)
names(simdata.plot) <- c("ID", "TIME", "COMP", "A", "C")
simdata.plot$COMP <- as.factor(simdata.plot$COMP)
levels(simdata.plot$COMP) <- c(
  toupper(substr(init.str, 2, nchar(init.str)))
)
simdata.plot <- simdata.plot[simdata.plot$TIME != 0, ]
p2 <- NULL
p2 <- ggplot(data = simdata.plot[!simdata.plot$COMP %in% c("PO", "GUT", "TUBF", "TUBC", "BOD"),])
p2 <- p2 + geom_line(aes(x = TIME, y = C), colour = "blue")
p2 <- p2 + facet_wrap(~ COMP, ncol = 4)
p2 <- p2 + scale_y_log10("Concentrations (mg/L)\n", labels = scales::comma) #scale_y_log10("Concentrations (mg/mL)\n")
p2 <- p2 + scale_x_continuous("\nTime (mins)")
p2
p3 <- NULL
p3 <- ggplot(data = simdata.plot[!simdata.plot$COMP %in% c("PO", "GUT", "TUBF", "TUBC", "BOD"),])
p3 <- p3 + geom_line(aes(x = TIME, y = C), colour = "blue")
p3 <- p3 + geom_point(aes(x = TIME, y = C), data = avedata.plot, colour = "red", alpha = 0.2)
p3 <- p3 + facet_wrap(~ COMP, ncol = 4)
p3 <- p3 + scale_y_log10("Concentrations (mg/L)\n", labels = scales::comma) #scale_y_log10("Concentrations (mg/mL)\n")
p3 <- p3 + scale_x_continuous("\nTime (mins)", lim = c(0, 100))
p3
|
# knitr chunk defaults for the assignment: show R prompts, keep code as typed
# (no tidying), and drop the leading "##" comment marker from chunk output.
knitr::opts_chunk$set (prompt=TRUE, tidy=FALSE, comment="")
# Disable markdown syntax highlighting in the rendered HTML output.
options (markdown.HTML.options =
         setdiff(markdown::markdownHTMLOptions(default=TRUE),"highlight_code"))
|
/CSC121/assignment_4/options.r
|
no_license
|
chenjuntu/Projects
|
R
| false
| false
| 175
|
r
|
# knitr chunk defaults for the assignment: show R prompts, keep code as typed
# (no tidying), and drop the leading "##" comment marker from chunk output.
knitr::opts_chunk$set (prompt=TRUE, tidy=FALSE, comment="")
# Disable markdown syntax highlighting in the rendered HTML output.
options (markdown.HTML.options =
         setdiff(markdown::markdownHTMLOptions(default=TRUE),"highlight_code"))
|
#' fset.transform
#'
#' @description Apply a pre-estimated transformation (e.g. an estimated
#'   logicle transform) to every frame of a compensated flow set, printing
#'   each frame's file name ($FIL keyword) as it is processed.
#' @param compensated.set a flowSet whose frames have already been
#'   compensated (assumed -- confirm with callers).
#' @param elgcl the transformation object passed to \code{transform()} for
#'   each frame (presumably an estimated logicle transformList -- confirm).
#' @return the result of \code{fsApply}: the set with every frame transformed.
#' @export
#'
#' @examples
#' # transformed <- fset.transform(my_compensated_set, my_logicle)
fset.transform <- function(compensated.set, elgcl) {
  ## transform fcs data using ^ estimated logicile
  fsApply(compensated.set, function(frame) {
    print(paste0("transforming ", frame@description$`$FIL`))
    frame_trans <- transform(frame, elgcl)
    frame_trans
  })
}
|
/R/fset.transform.R
|
no_license
|
sportiellomike/spookyflow
|
R
| false
| false
| 362
|
r
|
#' fset.transform
#'
#' @description Apply a pre-estimated transformation (e.g. an estimated
#'   logicle transform) to every frame of a compensated flow set, printing
#'   each frame's file name ($FIL keyword) as it is processed.
#' @param compensated.set a flowSet whose frames have already been
#'   compensated (assumed -- confirm with callers).
#' @param elgcl the transformation object passed to \code{transform()} for
#'   each frame (presumably an estimated logicle transformList -- confirm).
#' @return the result of \code{fsApply}: the set with every frame transformed.
#' @export
#'
#' @examples
#' # transformed <- fset.transform(my_compensated_set, my_logicle)
fset.transform <- function(compensated.set, elgcl) {
  ## transform fcs data using ^ estimated logicile
  fsApply(compensated.set, function(frame) {
    print(paste0("transforming ", frame@description$`$FIL`))
    frame_trans <- transform(frame, elgcl)
    frame_trans
  })
}
|
# Driver script for an RMAS simulation study: configures an operating model
# (OM), generates data, runs one or more estimation models (EMs), and plots
# comparisons. NOTE(review): the hard-coded user paths below make this script
# machine-specific -- consider here::here() or relative paths.
# remotes::install_github("nmfs-fish-tools/RMAS")
# library(RMAS)
rmas_dir <- "C:/Users/bai.li/Documents/Github/RMAS-master/src/"
devtools::load_all(rmas_dir)
setwd("C:/Users/bai.li/Documents/Github/githubactiontest/")
devtools::load_all()
## Need to install packages below:
## ASAPplots, r4ss, readxl, RMAS
maindir <- "C:/Users/bai.li/Documents/Github/githubactiontest/example"
om_sim_num <- 160 # total number of iterations per case
keep_sim_num <- 100 # number of kept iterations per case
figure_number <- 10 # number of individual iteration to plot
seed_num <- 9924
year <- 1:30
logf_sd <- 0.2
f_dev_change <- FALSE
f_pattern <- 1
f_min <- 0.01
f_max <- 0.39
logR_sd <- 0.2
r_dev_change <- TRUE
em_bias_cor <- FALSE
#### Life-history parameters ####
ages=1:12 #Age structure of the popn
R0=1000000 #Average annual unfished recruitment (scales the popn)
h=0.75 #Steepness of the Beverton-Holt spawner-recruit relationship.
M=0.2 #Age-invariant natural mortality
Linf=800 #Asymptotic average length
K=0.18 #Growth coefficient
a0=-1.36 #Theoretical age at size 0
a.lw=0.000000025 #Length-weight coefficient
b.lw=3.0 #Length-weight exponent
A50.mat=2.25 #Age at 50% maturity
slope.mat=3 #Slope of maturity ogive
pattern.mat=1 #Simple logistic maturity
female.proportion=0.5 #Sex ratio
#### Fleet settings ####
fleet_num <- 1
#CV of landings for OM
cv.L <- list()
cv.L$fleet1 <- 0.005
#Input CV of landings for EMs
input.cv.L <- list()
input.cv.L$fleet1 <- 0.01
#Annual sample size (nfish) of age comp samples
n.L <- list()
n.L$fleet1 <- 200
#Define fleet selectivity
sel_fleet <- list()
sel_fleet$fleet1$pattern <- 1
sel_fleet$fleet1$A50.sel <- 2
sel_fleet$fleet1$slope.sel <- 1
#### Survey settings ####
survey_num <- 1
#CV of surveys for OM
cv.survey <- list()
cv.survey$survey1 <- 0.1
#cv.survey$survey2 <- 0.1
#Input CV of surveys for EMs
input.cv.survey <- list()
input.cv.survey$survey1 <- 0.2
#input.cv.survey$survey2 <- 0.2
#Annual sample size (nfish) of age comp samples
n.survey <- list()
n.survey$survey1 <- 200
#n.survey$survey2 <- 200
#Define survey selectivity
sel_survey <- list()
sel_survey$survey1$pattern <- 1
sel_survey$survey1$A50.sel <- 1.5
sel_survey$survey1$slope.sel <- 2
#sel_survey$survey2$pattern <- 1
#sel_survey$survey2$A50.sel <- 1.5
#sel_survey$survey2$slope.sel <- 2
#### Base Case ####
#### Run OM
# run_om / run_em / generate_plot are provided by the devtools::load_all()
# calls above; the settings variables are read from the global environment.
run_om(maindir=maindir)
#### Run EMs
# run_em(run_em_names=c("AMAK", "ASAP"))
# run_em(run_em_names=c("BAM"))
# run_em(run_em_names=c("SS"))
run_em(run_em_names=c("MAS"))
#### Plot comparison outputs
# generate_plot(em_names = c("MAS"), plot_ncol=1, plot_nrow=1, plot_color = c("orange"))
# generate_plot(em_names = c("AMAK", "ASAP", "BAM", "SS"), plot_ncol=2, plot_nrow=2, plot_color = c("orange", "green", "red", "deepskyblue3"))
#
# generate_plot(em_names = c("AMAK", "ASAP", "BAM"), plot_ncol=3, plot_nrow=1, plot_color = c("orange", "green", "red"))
#### Case 1 ####
# logR_sd <- 0.4
# run_om(maindir=maindir)
# run_em(run_em_names=c("MAS"))
# generate_plot(em_names = c("MAS"), plot_ncol=1, plot_nrow=1, plot_color = c("orange"))
|
/example/example.R
|
no_license
|
msupernaw/githubactiontest
|
R
| false
| false
| 3,156
|
r
|
# remotes::install_github("nmfs-fish-tools/RMAS")
# library(RMAS)
# NOTE(review): hard-coded absolute paths tie this script to one machine;
# consider relative paths or here::here() — TODO confirm with repo layout.
rmas_dir <- "C:/Users/bai.li/Documents/Github/RMAS-master/src/"
devtools::load_all(rmas_dir)
# NOTE(review): setwd() in a script is discouraged (tidyverse style); it
# changes global state for the whole session.  Left unchanged here.
setwd("C:/Users/bai.li/Documents/Github/githubactiontest/")
devtools::load_all()
## Need to install packages below:
## ASAPplots, r4ss, readxl, RMAS

#### Run settings ####
# NOTE(review): hard-coded absolute Windows path makes the script
# machine-specific; consider a relative path or here::here() — TODO confirm.
maindir <- "C:/Users/bai.li/Documents/Github/githubactiontest/example"
om_sim_num <- 160   # total number of iterations per case
keep_sim_num <- 100 # number of kept iterations per case
figure_number <- 10 # number of individual iteration to plot
seed_num <- 9924    # RNG seed (presumably consumed inside run_om — TODO confirm)
year <- 1:30        # simulated years

#### Fishing-mortality / recruitment deviation settings ####
logf_sd <- 0.2        # SD of log-F deviations
f_dev_change <- FALSE # F deviations held fixed across cases
f_pattern <- 1        # F time-series pattern id
f_min <- 0.01         # minimum fishing mortality
f_max <- 0.39         # maximum fishing mortality
logR_sd <- 0.2        # SD of log-recruitment deviations
r_dev_change <- TRUE  # recruitment deviations vary across cases
em_bias_cor <- FALSE  # no bias correction in the estimation models

#### Life-history parameters ####
# Style fix: `<-` used for assignment (was `=`), per tidyverse style guide.
ages <- 1:12              # Age structure of the popn
R0 <- 1000000             # Average annual unfished recruitment (scales the popn)
h <- 0.75                 # Steepness of the Beverton-Holt spawner-recruit relationship.
M <- 0.2                  # Age-invariant natural mortality
Linf <- 800               # Asymptotic average length
K <- 0.18                 # Growth coefficient
a0 <- -1.36               # Theoretical age at size 0
a.lw <- 0.000000025       # Length-weight coefficient
b.lw <- 3.0               # Length-weight exponent
A50.mat <- 2.25           # Age at 50% maturity
slope.mat <- 3            # Slope of maturity ogive
pattern.mat <- 1          # Simple logistic maturity
female.proportion <- 0.5  # Sex ratio

#### Fleet settings ####
fleet_num <- 1
# CV of landings for OM
cv.L <- list()
cv.L$fleet1 <- 0.005
# Input CV of landings for EMs
input.cv.L <- list()
input.cv.L$fleet1 <- 0.01
# Annual sample size (nfish) of age comp samples
n.L <- list()
n.L$fleet1 <- 200
# Define fleet selectivity
sel_fleet <- list()
sel_fleet$fleet1$pattern <- 1
sel_fleet$fleet1$A50.sel <- 2
sel_fleet$fleet1$slope.sel <- 1

#### Survey settings ####
survey_num <- 1
# CV of surveys for OM
cv.survey <- list()
cv.survey$survey1 <- 0.1
#cv.survey$survey2 <- 0.1
# Input CV of surveys for EMs
input.cv.survey <- list()
input.cv.survey$survey1 <- 0.2
#input.cv.survey$survey2 <- 0.2
# Annual sample size (nfish) of age comp samples
n.survey <- list()
n.survey$survey1 <- 200
#n.survey$survey2 <- 200
# Define survey selectivity
sel_survey <- list()
sel_survey$survey1$pattern <- 1
sel_survey$survey1$A50.sel <- 1.5
sel_survey$survey1$slope.sel <- 2
#sel_survey$survey2$pattern <- 1
#sel_survey$survey2$A50.sel <- 1.5
#sel_survey$survey2$slope.sel <- 2

#### Base Case ####
#### Run OM
run_om(maindir = maindir)
#### Run EMs
# run_em(run_em_names=c("AMAK", "ASAP"))
# run_em(run_em_names=c("BAM"))
# run_em(run_em_names=c("SS"))
run_em(run_em_names = c("MAS"))
#### Plot comparison outputs
# generate_plot(em_names = c("MAS"), plot_ncol=1, plot_nrow=1, plot_color = c("orange"))
# generate_plot(em_names = c("AMAK", "ASAP", "BAM", "SS"), plot_ncol=2, plot_nrow=2, plot_color = c("orange", "green", "red", "deepskyblue3"))
#
# generate_plot(em_names = c("AMAK", "ASAP", "BAM"), plot_ncol=3, plot_nrow=1, plot_color = c("orange", "green", "red"))
#### Case 1 ####
# logR_sd <- 0.4
# run_om(maindir=maindir)
# run_em(run_em_names=c("MAS"))
# generate_plot(em_names = c("MAS"), plot_ncol=1, plot_nrow=1, plot_color = c("orange"))
|
context("rgee: Operators test")
skip_if_no_pypkg()
# -------------------------------------------------------------------------
test_that("Arithmetic Operator", {
  one <- ee$Image(1)
  origin <- ee$Geometry$Point(0, 0)
  # `+` dispatches to Image$add
  expect_equal(one + one, one$add(one))
  # `-` (binary) dispatches to Image$subtract
  expect_equal(one - one, one$subtract(one))
  # unary minus: 1 -> -1
  expect_equal(ee_extract(-one, origin)$constant, -1)
  # scalar multiplication
  expect_equal(ee_extract(2 * one, origin)$constant, 2)
  # exponentiation
  expect_equal(ee_extract(2 ** one, origin)$constant, 2)
  # modulo
  expect_equal(ee_extract(one %% 3, origin)$constant, 1)
  # integer division
  expect_equal(ee_extract(one %/% 2, origin)$constant, 0)
  # division
  expect_equal(ee_extract(one / 2, origin)$constant, 0.5)
})
test_that("Logic Operator", {
  zero <- ee$Image(0)
  origin <- ee$Geometry$Point(0, 0)
  # logical negation: !0 -> 1
  expect_equal(ee_extract(!zero, origin)$constant, 1)
  # conjunction with a scalar
  expect_equal(ee_extract(zero & TRUE, origin)$constant, 0)
  # disjunction with a scalar
  expect_equal(ee_extract(1 | zero, origin)$constant, 1)
  # equality
  expect_equal(ee_extract(1 == zero, origin)$constant, 0)
  # inequality
  expect_equal(ee_extract(1 != zero, origin)$constant, 1)
  # less-than
  expect_equal(ee_extract(10 < zero, origin)$constant, 0)
  # less-than-or-equal
  expect_equal(ee_extract(0 <= zero, origin)$constant, 1)
  # greater-than
  expect_equal(ee_extract(10 > zero, origin)$constant, 1)
  # greater-than-or-equal
  expect_equal(ee_extract(zero >= 0, origin)$constant, 1)
})
# Checks that R's Math/Math2 group generics dispatch to the matching Earth
# Engine image operations; values are read back with ee_extract() at a fixed
# point and compared against the base-R results.  Kept byte-identical: some
# expectations use testthat's default tolerance and some an explicit 1e-07,
# so a blanket refactor could change test strictness.
test_that("Mathematical functions", {
  ee_geom <- ee$Geometry$Point(0, 0)
  # abs
  expect_equal(ee_extract(abs(ee$Image(-10)), ee_geom)$constant, 10)
  # sign
  expect_equal(ee_extract(sign(ee$Image(-10)), ee_geom)$constant, -1)
  # sqrt
  expect_equal(ee_extract(sqrt(ee$Image(10)), ee_geom)$constant, sqrt(10))
  # ceiling
  expect_equal(ee_extract(ceiling(ee$Image(10.4)), ee_geom)$constant, ceiling(10.4))
  # cumsum — band-wise cumulative sum over a 10-band image
  ee_series <- ee_extract(
    x = cumsum(ee$ImageCollection(lapply(1:10, function(x) ee$Image(x)))$toBands()),
    y = ee_geom
  ) %>% as.numeric()
  expect_equal(ee_series, cumsum(1:10))
  # cumprod — band-wise cumulative product
  ee_series <- ee_extract(
    x = cumprod(ee$ImageCollection(lapply(1:10, function(x) ee$Image(x)))$toBands()),
    y = ee_geom
  ) %>% as.numeric()
  expect_equal(ee_series, cumprod(1:10))
  # log (natural, then with an explicit base)
  expect_equal(ee_extract(log(ee$Image(10)), ee_geom)$constant, log(10))
  expect_equal(
    object = ee_extract(log(ee$Image(10), base = 5), ee_geom)$constant,
    expected = log(10, base = 5),
    tolerance = 1e-07
  )
  # log10
  expect_equal(ee_extract(log10(ee$Image(10)), ee_geom)$constant, log10(10))
  # log1p
  expect_equal(ee_extract(log1p(ee$Image(10)), ee_geom)$constant, log1p(10))
  # log2
  expect_equal(ee_extract(log2(ee$Image(10)), ee_geom)$constant, log2(10))
  # acos
  expect_equal(ee_extract(acos(ee$Image(0.1)), ee_geom)$constant, acos(0.1))
  # floor
  expect_equal(ee_extract(floor(ee$Image(0.1)), ee_geom)$constant, floor(0.1))
  # asin
  expect_equal(
    object = ee_extract(asin(ee$Image(0.1)), ee_geom)$constant,
    expected = asin(0.1),
    tolerance = 1e-07
  )
  # atan
  expect_equal(
    object = ee_extract(atan(ee$Image(0.1)), ee_geom)$constant,
    expected = atan(0.1),
    tolerance = 1e-07
  )
  # exp
  expect_equal(
    object = ee_extract(exp(ee$Image(0.1)), ee_geom)$constant,
    expected = exp(0.1),
    tolerance = 1e-07
  )
  # expm1
  expect_equal(
    object = ee_extract(expm1(ee$Image(0.1)), ee_geom)$constant,
    expected = expm1(0.1),
    tolerance = 1e-07
  )
  # cos
  expect_equal(
    object = ee_extract(cos(ee$Image(0.1)), ee_geom)$constant,
    expected = cos(0.1),
    tolerance = 1e-07
  )
  # cosh
  expect_equal(
    object = ee_extract(cosh(ee$Image(0.1)), ee_geom)$constant,
    expected = cosh(0.1),
    tolerance = 1e-07
  )
  # sin
  expect_equal(
    object = ee_extract(sin(ee$Image(0.1)), ee_geom)$constant,
    expected = sin(0.1),
    tolerance = 1e-07
  )
  # sinh
  expect_equal(
    object = ee_extract(sinh(ee$Image(0.1)), ee_geom)$constant,
    expected = sinh(0.1),
    tolerance = 1e-07
  )
  # tan
  expect_equal(
    object = ee_extract(tan(ee$Image(0.1)), ee_geom)$constant,
    expected = tan(0.1),
    tolerance = 1e-07
  )
  # tanh
  expect_equal(
    object = ee_extract(tanh(ee$Image(0.1)), ee_geom)$constant,
    expected = tanh(0.1),
    tolerance = 1e-07
  )
})
test_that("Summary functions", {
  ee_geom <- ee$Geometry$Point(0, 0)
  img0 <- ee$Image(0)
  img1 <- ee$Image(1)
  img2 <- ee$Image(2)
  img3 <- ee$Image(3)
  # mean of the four constant images: (0 + 1 + 2 + 3) / 4 = 1.5
  expect_equal(ee_extract(mean(img0, img1, img2, img3), ee_geom)$mean, 1.5)
  # max of the four images
  expect_equal(ee_extract(max(img0, img1, img2, img3), ee_geom)$max, 3)
  # min of the four images
  expect_equal(ee_extract(min(img0, img1, img2, img3), ee_geom)$min, 0)
  # range: the mean of c(min, max) = (0 + 3) / 2 = 1.5
  rng_img <- range(img0, img1, img2, img3)
  expect_equal(mean(as.numeric(ee_extract(rng_img, ee_geom))), 1.5)
  # sum: 0 + 1 + 2 + 3 = 6
  expect_equal(ee_extract(sum(img0, img1, img2, img3), ee_geom)$sum, 6)
  # product: any zero factor yields 0
  expect_equal(ee_extract(prod(img0, img1, img2, img3), ee_geom)$product, 0)
})
|
/tests/testthat/test-operators.R
|
permissive
|
kateburrows/rgee
|
R
| false
| false
| 5,533
|
r
|
context("rgee: Operators test")
skip_if_no_pypkg()
# -------------------------------------------------------------------------
test_that("Arithmetic Operator", {
  one <- ee$Image(1)
  origin <- ee$Geometry$Point(0, 0)
  # `+` dispatches to Image$add
  expect_equal(one + one, one$add(one))
  # `-` (binary) dispatches to Image$subtract
  expect_equal(one - one, one$subtract(one))
  # unary minus: 1 -> -1
  expect_equal(ee_extract(-one, origin)$constant, -1)
  # scalar multiplication
  expect_equal(ee_extract(2 * one, origin)$constant, 2)
  # exponentiation
  expect_equal(ee_extract(2 ** one, origin)$constant, 2)
  # modulo
  expect_equal(ee_extract(one %% 3, origin)$constant, 1)
  # integer division
  expect_equal(ee_extract(one %/% 2, origin)$constant, 0)
  # division
  expect_equal(ee_extract(one / 2, origin)$constant, 0.5)
})
test_that("Logic Operator", {
  zero <- ee$Image(0)
  origin <- ee$Geometry$Point(0, 0)
  # logical negation: !0 -> 1
  expect_equal(ee_extract(!zero, origin)$constant, 1)
  # conjunction with a scalar
  expect_equal(ee_extract(zero & TRUE, origin)$constant, 0)
  # disjunction with a scalar
  expect_equal(ee_extract(1 | zero, origin)$constant, 1)
  # equality
  expect_equal(ee_extract(1 == zero, origin)$constant, 0)
  # inequality
  expect_equal(ee_extract(1 != zero, origin)$constant, 1)
  # less-than
  expect_equal(ee_extract(10 < zero, origin)$constant, 0)
  # less-than-or-equal
  expect_equal(ee_extract(0 <= zero, origin)$constant, 1)
  # greater-than
  expect_equal(ee_extract(10 > zero, origin)$constant, 1)
  # greater-than-or-equal
  expect_equal(ee_extract(zero >= 0, origin)$constant, 1)
})
# Checks that R's Math/Math2 group generics dispatch to the matching Earth
# Engine image operations; values are read back with ee_extract() at a fixed
# point and compared against the base-R results.  Kept byte-identical: some
# expectations use testthat's default tolerance and some an explicit 1e-07,
# so a blanket refactor could change test strictness.
test_that("Mathematical functions", {
  ee_geom <- ee$Geometry$Point(0, 0)
  # abs
  expect_equal(ee_extract(abs(ee$Image(-10)), ee_geom)$constant, 10)
  # sign
  expect_equal(ee_extract(sign(ee$Image(-10)), ee_geom)$constant, -1)
  # sqrt
  expect_equal(ee_extract(sqrt(ee$Image(10)), ee_geom)$constant, sqrt(10))
  # ceiling
  expect_equal(ee_extract(ceiling(ee$Image(10.4)), ee_geom)$constant, ceiling(10.4))
  # cumsum — band-wise cumulative sum over a 10-band image
  ee_series <- ee_extract(
    x = cumsum(ee$ImageCollection(lapply(1:10, function(x) ee$Image(x)))$toBands()),
    y = ee_geom
  ) %>% as.numeric()
  expect_equal(ee_series, cumsum(1:10))
  # cumprod — band-wise cumulative product
  ee_series <- ee_extract(
    x = cumprod(ee$ImageCollection(lapply(1:10, function(x) ee$Image(x)))$toBands()),
    y = ee_geom
  ) %>% as.numeric()
  expect_equal(ee_series, cumprod(1:10))
  # log (natural, then with an explicit base)
  expect_equal(ee_extract(log(ee$Image(10)), ee_geom)$constant, log(10))
  expect_equal(
    object = ee_extract(log(ee$Image(10), base = 5), ee_geom)$constant,
    expected = log(10, base = 5),
    tolerance = 1e-07
  )
  # log10
  expect_equal(ee_extract(log10(ee$Image(10)), ee_geom)$constant, log10(10))
  # log1p
  expect_equal(ee_extract(log1p(ee$Image(10)), ee_geom)$constant, log1p(10))
  # log2
  expect_equal(ee_extract(log2(ee$Image(10)), ee_geom)$constant, log2(10))
  # acos
  expect_equal(ee_extract(acos(ee$Image(0.1)), ee_geom)$constant, acos(0.1))
  # floor
  expect_equal(ee_extract(floor(ee$Image(0.1)), ee_geom)$constant, floor(0.1))
  # asin
  expect_equal(
    object = ee_extract(asin(ee$Image(0.1)), ee_geom)$constant,
    expected = asin(0.1),
    tolerance = 1e-07
  )
  # atan
  expect_equal(
    object = ee_extract(atan(ee$Image(0.1)), ee_geom)$constant,
    expected = atan(0.1),
    tolerance = 1e-07
  )
  # exp
  expect_equal(
    object = ee_extract(exp(ee$Image(0.1)), ee_geom)$constant,
    expected = exp(0.1),
    tolerance = 1e-07
  )
  # expm1
  expect_equal(
    object = ee_extract(expm1(ee$Image(0.1)), ee_geom)$constant,
    expected = expm1(0.1),
    tolerance = 1e-07
  )
  # cos
  expect_equal(
    object = ee_extract(cos(ee$Image(0.1)), ee_geom)$constant,
    expected = cos(0.1),
    tolerance = 1e-07
  )
  # cosh
  expect_equal(
    object = ee_extract(cosh(ee$Image(0.1)), ee_geom)$constant,
    expected = cosh(0.1),
    tolerance = 1e-07
  )
  # sin
  expect_equal(
    object = ee_extract(sin(ee$Image(0.1)), ee_geom)$constant,
    expected = sin(0.1),
    tolerance = 1e-07
  )
  # sinh
  expect_equal(
    object = ee_extract(sinh(ee$Image(0.1)), ee_geom)$constant,
    expected = sinh(0.1),
    tolerance = 1e-07
  )
  # tan
  expect_equal(
    object = ee_extract(tan(ee$Image(0.1)), ee_geom)$constant,
    expected = tan(0.1),
    tolerance = 1e-07
  )
  # tanh
  expect_equal(
    object = ee_extract(tanh(ee$Image(0.1)), ee_geom)$constant,
    expected = tanh(0.1),
    tolerance = 1e-07
  )
})
test_that("Summary functions", {
  ee_geom <- ee$Geometry$Point(0, 0)
  img0 <- ee$Image(0)
  img1 <- ee$Image(1)
  img2 <- ee$Image(2)
  img3 <- ee$Image(3)
  # mean of the four constant images: (0 + 1 + 2 + 3) / 4 = 1.5
  expect_equal(ee_extract(mean(img0, img1, img2, img3), ee_geom)$mean, 1.5)
  # max of the four images
  expect_equal(ee_extract(max(img0, img1, img2, img3), ee_geom)$max, 3)
  # min of the four images
  expect_equal(ee_extract(min(img0, img1, img2, img3), ee_geom)$min, 0)
  # range: the mean of c(min, max) = (0 + 3) / 2 = 1.5
  rng_img <- range(img0, img1, img2, img3)
  expect_equal(mean(as.numeric(ee_extract(rng_img, ee_geom))), 1.5)
  # sum: 0 + 1 + 2 + 3 = 6
  expect_equal(ee_extract(sum(img0, img1, img2, img3), ee_geom)$sum, 6)
  # product: any zero factor yields 0
  expect_equal(ee_extract(prod(img0, img1, img2, img3), ee_geom)$product, 0)
})
|
library(shiny)
library(shinyAce)
library(mailR)
library(shinyjs)
library(rdrop2)
# Shiny server for the "Power Calculator for Associations in CER Studies" app:
# serves CSV template downloads, validates user inputs, persists the
# submission to Dropbox, generates the sbatch/R driver files, and sends
# confirmation emails.
shinyServer(
  function(input, output, session) {
    # HANDLERS TO DOWNLOAD TEMPLATES WHEN THE LINK IS CLICKED
    # Fix: write.csv() ignores a `col.names` argument (with the warning
    # "attempt to set 'col.names' ignored"), so it was removed from every
    # call in this file.  The files written are unchanged.
    output$download_cov <- downloadHandler(
      filename = "effects.csv",
      content = function(file) {
        write.csv(
          read.csv("./files/betas.csv"),
          file, na = "",
          row.names = FALSE)})
    output$download_param <- downloadHandler(
      filename = "parameters.csv",
      content = function(file) {
        write.csv(
          read.csv("./files/parameters.csv"),
          file, na = "",
          row.names = FALSE)})
    output$download_catparam <- downloadHandler(
      filename = "categorical_parameters.csv",
      content = function(file) {
        write.csv(
          read.csv("./files/categorical_parameters.csv"),
          file, na = "",
          row.names = FALSE)})
    output$download_pcor <- downloadHandler(
      filename = "pcor.csv",
      content = function(file) {
        write.csv(
          read.csv("./files/pcor.csv"),
          file, na = "",
          row.names = FALSE)})
    output$download_wcor <- downloadHandler(
      filename = "wcor.csv",
      content = function(file) {
        write.csv(
          read.csv("./files/wcor.csv"),
          file, na = "",
          row.names = FALSE)})
    ######################################################
    ########## CONNECT TO DROPBOX TO STORE PERSISTENT FILES ########
    # To set up this for the first time do the following steps:
    # 1 - library(rdrop2)
    # 2 - token <- drop_auth() , this will open an authentication window on the browser
    # 3 - saveRDS(token, "droptoken.rds"), an authentication file will be created that
    #     can be stored locally in the App-1 folder and be used every time the
    #     application needs to authenticate Dropbox.  Read it back with readRDS.
    token <- readRDS("droptoken.rds")
    # Then pass the token to each drop_ function
    drop_acc(dtoken = token)
    ###### authentication completed, helper to upload files to Dropbox #####
    saveData <- function(fileName, outputDir) {
      drop_upload(fileName, dest = outputDir)
    }
    # Validate every required input; returns NULL when all are present,
    # otherwise a shiny validation error (see shiny::validate / need).
    check_input <- function(){
      validate(
        need(input$file6, 'Effects file (Box 3) is missing'),
        need(input$file7, 'Covariates file (Box 2) is missing'),
        #need(input$file1, 'Cat parameters file (Box 2) is missing'),
        need(input$file3, 'PCOR file (Box 2) is missing'),
        need(input$file2, 'WCOR file (Box 2) is missing'),
        need(input$text, 'An email address must be provided'),
        need(input$sig, 'Significance (Box 1) must be specified'),
        need(input$numsu, 'Number of subjects (Box 1) must be specified'),
        need(input$numsi, 'Number of simulations (Box 1) must be specified'),
        need(input$drugs, 'Exposures to be tested (Box 1) must be specified'),
        need(input$numob, 'Number of observations (Box 2) must be specified')
      )
    }
    output$error <- renderText({ NULL })
    # GENERATE SBATCH FILES #############################
    source("runSimulation.R")
    # DO STUFF after "run simulation" button has been clicked
    observeEvent(input$run, {
      # VALIDATE REQUIRED FIELDS
      output$error <- renderPrint({
        check_input()
      })
      if (!is.null(check_input()))
      {
        return(NULL)
      }
      # Name files with current Pacific time
      Sys.setenv(TZ="America/Los_Angeles")
      outputDir <- format(Sys.time(), "%m_%d_%Y__%H_%M_%S")
      print(outputDir)
      ##### SAVE FILES SUBMITTED BY THE USER ################
      withProgress(message = 'Saving simulation', value = 1, {
        # Read betas
        betas <- input$file6
        write.csv(read.csv(betas$datapath), "betas.csv", na = "",
                  row.names = FALSE)
        saveData("betas.csv", paste('Sim Data/', outputDir, sep = ""))
        # Read normal covariates
        cov <- input$file7
        # re-organize data: static.binary rows first
        param <- read.csv(cov$datapath)
        param <- rbind(param[param$type=="static.binary",], param[param$type!="static.binary",])
        # Add needed rows and columns
        param$name <- as.character(param$name)
        param$type <- as.character(param$type)
        param <- rbind(c("id", "id", NA, NA, NA, NA, NA), param) # adding id row
        # adding XX.var columns (variance = SD^2)
        param$across.SD <- as.numeric(param$across.SD)
        param$across.var <- param$across.SD**2
        param$within.sd <- as.numeric(param$within.sd)
        param$within.var <- param$within.sd**2
        # add categorical variables if any
        cat_cov <- input$file1
        if (!is.null(cat_cov))
        {
          cat_param <- read.csv(cat_cov$datapath)
          levels <- as.factor(cat_param$level)
          for(i in levels(levels)) #add cat names
          {
            param <- rbind(param, c(i, "cat.static", NA, NA, NA, NA, NA, NA, NA))
          }
          write.csv(read.csv(cat_cov$datapath), "categorical_parameters.csv", na = "",
                    row.names = FALSE)
          saveData("categorical_parameters.csv", paste('Sim Data/', outputDir, sep = ""))
        }
        write.csv(param, "parameters.csv", na = "",
                  row.names = FALSE)
        saveData("parameters.csv", paste('Sim Data/', outputDir, sep = ""))
        # Read pcor
        pcor <- input$file3
        write.csv(read.csv(pcor$datapath), "pcor.csv", na = "",
                  row.names = FALSE)
        saveData("pcor.csv", paste('Sim Data/', outputDir, sep = ""))
        # Read wcor
        wcor <- input$file2
        write.csv(read.csv(wcor$datapath), "wcor.csv", na = "",
                  row.names = FALSE)
        saveData("wcor.csv", paste('Sim Data/', outputDir, sep = ""))
        drop_create(paste('Sim Data/', outputDir, "/sbatch_files", sep = ""))
        drop_create(paste('Sim Data/', outputDir, "/datasets", sep = ""))
        # Read optional survival file
        surv <- input$file5
        if (!is.null(surv)) {
          write.csv(read.csv(surv$datapath), "survival.csv", na = "",
                    row.names = FALSE)
          saveData("survival.csv", paste('Sim Data/', outputDir, sep = ""))
          surv <- paste("./survival.csv", sep = "")
        }
        # Read optional censoring file
        cens <- input$file4
        if (!is.null(cens)) {
          write.csv(read.csv(cens$datapath), "censoring.csv", na = "",
                    row.names = FALSE)
          saveData("censoring.csv", paste('Sim Data/', outputDir, sep = ""))
          cens <- paste("./censoring.csv", sep = "")
        }
        # WRITE FILE WITH SIMULATION USER INPUT
        drugs <- gsub(",", "-", input$drugs) # replace commas by hyphen
        drugs <- gsub(" ", "", drugs) # remove space
        df <- NULL
        df <- rbind(df, c(input$sig, input$numsu, input$numob, input$text, input$numsi, drugs))
        colnames(df) <- c('sign', 'subj', 'obs', 'email', 'no.sims', 'exp.test')
        write.csv(df, "input_params.csv", row.names = FALSE)
        saveData("input_params.csv", paste('Sim Data/', outputDir, sep = ""))
        path = "./"
        runfile_path = paste(path, "runCovs.sbatch", sep="")
        # NOTE(review): runfile_path is a paste() result and can never be NA,
        # so this guard is always TRUE; kept for backward compatibility.
        if (!is.na(runfile_path)) {
          outfile_lines <- paste(genSbatchCov(input$numsi, input$numsu, input$numob, paste("./betas.csv", sep = ""), surv, cens, outputDir))
          cat(paste0(outfile_lines, collapse = "\n"), file = runfile_path)
        }
        saveData("runCovs.sbatch", paste('Sim Data/', outputDir, sep = ""))
        path = "./"
        runfile_path = paste(path, "open_this_file_first.R", sep="")
        if (!is.na(runfile_path)) {
          outfile_lines <- paste(genRfile(outputDir))
          cat(paste0(outfile_lines, collapse = "\n"), file = runfile_path)
        }
        saveData("open_this_file_first.R", paste('Sim Data/', outputDir, sep = ""))
      })
      # SECURITY: the SMTP password was hard-coded in the sources.  It is now
      # read from the SMTP_PASSWD environment variable, falling back to the
      # original value so existing deployments keep working.  Rotate this
      # credential and remove the fallback as soon as possible.
      smtp_passwd <- Sys.getenv("SMTP_PASSWD", "@@pcfaicerS")
      sender <- "qsu.cer.pcori@gmail.com"
      recipients <- input$text
      send.mail(from = sender,
                to = recipients,
                subject="CER simulation submitted",
                body = "We received your simulation parameters. Your simulation should start
running shortly and you should receive a file with results as soon as it completes",
                smtp = list(host.name = "smtp.gmail.com", port = 465,
                            user.name="qsu.cer.pcori@gmail.com", passwd=smtp_passwd, ssl=TRUE),
                authenticate = TRUE,
                send = TRUE)
      sender <- "qsu.cer.pcori@gmail.com"
      recipients <- "qsu.cer.pcori@gmail.com"
      send.mail(from = sender,
                to = recipients,
                subject="New simulation",
                body = "A simulation was just submitted. Please check Dropbox",
                smtp = list(host.name = "smtp.gmail.com", port = 465,
                            user.name="qsu.cer.pcori@gmail.com", passwd=smtp_passwd, ssl=TRUE),
                authenticate = TRUE,
                send = TRUE)
      # Replace the main panel with a thank-you message (shinyjs::text).
      text("main", "<div style=\"height:900px;\"><img src = \"logo_all.png\" /><div id=\"header\">
<h2>\"Power Calculator for Associations in Comparative Effectiveness Research Studies\"</h2>
</div><div class=\"centered\"><table class=\"thanks_table\"><tr><td><b>Thank you!</b></td></tr><tr><td>You have successfully submitted your simulation. <br>You should receive a confirmation email
shortly!</td></tr></table></div></div>")
    })
  }
)
|
/App-1/server.R
|
no_license
|
qsuProjects/PowerApp
|
R
| false
| false
| 10,497
|
r
|
library(shiny)
library(shinyAce)
library(mailR)
library(shinyjs)
library(rdrop2)
# Shiny server for the "Power Calculator for Associations in CER Studies" app:
# serves CSV template downloads, validates user inputs, persists the
# submission to Dropbox, generates the sbatch/R driver files, and sends
# confirmation emails.
shinyServer(
  function(input, output, session) {
    # HANDLERS TO DOWNLOAD TEMPLATES WHEN THE LINK IS CLICKED
    # Fix: write.csv() ignores a `col.names` argument (with the warning
    # "attempt to set 'col.names' ignored"), so it was removed from every
    # call in this file.  The files written are unchanged.
    output$download_cov <- downloadHandler(
      filename = "effects.csv",
      content = function(file) {
        write.csv(
          read.csv("./files/betas.csv"),
          file, na = "",
          row.names = FALSE)})
    output$download_param <- downloadHandler(
      filename = "parameters.csv",
      content = function(file) {
        write.csv(
          read.csv("./files/parameters.csv"),
          file, na = "",
          row.names = FALSE)})
    output$download_catparam <- downloadHandler(
      filename = "categorical_parameters.csv",
      content = function(file) {
        write.csv(
          read.csv("./files/categorical_parameters.csv"),
          file, na = "",
          row.names = FALSE)})
    output$download_pcor <- downloadHandler(
      filename = "pcor.csv",
      content = function(file) {
        write.csv(
          read.csv("./files/pcor.csv"),
          file, na = "",
          row.names = FALSE)})
    output$download_wcor <- downloadHandler(
      filename = "wcor.csv",
      content = function(file) {
        write.csv(
          read.csv("./files/wcor.csv"),
          file, na = "",
          row.names = FALSE)})
    ######################################################
    ########## CONNECT TO DROPBOX TO STORE PERSISTENT FILES ########
    # To set up this for the first time do the following steps:
    # 1 - library(rdrop2)
    # 2 - token <- drop_auth() , this will open an authentication window on the browser
    # 3 - saveRDS(token, "droptoken.rds"), an authentication file will be created that
    #     can be stored locally in the App-1 folder and be used every time the
    #     application needs to authenticate Dropbox.  Read it back with readRDS.
    token <- readRDS("droptoken.rds")
    # Then pass the token to each drop_ function
    drop_acc(dtoken = token)
    ###### authentication completed, helper to upload files to Dropbox #####
    saveData <- function(fileName, outputDir) {
      drop_upload(fileName, dest = outputDir)
    }
    # Validate every required input; returns NULL when all are present,
    # otherwise a shiny validation error (see shiny::validate / need).
    check_input <- function(){
      validate(
        need(input$file6, 'Effects file (Box 3) is missing'),
        need(input$file7, 'Covariates file (Box 2) is missing'),
        #need(input$file1, 'Cat parameters file (Box 2) is missing'),
        need(input$file3, 'PCOR file (Box 2) is missing'),
        need(input$file2, 'WCOR file (Box 2) is missing'),
        need(input$text, 'An email address must be provided'),
        need(input$sig, 'Significance (Box 1) must be specified'),
        need(input$numsu, 'Number of subjects (Box 1) must be specified'),
        need(input$numsi, 'Number of simulations (Box 1) must be specified'),
        need(input$drugs, 'Exposures to be tested (Box 1) must be specified'),
        need(input$numob, 'Number of observations (Box 2) must be specified')
      )
    }
    output$error <- renderText({ NULL })
    # GENERATE SBATCH FILES #############################
    source("runSimulation.R")
    # DO STUFF after "run simulation" button has been clicked
    observeEvent(input$run, {
      # VALIDATE REQUIRED FIELDS
      output$error <- renderPrint({
        check_input()
      })
      if (!is.null(check_input()))
      {
        return(NULL)
      }
      # Name files with current Pacific time
      Sys.setenv(TZ="America/Los_Angeles")
      outputDir <- format(Sys.time(), "%m_%d_%Y__%H_%M_%S")
      print(outputDir)
      ##### SAVE FILES SUBMITTED BY THE USER ################
      withProgress(message = 'Saving simulation', value = 1, {
        # Read betas
        betas <- input$file6
        write.csv(read.csv(betas$datapath), "betas.csv", na = "",
                  row.names = FALSE)
        saveData("betas.csv", paste('Sim Data/', outputDir, sep = ""))
        # Read normal covariates
        cov <- input$file7
        # re-organize data: static.binary rows first
        param <- read.csv(cov$datapath)
        param <- rbind(param[param$type=="static.binary",], param[param$type!="static.binary",])
        # Add needed rows and columns
        param$name <- as.character(param$name)
        param$type <- as.character(param$type)
        param <- rbind(c("id", "id", NA, NA, NA, NA, NA), param) # adding id row
        # adding XX.var columns (variance = SD^2)
        param$across.SD <- as.numeric(param$across.SD)
        param$across.var <- param$across.SD**2
        param$within.sd <- as.numeric(param$within.sd)
        param$within.var <- param$within.sd**2
        # add categorical variables if any
        cat_cov <- input$file1
        if (!is.null(cat_cov))
        {
          cat_param <- read.csv(cat_cov$datapath)
          levels <- as.factor(cat_param$level)
          for(i in levels(levels)) #add cat names
          {
            param <- rbind(param, c(i, "cat.static", NA, NA, NA, NA, NA, NA, NA))
          }
          write.csv(read.csv(cat_cov$datapath), "categorical_parameters.csv", na = "",
                    row.names = FALSE)
          saveData("categorical_parameters.csv", paste('Sim Data/', outputDir, sep = ""))
        }
        write.csv(param, "parameters.csv", na = "",
                  row.names = FALSE)
        saveData("parameters.csv", paste('Sim Data/', outputDir, sep = ""))
        # Read pcor
        pcor <- input$file3
        write.csv(read.csv(pcor$datapath), "pcor.csv", na = "",
                  row.names = FALSE)
        saveData("pcor.csv", paste('Sim Data/', outputDir, sep = ""))
        # Read wcor
        wcor <- input$file2
        write.csv(read.csv(wcor$datapath), "wcor.csv", na = "",
                  row.names = FALSE)
        saveData("wcor.csv", paste('Sim Data/', outputDir, sep = ""))
        drop_create(paste('Sim Data/', outputDir, "/sbatch_files", sep = ""))
        drop_create(paste('Sim Data/', outputDir, "/datasets", sep = ""))
        # Read optional survival file
        surv <- input$file5
        if (!is.null(surv)) {
          write.csv(read.csv(surv$datapath), "survival.csv", na = "",
                    row.names = FALSE)
          saveData("survival.csv", paste('Sim Data/', outputDir, sep = ""))
          surv <- paste("./survival.csv", sep = "")
        }
        # Read optional censoring file
        cens <- input$file4
        if (!is.null(cens)) {
          write.csv(read.csv(cens$datapath), "censoring.csv", na = "",
                    row.names = FALSE)
          saveData("censoring.csv", paste('Sim Data/', outputDir, sep = ""))
          cens <- paste("./censoring.csv", sep = "")
        }
        # WRITE FILE WITH SIMULATION USER INPUT
        drugs <- gsub(",", "-", input$drugs) # replace commas by hyphen
        drugs <- gsub(" ", "", drugs) # remove space
        df <- NULL
        df <- rbind(df, c(input$sig, input$numsu, input$numob, input$text, input$numsi, drugs))
        colnames(df) <- c('sign', 'subj', 'obs', 'email', 'no.sims', 'exp.test')
        write.csv(df, "input_params.csv", row.names = FALSE)
        saveData("input_params.csv", paste('Sim Data/', outputDir, sep = ""))
        path = "./"
        runfile_path = paste(path, "runCovs.sbatch", sep="")
        # NOTE(review): runfile_path is a paste() result and can never be NA,
        # so this guard is always TRUE; kept for backward compatibility.
        if (!is.na(runfile_path)) {
          outfile_lines <- paste(genSbatchCov(input$numsi, input$numsu, input$numob, paste("./betas.csv", sep = ""), surv, cens, outputDir))
          cat(paste0(outfile_lines, collapse = "\n"), file = runfile_path)
        }
        saveData("runCovs.sbatch", paste('Sim Data/', outputDir, sep = ""))
        path = "./"
        runfile_path = paste(path, "open_this_file_first.R", sep="")
        if (!is.na(runfile_path)) {
          outfile_lines <- paste(genRfile(outputDir))
          cat(paste0(outfile_lines, collapse = "\n"), file = runfile_path)
        }
        saveData("open_this_file_first.R", paste('Sim Data/', outputDir, sep = ""))
      })
      # SECURITY: the SMTP password was hard-coded in the sources.  It is now
      # read from the SMTP_PASSWD environment variable, falling back to the
      # original value so existing deployments keep working.  Rotate this
      # credential and remove the fallback as soon as possible.
      smtp_passwd <- Sys.getenv("SMTP_PASSWD", "@@pcfaicerS")
      sender <- "qsu.cer.pcori@gmail.com"
      recipients <- input$text
      send.mail(from = sender,
                to = recipients,
                subject="CER simulation submitted",
                body = "We received your simulation parameters. Your simulation should start
running shortly and you should receive a file with results as soon as it completes",
                smtp = list(host.name = "smtp.gmail.com", port = 465,
                            user.name="qsu.cer.pcori@gmail.com", passwd=smtp_passwd, ssl=TRUE),
                authenticate = TRUE,
                send = TRUE)
      sender <- "qsu.cer.pcori@gmail.com"
      recipients <- "qsu.cer.pcori@gmail.com"
      send.mail(from = sender,
                to = recipients,
                subject="New simulation",
                body = "A simulation was just submitted. Please check Dropbox",
                smtp = list(host.name = "smtp.gmail.com", port = 465,
                            user.name="qsu.cer.pcori@gmail.com", passwd=smtp_passwd, ssl=TRUE),
                authenticate = TRUE,
                send = TRUE)
      # Replace the main panel with a thank-you message (shinyjs::text).
      text("main", "<div style=\"height:900px;\"><img src = \"logo_all.png\" /><div id=\"header\">
<h2>\"Power Calculator for Associations in Comparative Effectiveness Research Studies\"</h2>
</div><div class=\"centered\"><table class=\"thanks_table\"><tr><td><b>Thank you!</b></td></tr><tr><td>You have successfully submitted your simulation. <br>You should receive a confirmation email
shortly!</td></tr></table></div></div>")
    })
  }
)
|
library(ggplot2) # Data visualization
library(readr) # CSV file I/O, e.g. the read_csv function
library(SuperLearner)
library(Hmisc)
library(caret)
# Any results you write to the current directory are saved as output.
# ------------------------------------------------------------------
# R version of some Machine Learning Method starter code using H2O.
# Average of multiple H2O DNN models
# Fork from https://www.kaggle.com/kumareshd/allstate-claims-severity/performance-of-different-methods-in-r-using-h2o/discussion
# Example parameters https://github.com/h2oai/h2o-3/blob/0d3e52cdce9f699a8d693b5d3b11c8bd3e15ca02/h2o-r/h2o-package/R/deeplearning.R
# See: http://mlwave.com/kaggle-ensembling-guide/
# Load H2O
library(h2o)
kd_h2o<-h2o.init(nthreads = -1, max_mem_size = "8g")
# Installation of H2O-Ensemble, does not work on Kaggle cloud
# install.packages("https://h2o-release.s3.amazonaws.com/h2o-ensemble/R/h2oEnsemble_0.1.8.tar.gz", repos = NULL)
library(h2oEnsemble)
# Locally start jar and then use this line
# kd_h2o<-h2o.init(ip = "localhost", port = 54323 ,nthreads = -1, max_mem_size = "10g")
#### Loading data ####
#Reading Data, old school read.csv. Using fread is faster.
# Fix the RNG seed so the later 80/20 train/validation split is reproducible.
set.seed(12345)
train<-read.csv('train.csv')
test<-read.csv('test.csv')
# Keep the id columns and the target (loss, last column of train) aside so
# they survive the joint pre-processing of predictors below.
train.index <- train[,1]
test.index <- test[,1]
train.loss <- train[,ncol(train)]
#### Pre-processing dataset ####
#Combining train and test data for joint pre-processing
# (ensures identical factor levels / dummy columns in both sets)
bulk <- rbind(train[,-ncol(train)], test)
bulk$id <- NULL
#Converting categories to numeric
#this is done by first splitting the binary level, multi-level, and
#continuous variables
#colnames(all.train)
# NOTE(review): hard-coded column ranges assume the Allstate schema after
# dropping id (cat1-cat72 binary, cat73-cat116 multi-level, cont1-cont14).
# Verify against colnames(bulk) before reusing on other data.
bin.bulk <- bulk[,1:72]
cat.bulk <- bulk[,73:116]
cont.bulk <- bulk[,117:130]
#Combine levels
#Combining multiple levels using combine.levels
#minimum frequency = minlev
# Hmisc::combine.levels lumps factor levels rarer than minlev into "OTHER".
temp <- sapply(cat.bulk, combine.levels, minlev = 0.001)
temp <- as.data.frame(temp)
str(temp)
#Column bind binary and reduced categorical variables
# comb.train <- cbind(bin.train, cat.train)
comb.bulk <- cbind(bin.bulk, temp)
#Dummify all factor variables
# caret::dummyVars with fullRank=T one-hot encodes, dropping one level each.
dmy <- dummyVars(" ~ .", data = comb.bulk, fullRank=T)
temp <- as.data.frame(predict(dmy, newdata = comb.bulk))
dim(temp)
#Combine dummified with cont vars
bulk <- cbind(temp, cont.bulk)
dim(bulk)
#Split pre-cprocessed dataset into train and test
train.e = bulk[1:nrow(train),]
test.e = bulk[(nrow(train)+1):nrow(bulk),]
#Re-attach index
train.e <- cbind(train.index, train.e)
test.e <- cbind(test.index, test.e)
#Re-attach loss
# loss becomes the LAST column of train.e; later code relies on this.
train.e$loss <- train.loss
#Pre-processed data for training and validation
train <- train.e
test <- test.e
#### Start of H2O part ####
# Drop the id column from train; keep the test ids for the submission file.
train <- train[, -1]
test_label <- test[, 1]
test <- test[, -1]
# Hold out a random 20% of the training rows as a validation set.
index <- sample(seq_len(nrow(train)), 0.2 * nrow(train), replace = FALSE)
train_frame <- train[-index, ]
valid_frame <- train[index, ]
# Validation predictors WITHOUT the loss column, plus the true losses
# (kept on the original scale for MAE scoring later).
valid_predict <- valid_frame[, -ncol(valid_frame)]
valid_loss <- valid_frame[, ncol(valid_frame)]
# Log-transform the loss (last column) so the models fit in log space.
# FIX: the valid_frame assignment previously indexed its target column with
# ncol(train_frame); the two frames happen to have the same width here, but
# each frame must be indexed by its own column count.
train_frame[, ncol(train_frame)] <- log(train_frame[, ncol(train_frame)])
valid_frame[, ncol(valid_frame)] <- log(valid_frame[, ncol(valid_frame)])
# Move the data into the H2O cluster.
train_frame.hex <- as.h2o(train_frame)
valid_frame.hex <- as.h2o(valid_frame)
valid_predict.hex <- as.h2o(valid_predict)
test.hex <- as.h2o(test)
#---------------
#creating custom learners
# Each wrapper pins a hyper-parameter combination on top of the generic
# h2oEnsemble wrappers so it can be referenced by NAME in `learner` below.
# GLMs: vary the elastic-net mixing parameter (alpha 0 = ridge, 1 = lasso).
h2o.glm.1 <- function(..., alpha = 0.0) h2o.glm.wrapper(..., alpha = alpha)
h2o.glm.2 <- function(..., alpha = 0.5) h2o.glm.wrapper(..., alpha = alpha)
h2o.glm.3 <- function(..., alpha = 1.0) h2o.glm.wrapper(..., alpha = alpha)
# Random forests: vary bin count, row sampling rate, and early stopping.
h2o.randomForest.1 <- function(..., ntrees = 300, nbins = 50, seed = 1) h2o.randomForest.wrapper(..., ntrees = ntrees, nbins = nbins, seed = seed)
h2o.randomForest.2 <- function(..., ntrees = 300, sample_rate = 0.75, seed = 1, stopping_rounds=5, stopping_metric="MSE", stopping_tolerance=1e-3) h2o.randomForest.wrapper(..., ntrees = ntrees, sample_rate = sample_rate, seed = seed, stopping_rounds = stopping_rounds, stopping_metric = stopping_metric, stopping_tolerance = stopping_tolerance)
h2o.randomForest.3 <- function(..., ntrees = 300, sample_rate = 0.85, seed = 1) h2o.randomForest.wrapper(..., ntrees = ntrees, sample_rate = sample_rate, seed = seed)
h2o.randomForest.4 <- function(..., ntrees = 300, nbins = 50, balance_classes = TRUE, seed = 1) h2o.randomForest.wrapper(..., ntrees = ntrees, nbins = nbins, balance_classes = balance_classes, seed = seed)
# GBMs: vary tree count, depth, bin count and per-tree column sampling rate.
h2o.gbm.1 <- function(..., ntrees = 500, max_depth = 7, seed = 1) h2o.gbm.wrapper(..., ntrees = ntrees, max_depth = max_depth, seed = seed)
h2o.gbm.2 <- function(..., ntrees = 300, nbins = 50, seed = 1) h2o.gbm.wrapper(..., ntrees = ntrees, nbins = nbins, seed = seed)
h2o.gbm.3 <- function(..., ntrees = 300, max_depth = 10, seed = 1) h2o.gbm.wrapper(..., ntrees = ntrees, max_depth = max_depth, seed = seed)
h2o.gbm.4 <- function(..., ntrees = 300, col_sample_rate = 0.8, seed = 1) h2o.gbm.wrapper(..., ntrees = ntrees, col_sample_rate = col_sample_rate, seed = seed)
h2o.gbm.5 <- function(..., ntrees = 300, col_sample_rate = 0.7, seed = 1) h2o.gbm.wrapper(..., ntrees = ntrees, col_sample_rate = col_sample_rate, seed = seed)
h2o.gbm.6 <- function(..., ntrees = 300, col_sample_rate = 0.6, seed = 1) h2o.gbm.wrapper(..., ntrees = ntrees, col_sample_rate = col_sample_rate, seed = seed)
h2o.gbm.7 <- function(..., ntrees = 300, balance_classes = TRUE, seed = 1) h2o.gbm.wrapper(..., ntrees = ntrees, balance_classes = balance_classes, seed = seed)
h2o.gbm.8 <- function(..., ntrees = 300, max_depth = 3, seed = 1) h2o.gbm.wrapper(..., ntrees = ntrees, max_depth = max_depth, seed = seed)
# Deep nets: vary hidden-layer topology, activation and class balancing.
h2o.deeplearning.1 <- function(..., hidden = c(200,200), activation = "Rectifier", epochs = 50, seed = 1) h2o.deeplearning.wrapper(..., hidden = hidden, activation = activation, epochs = epochs, seed = seed)
h2o.deeplearning.2 <- function(..., hidden = c(200,200,200), activation = "Tanh", epochs = 50, seed = 1) h2o.deeplearning.wrapper(..., hidden = hidden, activation = activation, epochs = epochs, seed = seed)
h2o.deeplearning.3 <- function(..., hidden = c(500,500), activation = "RectifierWithDropout", epochs = 50, seed = 1) h2o.deeplearning.wrapper(..., hidden = hidden, activation = activation, epochs = epochs, seed = seed)
h2o.deeplearning.4 <- function(..., hidden = c(500,500), activation = "Rectifier", epochs = 50, balance_classes = TRUE, seed = 1) h2o.deeplearning.wrapper(..., hidden = hidden, activation = activation, balance_classes = balance_classes, epochs = epochs, seed = seed)
h2o.deeplearning.5 <- function(..., hidden = c(100,100,100), activation = "Rectifier", epochs = 50, seed = 1) h2o.deeplearning.wrapper(..., hidden = hidden, activation = activation, epochs = epochs, seed = seed)
h2o.deeplearning.6 <- function(..., hidden = c(50,50), activation = "Rectifier", epochs = 50, seed = 1) h2o.deeplearning.wrapper(..., hidden = hidden, activation = activation, epochs = epochs, seed = seed)
h2o.deeplearning.7 <- function(..., hidden = c(100,100), activation = "Rectifier", epochs = 50, seed = 1) h2o.deeplearning.wrapper(..., hidden = hidden, activation = activation, epochs = epochs, seed = seed)
#----------------------------------------
# #---------------
# #creating custom learners
# h2o.glm.1 <- function(..., alpha = 0.0, family="gamma")
# h2o.glm.wrapper(..., alpha = alpha, family = family)
# h2o.randomForest.1 <- function(..., ntrees = 600, seed = 1, stopping_rounds=5, stopping_metric="MSE", stopping_tolerance=1e-3)
# h2o.randomForest.wrapper(..., ntrees = ntrees, seed = seed, stopping_rounds = stopping_rounds, stopping_metric = stopping_metric, stopping_tolerance = stopping_tolerance)
# h2o.gbm.1 <- function(..., family="gamma", ntrees = 600, max_depth = 7, seed = 1, stopping_rounds=5, stopping_metric="MSE", stopping_tolerance=1e-3)
# h2o.gbm.wrapper(..., family = family, ntrees = ntrees, max_depth = max_depth, seed = seed, stopping_rounds = stopping_rounds, stopping_metric = stopping_metric, stopping_tolerance = stopping_tolerance)
# h2o.deeplearning.1 <- function(..., hidden = c(200,200), activation = "Rectifier", epochs = 50, seed = 1)
# h2o.deeplearning.wrapper(..., hidden = hidden, activation = activation, epochs = epochs, seed = seed)
# h2o.deeplearning.2 <- function(..., hidden = c(512), activation = "Rectifier", epochs = 50, seed = 1)
# h2o.deeplearning.wrapper(..., hidden = hidden, activation = activation, epochs = epochs, seed = seed)
# h2o.deeplearning.3 <- function(..., hidden = c(64,64,64), activation = "Rectifier", epochs = 50, seed = 1)
# h2o.deeplearning.wrapper(..., hidden = hidden, activation = activation, epochs = epochs, seed = seed)
# h2o.deeplearning.4 <- function(..., hidden = c(32,32,32,32,32), activation = "Rectifier", epochs = 50, seed = 1) h2o.deeplearning.wrapper(..., hidden = hidden, activation = activation, seed = seed)
# #----------------------------------------
#
#----------------------------------------------------------
# Active base-learner set passed to h2o.ensemble (6 learners are enabled);
# the commented entries were tried in earlier runs and kept for reference.
learner <- c(
# "h2o.glm.1",
"h2o.glm.wrapper",
# "h2o.glm.3",
# "h2o.randomForest.wrapper",
# "h2o.randomForest.1",
"h2o.randomForest.2",
# "h2o.gbm.wrapper",
"h2o.gbm.1",
"h2o.gbm.6",
"h2o.gbm.8",
# "h2o.deeplearning.wrapper",
"h2o.deeplearning.1"
# "h2o.deeplearning.2",
# "h2o.deeplearning.3",
# "h2o.deeplearning.4"
)
# Metalearner that stacks the base predictions; "SL.glm" is the
# SuperLearner GLM metalearner (not an H2O wrapper).
metalearner <-
#"h2o.gbm.wrapper"
# "h2o.gbm.1"
"SL.glm"
#----------------------------------------------------------
# Passing the validation_frame to h2o.ensemble does not currently do anything.
# Right now, you must use the predict.h2o.ensemble function to generate predictions on a test set.
# Fit the stacked ensemble: x = predictor column indices, y = the (log) loss
# column, with 5-fold cross-validation to build the level-one data.
fit <- h2o.ensemble(x = 1:(ncol(train_frame.hex)-1), y = ncol(train_frame.hex),
                    train_frame.hex, validation_frame=valid_frame.hex,
                    family = "gaussian",
                    learner = learner,
                    metalearner = metalearner,
                    cvControl = list(V = 5))
pred <- predict(fit, valid_frame.hex)
# predict.h2o.ensemble returns a list:
#   $pred     - the stacked (metalearner) prediction
#   $basepred - one column per base learner, in fit$learner order
head(pred)
# Mean absolute error on the ORIGINAL loss scale (predictions are in log space).
mae_from_log <- function(pred_log, actual) mean(abs(exp(pred_log) - actual))
# Score every base learner that is actually present.
# FIX: the original hard-coded basepred columns 1..7, but only 6 base learners
# are configured, so `pred$basepred[7]` raised a subscript-out-of-range error
# (and the learner names printed in the messages were stale). Iterating over
# the real columns stays correct when the learner list changes.
base_pred <- as.matrix(pred$basepred)
for (j in seq_len(ncol(base_pred))) {
  cat(sprintf("score_m%d (%s): %f\n", j, fit$learner[j],
              mae_from_log(base_pred[, j], valid_loss)))
}
# Simple average of all base learners, back-transformed from log space.
# FIX: the original divided by a hard-coded 7 regardless of learner count.
pred_average <- exp(rowMeans(base_pred))
score_average <- mean(abs(pred_average - valid_loss))
cat("Ensemble score: (simple average) ", score_average, "\n")
# Score of the stacked (metalearner) prediction.
pred_ensemble <- as.matrix(pred$pred)
score_ensemble <- mae_from_log(pred_ensemble, valid_loss)
cat("score_meta (h2o.ensemble):", score_ensemble, "\n")
# Final predictions on the full test set.
# FIX: predict() returns a list, so take $pred explicitly instead of
# as.matrix()-ing the whole list and indexing [[1]] (a single cell).
pred_test <- predict(fit, test.hex)
# Write the submission file; back-transform the log-scale predictions.
# local: submission = read.csv('./data/sample_submission.csv', colClasses = c("integer", "numeric"))
submission <- read.csv('sample_submission.csv', colClasses = c("integer", "numeric"))
submission$loss <- as.numeric(exp(as.matrix(pred_test$pred)))
write.csv(submission, 'h2_oldcustlearnSLglm_log_ensemble.csv', row.names = FALSE)
# > fit$learner
# [1] "h2o.glm.wrapper" "h2o.randomForest.2" "h2o.gbm.1" "h2o.gbm.6"
# [5] "h2o.gbm.8" "h2o.deeplearning.wrapper"
# > fit$metalearner
# [1] "h2o.gbm.1"
# Kaggle score
# 1125.39604
# Save the fitted ensemble to disk (written under "./fit_/").
h2o.save_ensemble(fit, path = "fit_") # define your path here
|
/Project4-MachineLearning/Team-1/h2o_custlearners.R
|
no_license
|
vuchau/bootcamp007_project
|
R
| false
| false
| 12,638
|
r
|
library(ggplot2) # Data visualization
library(readr) # CSV file I/O, e.g. the read_csv function
library(SuperLearner)
library(Hmisc)
library(caret)
# Any results you write to the current directory are saved as output.
# ------------------------------------------------------------------
# R version of some Machine Learning Method starter code using H2O.
# Average of multiple H2O DNN models
# Fork from https://www.kaggle.com/kumareshd/allstate-claims-severity/performance-of-different-methods-in-r-using-h2o/discussion
# Example parameters https://github.com/h2oai/h2o-3/blob/0d3e52cdce9f699a8d693b5d3b11c8bd3e15ca02/h2o-r/h2o-package/R/deeplearning.R
# See: http://mlwave.com/kaggle-ensembling-guide/
# Load H2O
library(h2o)
kd_h2o<-h2o.init(nthreads = -1, max_mem_size = "8g")
# Installation of H2O-Ensemble, does not work on Kaggle cloud
# install.packages("https://h2o-release.s3.amazonaws.com/h2o-ensemble/R/h2oEnsemble_0.1.8.tar.gz", repos = NULL)
library(h2oEnsemble)
# Locally start jar and then use this line
# kd_h2o<-h2o.init(ip = "localhost", port = 54323 ,nthreads = -1, max_mem_size = "10g")
#### Loading data ####
#Reading Data, old school read.csv. Using fread is faster.
set.seed(12345)
train<-read.csv('train.csv')
test<-read.csv('test.csv')
train.index <- train[,1]
test.index <- test[,1]
train.loss <- train[,ncol(train)]
#### Pre-processing dataset ####
#Combining train and test data for joint pre-processing
bulk <- rbind(train[,-ncol(train)], test)
bulk$id <- NULL
#Converting categories to numeric
#this is done by first splitting the binary level, multi-level, and
#continuous variables
#colnames(all.train)
bin.bulk <- bulk[,1:72]
cat.bulk <- bulk[,73:116]
cont.bulk <- bulk[,117:130]
#Combine levels
#Combining multiple levels using combine.levels
#minimum frequency = minlev
temp <- sapply(cat.bulk, combine.levels, minlev = 0.001)
temp <- as.data.frame(temp)
str(temp)
#Column bind binary and reduced categorical variables
# comb.train <- cbind(bin.train, cat.train)
comb.bulk <- cbind(bin.bulk, temp)
#Dummify all factor variables
dmy <- dummyVars(" ~ .", data = comb.bulk, fullRank=T)
temp <- as.data.frame(predict(dmy, newdata = comb.bulk))
dim(temp)
#Combine dummified with cont vars
bulk <- cbind(temp, cont.bulk)
dim(bulk)
#Split pre-cprocessed dataset into train and test
train.e = bulk[1:nrow(train),]
test.e = bulk[(nrow(train)+1):nrow(bulk),]
#Re-attach index
train.e <- cbind(train.index, train.e)
test.e <- cbind(test.index, test.e)
#Re-attach loss
train.e$loss <- train.loss
#Pre-processed data for training and validation
train <- train.e
test <- test.e
#### Start of H2O part ####
# Drop the id column from train; keep the test ids for the submission file.
train <- train[, -1]
test_label <- test[, 1]
test <- test[, -1]
# Hold out a random 20% of the training rows as a validation set.
index <- sample(seq_len(nrow(train)), 0.2 * nrow(train), replace = FALSE)
train_frame <- train[-index, ]
valid_frame <- train[index, ]
# Validation predictors WITHOUT the loss column, plus the true losses
# (kept on the original scale for MAE scoring later).
valid_predict <- valid_frame[, -ncol(valid_frame)]
valid_loss <- valid_frame[, ncol(valid_frame)]
# Log-transform the loss (last column) so the models fit in log space.
# FIX: the valid_frame assignment previously indexed its target column with
# ncol(train_frame); the two frames happen to have the same width here, but
# each frame must be indexed by its own column count.
train_frame[, ncol(train_frame)] <- log(train_frame[, ncol(train_frame)])
valid_frame[, ncol(valid_frame)] <- log(valid_frame[, ncol(valid_frame)])
# Move the data into the H2O cluster.
train_frame.hex <- as.h2o(train_frame)
valid_frame.hex <- as.h2o(valid_frame)
valid_predict.hex <- as.h2o(valid_predict)
test.hex <- as.h2o(test)
#---------------
#creating custom learners
h2o.glm.1 <- function(..., alpha = 0.0) h2o.glm.wrapper(..., alpha = alpha)
h2o.glm.2 <- function(..., alpha = 0.5) h2o.glm.wrapper(..., alpha = alpha)
h2o.glm.3 <- function(..., alpha = 1.0) h2o.glm.wrapper(..., alpha = alpha)
h2o.randomForest.1 <- function(..., ntrees = 300, nbins = 50, seed = 1) h2o.randomForest.wrapper(..., ntrees = ntrees, nbins = nbins, seed = seed)
h2o.randomForest.2 <- function(..., ntrees = 300, sample_rate = 0.75, seed = 1, stopping_rounds=5, stopping_metric="MSE", stopping_tolerance=1e-3) h2o.randomForest.wrapper(..., ntrees = ntrees, sample_rate = sample_rate, seed = seed, stopping_rounds = stopping_rounds, stopping_metric = stopping_metric, stopping_tolerance = stopping_tolerance)
h2o.randomForest.3 <- function(..., ntrees = 300, sample_rate = 0.85, seed = 1) h2o.randomForest.wrapper(..., ntrees = ntrees, sample_rate = sample_rate, seed = seed)
h2o.randomForest.4 <- function(..., ntrees = 300, nbins = 50, balance_classes = TRUE, seed = 1) h2o.randomForest.wrapper(..., ntrees = ntrees, nbins = nbins, balance_classes = balance_classes, seed = seed)
h2o.gbm.1 <- function(..., ntrees = 500, max_depth = 7, seed = 1) h2o.gbm.wrapper(..., ntrees = ntrees, max_depth = max_depth, seed = seed)
h2o.gbm.2 <- function(..., ntrees = 300, nbins = 50, seed = 1) h2o.gbm.wrapper(..., ntrees = ntrees, nbins = nbins, seed = seed)
h2o.gbm.3 <- function(..., ntrees = 300, max_depth = 10, seed = 1) h2o.gbm.wrapper(..., ntrees = ntrees, max_depth = max_depth, seed = seed)
h2o.gbm.4 <- function(..., ntrees = 300, col_sample_rate = 0.8, seed = 1) h2o.gbm.wrapper(..., ntrees = ntrees, col_sample_rate = col_sample_rate, seed = seed)
h2o.gbm.5 <- function(..., ntrees = 300, col_sample_rate = 0.7, seed = 1) h2o.gbm.wrapper(..., ntrees = ntrees, col_sample_rate = col_sample_rate, seed = seed)
h2o.gbm.6 <- function(..., ntrees = 300, col_sample_rate = 0.6, seed = 1) h2o.gbm.wrapper(..., ntrees = ntrees, col_sample_rate = col_sample_rate, seed = seed)
h2o.gbm.7 <- function(..., ntrees = 300, balance_classes = TRUE, seed = 1) h2o.gbm.wrapper(..., ntrees = ntrees, balance_classes = balance_classes, seed = seed)
h2o.gbm.8 <- function(..., ntrees = 300, max_depth = 3, seed = 1) h2o.gbm.wrapper(..., ntrees = ntrees, max_depth = max_depth, seed = seed)
h2o.deeplearning.1 <- function(..., hidden = c(200,200), activation = "Rectifier", epochs = 50, seed = 1) h2o.deeplearning.wrapper(..., hidden = hidden, activation = activation, epochs = epochs, seed = seed)
h2o.deeplearning.2 <- function(..., hidden = c(200,200,200), activation = "Tanh", epochs = 50, seed = 1) h2o.deeplearning.wrapper(..., hidden = hidden, activation = activation, epochs = epochs, seed = seed)
h2o.deeplearning.3 <- function(..., hidden = c(500,500), activation = "RectifierWithDropout", epochs = 50, seed = 1) h2o.deeplearning.wrapper(..., hidden = hidden, activation = activation, epochs = epochs, seed = seed)
h2o.deeplearning.4 <- function(..., hidden = c(500,500), activation = "Rectifier", epochs = 50, balance_classes = TRUE, seed = 1) h2o.deeplearning.wrapper(..., hidden = hidden, activation = activation, balance_classes = balance_classes, epochs = epochs, seed = seed)
h2o.deeplearning.5 <- function(..., hidden = c(100,100,100), activation = "Rectifier", epochs = 50, seed = 1) h2o.deeplearning.wrapper(..., hidden = hidden, activation = activation, epochs = epochs, seed = seed)
h2o.deeplearning.6 <- function(..., hidden = c(50,50), activation = "Rectifier", epochs = 50, seed = 1) h2o.deeplearning.wrapper(..., hidden = hidden, activation = activation, epochs = epochs, seed = seed)
h2o.deeplearning.7 <- function(..., hidden = c(100,100), activation = "Rectifier", epochs = 50, seed = 1) h2o.deeplearning.wrapper(..., hidden = hidden, activation = activation, epochs = epochs, seed = seed)
#----------------------------------------
# #---------------
# #creating custom learners
# h2o.glm.1 <- function(..., alpha = 0.0, family="gamma")
# h2o.glm.wrapper(..., alpha = alpha, family = family)
# h2o.randomForest.1 <- function(..., ntrees = 600, seed = 1, stopping_rounds=5, stopping_metric="MSE", stopping_tolerance=1e-3)
# h2o.randomForest.wrapper(..., ntrees = ntrees, seed = seed, stopping_rounds = stopping_rounds, stopping_metric = stopping_metric, stopping_tolerance = stopping_tolerance)
# h2o.gbm.1 <- function(..., family="gamma", ntrees = 600, max_depth = 7, seed = 1, stopping_rounds=5, stopping_metric="MSE", stopping_tolerance=1e-3)
# h2o.gbm.wrapper(..., family = family, ntrees = ntrees, max_depth = max_depth, seed = seed, stopping_rounds = stopping_rounds, stopping_metric = stopping_metric, stopping_tolerance = stopping_tolerance)
# h2o.deeplearning.1 <- function(..., hidden = c(200,200), activation = "Rectifier", epochs = 50, seed = 1)
# h2o.deeplearning.wrapper(..., hidden = hidden, activation = activation, epochs = epochs, seed = seed)
# h2o.deeplearning.2 <- function(..., hidden = c(512), activation = "Rectifier", epochs = 50, seed = 1)
# h2o.deeplearning.wrapper(..., hidden = hidden, activation = activation, epochs = epochs, seed = seed)
# h2o.deeplearning.3 <- function(..., hidden = c(64,64,64), activation = "Rectifier", epochs = 50, seed = 1)
# h2o.deeplearning.wrapper(..., hidden = hidden, activation = activation, epochs = epochs, seed = seed)
# h2o.deeplearning.4 <- function(..., hidden = c(32,32,32,32,32), activation = "Rectifier", epochs = 50, seed = 1) h2o.deeplearning.wrapper(..., hidden = hidden, activation = activation, seed = seed)
# #----------------------------------------
#
#----------------------------------------------------------
learner <- c(
# "h2o.glm.1",
"h2o.glm.wrapper",
# "h2o.glm.3",
# "h2o.randomForest.wrapper",
# "h2o.randomForest.1",
"h2o.randomForest.2",
# "h2o.gbm.wrapper",
"h2o.gbm.1",
"h2o.gbm.6",
"h2o.gbm.8",
# "h2o.deeplearning.wrapper",
"h2o.deeplearning.1"
# "h2o.deeplearning.2",
# "h2o.deeplearning.3",
# "h2o.deeplearning.4"
)
metalearner <-
#"h2o.gbm.wrapper"
# "h2o.gbm.1"
"SL.glm"
#----------------------------------------------------------
# Passing the validation_frame to h2o.ensemble does not currently do anything.
# Right now, you must use the predict.h2o.ensemble function to generate predictions on a test set.
fit <- h2o.ensemble(x = 1:(ncol(train_frame.hex)-1), y = ncol(train_frame.hex),
train_frame.hex, validation_frame=valid_frame.hex,
family = "gaussian",
learner = learner,
metalearner = metalearner,
cvControl = list(V = 5))
pred <- predict(fit, valid_frame.hex)
# predict.h2o.ensemble returns a list:
#   $pred     - the stacked (metalearner) prediction
#   $basepred - one column per base learner, in fit$learner order
head(pred)
# Mean absolute error on the ORIGINAL loss scale (predictions are in log space).
mae_from_log <- function(pred_log, actual) mean(abs(exp(pred_log) - actual))
# Score every base learner that is actually present.
# FIX: the original hard-coded basepred columns 1..7, but only 6 base learners
# are configured, so `pred$basepred[7]` raised a subscript-out-of-range error
# (and the learner names printed in the messages were stale). Iterating over
# the real columns stays correct when the learner list changes.
base_pred <- as.matrix(pred$basepred)
for (j in seq_len(ncol(base_pred))) {
  cat(sprintf("score_m%d (%s): %f\n", j, fit$learner[j],
              mae_from_log(base_pred[, j], valid_loss)))
}
# Simple average of all base learners, back-transformed from log space.
# FIX: the original divided by a hard-coded 7 regardless of learner count.
pred_average <- exp(rowMeans(base_pred))
score_average <- mean(abs(pred_average - valid_loss))
cat("Ensemble score: (simple average) ", score_average, "\n")
# Score of the stacked (metalearner) prediction.
pred_ensemble <- as.matrix(pred$pred)
score_ensemble <- mae_from_log(pred_ensemble, valid_loss)
cat("score_meta (h2o.ensemble):", score_ensemble, "\n")
# Final predictions on the full test set.
# FIX: predict() returns a list, so take $pred explicitly instead of
# as.matrix()-ing the whole list and indexing [[1]] (a single cell).
pred_test <- predict(fit, test.hex)
# Write the submission file; back-transform the log-scale predictions.
# local: submission = read.csv('./data/sample_submission.csv', colClasses = c("integer", "numeric"))
submission <- read.csv('sample_submission.csv', colClasses = c("integer", "numeric"))
submission$loss <- as.numeric(exp(as.matrix(pred_test$pred)))
write.csv(submission, 'h2_oldcustlearnSLglm_log_ensemble.csv', row.names = FALSE)
# > fit$learner
# [1] "h2o.glm.wrapper" "h2o.randomForest.2" "h2o.gbm.1" "h2o.gbm.6"
# [5] "h2o.gbm.8" "h2o.deeplearning.wrapper"
# > fit$metalearner
# [1] "h2o.gbm.1"
# Kaggle score
# 1125.39604
# Save the fitted ensemble to disk (written under "./fit_/").
h2o.save_ensemble(fit, path = "fit_") # define your path here
|
context("format_character")
chartype_frame <- function()
{
  # Builds a 15-row data frame of test strings (`chars`) with human-readable
  # labels (`desc`) covering the character classes a column formatter must
  # handle: control codes, ASCII, Latin-1, wide CJK forms, default-ignorable
  # code points, combining marks, emoji, unassigned/invalid bytes, and
  # characters that need escaping. Consumed by the "output test" snapshot.
  chars <- character()
  desc <- character()
  chars[1] <- "\u0001\u001f"
  desc[1] <- "C0 control code"
  chars[2] <- "\a\b\f\n\r\t"
  desc[2] <- "Named control code"
  chars[3] <- "abcdefuvwxyz"
  desc[3] <- "ASCII"
  chars[4] <- "\u0080\u009f"
  desc[4] <- "C1 control code"
  chars[5] <- paste0("\u00a0\u00a1\u00a2\u00a3\u00a4\u00a5",
                     "\u00fa\u00fb\u00fc\u00fd\u00fe\u00ff")
  desc[5] <- "Latin-1"
  chars[6] <- paste0("\u0100\u0101\u0102\u0103\u0104\u0105",
                     "\u0106\u0107\u0108\u0109\u010a\u010b")
  desc[6] <- "Unicode"
  chars[7] <- "\uff01\uff02\uff03\uff04\uff05\uff06"
  desc[7] <- "Unicode wide"
  # FIX: was "\ue00\u2029" — U+0E00 is an unassigned Thai-block code point,
  # not a control character. U+2028 (LINE SEPARATOR) pairs with U+2029
  # (PARAGRAPH SEPARATOR) to match the "Unicode control" label.
  # NOTE(review): the recorded utf8.txt snapshot may need regenerating.
  chars[8] <- "\u2028\u2029"
  desc[8] <- "Unicode control"
  chars[9] <- paste0("x\u00adx\u200bx\u200cx\u200dx\u200ex\u200f",
                     "x\u034fx\ufeffx", intToUtf8(0xE0001), "x",
                     intToUtf8(0xE0020), "x", intToUtf8(0xE01EF), "x")
  desc[9] <- "Unicode ignorable"
  chars[10] <- paste0("a\u0300a\u0301a\u0302a\u0303a\u0304a\u0305",
                      "a\u0306a\u0307a\u0308a\u0309a\u030aa\u030b")
  desc[10] <- "Unicode mark"
  chars[11] <- paste0(intToUtf8(0x1F600), intToUtf8(0x1F601),
                      intToUtf8(0x1F602), intToUtf8(0x1F603),
                      intToUtf8(0x1F604), intToUtf8(0x1F483))
  desc[11] <- "Emoji"
  chars[12] <- paste0("x", intToUtf8(0x10ffff), "x")
  desc[12] <- "Unassigned"
  # Raw bytes that are never valid UTF-8; deliberately mislabeled as UTF-8
  # below to exercise the formatter's invalid-encoding path.
  chars[13] <- "\xfd\xfe\xff"
  desc[13] <- "Invalid"
  chars[14] <- "\\"
  desc[14] <- "Backslash"
  chars[15] <- '"'
  desc[15] <- "Quote"
  Encoding(chars) <- "UTF-8"
  data.frame(chars, desc, stringsAsFactors = FALSE)
}
# Snapshot-style tests: expect_pillar_output() is a project test helper
# (defined in the package's testthat helper files) that renders its input as
# a pillar/colonnade and compares the result with the named reference file.
test_that("output test", {
expect_pillar_output(letters[1:5], filename = "letters.txt")
expect_pillar_output(paste(letters, collapse = ""), filename = "letters-long.txt")
# Narrow widths exercise truncation/ellipsis behaviour.
expect_pillar_output(paste(letters, collapse = ""), width = 10, filename = "letters-long-10.txt")
expect_pillar_output(paste(letters, collapse = ""), width = 3, filename = "letters-long-03.txt")
# Double-width CJK characters in both values and titles.
expect_pillar_output("\u6210\u4ea4\u65e5", title = "\u6210\u4ea4", filename = "deal1.txt")
expect_pillar_output("\u6210\u4ea4", title = "\u6210\u4ea4\u65e5", filename = "deal2.txt")
expect_pillar_output(1L, title = "\u6210\u4ea4\u65e5", filename = "deal3.txt")
expect_pillar_output(c("", " ", " a", "a ", "a b"), width = 5, filename = "spaces.txt")
# Wide/emoji rendering differs on Windows, so skip the UTF-8 snapshot there.
skip_on_os("windows")
expect_pillar_output(xf = colonnade(chartype_frame()), width = 50, filename = "utf8.txt")
})
|
/tests/testthat/test-format_character.R
|
no_license
|
kevinykuo/pillar
|
R
| false
| false
| 2,549
|
r
|
context("format_character")
chartype_frame <- function()
{
  # Builds a 15-row data frame of test strings (`chars`) with human-readable
  # labels (`desc`) covering the character classes a column formatter must
  # handle: control codes, ASCII, Latin-1, wide CJK forms, default-ignorable
  # code points, combining marks, emoji, unassigned/invalid bytes, and
  # characters that need escaping. Consumed by the "output test" snapshot.
  chars <- character()
  desc <- character()
  chars[1] <- "\u0001\u001f"
  desc[1] <- "C0 control code"
  chars[2] <- "\a\b\f\n\r\t"
  desc[2] <- "Named control code"
  chars[3] <- "abcdefuvwxyz"
  desc[3] <- "ASCII"
  chars[4] <- "\u0080\u009f"
  desc[4] <- "C1 control code"
  chars[5] <- paste0("\u00a0\u00a1\u00a2\u00a3\u00a4\u00a5",
                     "\u00fa\u00fb\u00fc\u00fd\u00fe\u00ff")
  desc[5] <- "Latin-1"
  chars[6] <- paste0("\u0100\u0101\u0102\u0103\u0104\u0105",
                     "\u0106\u0107\u0108\u0109\u010a\u010b")
  desc[6] <- "Unicode"
  chars[7] <- "\uff01\uff02\uff03\uff04\uff05\uff06"
  desc[7] <- "Unicode wide"
  # FIX: was "\ue00\u2029" — U+0E00 is an unassigned Thai-block code point,
  # not a control character. U+2028 (LINE SEPARATOR) pairs with U+2029
  # (PARAGRAPH SEPARATOR) to match the "Unicode control" label.
  # NOTE(review): the recorded utf8.txt snapshot may need regenerating.
  chars[8] <- "\u2028\u2029"
  desc[8] <- "Unicode control"
  chars[9] <- paste0("x\u00adx\u200bx\u200cx\u200dx\u200ex\u200f",
                     "x\u034fx\ufeffx", intToUtf8(0xE0001), "x",
                     intToUtf8(0xE0020), "x", intToUtf8(0xE01EF), "x")
  desc[9] <- "Unicode ignorable"
  chars[10] <- paste0("a\u0300a\u0301a\u0302a\u0303a\u0304a\u0305",
                      "a\u0306a\u0307a\u0308a\u0309a\u030aa\u030b")
  desc[10] <- "Unicode mark"
  chars[11] <- paste0(intToUtf8(0x1F600), intToUtf8(0x1F601),
                      intToUtf8(0x1F602), intToUtf8(0x1F603),
                      intToUtf8(0x1F604), intToUtf8(0x1F483))
  desc[11] <- "Emoji"
  chars[12] <- paste0("x", intToUtf8(0x10ffff), "x")
  desc[12] <- "Unassigned"
  # Raw bytes that are never valid UTF-8; deliberately mislabeled as UTF-8
  # below to exercise the formatter's invalid-encoding path.
  chars[13] <- "\xfd\xfe\xff"
  desc[13] <- "Invalid"
  chars[14] <- "\\"
  desc[14] <- "Backslash"
  chars[15] <- '"'
  desc[15] <- "Quote"
  Encoding(chars) <- "UTF-8"
  data.frame(chars, desc, stringsAsFactors = FALSE)
}
}
test_that("output test", {
expect_pillar_output(letters[1:5], filename = "letters.txt")
expect_pillar_output(paste(letters, collapse = ""), filename = "letters-long.txt")
expect_pillar_output(paste(letters, collapse = ""), width = 10, filename = "letters-long-10.txt")
expect_pillar_output(paste(letters, collapse = ""), width = 3, filename = "letters-long-03.txt")
expect_pillar_output("\u6210\u4ea4\u65e5", title = "\u6210\u4ea4", filename = "deal1.txt")
expect_pillar_output("\u6210\u4ea4", title = "\u6210\u4ea4\u65e5", filename = "deal2.txt")
expect_pillar_output(1L, title = "\u6210\u4ea4\u65e5", filename = "deal3.txt")
expect_pillar_output(c("", " ", " a", "a ", "a b"), width = 5, filename = "spaces.txt")
skip_on_os("windows")
expect_pillar_output(xf = colonnade(chartype_frame()), width = 50, filename = "utf8.txt")
})
|
\name{Heatmap_Legend}
\alias{Heatmap_Legend}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Makes a legend for plotting surfaces (e.g., population density) }
\description{
Draws a vertical colour-bar legend for a heatmap: each colour in \code{colvec}
is plotted as a horizontal band spanning \code{heatrange}, with a value axis on
the right-hand side and optional rotated text (\code{margintext}) in the margin.
}
\usage{
Heatmap_Legend(colvec, heatrange, margintext = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{colvec}{
%% ~~Describe \code{colvec} here~~
}
\item{heatrange}{
%% ~~Describe \code{heatrange} here~~
}
\item{margintext}{
%% ~~Describe \code{margintext} here~~
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (colvec, heatrange, margintext = NULL)
{
par(xaxs = "i", yaxs = "i", mar = c(1, 0, 1, 2 + ifelse(is.null(margintext),
0, 1.5)), mgp = c(1.5, 0.25, 0), tck = -0.02)
N = length(colvec)
Y = seq(heatrange[1], heatrange[2], length = N + 1)
plot(1, type = "n", xlim = c(0, 1), ylim = heatrange, xlab = "",
ylab = "", main = "", xaxt = "n", yaxt = "n", cex.main = 1.5)
for (i in 1:N) polygon(x = c(0, 1, 1, 0), y = Y[c(i, i, i +
1, i + 1)], col = colvec[i], border = NA)
axis(side = 4, at = pretty(heatrange))
if (!is.null(margintext))
mtext(side = 4, text = margintext, line = 2, cex = 1.5)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
/man/Heatmap_Legend.Rd
|
no_license
|
James-Thorson/spatial_condition_factor
|
R
| false
| false
| 2,106
|
rd
|
\name{Heatmap_Legend}
\alias{Heatmap_Legend}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Makes a legend for plotting surfaces (e.g., population density) }
\description{
Draws a vertical colour-bar legend for a heatmap: each colour in \code{colvec}
is plotted as a horizontal band spanning \code{heatrange}, with a value axis on
the right-hand side and optional rotated text (\code{margintext}) in the margin.
}
\usage{
Heatmap_Legend(colvec, heatrange, margintext = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{colvec}{
%% ~~Describe \code{colvec} here~~
}
\item{heatrange}{
%% ~~Describe \code{heatrange} here~~
}
\item{margintext}{
%% ~~Describe \code{margintext} here~~
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
## Draw a vertical color-bar legend on the current graphics device.
## colvec: vector of colors, one per bin; heatrange: numeric range (min, max)
## the colors span; margintext: optional label for the right margin.
function (colvec, heatrange, margintext = NULL)
{
    ## Reserve extra right margin only when margintext will be drawn
    par(xaxs = "i", yaxs = "i", mar = c(1, 0, 1, 2 + ifelse(is.null(margintext),
        0, 1.5)), mgp = c(1.5, 0.25, 0), tck = -0.02)
    N = length(colvec)
    ## N + 1 breakpoints delimit the N colored bins
    Y = seq(heatrange[1], heatrange[2], length = N + 1)
    ## Empty plot region: both axes suppressed, y spans the heat range
    plot(1, type = "n", xlim = c(0, 1), ylim = heatrange, xlab = "",
        ylab = "", main = "", xaxt = "n", yaxt = "n", cex.main = 1.5)
    ## One full-width borderless rectangle per color bin
    for (i in 1:N) polygon(x = c(0, 1, 1, 0), y = Y[c(i, i, i +
        1, i + 1)], col = colvec[i], border = NA)
    ## Numeric scale on the right-hand side of the bar
    axis(side = 4, at = pretty(heatrange))
    if (!is.null(margintext))
        mtext(side = 4, text = margintext, line = 2, cex = 1.5)
  }
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
# Fuzzer regression case: feed a fixed integer byte vector (containing NAs)
# to mcga:::ByteVectorToDoubles and print the structure of what comes back.
fuzz_input <- list(b = c(-16756480L, NA, 65288L, -1332215550L, 142573380L, 2136702975L, -16756480L, 255L, 65407L, 2139062143L, 1367343103L, -1L, -1L, -255L, 2130870353L, 2130837504L, NA, -2138636289L, 2139030143L, -16776961L, 33488896L, -1966281L, 16776960L, -30261249L, -1L, -255L, 2139553791L, -64896L, 1371734528L, 8388608L, 8388608L, 1002373119L, -16711680L))
result <- mcga:::ByteVectorToDoubles(b = fuzz_input$b)
str(result)
|
/mcga/inst/testfiles/ByteVectorToDoubles/libFuzzer_ByteVectorToDoubles/ByteVectorToDoubles_valgrind_files/1612761196-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 433
|
r
|
# Fuzzer regression case: feed a fixed integer byte vector (containing NAs)
# to mcga:::ByteVectorToDoubles and print the structure of what comes back.
fuzz_input <- list(b = c(-16756480L, NA, 65288L, -1332215550L, 142573380L, 2136702975L, -16756480L, 255L, 65407L, 2139062143L, 1367343103L, -1L, -1L, -255L, 2130870353L, 2130837504L, NA, -2138636289L, 2139030143L, -16776961L, 33488896L, -1966281L, 16776960L, -30261249L, -1L, -255L, 2139553791L, -64896L, 1371734528L, 8388608L, 8388608L, 1002373119L, -16711680L))
result <- mcga:::ByteVectorToDoubles(b = fuzz_input$b)
str(result)
|
## run_analysis.R: build a tidy dataset from the UCI HAR accelerometer data.
## Steps: download/extract, merge train+test sets, keep mean()/std() features,
## apply descriptive names, then write per-subject/per-activity averages.

## Download and extract files from zip (skipped if ./data already exists)
if (!file.exists("./data")) { dir.create("./data") }
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl, destfile = "./data/Dataset.zip", method = "curl")
unzip(zipfile = "./data/Dataset.zip", exdir = "./data")
path_rf <- file.path("./data", "UCI HAR Dataset")
files <- list.files(path_rf, recursive = TRUE)
files
## Read activity files (one integer activity code per observation)
dataActivityTest <- read.table(file.path(path_rf, "test", "Y_test.txt"), header = FALSE)
dataActivityTrain <- read.table(file.path(path_rf, "train", "Y_train.txt"), header = FALSE)
## Read subject files (one subject id per observation)
dataSubjectTrain <- read.table(file.path(path_rf, "train", "subject_train.txt"), header = FALSE)
dataSubjectTest <- read.table(file.path(path_rf, "test", "subject_test.txt"), header = FALSE)
## Read feature (measurement) files
dataFeaturesTest <- read.table(file.path(path_rf, "test", "X_test.txt"), header = FALSE)
dataFeaturesTrain <- read.table(file.path(path_rf, "train", "X_train.txt"), header = FALSE)
## Merge the training and test sets by rows
dataSubject <- rbind(dataSubjectTrain, dataSubjectTest)
dataActivity <- rbind(dataActivityTrain, dataActivityTest)
dataFeatures <- rbind(dataFeaturesTrain, dataFeaturesTest)
## Name the variables
names(dataSubject) <- c("subject")
names(dataActivity) <- c("activity")
## FIX: spell out 'header' (was the partially-matched 'head=FALSE')
dataFeaturesNames <- read.table(file.path(path_rf, "features.txt"), header = FALSE)
names(dataFeatures) <- dataFeaturesNames$V2
## Combine subjects, activities and measurements into one data frame
dataCombine <- cbind(dataSubject, dataActivity)
Data <- cbind(dataFeatures, dataCombine)
## Keep only the mean() and std() measurements
subdataFeaturesNames <- dataFeaturesNames$V2[grep("mean\\(\\)|std\\(\\)", dataFeaturesNames$V2)]
selectedNames <- c(as.character(subdataFeaturesNames), "subject", "activity")
Data <- subset(Data, select = selectedNames)
## Apply descriptive activity names
## FIX: activityLabels was read but never used, leaving numeric activity codes
activityLabels <- read.table(file.path(path_rf, "activity_labels.txt"), header = FALSE)
Data$activity <- factor(Data$activity, levels = activityLabels$V1, labels = activityLabels$V2)
## Expand abbreviated variable names into descriptive ones
names(Data) <- gsub("^t", "time", names(Data))
names(Data) <- gsub("^f", "frequency", names(Data))
names(Data) <- gsub("Acc", "Accelerometer", names(Data))
names(Data) <- gsub("Gyro", "Gyroscope", names(Data))
names(Data) <- gsub("Mag", "Magnitude", names(Data))
names(Data) <- gsub("BodyBody", "Body", names(Data))
## Average each variable per subject and activity, then write the tidy set
library(plyr)
Data2 <- aggregate(. ~ subject + activity, Data, mean)
Data2 <- Data2[order(Data2$subject, Data2$activity), ]
## FIX: spell out 'row.names' (was the partially-matched 'row.name')
write.table(Data2, file = "tidydataset.txt", row.names = FALSE)
|
/run_analysis.R
|
no_license
|
mmontauti/getting-cleaning-data-project
|
R
| false
| false
| 2,529
|
r
|
## run_analysis.R: build a tidy dataset from the UCI HAR accelerometer data.
## Steps: download/extract, merge train+test sets, keep mean()/std() features,
## apply descriptive names, then write per-subject/per-activity averages.

## Download and extract files from zip (skipped if ./data already exists)
if (!file.exists("./data")) { dir.create("./data") }
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl, destfile = "./data/Dataset.zip", method = "curl")
unzip(zipfile = "./data/Dataset.zip", exdir = "./data")
path_rf <- file.path("./data", "UCI HAR Dataset")
files <- list.files(path_rf, recursive = TRUE)
files
## Read activity files (one integer activity code per observation)
dataActivityTest <- read.table(file.path(path_rf, "test", "Y_test.txt"), header = FALSE)
dataActivityTrain <- read.table(file.path(path_rf, "train", "Y_train.txt"), header = FALSE)
## Read subject files (one subject id per observation)
dataSubjectTrain <- read.table(file.path(path_rf, "train", "subject_train.txt"), header = FALSE)
dataSubjectTest <- read.table(file.path(path_rf, "test", "subject_test.txt"), header = FALSE)
## Read feature (measurement) files
dataFeaturesTest <- read.table(file.path(path_rf, "test", "X_test.txt"), header = FALSE)
dataFeaturesTrain <- read.table(file.path(path_rf, "train", "X_train.txt"), header = FALSE)
## Merge the training and test sets by rows
dataSubject <- rbind(dataSubjectTrain, dataSubjectTest)
dataActivity <- rbind(dataActivityTrain, dataActivityTest)
dataFeatures <- rbind(dataFeaturesTrain, dataFeaturesTest)
## Name the variables
names(dataSubject) <- c("subject")
names(dataActivity) <- c("activity")
## FIX: spell out 'header' (was the partially-matched 'head=FALSE')
dataFeaturesNames <- read.table(file.path(path_rf, "features.txt"), header = FALSE)
names(dataFeatures) <- dataFeaturesNames$V2
## Combine subjects, activities and measurements into one data frame
dataCombine <- cbind(dataSubject, dataActivity)
Data <- cbind(dataFeatures, dataCombine)
## Keep only the mean() and std() measurements
subdataFeaturesNames <- dataFeaturesNames$V2[grep("mean\\(\\)|std\\(\\)", dataFeaturesNames$V2)]
selectedNames <- c(as.character(subdataFeaturesNames), "subject", "activity")
Data <- subset(Data, select = selectedNames)
## Apply descriptive activity names
## FIX: activityLabels was read but never used, leaving numeric activity codes
activityLabels <- read.table(file.path(path_rf, "activity_labels.txt"), header = FALSE)
Data$activity <- factor(Data$activity, levels = activityLabels$V1, labels = activityLabels$V2)
## Expand abbreviated variable names into descriptive ones
names(Data) <- gsub("^t", "time", names(Data))
names(Data) <- gsub("^f", "frequency", names(Data))
names(Data) <- gsub("Acc", "Accelerometer", names(Data))
names(Data) <- gsub("Gyro", "Gyroscope", names(Data))
names(Data) <- gsub("Mag", "Magnitude", names(Data))
names(Data) <- gsub("BodyBody", "Body", names(Data))
## Average each variable per subject and activity, then write the tidy set
library(plyr)
Data2 <- aggregate(. ~ subject + activity, Data, mean)
Data2 <- Data2[order(Data2$subject, Data2$activity), ]
## FIX: spell out 'row.names' (was the partially-matched 'row.name')
write.table(Data2, file = "tidydataset.txt", row.names = FALSE)
|
# Generate conditional simulations of log(cadmium) on the Meuse grid
# (number_of_simulations controls how many; 10 here, despite the script's
# historical "5" comments) and export each realization as an X/Y/value table.
library(gstat)
library(sp)
number_of_simulations <- 10
name_of_prop <- "cadmium"
data(meuse)
coordinates(meuse) = ~x+y
# Fit a spherical variogram model to log(cadmium)
v <- variogram(log(cadmium)~1, meuse)
m <- fit.variogram(v, vgm(1, "Sph", 300, 1))
plot(v, model = m)
set.seed(131)
data(meuse.grid)
gridded(meuse.grid) = ~x+y
# Conditional simulation via krige(); beta = 5.9 fixes the mean (simple
# kriging), nsim realizations are drawn
sim <- krige(formula = log(cadmium)~1, meuse, meuse.grid, model = m,
	nmax = 15, beta = 5.9, nsim = number_of_simulations)
# Show all simulations side by side
spplot(sim, col.regions = colorRampPalette(c("blue","light blue","light green","green","yellow", "orange", "red")))
# Preallocate output vectors; length(sim) is presumed to be the number of
# grid cells -- NOTE(review): verify for this sp object class
x_coords <- c(1:length(sim))
y_coords <- c(1:length(sim))
xyvalues <- rep(1.0, times = length(sim))
for (ith in c(1:number_of_simulations))
{
  # Column name of the ith realization, e.g. "sim1"
  ith_sim <- paste(c("sim",ith), collapse = "")
  # NOTE(review): length(sim[ith_sim]) is presumed to equal the cell count
  for (coordinate in c(1:length(sim[ith_sim]))){
    # resolution: 2-40
    # X: Min - 178440 | Max - 181560 (-20 to anchor on left)
    x_coords[coordinate] = (as.numeric(meuse.grid$x[coordinate]) - 178440 - 20) / 40.0
    # Y: Min - 329600 | Max - 333760 (-20 to anchor on bottom)
    y_coords[coordinate] = (as.numeric(meuse.grid$y[coordinate]) - 329600 - 20) / 40.0
    xyvalues[coordinate] = as.numeric(sim[[ith_sim]][coordinate])
  }
  # One tab-separated X/Y/value file per realization.
  # NOTE(review): output path is hard-coded to "D:/GitHub/" -- parameterize?
  meuse_data <- data.frame(X = x_coords,
                           Y = y_coords,
                           V = xyvalues)
  write.table(meuse_data, paste(c("D:/GitHub/",name_of_prop,"/",name_of_prop,"_",ith,".prop"), collapse=""), sep="\t", row.names = FALSE, col.names = FALSE)
}
|
/Resources/MeuseSimulation.R
|
no_license
|
lquatrin/inf2031
|
R
| false
| false
| 1,531
|
r
|
# Generate conditional simulations of log(cadmium) on the Meuse grid
# (number_of_simulations controls how many; 10 here, despite the script's
# historical "5" comments) and export each realization as an X/Y/value table.
library(gstat)
library(sp)
number_of_simulations <- 10
name_of_prop <- "cadmium"
data(meuse)
coordinates(meuse) = ~x+y
# Fit a spherical variogram model to log(cadmium)
v <- variogram(log(cadmium)~1, meuse)
m <- fit.variogram(v, vgm(1, "Sph", 300, 1))
plot(v, model = m)
set.seed(131)
data(meuse.grid)
gridded(meuse.grid) = ~x+y
# Conditional simulation via krige(); beta = 5.9 fixes the mean (simple
# kriging), nsim realizations are drawn
sim <- krige(formula = log(cadmium)~1, meuse, meuse.grid, model = m,
	nmax = 15, beta = 5.9, nsim = number_of_simulations)
# Show all simulations side by side
spplot(sim, col.regions = colorRampPalette(c("blue","light blue","light green","green","yellow", "orange", "red")))
# Preallocate output vectors; length(sim) is presumed to be the number of
# grid cells -- NOTE(review): verify for this sp object class
x_coords <- c(1:length(sim))
y_coords <- c(1:length(sim))
xyvalues <- rep(1.0, times = length(sim))
for (ith in c(1:number_of_simulations))
{
  # Column name of the ith realization, e.g. "sim1"
  ith_sim <- paste(c("sim",ith), collapse = "")
  # NOTE(review): length(sim[ith_sim]) is presumed to equal the cell count
  for (coordinate in c(1:length(sim[ith_sim]))){
    # resolution: 2-40
    # X: Min - 178440 | Max - 181560 (-20 to anchor on left)
    x_coords[coordinate] = (as.numeric(meuse.grid$x[coordinate]) - 178440 - 20) / 40.0
    # Y: Min - 329600 | Max - 333760 (-20 to anchor on bottom)
    y_coords[coordinate] = (as.numeric(meuse.grid$y[coordinate]) - 329600 - 20) / 40.0
    xyvalues[coordinate] = as.numeric(sim[[ith_sim]][coordinate])
  }
  # One tab-separated X/Y/value file per realization.
  # NOTE(review): output path is hard-coded to "D:/GitHub/" -- parameterize?
  meuse_data <- data.frame(X = x_coords,
                           Y = y_coords,
                           V = xyvalues)
  write.table(meuse_data, paste(c("D:/GitHub/",name_of_prop,"/",name_of_prop,"_",ith,".prop"), collapse=""), sep="\t", row.names = FALSE, col.names = FALSE)
}
|
#' Generate miscellaneous statistics for species range shifts
#'
#' This function generates graphs showing the changes in range size and position between
#' the data a species distribution model was trained on and future or past projections
#' of species ranges.
#'
#' NOTE: this function is dependent on the outputs generated by the \code{projectSuit}
#' function, in particular the "Results.csv" file, which includes information on
#' changes in range size and position across the projected time periods and scenarios.
#'
#' @param result_dir the directory where the ensembled and binary maps are placed in
#' addition to the "Results.csv" file. If \code{projectSuit} was used to make
#' these maps, this should be the same as the \code{output} argument in that function.
#' @param time_periods a vector of the years in which the projection will occur, with the
#' first element as the year the model will be trained on (usually the current data). If no
#' precise years are available (e.g., using data from the Last Glacial Maximum), order
#' time periods from current to least current and give character strings for the years (e.g., "LGM").
#' @param scenarios a vector of character strings detailing the different climate models
#' used in the forecasted/hindcasted species distribution models. If no projection is
#' needed, set to NA (default).
#' @param ncores the number of computer cores to parallelize the background point
#' generation on. Default is 1; Using one fewer core than the computer has is usually
#' optimal.
#' @param dispersal (logical \code{TRUE} or \code{FALSE}) Should these statistics be
#' calculated for the dispersal-constrained distribution maps? If dispersal rate
#' analyses are not needed, or if the \code{megaSDM::dispersalRate} function has yet
#' to be run, this should be set to \code{FALSE} (the default).
#' @param dispersaldata A dataframe or the full file path to a .csv file with two columns:
#' 1. Species.
#' 2. Dispersal Rate in km/yr.
#' See the function \code{megaSDM::dispersalRate} for more details.
#' @export
#' @return creates .pdf files of graphs showing changes in range size and distribution
#' across multiple time periods and scenarios:
#' 1. The overall modelled range size across all time periods and scenarios.
#' 2. The percent change from the current range size.
#' 3. Average range size for each year given multiple climate scenarios.
#' 4. (If \code{dispersal = TRUE}) the difference in range size between dispersal constrained
#' and non-dispersal constrained species ranges.
#'
additionalStats <- function(result_dir, time_periods, scenarios,
                            dispersal = FALSE, dispersaldata = NA,
                            ncores = 1) {
  ## Normalize `dispersal`: the documented type is logical, but the legacy
  ## code compared against the string "TRUE" (which R's coercion happened to
  ## accept for logicals), so support both forms explicitly.
  dispersal <- isTRUE(dispersal) || identical(dispersal, "TRUE")
  ## Each subdirectory of result_dir is assumed to hold one species' results
  spp.list <- list.dirs(result_dir, full.names = FALSE, recursive = FALSE)
  if (length(spp.list) == 0) {
    stop("No projected models found in 'result_dir': Ensure that 'result_dir' provides a path to the proper location")
  }
  ListSpp <- c()
  #Generates the species list for parallelization
  if (dispersal) {
    #Reads in dispersal data (accepts a file path or a data frame)
    if (is.character(dispersaldata)) {
      dispersaldata <- utils::read.csv(dispersaldata, stringsAsFactors = FALSE)
    }
    dispersaldata[, 1] <- gsub("_", " ", dispersaldata[, 1])
    #Only species with dispersal-constrained results can be analyzed
    for (i in seq_along(spp.list)) {
      curspecies <- spp.list[i]
      if (file.exists(paste0(result_dir, "/", curspecies, "/Results_Dispersal.csv"))) {
        ListSpp <- c(ListSpp, spp.list[i])
      }
    }
    #Warn about species that lack a dispersal rate value
    for (w in seq_along(spp.list)) {
      FocSpec <- gsub("_", " ", spp.list[w])
      DispSpec <- grep(paste0("^", FocSpec, "$"), dispersaldata[, 1])
      if (length(DispSpec) == 0) {
        message(paste0("No dispersal rate values found for ", FocSpec, ": skipping dispersal rate analysis"))
      }
    }
  } else {
    ListSpp <- spp.list
  }
  if (length(ListSpp) == 0) {
    stop("No species with 'Results_Dispersal.csv' found in 'result_dir': run megaSDM::dispersalRate first or set dispersal = FALSE")
  }
  if (length(ListSpp) < ncores) {
    ncores <- length(ListSpp)
  }
  ## FIX: matrix() recycles elements when length(ListSpp) is not a multiple
  ## of ncores, which silently re-ran some species. Pad with NA instead;
  ## the padding is dropped again when jobs are dispatched below.
  if (length(ListSpp) %% ncores != 0) {
    ListSpp <- c(ListSpp, rep(NA_character_, ncores - (length(ListSpp) %% ncores)))
  }
  ListSpp <- matrix(ListSpp, ncol = ncores)
  #Get the number of years and scenarios
  numYear <- length(time_periods)
  numScenario <- length(scenarios[!is.na(scenarios)])
  #Graphical parameters for the barplots
  colScenario <- grDevices::colorRampPalette(c("blue", "darkred"))(max(numScenario, 1)) #COLOR SCHEME
  colYear <- grDevices::colorRampPalette(c("darkgreen", "brown"))(numYear)
  col13 <- grDevices::colorRampPalette(c("yellow", "darkgrey"))(max(numScenario, 1) * (numYear - 1) + 1)
  #Functions----------------------------
  #Creates a bar-graph of the number of cells at all time periods for all scenarios
  getCellsGraph <- function(spp, stats, dispersalApplied) {
    #Dispersal-constrained results are written to a separate directory
    if (dispersalApplied) {
      grDevices::pdf(file = paste0(result_dir, "/", spp, "/Dispersal Applied Additional Stats/NumCells.pdf"))
    } else {
      grDevices::pdf(file = paste0(result_dir, "/", spp, "/Additional Stats/NumCells.pdf"))
    }
    #Graphical parameters and barplot
    graphics::par(mar = c(8, 4, 4, 2), mfrow = c(1, 1), las = 2)
    ticks <- signif(seq(from = 0, to = max(stats$NumberCells), length.out = 10), digits = 3)
    graphics::par(yaxp = c(0, max(stats$NumberCells), 20))
    graphics::barplot(stats$NumberCells,
                      ylab = "Number of Cells",
                      axes = FALSE,
                      main = spp,
                      col = col13,
                      names.arg = stats$Projection,
                      cex.names = 0.7,
                      cex.lab = 0.7,
                      beside = TRUE)
    graphics::axis(2, ticks, cex.axis = 0.6)
    grDevices::dev.off()
  }
  #Creates multiple bar-graphs showing number of cells for each time period (one for each scenario)
  getDiffScenariosGraph <- function(spp, stats, dispersalApplied) {
    if (dispersalApplied) {
      grDevices::pdf(file = paste0(result_dir, "/", spp, "/Dispersal Applied Additional Stats/NumCells", numScenario, "Graphs.pdf"))
    } else {
      grDevices::pdf(file = paste0(result_dir, "/", spp, "/Additional Stats/NumCells", numScenario, "Graphs.pdf"))
    }
    #One panel per climate scenario
    graphics::par(mfrow = c(2, ceiling((max(numScenario, 1) / 2))), las = 2, mar = c(8, 4, 4, 2))
    for (ScenIndex in 1:numScenario) {
      ticks <- signif(seq(from = 0, to = max(stats$NumberCells), length.out = 10), digits = 3)
      #Row 1 of `stats` is the training period; each scenario then occupies a
      #block of (numYear - 1) consecutive rows
      graphics::barplot(stats$NumberCells[c(1, (2 + ((ScenIndex - 1) * (numYear - 1))):((2 + ((ScenIndex - 1) * (numYear - 1))) + numYear - 2))],
                        ylab = "Number of Cells",
                        axes = FALSE,
                        main = paste0(spp, "_", scenarios[ScenIndex]),
                        names.arg = time_periods,
                        cex.names = 0.7,
                        cex.axis = 0.5,
                        cex.lab = 0.7,
                        beside = TRUE,
                        col = colYear)
      graphics::axis(2, ticks, cex.axis = 0.6)
    }
    grDevices::dev.off()
  }
  #Creates a bar-graph of the change in cells (percent of original) from original
  getpercentDiffGraphs <- function(spp, stats, dispersalApplied) {
    origcells <- stats$NumberCells[1]
    #Percent change relative to the training-period range; 0 when the
    #training range is empty (avoids division by zero)
    if (origcells > 0) {
      PercentChange <- stats$CellChange / origcells * 100
    } else {
      PercentChange <- rep(0, nrow(stats))
    }
    PercentChange <- matrix(PercentChange)
    rownames(PercentChange) <- stats$Projection
    if (dispersalApplied) {
      grDevices::pdf(file = paste0(result_dir, "/", spp, "/Dispersal Applied Additional Stats/CellChange.pdf"))
    } else {
      grDevices::pdf(file = paste0(result_dir, "/", spp, "/Additional Stats/CellChange.pdf"))
    }
    #barplot
    graphics::barplot(PercentChange[, 1],
                      ylab = paste0("Percent Change from ", time_periods[1]),
                      main = spp,
                      cex.names = 0.7,
                      cex.axis = 0.7,
                      cex.lab = 0.7,
                      col = col13)
    grDevices::dev.off()
  }
  #Plots the mean range size per time period (averaged across scenarios),
  #with whiskers spanning the scenario minimum and maximum
  getMinMaxGraphs <- function(spp, stats, dispersalApplied) {
    numlist <- c(stats$NumberCells[1])
    #Average number of cells per time period across scenarios
    for (i in 2:numYear) {
      #Sums all of the cell numbers from each scenario within a time period
      for (j in 0:(max(numScenario, 1) - 1)) {
        if (j == 0) {
          num <- stats$NumberCells[j * (numYear - 1) + (i - 1) + 1]
        } else {
          num <- num + stats$NumberCells[j * (numYear - 1) + (i - 1) + 1]
        }
      }
      num <- num / max(numScenario, 1)
      numlist <- c(numlist, num)
    }
    #Maximum range size per time period
    maxlist <- c(0)
    for (i in 2:numYear) {
      tempList <- c()
      for (j in 0:(max(numScenario, 1) - 1)) {
        tempList <- c(tempList, stats$NumberCells[j * (numYear - 1) + (i - 1) + 1])
      }
      maxlist <- c(maxlist, max(tempList))
    }
    #Minimum range size per time period
    minlist <- c(0)
    for (i in 2:numYear) {
      tempList <- c()
      for (j in 0:(max(numScenario, 1) - 1)) {
        tempList <- c(tempList, stats$NumberCells[j * (numYear - 1) + (i - 1) + 1])
      }
      minlist <- c(minlist, min(tempList))
    }
    if (dispersalApplied) {
      grDevices::pdf(file = paste0(result_dir, "/", spp, "/Dispersal Applied Additional Stats/AvgNumberCells.pdf"))
    } else {
      grDevices::pdf(file = paste0(result_dir, "/", spp, "/Additional Stats/AvgNumberCells.pdf"))
    }
    #graphical parameters and barplot
    ticks <- signif(seq(from = 0, to = max(numlist), length.out = 10), digits = 2)
    avg <- graphics::barplot(numlist,
                             axes = FALSE,
                             ylab = "Average # of Cells per Decade",
                             main = spp,
                             names.arg = time_periods,
                             cex.names = 0.9,
                             cex.axis = 0.5,
                             cex.lab = 0.9,
                             beside = TRUE,
                             col = colYear)
    graphics::axis(2, ticks, cex.axis = 0.6)
    #Min/max whiskers across scenarios at each bar center
    graphics::segments(x0 = avg, y0 = minlist, x1 = avg, y1 = maxlist)
    grDevices::dev.off()
  }
  #Compares dispersal-constrained vs unconstrained range sizes per scenario
  DispersalCompare <- function(spp, stats) {
    spp.name <- spp
    #Reads in the results data frame (not dispersal-constrained)
    statsNoDisp <- utils::read.csv(file.path(result_dir, spp.name, "Results.csv"))
    for (scen in 1:numScenario) {
      DispComp <- matrix(NA_real_, nrow = 2, ncol = length(time_periods) - 1)
      #Fill in "DispComp" matrix with area of suitable habitat (both dispersal and regular)
      for (y in 2:length(time_periods)) {
        DispCells <- stats[grep(paste0(scenarios[scen], "_", time_periods[y], "$"), stats$Projection), "NumberCells"]
        NoDispCells <- statsNoDisp[grep(paste0(scenarios[scen], "_", time_periods[y], "$"), statsNoDisp$Projection), "NumberCells"]
        DispComp[1, (y - 1)] <- NoDispCells
        DispComp[2, (y - 1)] <- DispCells
      }
      #Name the output pdf
      rownames(DispComp) <- c("No Dispersal", "Dispersal")
      grDevices::pdf(file = file.path(result_dir, spp, "Dispersal Applied Additional Stats",
                                      paste0(scenarios[scen], "_DispersalCompare.pdf")))
      #graphical parameters and barplot
      graphics::par(mfrow = c(1, 1), mar = c(5, 5, 4, 8))
      graphics::barplot(DispComp,
                        main = c(spp, paste0("Dispersal Rate Constrained vs. Unconstrained: ", scenarios[scen])),
                        ylab = "Number of Cells",
                        names.arg = c(time_periods[2:length(time_periods)]),
                        col = c("darkblue", "red"),
                        legend = c("Unconstrained", "Constrained"),
                        args.legend = list(x = "bottomright", bty = "n", inset = c(-0.35, 0)),
                        beside = TRUE)
      grDevices::dev.off()
    }
  }
  #Runs all graphing functions for a single species
  run <- function(CurSpp) {
    spp.name <- CurSpp
    if (dispersal) {
      dir.create(file.path(result_dir, spp.name, "Dispersal Applied Additional Stats"))
      stats <- utils::read.csv(file.path(result_dir, spp.name, "Results_Dispersal.csv"))
    } else {
      dir.create(file.path(result_dir, spp.name, "Additional Stats"))
      stats <- utils::read.csv(file.path(result_dir, spp.name, "Results.csv"))
    }
    #construct graphs of area changes
    getCellsGraph(spp.name, stats, dispersal)
    getDiffScenariosGraph(spp.name, stats, dispersal)
    getpercentDiffGraphs(spp.name, stats, dispersal)
    if (numScenario > 0) {
      getMinMaxGraphs(spp.name, stats, dispersal)
    }
    if (dispersal) {
      DispersalCompare(spp.name, stats)
    }
    grDevices::graphics.off()
  }
  if (ncores == 1) {
    #Serial execution: flatten and drop any NA padding
    ListSpp <- as.vector(ListSpp)
    ListSpp <- ListSpp[!is.na(ListSpp)]
    out <- sapply(ListSpp, function(x) run(x))
  } else {
    clus <- parallel::makeCluster(ncores, setup_timeout = 0.5)
    ## FIX: ensure the cluster is stopped even if a worker errors
    on.exit(parallel::stopCluster(clus), add = TRUE)
    parallel::clusterExport(clus, varlist = c("colScenario", "colYear", "col13", "result_dir",
                                              "numYear", "numScenario", "dispersal",
                                              "dispersaldata", "DispersalCompare", "time_periods", "scenarios",
                                              "getCellsGraph", "getDiffScenariosGraph", "getpercentDiffGraphs",
                                              "getMinMaxGraphs", "run"), envir = environment())
    parallel::clusterEvalQ(clus, library(graphics))
    for (i in seq_len(nrow(ListSpp))) {
      #Drop the NA padding added above before dispatching this batch
      specs <- ListSpp[i, !is.na(ListSpp[i, ])]
      if (length(specs) > 0) {
        out <- parallel::parLapply(clus, specs, function(x) run(x))
      }
    }
  }
}
|
/R/additionalStats.R
|
permissive
|
brshipley/megaSDM
|
R
| false
| false
| 14,178
|
r
|
#' Generate miscellaneous statistics for species range shifts
#'
#' This function generates graphs showing the changes in range size and position between
#' the data a species distribution model was trained on and future or past projections
#' of species ranges.
#'
#' NOTE: this function is dependent on the outputs generated by the \code{projectSuit}
#' function, in particular the "Results.csv" file, which includes information on
#' changes in range size and position across the projected time periods and scenarios.
#'
#' @param result_dir the directory where the ensembled and binary maps are placed in
#' addition to the "Results.csv" file. If \code{projectSuit} was used to make
#' these maps, this should be the same as the \code{output} argument in that function.
#' @param time_periods a vector of the years in which the projection will occur, with the
#' first element as the year the model will be trained on (usually the current data).If no
#' precise years are available (e.g., using data from the Last Glacial Maximum), order
#' time periods from current to least current and give character strings for the years (e.g., "LGM").
#' @param scenarios a vector of character strings detailing the different climate models
#' used in the forecasted/hindcasted species distribution models. In no projection is
#' needed, set to NA (defualt).
#' @param ncores the number of computer cores to parallelize the background point
#' generation on. Default is 1; Using one fewer core than the computer has is usually
#' optimal.
#' @param dispersal (logical \code{TRUE} or \code{FALSE}) Should these statistics be
#' calculated for the dispersal-constrained distribution maps? If dispersal rate
#' analysis are not needed, or if the \code{megaSDM::dispersalRate} function has yet
#' to be run, this should be set to \code{FALSE} (the default).
#' @param dispersaldata A dataframe or the full file path to a .csv file with two columns:
#' 1. Species.
#' 2. Dispersal Rate in km/yr.
#' See the function \code{megaSDM::dispersalRate} for more details.
#' @export
#' @return creates .pdf files of graphs showing changes in range size and distribution
#' across multiple time periods and scenarios:
#' 1. The overall modelled range size across all time periods and scenarios.
#' 2. The percent change from the current range size.
#' 3. Average range size for each year given multiple climate scenarios.
#' 4. (If \code{dispersal = TRUE}) the difference in range size between dispersal constrained
#' and non-dispersal constrained species ranges.
#'
additionalStats <- function(result_dir, time_periods, scenarios,
dispersal = FALSE, dispersaldata = NA,
ncores = 1) {
spp.list <- list.dirs(result_dir, full.names = FALSE, recursive = FALSE)
if (length(spp.list) == 0) {
stop(paste0("No projected models found in 'result_dir': Ensure that 'result_dir' provides a path to the proper location"))
}
ListSpp <- c()
#Generates the species list for parallelization
if (dispersal == "TRUE") {
#Reads in dispersal data
if (class(dispersaldata) == "character") {
dispersaldata <- utils::read.csv(dispersaldata, stringsAsFactors = FALSE)
dispersaldata[, 1] <- gsub("_", " ", dispersaldata[, 1])
} else {
dispersaldata[, 1] <- gsub("_", " ", dispersaldata[, 1])
}
for (i in 1:length(spp.list)) {
curspecies <- spp.list[i]
if (file.exists(paste0(result_dir, "/", curspecies, "/Results_Dispersal.csv"))) {
ListSpp <- c(ListSpp, spp.list[i])
}
}
for (w in 1:length(spp.list)) {
FocSpec <- gsub("_", " ",spp.list[w])
DispSpec <- grep(paste0("^", FocSpec, "$"), dispersaldata[, 1])
if (length(DispSpec) == 0) {
message(paste0("No dispersal rate values found for ", FocSpec, ": skipping dispersal rate analysis"))
}
}
} else {
ListSpp <- spp.list
}
if (length(ListSpp) < ncores) {
ncores <- length(ListSpp)
}
ListSpp <- matrix(ListSpp, ncol = ncores)
#Get the nubmer of years and scenarios
numYear <- length(time_periods)
numScenario <- length(scenarios[!is.na(scenarios)])
#Graphical parameters for the barplots
colScenario <- grDevices::colorRampPalette(c("blue", "darkred"))(max(numScenario, 1)) #COLOR SCHEME
colYear <- grDevices::colorRampPalette(c("darkgreen", "brown"))(numYear)
col13 <- grDevices::colorRampPalette(c("yellow", "darkgrey"))(max(numScenario, 1) * (numYear - 1) + 1)
#Functions----------------------------
#Creates a bar-graph of the number of cells at all time periods for all scenarios
getCellsGraph <- function(spp, stats, dispersalApplied) {
#If dispersal has been applied, prints out a new graph
if (dispersalApplied == "TRUE") {
grDevices::pdf(file = paste0(result_dir, "/", spp, "/Dispersal Applied Additional Stats/NumCells.pdf"))
} else {
grDevices::pdf(file = paste0(result_dir, "/", spp, "/Additional Stats/NumCells.pdf"))
}
#Graphical parameters and barplot
graphics::par(mar = c(8, 4, 4, 2), mfrow = c(1, 1), las = 2)
ticks <- signif(seq(from = 0, to = max(stats$NumberCells), length.out = 10), digits = 3)
graphics::par(yaxp = c(0, max(stats$NumberCells), 20))
graphics::barplot(stats$NumberCells,
ylab = "Number of Cells",
axes = FALSE,
main = spp,
col = col13,
names.arg = stats$Projection,
cex.names = 0.7,
cex.lab = 0.7,
beside = TRUE)
graphics::axis(2, ticks, cex.axis = 0.6)
grDevices::dev.off()
}
#Creates multiple bar-graphs showing number of cells for each time period (one for each scenario)
getDiffScenariosGraph <- function(spp, stats, dispersalApplied) {
#If dispersal has been applied, prints out a new graph
if (dispersalApplied == "TRUE") {
grDevices::pdf(file = paste0(result_dir, "/", spp, "/Dispersal Applied Additional Stats/NumCells", numScenario, "Graphs.pdf"))
} else {
grDevices::pdf(file = paste0(result_dir, "/", spp, "/Additional Stats/NumCells", numScenario, "Graphs.pdf"))
}
#graphical parameters and bar plot (for each climate scenario)
old.par <- graphics::par(mfrow = c(2, ceiling((max(numScenario, 1) / 2))), las = 2, mar = c(8, 4, 4, 2))
for (ScenIndex in 1:numScenario) {
ticks <- signif(seq(from = 0, to = max(stats$NumberCells), length.out = 10), digits = 3)
graphics::barplot(stats$NumberCells[c(1, (2 + ((ScenIndex - 1) * (numYear - 1))):((2 + ((ScenIndex - 1) * (numYear - 1))) + numYear - 2))],
ylab = "Number of Cells",
axes = FALSE,
main = paste0(spp, "_", scenarios[ScenIndex]),
names.arg = time_periods,
cex.names = 0.7,
cex.axis = 0.5,
cex.lab = 0.7,
beside = TRUE,
col = colYear)
graphics::axis(2, ticks, cex.axis = 0.6)
}
grDevices::dev.off()
}
#Creates a bar-graph of the change in cells (percent of original) from original
getpercentDiffGraphs <- function(spp, stats, dispersalApplied) {
nstats <- nrow(stats)
PercentChange <- c()
origcells <- stats$NumberCells[1]
#Calculates percent change between time period range and original range
for (n in 1:nstats) {
nchange <- stats$CellChange[n]
if (origcells > 0) {
perchange <- (nchange / origcells * 100)
} else {
perchange <- 0
}
PercentChange <- c(PercentChange, perchange)
}
PercentChange <- matrix(PercentChange)
rownames(PercentChange) <- stats$Projection
#If dispersal has been applied, prints out a new graph
if (dispersalApplied == "TRUE") {
grDevices::pdf(file = paste0(result_dir, "/", spp, "/Dispersal Applied Additional Stats/CellChange.pdf"))
} else {
grDevices::pdf(file = paste0(result_dir, "/", spp, "/Additional Stats/CellChange.pdf"))
}
#barplot
CellChangePlot <- graphics::barplot(PercentChange[, 1],
ylab = paste0("Percent Change from ", time_periods[1]),
main = spp,
cex.names = 0.7,
cex.axis = 0.7,
cex.lab = 0.7,
col = col13)
grDevices::dev.off()
}
getMinMaxGraphs <- function(spp, stats, dispersalApplied) {
numlist <- c(stats$NumberCells[1])
#Calculates average number of cells per time period across scenarios
for (i in 2:numYear) {
#Sums all of the cell numbers from each scenario within a time period
for (j in 0:(max(numScenario, 1) - 1)) {
if (j == 0) {
num <- stats$NumberCells[j * (numYear - 1) + (i - 1) + 1]
} else {
num <- num + stats$NumberCells[j * (numYear - 1) + (i - 1) + 1]
}
}
num <- num / max(numScenario, 1)
numlist <- c(numlist, num)
}
#Calculates maximum range size per time period
maxlist <- c(0)
for (i in 2:numYear) {
tempList <- c()
for (j in 0:(max(numScenario, 1) - 1)) {
tempList <- c(tempList, stats$NumberCells[j * (numYear - 1) + (i - 1) + 1])
}
maxlist <- c(maxlist, max(tempList))
}
#Calculates minimum range size per time period
minlist <- c(0)
for (i in 2:numYear) {
tempList <- c()
for (j in 0:(max(numScenario, 1) - 1)) {
tempList <- c(tempList, stats$NumberCells[j * (numYear - 1) + (i - 1) + 1])
}
minlist <- c(minlist, min(tempList))
}
#If dispersal has been applied, prints out a new graph
if (dispersalApplied == "TRUE") {
grDevices::pdf(file = paste0(result_dir, "/", spp, "/Dispersal Applied Additional Stats/AvgNumberCells.pdf"))
} else {
grDevices::pdf(file = paste0(result_dir, "/", spp, "/Additional Stats/AvgNumberCells.pdf"))
}
#graphical parameters and barplot
ticks <- signif(seq(from = 0, to = max(numlist), length.out = 10), digits = 2)
avg <- graphics::barplot(numlist,
axes = FALSE,
ylab = "Average # of Cells per Decade",
main = spp,
names.arg = time_periods,
cex.names = 0.9,
cex.axis = 0.5,
cex.lab = 0.9,
beside=TRUE,
col=colYear)
graphics::axis(2, ticks, cex.axis = 0.6)
graphics::segments(x0 = avg, y0 = minlist, x1 = avg, y1 = maxlist)
grDevices::dev.off()
}
DispersalCompare <- function(spp, stats) {
  # Plots, for each climate scenario, the number of suitable cells per time
  # period with and without the dispersal-rate constraint applied.
  #
  # Args:
  #   spp: character, species name (used for file paths and plot titles).
  #   stats: data frame of dispersal-constrained results (Results_Dispersal.csv).
  #
  # Relies on globals: result_dir, numScenario, scenarios, time_periods.
  #
  # Fixes vs. original: removed dead assignments (`statsNoDisp <- statsNoDisp`,
  # unused `nstatsNoDisp` and `CurYear`) and replaced 1:numScenario with
  # seq_len() so a zero-scenario run skips cleanly instead of indexing scen 0.
  spp.name <- spp
  # Unconstrained results for the same species
  statsNoDisp <- utils::read.csv(file.path(result_dir, spp.name, "Results.csv"))
  for (scen in seq_len(numScenario)) {
    # Row 1 = unconstrained cell counts, row 2 = constrained; one column per
    # projected time period (the first period is the baseline and is skipped).
    DispComp <- matrix(NA_real_, nrow = 2, ncol = length(time_periods) - 1)
    for (y in 2:length(time_periods)) {
      # Anchor the projection name at the end of the string so e.g. "2050"
      # does not also match "20500".
      proj <- paste0(scenarios[scen], "_", time_periods[y], "$")
      DispComp[1, y - 1] <- statsNoDisp[grep(proj, statsNoDisp$Projection), "NumberCells"]
      DispComp[2, y - 1] <- stats[grep(proj, stats$Projection), "NumberCells"]
    }
    rownames(DispComp) <- c("No Dispersal", "Dispersal")
    # One comparison PDF per scenario
    grDevices::pdf(file = file.path(result_dir, spp, "Dispersal Applied Additional Stats",
                                    paste0(scenarios[scen], "_DispersalCompare.pdf")))
    # Wide right margin leaves room for the legend outside the bars
    graphics::par(mfrow = c(1, 1), mar = c(5, 5, 4, 8))
    graphics::barplot(DispComp,
                      main = c(spp, paste0("Dispersal Rate Constrained vs. Unconstrained: ", scenarios[scen])),
                      ylab = "Number of Cells",
                      names.arg = c(time_periods[2:length(time_periods)]),
                      col = c("darkblue", "red"),
                      legend = c("Unconstrained", "Constrained"),
                      args.legend = list(x = "bottomright", bty = "n", inset = c(-0.35, 0)),
                      beside = TRUE)
    grDevices::dev.off()
  }
}
run <- function(CurSpp) {
  # Per-species driver: loads the appropriate results table and draws every
  # summary graph for that species.
  #
  # Args:
  #   CurSpp: character, species name.
  #
  # Relies on globals: result_dir, dispersal, numScenario, and the graphing
  # helpers (getCellsGraph, getDiffScenariosGraph, ...).
  #
  # Fixes vs. original: removed the no-op `stats <- stats` assignments and the
  # unused `nstats` variable.
  spp.name <- CurSpp
  # NOTE(review): `dispersal` is compared against the string "TRUE"; confirm
  # it is stored as a character flag rather than a logical.
  if (dispersal == "TRUE") {
    dir.create(file.path(result_dir, spp.name, "Dispersal Applied Additional Stats"))
    stats <- utils::read.csv(file.path(result_dir, spp.name, "Results_Dispersal.csv"))
  } else {
    dir.create(file.path(result_dir, spp.name, "Additional Stats"))
    stats <- utils::read.csv(file.path(result_dir, spp.name, "Results.csv"))
  }
  # Construct graphs of area changes
  getCellsGraph(spp.name, stats, dispersal)
  getDiffScenariosGraph(spp.name, stats, dispersal)
  getpercentDiffGraphs(spp.name, stats, dispersal)
  # Min/max envelopes only make sense when there is at least one scenario
  if (numScenario > 0) {
    getMinMaxGraphs(spp.name, stats, dispersal)
  }
  # Compare constrained vs. unconstrained range size when dispersal applied
  if (dispersal == "TRUE") {
    DispersalCompare(spp.name, stats)
  }
  # Close any graphics devices left open by the helpers
  grDevices::graphics.off()
}
# Dispatch the per-species graphing routine run() either serially or across a
# PSOCK cluster, depending on the requested core count.
if (ncores == 1) {
ListSpp <- as.vector(ListSpp)
out <- sapply(ListSpp, function(x) run(x))
} else {
# NOTE(review): setup_timeout = 0.5 gives each worker only half a second to
# start; confirm this is intentional (slow machines may fail to launch).
clus <- parallel::makeCluster(ncores, setup_timeout = 0.5)
# Workers start with empty environments, so every global that run() and its
# helpers read must be exported explicitly.
parallel::clusterExport(clus, varlist = c("colScenario", "colYear", "col13", "result_dir",
                                          "numYear", "numScenario", "dispersal",
                                          "dispersaldata", "DispersalCompare", "time_periods","scenarios",
                                          "getCellsGraph", "getDiffScenariosGraph", "getpercentDiffGraphs",
                                          "getMinMaxGraphs", "run"), envir = environment())
parallel::clusterEvalQ(clus, library(graphics))
# NOTE(review): ListSpp is indexed by row here but treated as a flat vector
# in the serial branch above - confirm its expected shape (matrix of species
# names, one batch per row?).
for (i in 1:nrow(ListSpp)) {
out <- parallel::parLapply(clus, ListSpp[i, ], function(x) run(x))
}
parallel::stopCluster(clus)
}
}
|
#' @title Calculate durations of time
#' @description
#' Calculates the duration of time between two provided date objects.
#' Supports vectorized data (i.e. \code{\link[dplyr:mutate]{dplyr::mutate()}}).
#' @param x A date or datetime. The start date(s)/timestamp(s).
#' @param y A date or datetime. The end date(s)/timestamp(s).
#' @param units A character. Units of the returned duration
#' (i.e. 'seconds', 'days', 'years').
#' @return If 'units' specified, returns numeric. If 'units' unspecified,
#' returns an object of class '\code{\link[lubridate:Duration-class]{Duration}}'.
#' @note Supports multiple calculations against a single time point (i.e.
#' multiple start dates with a single end date). Note that start and end
#' must otherwise be of the same length.
#'
#' When the start and end dates are of different types (i.e. x = date,
#' y = datetime), a lossy cast will be performed which strips the datetime data
#' of its time components. This is done to avoid an assumption of more time
#' passing that would otherwise come with casting the date data to datetime.
#' @examples
#' library(lubridate)
#' library(purrr)
#'
#' # Dates -> duration in years
#' calc_duration(
#' x = mdy(map_chr(sample(1:9, 5), ~ paste0('01/01/199', .x))),
#' y = mdy(map_chr(sample(1:9, 5), ~ paste0('01/01/200', .x))),
#' units = 'years'
#' )
#'
#' # datetimes -> durations
#' calc_duration(
#' x = mdy_hm(map_chr(sample(1:9, 5), ~ paste0('01/01/199', .x, ' 1', .x, ':00'))),
#' y = mdy_hm(map_chr(sample(1:9, 5), ~ paste0('01/01/200', .x, ' 0', .x, ':00')))
#' )
#'
#' # Mixed date classes -> durations
#' calc_duration(
#' x = mdy(map_chr(sample(1:9, 5), ~ paste0('01/01/199', .x))),
#' y = mdy_hm(map_chr(sample(1:9, 5), ~ paste0('01/01/200', .x, ' 0', .x, ':00')))
#' )
#' @export
calc_duration <- function (x, y, units = NULL) {
  # Duration between start timepoint(s) `x` and end timepoint(s) `y`.
  # Returns a lubridate Duration, or a numeric when `units` is given.
  # Input type check
  if (
    !all(lubridate::is.timepoint(x), na.rm = TRUE) ||
    !all(lubridate::is.timepoint(y), na.rm = TRUE)
  ) {
    stop('\'x\' and/or \'y\' not <date> or <datetime>.')
  }
  # Recycle single timepoint or throw error for mismatched sizes
  common_dates <- vctrs::vec_recycle_common(x = x, y = y)
  # Lossy cast: strip the time component when one input is a plain Date, to
  # avoid implying extra elapsed time (see roxygen @note above).
  if (any(class(x) != class(y), na.rm = TRUE)) {
    common_dates <- purrr::map(common_dates, as.Date)
  }
  # Calculate duration.
  # BUG FIX: the original computed the interval from the raw `x`/`y`,
  # silently discarding both the recycling and the Date cast performed on
  # `common_dates` above.
  duration <- lubridate::as.duration(
    lubridate::interval(common_dates$x, common_dates$y)
  )
  # Return data as appropriate type
  if (!is.null(units)) as.numeric(duration, units)
  else duration
}
#' @title Calculate data chunk indices
#' @description
#' Calculates chunk indices of a data object
#' for a given chunk size (number of items per chunk).
#' @param x A data frame or vector.
#' @param size An integer. The number of items (e.g. rows in a tibble)
#' that make up a given chunk. Must be a positive integer. Caps out at data
#' maximum.
#' @param reverse A logical. Calculate chunks from back to front.
#' @return An iterable list of row indices for each chunk of data.
#' @examples
#' # Create chunk map for a data frame
#' chunks <- calc_chunks(mtcars, size = 6)
#'
#' # Iterate through chunks of data
#' for (chunk in chunks) print(paste0(rownames(mtcars[chunk,]), collapse = ', '))
#' @export
calc_chunks <- function (x, size = 10, reverse = FALSE) {
  # Build the list of index ranges that partition `x` into chunks of at most
  # `size` items, front-to-back or (reverse = TRUE) back-to-front.
  # --- validation ---------------------------------------------------------
  if (!is.data.frame(x) && !is.vector(x)) {
    stop('\'x\' not a <data.frame> or vector.')
  }
  if (!is.numeric(size) || size < 1) {
    stop('\'size\' not <numeric> or less than 1.')
  }
  # --- sizing -------------------------------------------------------------
  n_items <- vctrs::vec_size(x)   # rows for a data frame, length otherwise
  size <- min(size, n_items)      # cap the chunk size at the data size
  n_chunks <- ceiling(n_items / size)
  # --- index construction -------------------------------------------------
  if (reverse) {
    # Descending ranges starting from the last item
    purrr::map(
      seq_len(n_chunks),
      function(i) (n_items - (i - 1) * size):max(1, n_items - i * size + 1)
    )
  } else {
    # Ascending ranges starting from the first item
    purrr::map(
      seq_len(n_chunks),
      function(i) ((i - 1) * size + 1):min(n_items, i * size)
    )
  }
}
|
/R/calc.R
|
no_license
|
efinite/utile.tools
|
R
| false
| false
| 3,809
|
r
|
#' @title Calculate durations of time
#' @description
#' Calculates the duration of time between two provided date objects.
#' Supports vectorized data (i.e. \code{\link[dplyr:mutate]{dplyr::mutate()}}).
#' @param x A date or datetime. The start date(s)/timestamp(s).
#' @param y A date or datetime. The end date(s)/timestamp(s).
#' @param units A character. Units of the returned duration
#' (i.e. 'seconds', 'days', 'years').
#' @return If 'units' specified, returns numeric. If 'units' unspecified,
#' returns an object of class '\code{\link[lubridate:Duration-class]{Duration}}'.
#' @note Supports multiple calculations against a single time point (i.e.
#' multiple start dates with a single end date). Note that start and end
#' must otherwise be of the same length.
#'
#' When the start and end dates are of different types (i.e. x = date,
#' y = datetime), a lossy cast will be performed which strips the datetime data
#' of its time components. This is done to avoid an assumption of more time
#' passing that would otherwise come with casting the date data to datetime.
#' @examples
#' library(lubridate)
#' library(purrr)
#'
#' # Dates -> duration in years
#' calc_duration(
#' x = mdy(map_chr(sample(1:9, 5), ~ paste0('01/01/199', .x))),
#' y = mdy(map_chr(sample(1:9, 5), ~ paste0('01/01/200', .x))),
#' units = 'years'
#' )
#'
#' # datetimes -> durations
#' calc_duration(
#' x = mdy_hm(map_chr(sample(1:9, 5), ~ paste0('01/01/199', .x, ' 1', .x, ':00'))),
#' y = mdy_hm(map_chr(sample(1:9, 5), ~ paste0('01/01/200', .x, ' 0', .x, ':00')))
#' )
#'
#' # Mixed date classes -> durations
#' calc_duration(
#' x = mdy(map_chr(sample(1:9, 5), ~ paste0('01/01/199', .x))),
#' y = mdy_hm(map_chr(sample(1:9, 5), ~ paste0('01/01/200', .x, ' 0', .x, ':00')))
#' )
#' @export
calc_duration <- function (x, y, units = NULL) {
  # Duration between start timepoint(s) `x` and end timepoint(s) `y`.
  # Returns a lubridate Duration, or a numeric when `units` is given.
  # Input type check
  if (
    !all(lubridate::is.timepoint(x), na.rm = TRUE) ||
    !all(lubridate::is.timepoint(y), na.rm = TRUE)
  ) {
    stop('\'x\' and/or \'y\' not <date> or <datetime>.')
  }
  # Recycle single timepoint or throw error for mismatched sizes
  common_dates <- vctrs::vec_recycle_common(x = x, y = y)
  # Lossy cast: strip the time component when one input is a plain Date, to
  # avoid implying extra elapsed time (see roxygen @note above).
  if (any(class(x) != class(y), na.rm = TRUE)) {
    common_dates <- purrr::map(common_dates, as.Date)
  }
  # Calculate duration.
  # BUG FIX: the original computed the interval from the raw `x`/`y`,
  # silently discarding both the recycling and the Date cast performed on
  # `common_dates` above.
  duration <- lubridate::as.duration(
    lubridate::interval(common_dates$x, common_dates$y)
  )
  # Return data as appropriate type
  if (!is.null(units)) as.numeric(duration, units)
  else duration
}
#' @title Calculate data chunk indices
#' @description
#' Calculates chunk indices of a data object
#' for a given chunk size (number of items per chunk).
#' @param x A data frame or vector.
#' @param size An integer. The number of items (e.g. rows in a tibble)
#' that make up a given chunk. Must be a positive integer. Caps out at data
#' maximum.
#' @param reverse A logical. Calculate chunks from back to front.
#' @return An iterable list of row indices for each chunk of data.
#' @examples
#' # Create chunk map for a data frame
#' chunks <- calc_chunks(mtcars, size = 6)
#'
#' # Iterate through chunks of data
#' for (chunk in chunks) print(paste0(rownames(mtcars[chunk,]), collapse = ', '))
#' @export
calc_chunks <- function (x, size = 10, reverse = FALSE) {
  # Build the list of index ranges that partition `x` into chunks of at most
  # `size` items, front-to-back or (reverse = TRUE) back-to-front.
  # --- validation ---------------------------------------------------------
  if (!is.data.frame(x) && !is.vector(x)) {
    stop('\'x\' not a <data.frame> or vector.')
  }
  if (!is.numeric(size) || size < 1) {
    stop('\'size\' not <numeric> or less than 1.')
  }
  # --- sizing -------------------------------------------------------------
  n_items <- vctrs::vec_size(x)   # rows for a data frame, length otherwise
  size <- min(size, n_items)      # cap the chunk size at the data size
  n_chunks <- ceiling(n_items / size)
  # --- index construction -------------------------------------------------
  if (reverse) {
    # Descending ranges starting from the last item
    purrr::map(
      seq_len(n_chunks),
      function(i) (n_items - (i - 1) * size):max(1, n_items - i * size + 1)
    )
  } else {
    # Ascending ranges starting from the first item
    purrr::map(
      seq_len(n_chunks),
      function(i) ((i - 1) * size + 1):min(n_items, i * size)
    )
  }
}
|
#---------------------------------------------------------------------------
# setSampleParams.PowerTest.R
# Set the sample parameters and generate new groups
# @param sSize A vector containing the size of each sample
# @param sMean A vector containing the mean of each sample
# @param sSigma A vector containing the standard deviation of each sample
# @author Nihar Shah
#---------------------------------------------------------------------------
setMethodS3("setSampleParams", "PowerTest",
  appendVarArgs = FALSE,
  function(this, sSize, sMean, sSigma) {
    # Validate the supplied per-sample statistics before mutating `this`
    checkSampleStats(sSize = sSize,
                     sMean = sMean, sSigma = sSigma)
    # Store the new parameters on the PowerTest reference object
    this$sampleSizes <- sSize
    this$mu0 <- sMean
    this$sigma <- sSigma
    this$numberOfSamples <- length(sSize)
    # Regenerate the sample groups under the new parameters.
    # NOTE(review): generateGroups.PowerTest() is called with no arguments;
    # confirm it resolves the object from the calling context as intended.
    this$groups <- generateGroups.PowerTest()
  })
#---------------------------------------------------------------------------
|
/setSampleParams.PowerTest.R
|
no_license
|
nihar/kruskal-wallis
|
R
| false
| false
| 887
|
r
|
#---------------------------------------------------------------------------
# setSampleParams.PowerTest.R
# Set the sample parameters and generate new groups
# @param sSize A vector containing the size of each sample
# @param sMean A vector containing the mean of each sample
# @param sSigma A vector containing the standard deviation of each sample
# @author Nihar Shah
#---------------------------------------------------------------------------
setMethodS3("setSampleParams", "PowerTest",
  appendVarArgs = FALSE,
  function(this, sSize, sMean, sSigma) {
    # Validate the supplied per-sample statistics before mutating `this`
    checkSampleStats(sSize = sSize,
                     sMean = sMean, sSigma = sSigma)
    # Store the new parameters on the PowerTest reference object
    this$sampleSizes <- sSize
    this$mu0 <- sMean
    this$sigma <- sSigma
    this$numberOfSamples <- length(sSize)
    # Regenerate the sample groups under the new parameters.
    # NOTE(review): generateGroups.PowerTest() is called with no arguments;
    # confirm it resolves the object from the calling context as intended.
    this$groups <- generateGroups.PowerTest()
  })
#---------------------------------------------------------------------------
|
lognormal_param <- function(expectation, variance) {
  # Convert a desired mean/variance pair into the (mu, sigma^2) parameters of
  # the lognormal distribution that attains them:
  #   mu     = log(m / sqrt(1 + v / m^2))
  #   sigma2 = log(1 + v / m^2)
  ratio <- 1 + variance / (expectation * expectation)
  list(
    mu = log(expectation / sqrt(ratio)),
    sigma2 = log(ratio)
  )
}
# Target mean (E), variance (V) and number of draws for the demonstration.
E <- 3;
V <- 5;
N <- 500;
# Exact lognormal parameters that achieve mean E and variance V.
exact <- lognormal_param(E, V);
# NOTE(review): no set.seed() call, so the figures differ on every run -
# confirm reproducibility is not required here.
sample <- rlnorm(N, exact$mu, sqrt(exact$sigma2));
# Three stacked panels.
par(mfrow=c(3, 1));
# fig 1: raw draws, with the empirical mean/variance shown in the title
plot(sample);
title(sprintf("N = %d samples from lognormal(%f, %f)\nmean=%f, var=%f", N, exact$mu, exact$sigma2, mean(sample), var(sample)));
# fig 2: density-scaled histogram overlaid with the exact pdf
h <- hist(sample, breaks=50, plot=FALSE);
xfit <- seq(min(h$breaks), max(h$breaks), length = 200);
exact_pdf <- dlnorm(xfit, exact$mu, sqrt(exact$sigma2));
plot(h, ylim=c(0, max(h$density, exact_pdf)), freq=FALSE, main="");
lines(xfit, exact_pdf, col="blue");
title("Histogram of samples v.s. exact pdf");
legend("topright", "exact pdf", lty=1, col="blue");
# fig 3: pdf with parameters re-estimated from log(sample) vs. the exact pdf
empirical <- list(mu=mean(log(sample)), sigma2=var(log(sample)));
empirical_pdf <- dlnorm(xfit, empirical$mu, sqrt(empirical$sigma2));
plot(xfit, exact_pdf, type="l", lty=1, col="blue", ylim=c(0, max(exact_pdf, empirical_pdf)));
lines(xfit, empirical_pdf);
title("empirical pdf v.s. exact pdf");
legend("topright", c("exact pdf", "empirical pdf"), lty=c(1, 1), col=c("blue", "black"));
|
/exercise1/exercise1.r
|
no_license
|
zhzhzoo/meucci-test
|
R
| false
| false
| 1,248
|
r
|
lognormal_param <- function(expectation, variance) {
  # Convert a desired mean/variance pair into the (mu, sigma^2) parameters of
  # the lognormal distribution that attains them:
  #   mu     = log(m / sqrt(1 + v / m^2))
  #   sigma2 = log(1 + v / m^2)
  ratio <- 1 + variance / (expectation * expectation)
  list(
    mu = log(expectation / sqrt(ratio)),
    sigma2 = log(ratio)
  )
}
# Target mean (E), variance (V) and number of draws for the demonstration.
E <- 3;
V <- 5;
N <- 500;
# Exact lognormal parameters that achieve mean E and variance V.
exact <- lognormal_param(E, V);
# NOTE(review): no set.seed() call, so the figures differ on every run -
# confirm reproducibility is not required here.
sample <- rlnorm(N, exact$mu, sqrt(exact$sigma2));
# Three stacked panels.
par(mfrow=c(3, 1));
# fig 1: raw draws, with the empirical mean/variance shown in the title
plot(sample);
title(sprintf("N = %d samples from lognormal(%f, %f)\nmean=%f, var=%f", N, exact$mu, exact$sigma2, mean(sample), var(sample)));
# fig 2: density-scaled histogram overlaid with the exact pdf
h <- hist(sample, breaks=50, plot=FALSE);
xfit <- seq(min(h$breaks), max(h$breaks), length = 200);
exact_pdf <- dlnorm(xfit, exact$mu, sqrt(exact$sigma2));
plot(h, ylim=c(0, max(h$density, exact_pdf)), freq=FALSE, main="");
lines(xfit, exact_pdf, col="blue");
title("Histogram of samples v.s. exact pdf");
legend("topright", "exact pdf", lty=1, col="blue");
# fig 3: pdf with parameters re-estimated from log(sample) vs. the exact pdf
empirical <- list(mu=mean(log(sample)), sigma2=var(log(sample)));
empirical_pdf <- dlnorm(xfit, empirical$mu, sqrt(empirical$sigma2));
plot(xfit, exact_pdf, type="l", lty=1, col="blue", ylim=c(0, max(exact_pdf, empirical_pdf)));
lines(xfit, empirical_pdf);
title("empirical pdf v.s. exact pdf");
legend("topright", c("exact pdf", "empirical pdf"), lty=c(1, 1), col=c("blue", "black"));
|
\name{dbePlot}
\alias{dbePlot}
\alias{dbePlot,dbeOutput-method}
\docType{methods}
\title{Graphical display of 'dbeOutput' final estimates}
\description{
Method for plotting final estimates from an input 'dbeOutput' object.
}
\usage{
dbePlot(object,elmt,type="bar",Xstratum=NULL,step=NA,dispKey=TRUE,indScale=FALSE,\dots)
}
\arguments{
\item{object}{A \emph{dbeOutput} object.}
\item{elmt}{Character specifying an element (a dataframe) of \emph{dbeOutput} 'object'. For example, "lenStruc\$estim", "ageVar" or "totalNnum\$cv". 'rep' elements are not accepted ; see \emph{dbePlotRep}.}
\item{type}{Character specifying the type of the drawn plot. To be chosen between "bar" (default value), "point" and "line".}
\item{Xstratum}{Stratum displayed on x-axis if 'elmt' doesn't point at length or age structure information. To be chosen between "time", "space", "technical" and \code{NULL} (default value).}
\item{step}{Numeric. If given, empty length or age classes will be considered and displayed, according to specified value.}
\item{dispKey}{Logical. If \code{TRUE}, a describing key is displayed}
\item{indScale}{Logical. If \code{TRUE}, y-axis scale is specific to each panel. If \code{FALSE}, the same limits are used for every panel.}
\item{...}{Further graphical arguments such as \emph{col, lwd, lty, pch, cex, font, rot,}\dots}
}
\author{Mathieu Merzereaud}
\seealso{\code{\link{dbeOutput}}, \code{\link{dbePlotRep}}
}
\examples{
data(sole)
#stratification object
strDef <- strIni(timeStrata="quarter",spaceStrata="area")
#consolidated object
object <- csDataCons(csDataVal(sole.cs),strDef)
#dbeOutput initial object with needed parameters
dbeOutput <- dbeObject(desc="My object",species="Solea solea",param="weight",
strataDesc=strDef,methodDesc="analytical")
lW <- bpEstim(dbeOutput,object)
dbePlot(lW,elmt="ageStruc$estim",step=1,ylab="Mean weight (g)")
}
\keyword{methods}
|
/COSTdbe/man/dbePlot.rd
|
no_license
|
BackupTheBerlios/cost-project
|
R
| false
| false
| 1,968
|
rd
|
\name{dbePlot}
\alias{dbePlot}
\alias{dbePlot,dbeOutput-method}
\docType{methods}
\title{Graphical display of 'dbeOutput' final estimates}
\description{
Method for plotting final estimates from an input 'dbeOutput' object.
}
\usage{
dbePlot(object,elmt,type="bar",Xstratum=NULL,step=NA,dispKey=TRUE,indScale=FALSE,\dots)
}
\arguments{
\item{object}{A \emph{dbeOutput} object.}
\item{elmt}{Character specifying an element (a dataframe) of \emph{dbeOutput} 'object'. For example, "lenStruc\$estim", "ageVar" or "totalNnum\$cv". 'rep' elements are not accepted ; see \emph{dbePlotRep}.}
\item{type}{Character specifying the type of the drawn plot. To be chosen between "bar" (default value), "point" and "line".}
\item{Xstratum}{Stratum displayed on x-axis if 'elmt' doesn't point at length or age structure information. To be chosen between "time", "space", "technical" and \code{NULL} (default value).}
\item{step}{Numeric. If given, empty length or age classes will be considered and displayed, according to specified value.}
\item{dispKey}{Logical. If \code{TRUE}, a describing key is displayed}
\item{indScale}{Logical. If \code{TRUE}, y-axis scale is specific to each panel. If \code{FALSE}, the same limits are used for every panel.}
\item{...}{Further graphical arguments such as \emph{col, lwd, lty, pch, cex, font, rot,}\dots}
}
\author{Mathieu Merzereaud}
\seealso{\code{\link{dbeOutput}}, \code{\link{dbePlotRep}}
}
\examples{
data(sole)
#stratification object
strDef <- strIni(timeStrata="quarter",spaceStrata="area")
#consolidated object
object <- csDataCons(csDataVal(sole.cs),strDef)
#dbeOutput initial object with needed parameters
dbeOutput <- dbeObject(desc="My object",species="Solea solea",param="weight",
strataDesc=strDef,methodDesc="analytical")
lW <- bpEstim(dbeOutput,object)
dbePlot(lW,elmt="ageStruc$estim",step=1,ylab="Mean weight (g)")
}
\keyword{methods}
|
# https://fivethirtyeight.com/features/dont-throw-out-that-calendar/
library(lubridate)
classifyYear <- function(year){
  # Calendar "class" of a year, encoded "<leap flag>-<weekday of Jan 1>".
  # Two years can reuse the same printed calendar exactly when they share
  # this class. Vectorized over `year`.
  #
  # BUG FIX: the original leap test was `year %% 4 == 0`, which misclassifies
  # century years such as 2100 (not a leap year) inside the 2000:2140 range
  # this script analyses. Apply the full Gregorian rule instead.
  four <- 1 * (year %% 4 == 0 & (year %% 100 != 0 | year %% 400 == 0))
  # Weekday of January 1st, numbered 1 (Sunday) through 7 (Saturday) to match
  # lubridate::wday()'s default numbering; computed with base R ($wday is
  # 0 = Sunday), so this function no longer needs lubridate.
  startDay <- as.POSIXlt(as.Date(paste(as.character(year), '-1-1', sep = '')))$wday + 1
  return(paste(four, '-', startDay, sep = ''))
}
# Label every year 2000-2140 and store the labels alongside the years in a
# lookup table used by identifyNextSameYear().
yearClasses <- unlist(lapply(2000:2140, classifyYear))
yearData <- data.frame(
  year = 2000:2140,
  yearClass = yearClasses
)
identifyNextSameYear <- function(year){
  # Years until `year`'s calendar can be reused, i.e. the gap to the nearest
  # later year with the same class in the global `yearData` table; NA when no
  # such year exists within the table.
  target_class <- yearData$yearClass[which(yearData$year == year)]
  # All later years sharing that calendar class
  later_matches <- yearData$year[which(yearData$year > year & yearData$yearClass == target_class)]
  if (length(later_matches) > 0) {
    min(later_matches) - year
  } else {
    NA
  }
}
# For each year, the number of years until its calendar repeats (NA if the
# repeat falls outside the 2000:2140 table); then display the result.
calendarGap <- unlist(lapply(2000:2140, identifyNextSameYear))
yearData$calendarGap <- calendarGap
yearData
|
/2017-01-06/Riddler Express/riddler.r
|
no_license
|
alexvornsand/fivethirtyeight-riddler
|
R
| false
| false
| 802
|
r
|
# https://fivethirtyeight.com/features/dont-throw-out-that-calendar/
library(lubridate)
classifyYear <- function(year){
  # Calendar "class" of a year, encoded "<leap flag>-<weekday of Jan 1>".
  # Two years can reuse the same printed calendar exactly when they share
  # this class. Vectorized over `year`.
  #
  # BUG FIX: the original leap test was `year %% 4 == 0`, which misclassifies
  # century years such as 2100 (not a leap year) inside the 2000:2140 range
  # this script analyses. Apply the full Gregorian rule instead.
  four <- 1 * (year %% 4 == 0 & (year %% 100 != 0 | year %% 400 == 0))
  # Weekday of January 1st, numbered 1 (Sunday) through 7 (Saturday) to match
  # lubridate::wday()'s default numbering; computed with base R ($wday is
  # 0 = Sunday), so this function no longer needs lubridate.
  startDay <- as.POSIXlt(as.Date(paste(as.character(year), '-1-1', sep = '')))$wday + 1
  return(paste(four, '-', startDay, sep = ''))
}
# Label every year 2000-2140 and store the labels alongside the years in a
# lookup table used by identifyNextSameYear().
yearClasses <- unlist(lapply(2000:2140, classifyYear))
yearData <- data.frame(
  year = 2000:2140,
  yearClass = yearClasses
)
identifyNextSameYear <- function(year){
  # Years until `year`'s calendar can be reused, i.e. the gap to the nearest
  # later year with the same class in the global `yearData` table; NA when no
  # such year exists within the table.
  target_class <- yearData$yearClass[which(yearData$year == year)]
  # All later years sharing that calendar class
  later_matches <- yearData$year[which(yearData$year > year & yearData$yearClass == target_class)]
  if (length(later_matches) > 0) {
    min(later_matches) - year
  } else {
    NA
  }
}
# For each year, the number of years until its calendar repeats (NA if the
# repeat falls outside the 2000:2140 table); then display the result.
calendarGap <- unlist(lapply(2000:2140, identifyNextSameYear))
yearData$calendarGap <- calendarGap
yearData
|
# 3.1 PCA of a two-variable matrix
library(readr)
boxes <- read_csv("D:/R/Q6 Quantitative Analysis R1-12/boxes.csv")
# boxes.pca -- principal components analysis of Davis boxes data
boxes.matrix <- data.matrix(cbind(boxes[,1],boxes[,4]))
dimnames(boxes.matrix) <- list(NULL, cbind("long","diag"))
plot (boxes.matrix)
cor(boxes.matrix)
# PCA via the princomp() function from the stats package (cor = TRUE uses the
# correlation matrix). loadings() extracts the loadings, i.e. the correlations
# between the input variables and the new components, and biplot() creates a
# biplot: a single figure that plots the loadings as vectors and the component
# scores as points labelled by observation number.
boxes.pca <- princomp(boxes.matrix, cor=T)
boxes.pca
summary(boxes.pca)
print(loadings(boxes.pca),cutoff=0.0)
biplot(boxes.pca)
# Note the angle between the vectors: the correlation between two variables
# equals the cosine of the angle between the vectors (theta), or r = cos(theta).
# Here the angle is 24.3201359 degrees, which is found by the following R code:
acos(cor(boxes.matrix[,1],boxes.matrix[,2]))/((2*pi)/360)
# The components can be drawn on the scatter plot as follows.
# Get parameters of component lines (after Everitt & Rabe-Hesketh):
load <- boxes.pca$loadings
slope <- load[2,]/load[1,]
mn <- apply(boxes.matrix,2,mean)
intcpt <- mn[2]-(slope*mn[1])
# scatter plot with the two new axes added
par(pty="s") # square plotting frame
xlim <- range(boxes.matrix) # overall min, max
plot(boxes.matrix, xlim=xlim, ylim=xlim, pch=16, col="purple") # both axes same length
abline(intcpt[1],slope[1],lwd=2) # first component solid line
abline(intcpt[2],slope[2],lwd=2,lty=2) # second component dashed
legend("right", legend = c("PC 1", "PC 2"), lty = c(1, 2), lwd = 2, cex = 1)
# projections of points onto PCA 1
y1 <- intcpt[1]+slope[1]*boxes.matrix[,1]
x1 <- (boxes.matrix[,2]-intcpt[1])/slope[1]
y2 <- (y1+boxes.matrix[,2])/2.0
x2 <- (x1+boxes.matrix[,1])/2.0
segments(boxes.matrix[,1],boxes.matrix[,2], x2, y2, lwd=2,col="purple")
## 3.2 A second example using the large-cities data set
cities <- read_csv("D:/R/Q6 Quantitative Analysis R1-12/cities.csv")
head(cities)
cities.matrix <- data.matrix(cities[, 2:12])
cities.matrix
rownames(cities.matrix) <- cities[,1] # NOTE(review): cities[,1] is a tibble under readr; rownames<- likely needs cities[[1]] - confirm this line runs
plot(cities[,2:12], pch=16, cex=0.6)
cor(cities[,2:12])
library(corrplot)
corrplot(cor(cities[,2:12]), method="ellipse")
# An alternative is to simply fill each cell with an appropriate color and shade.
corrplot(cor(cities[,2:12]), method="color")
## 3.2.2 PCA of the cities data
# Here is the principal components analysis of the cities data:
cities.pca <- princomp(cities.matrix, cor=T)
cities.pca
summary(cities.pca)
screeplot(cities.pca)
loadings(cities.pca)
biplot(cities.pca, col=c("black","red"), cex=c(0.7,0.8))
# An alternative visualization of the principal components and their relationship with the original variables.
# NOTE(review): qgraph.pca() comes from the qgraph package, which is never
# attached in this script - confirm library(qgraph) runs elsewhere.
qg.pca <- qgraph.pca(cities[,2:12], factors=2, rotation="none")
## 3.3 "Rotation" of principal components
library(psych)
cities.pca.unrot <- principal(cities.matrix, nfactors=2, rotate="none")
cities.pca.unrot
summary(cities.pca.unrot)
biplot(cities.pca.unrot, labels=rownames(cities.matrix), cex=0.5, col=c("black","red"))
qg.pca <- qgraph(cities.pca.unrot) # NOTE(review): qgraph() also needs the qgraph package; see note above
# Here is the result with rotated components:
cities.pca.rot <- principal(cities.matrix, nfactors=2, rotate="varimax")
cities.pca.rot
summary(cities.pca.rot)
biplot.psych(cities.pca.rot, labels=rownames(cities.matrix), col=c("black","red"), cex=c(0.7,0.8),
             xlim.s=c(-3,3), ylim.s=c(-2,4))
### 4.1 Example of a factor analysis
# cities.fa1 -- factor analysis of cities data -- no rotation
cities.fa1 <- factanal(cities.matrix, factors=2, rotation="none", scores="regression")
cities.fa1
|
/Principal components and factor analysis.R
|
no_license
|
MichelMabinuola/Analysis-With-R
|
R
| false
| false
| 3,931
|
r
|
# 3.1 PCA of a two-variable matrix
library(readr)
boxes <- read_csv("D:/R/Q6 Quantitative Analysis R1-12/boxes.csv")
# boxes.pca -- principal components analysis of Davis boxes data
boxes.matrix <- data.matrix(cbind(boxes[,1],boxes[,4]))
dimnames(boxes.matrix) <- list(NULL, cbind("long","diag"))
plot (boxes.matrix)
cor(boxes.matrix)
# PCA via the princomp() function from the stats package (cor = TRUE uses the
# correlation matrix). loadings() extracts the loadings, i.e. the correlations
# between the input variables and the new components, and biplot() creates a
# biplot: a single figure that plots the loadings as vectors and the component
# scores as points labelled by observation number.
boxes.pca <- princomp(boxes.matrix, cor=T)
boxes.pca
summary(boxes.pca)
print(loadings(boxes.pca),cutoff=0.0)
biplot(boxes.pca)
# Note the angle between the vectors: the correlation between two variables
# equals the cosine of the angle between the vectors (theta), or r = cos(theta).
# Here the angle is 24.3201359 degrees, which is found by the following R code:
acos(cor(boxes.matrix[,1],boxes.matrix[,2]))/((2*pi)/360)
# The components can be drawn on the scatter plot as follows.
# Get parameters of component lines (after Everitt & Rabe-Hesketh):
load <- boxes.pca$loadings
slope <- load[2,]/load[1,]
mn <- apply(boxes.matrix,2,mean)
intcpt <- mn[2]-(slope*mn[1])
# scatter plot with the two new axes added
par(pty="s") # square plotting frame
xlim <- range(boxes.matrix) # overall min, max
plot(boxes.matrix, xlim=xlim, ylim=xlim, pch=16, col="purple") # both axes same length
abline(intcpt[1],slope[1],lwd=2) # first component solid line
abline(intcpt[2],slope[2],lwd=2,lty=2) # second component dashed
legend("right", legend = c("PC 1", "PC 2"), lty = c(1, 2), lwd = 2, cex = 1)
# projections of points onto PCA 1
y1 <- intcpt[1]+slope[1]*boxes.matrix[,1]
x1 <- (boxes.matrix[,2]-intcpt[1])/slope[1]
y2 <- (y1+boxes.matrix[,2])/2.0
x2 <- (x1+boxes.matrix[,1])/2.0
segments(boxes.matrix[,1],boxes.matrix[,2], x2, y2, lwd=2,col="purple")
## 3.2 A second example using the large-cities data set
cities <- read_csv("D:/R/Q6 Quantitative Analysis R1-12/cities.csv")
head(cities)
cities.matrix <- data.matrix(cities[, 2:12])
cities.matrix
rownames(cities.matrix) <- cities[,1] # NOTE(review): cities[,1] is a tibble under readr; rownames<- likely needs cities[[1]] - confirm this line runs
plot(cities[,2:12], pch=16, cex=0.6)
cor(cities[,2:12])
library(corrplot)
corrplot(cor(cities[,2:12]), method="ellipse")
# An alternative is to simply fill each cell with an appropriate color and shade.
corrplot(cor(cities[,2:12]), method="color")
## 3.2.2 PCA of the cities data
# Here is the principal components analysis of the cities data:
cities.pca <- princomp(cities.matrix, cor=T)
cities.pca
summary(cities.pca)
screeplot(cities.pca)
loadings(cities.pca)
biplot(cities.pca, col=c("black","red"), cex=c(0.7,0.8))
# An alternative visualization of the principal components and their relationship with the original variables.
# NOTE(review): qgraph.pca() comes from the qgraph package, which is never
# attached in this script - confirm library(qgraph) runs elsewhere.
qg.pca <- qgraph.pca(cities[,2:12], factors=2, rotation="none")
## 3.3 "Rotation" of principal components
library(psych)
cities.pca.unrot <- principal(cities.matrix, nfactors=2, rotate="none")
cities.pca.unrot
summary(cities.pca.unrot)
biplot(cities.pca.unrot, labels=rownames(cities.matrix), cex=0.5, col=c("black","red"))
qg.pca <- qgraph(cities.pca.unrot) # NOTE(review): qgraph() also needs the qgraph package; see note above
# Here is the result with rotated components:
cities.pca.rot <- principal(cities.matrix, nfactors=2, rotate="varimax")
cities.pca.rot
summary(cities.pca.rot)
biplot.psych(cities.pca.rot, labels=rownames(cities.matrix), col=c("black","red"), cex=c(0.7,0.8),
             xlim.s=c(-3,3), ylim.s=c(-2,4))
### 4.1 Example of a factor analysis
# cities.fa1 -- factor analysis of cities data -- no rotation
cities.fa1 <- factanal(cities.matrix, factors=2, rotation="none", scores="regression")
cities.fa1
|
#' @title Sample CITE-seq protein data: 87 proteins by 2872 cells
#'
#' @description A raw protein-by-cell CITE-seq count matrix used by the
#' example scripts of the dsb package. The raw data were processed with
#' CITE-seq-Count (https://hoohm.github.io/CITE-seq-Count/).
#'
#' @format A matrix of 87 proteins (rows) by 2872 cells (columns).
#' \describe{
#'   \item{cells_citeseq_mtx}{an R matrix with cells as columns and 87
#'   proteins as rows. Cells are a random, even sample (at most 100 cells
#'   from each of 30 protein-based clusters) that were manually annotated;
#'   full cell type annotations and further details are available in the
#'   referenced publication.}
#' }
"cells_citeseq_mtx"
|
/R/cells_citeseq_mtx.r
|
no_license
|
danjong99/dsb
|
R
| false
| false
| 665
|
r
|
#' @title Sample CITE-seq protein data: 87 proteins by 2872 cells
#'
#' @description A raw protein-by-cell CITE-seq count matrix used by the
#' example scripts of the dsb package. The raw data were processed with
#' CITE-seq-Count (https://hoohm.github.io/CITE-seq-Count/).
#'
#' @format A matrix of 87 proteins (rows) by 2872 cells (columns).
#' \describe{
#'   \item{cells_citeseq_mtx}{an R matrix with cells as columns and 87
#'   proteins as rows. Cells are a random, even sample (at most 100 cells
#'   from each of 30 protein-based clusters) that were manually annotated;
#'   full cell type annotations and further details are available in the
#'   referenced publication.}
#' }
"cells_citeseq_mtx"
|
# Generates Table 1: covariate imbalance before adjustment, after propensity
# score weighting, and after coarsened exact matching (CEM), on the
# LaLonde (LL) data shipped with the cem package.
library(cem)
data(LL)
# Propensity model: treatment on all covariates except the outcome re78.
# NOTE(review): glm() defaults to a gaussian family here and predict() is
# called without type = "response", so `w` are linear-predictor fitted
# values - confirm this weighting scheme is intentional.
mod <- glm(treated ~ . - re78, data=LL)
w <- predict(mod)
idx <- which(LL$treated==1)
w[idx] <- 1-w[idx] # pscore weights
# Fixed seed for the L1 cut-point profile; medianCP is reused for every
# imbalance() call below so the three methods are compared on the same bins.
set.seed(123)
imb0 <- L1.profile(LL$treated,LL, max.cut=20, drop=c("treated","re78"),M=250,plot=FALSE)
# on the original (unadjusted) data
raw <- imbalance(LL$treated, LL, drop=c("re78","treated"), br=imb0$medianCP)
# after pscore weighing
pw <- imbalance(LL$treated, LL, drop=c("re78","treated"), br=imb0$medianCP, weights = w)
# Coarsen every covariate (dropping columns 1 and 9) into 9 bins for CEM.
nm <- names(LL)
nm <- nm[-c(1,9)]
br <- list()
for(i in nm)
br[i] <- 9
names(br) <- nm
mat <- cem("treated", LL, drop="re78",cut=br)
# after cem
cm <- imbalance(LL$treated, LL, drop=c("re78","treated"), br=imb0$medianCP, weights = mat$w)
# Assemble Table 1: per-covariate statistics plus the overall L1 measure as
# the final row, one column per method.
m1 <- cbind( raw$tab["statistic"], pw$tab["statistic"], cm$tab["statistic"])
m2 <- rbind( m1, c(raw$L1$L1,pw$L1$L1,cm$L1$L1) )
colnames(m2) <- c("RAW", "PSW", "CEM")
rownames(m2)[NROW(m2)] <- "L1"
|
/CS112/daughter_effect/dataverse_files/forTable1.R
|
no_license
|
guydav/r
|
R
| false
| false
| 930
|
r
|
# Generates Table 1: covariate imbalance before adjustment, after propensity
# score weighting, and after coarsened exact matching (CEM), on the
# LaLonde (LL) data shipped with the cem package.
library(cem)
data(LL)
# Propensity model: treatment on all covariates except the outcome re78.
# NOTE(review): glm() defaults to a gaussian family here and predict() is
# called without type = "response", so `w` are linear-predictor fitted
# values - confirm this weighting scheme is intentional.
mod <- glm(treated ~ . - re78, data=LL)
w <- predict(mod)
idx <- which(LL$treated==1)
w[idx] <- 1-w[idx] # pscore weights
# Fixed seed for the L1 cut-point profile; medianCP is reused for every
# imbalance() call below so the three methods are compared on the same bins.
set.seed(123)
imb0 <- L1.profile(LL$treated,LL, max.cut=20, drop=c("treated","re78"),M=250,plot=FALSE)
# on the original (unadjusted) data
raw <- imbalance(LL$treated, LL, drop=c("re78","treated"), br=imb0$medianCP)
# after pscore weighing
pw <- imbalance(LL$treated, LL, drop=c("re78","treated"), br=imb0$medianCP, weights = w)
# Coarsen every covariate (dropping columns 1 and 9) into 9 bins for CEM.
nm <- names(LL)
nm <- nm[-c(1,9)]
br <- list()
for(i in nm)
br[i] <- 9
names(br) <- nm
mat <- cem("treated", LL, drop="re78",cut=br)
# after cem
cm <- imbalance(LL$treated, LL, drop=c("re78","treated"), br=imb0$medianCP, weights = mat$w)
# Assemble Table 1: per-covariate statistics plus the overall L1 measure as
# the final row, one column per method.
m1 <- cbind( raw$tab["statistic"], pw$tab["statistic"], cm$tab["statistic"])
m2 <- rbind( m1, c(raw$L1$L1,pw$L1$L1,cm$L1$L1) )
colnames(m2) <- c("RAW", "PSW", "CEM")
rownames(m2)[NROW(m2)] <- "L1"
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mi2comb.R
\name{mi2comb}
\alias{mi2comb}
\title{combination following nested multiple imputation}
\usage{
mi2comb(dt, alpha = 0.025)
}
\arguments{
\item{dt}{dataframe}
\item{alpha}{numeric, one-sided alpha, Default: 0.025}
}
\value{
dataframe, final result following multiple imputation and
combination
}
\description{
combines data following nested multiple imputation
}
\seealso{
\code{\link[stats]{cor}},\code{\link[stats]{TDist}}
}
|
/man/mi2comb.Rd
|
permissive
|
yuliasidi/nibinom
|
R
| false
| true
| 514
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mi2comb.R
\name{mi2comb}
\alias{mi2comb}
\title{combination following nested multiple imputation}
\usage{
mi2comb(dt, alpha = 0.025)
}
\arguments{
\item{dt}{dataframe}
\item{alpha}{numeric, one-sided alpha, Default: 0.025}
}
\value{
dataframe, final result following multiple imputation and
combination
}
\description{
combines data following nested multiple imputation
}
\seealso{
\code{\link[stats]{cor}},\code{\link[stats]{TDist}}
}
|
tuneEcrossCausal = function(ecross_control_candidates = c(1,2,3),
                            ecross_moderate_candidates = c(1,2,3),
                            y, pihat, z, tgt, x_control, x_moderate,
                            pihatpred=0, zpred=0, tpred=0, xpred_control=matrix(0,0,0), xpred_moderate=matrix(0,0,0),
                            nburn=100, nsim=1000, ntree_control=200, ntree_moderate=50,
                            lambda=NULL, sigq=.9, sighat=NULL, nu=3,
                            base_control=.95, power_control=2,
                            base_moderate=.25, power_moderate=3,
                            sd_control=2*sd(y), sd_moderate=sd(y),
                            treatment_init = rep(1,length(unique(tgt))),
                            use_muscale=TRUE, use_tauscale=TRUE,
                            pihat_in_trt=FALSE,
                            probit=FALSE, yobs=NULL){
  #---------------------------------------------------------------------------
  # FUNCTION: Tunes the expected-number-of-crossings (ecross) hyperparameters
  #   of the tsbcf causal model by grid search, scoring each candidate pair
  #   with in-sample WAIC and applying a one-standard-error rule on a loess
  #   smooth of the WAIC surface.
  # ARGS: ecross_control_candidates / ecross_moderate_candidates are the
  #   numeric grids to search; all remaining arguments pass through to tsbcf().
  # RETURNS: list(ecross_control, ecross_moderate, waic_plot, waic_grid).
  # Review fixes: library() instead of require() (fail loudly on missing
  #   deps); scalar minimum of the smoothed WAIC so ties no longer break the
  #   cbind; no shadowing of stats::sd; scalar && in if() conditions.
  #---------------------------------------------------------------------------
  library(tidyverse)
  library(ggthemes)

  # Require at least 3 candidate values for at least one parameter.
  if(length(ecross_control_candidates) < 3 && length(ecross_moderate_candidates) < 3){
    stop('Try at least 3 candidate values of at least one of the parameters for tuning.')
  }

  # Full grid of (control, moderate) candidate pairs.
  ecross_candidates = cbind.data.frame(
    'ecross_control' = rep(ecross_control_candidates, each=length(ecross_moderate_candidates)),
    'ecross_moderate' = rep(ecross_moderate_candidates, times=length(ecross_control_candidates))
  )

  # Fit tsbcf at each grid point and record its in-sample WAIC.
  waic = rep(NA_real_, nrow(ecross_candidates))
  for(i in seq_len(nrow(ecross_candidates))){
    print(paste0('Iteration ', i, ' of ', nrow(ecross_candidates)))
    # Fit tsbcf model for this ecross candidate pair.
    fit = tsbcf(y, pihat, z, tgt, x_control, x_moderate,
                pihatpred, zpred, tpred,
                xpred_control=matrix(0,0,0), xpred_moderate=matrix(0,0,0),
                nburn, nsim, ntree_control, ntree_moderate,
                lambda, sigq, sighat, nu=nu,
                base_control, power_control,
                base_moderate, power_moderate,
                sd_control, sd_moderate,
                treatment_init,
                use_muscale, use_tauscale,
                ecross_control=ecross_candidates$ecross_control[i],
                ecross_moderate=ecross_candidates$ecross_moderate[i],
                pihat_in_trt,
                probit=probit, yobs=yobs, verbose=FALSE, mh=FALSE, save_inputs=FALSE)
    # In-sample WAIC for this fit.
    check = checkFit(y=y,
                     mcmcdraws = fit[["yhat"]],
                     sig = fit[["sigma"]],
                     probit=probit,
                     doWaic=TRUE,
                     yobs=yobs)
    waic[i] = check$waic
  }

  # Smooth WAIC over the grid; when only one parameter is actually being
  # tuned, smooth over that parameter alone.
  ec = ecross_candidates
  n_candidates_con = length(unique(ec$ecross_control))   # unique control candidates
  n_candidates_mod = length(unique(ec$ecross_moderate))  # unique moderate candidates
  if(n_candidates_con >= 3 && n_candidates_mod >= 3){    # both params tuned
    myfit = loess(waic ~ ec$ecross_control + ec$ecross_moderate, span=1.25)
  } else if(n_candidates_con >= 3){                      # only control tuned
    myfit = loess(waic ~ ec$ecross_control, span=1.25)
  } else{                                                # only moderate tuned
    myfit = loess(waic ~ ec$ecross_moderate, span=1.25)
  }

  # One-standard-error rule: accept grid points whose raw WAIC is within
  # sd(residuals) of the smoothed minimum; among those, prefer the pair with
  # the smallest squared norm (the least flexible model).
  resid_sd = sd(myfit$residuals)
  sdf = cbind.data.frame('x' = ecross_candidates, 'y' = myfit$fitted)
  ymin = min(sdf$y)  # scalar minimum of the smoothed surface (robust to ties)
  df = cbind.data.frame(ec, waic, y = ymin + resid_sd)  # df$y = WAIC threshold
  df$norm = df$ecross_control^2 + df$ecross_moderate^2
  ok = which(df$waic <= df$y)
  if(length(ok) > 0){
    exp_cross = df[ok, ]
    exp_cross = exp_cross[which(exp_cross$norm == min(exp_cross$norm)), ]
    exp_cross = exp_cross[, 1:2]
  } else{
    # No grid point within one SE of the minimum: fall back to the smallest
    # candidate value of each parameter.
    exp_cross = data.frame(matrix(0, nrow=1, ncol=2))
    colnames(exp_cross) = c('ecross_control','ecross_moderate')
    exp_cross$ecross_control = min(ecross_candidates$ecross_control)
    exp_cross$ecross_moderate = min(ecross_candidates$ecross_moderate)
  }

  # Diagnostic plot; the layout depends on which parameters were tuned.
  if(n_candidates_con >= 3 && n_candidates_mod >= 3){
    # Both tuned: one WAIC curve per ecross_moderate value; the optimal
    # moderate value is drawn thicker/dashed, optimal control marked in red.
    df$linesize = 1
    df$linesize[which(df$ecross_moderate == as.numeric(exp_cross$ecross_moderate))] = 2
    waicplt = ggplot(df, aes(x=ecross_control, y=waic, colour=factor(ecross_moderate),
                             linetype=factor(linesize), size=factor(linesize))) +
      geom_line() +
      geom_hline(aes(yintercept=y), colour='grey') +
      geom_vline(aes(xintercept=exp_cross$ecross_control), colour='red', linetype=6) +
      scale_colour_colorblind(name='Ec_moderate') +
      scale_linetype_manual(values=c(1,6), name='Optimal', labels=c('No','Yes')) +
      scale_size_manual(values=c(.8,1.2), name='Optimal', labels=c('No','Yes'), guide=FALSE)
  } else if(n_candidates_con >= 3){
    # Only control tuned.
    waicplt = ggplot(df, aes(x=ecross_control, y=waic)) +
      geom_line() +
      geom_hline(aes(yintercept=y), colour='grey') +
      geom_vline(aes(xintercept=exp_cross$ecross_control), colour='red', linetype=6) +
      scale_colour_colorblind(name='Ec_control')
  } else{
    # Only moderate tuned.
    waicplt = ggplot(df, aes(x=ecross_moderate, y=waic)) +
      geom_line() +
      geom_hline(aes(yintercept=y), colour='grey') +
      geom_vline(aes(xintercept=exp_cross$ecross_moderate), colour='red', linetype=6) +
      scale_colour_colorblind(name='Ec_moderate')
  }

  return(list('ecross_control' = exp_cross[1],
              'ecross_moderate' = exp_cross[2],
              'waic_plot' = waicplt,
              'waic_grid' = cbind.data.frame('ec' = ecross_candidates, waic)))
}
|
/R/tuneECrossCausal.R
|
no_license
|
jestarling/tsbcf
|
R
| false
| false
| 6,301
|
r
|
tuneEcrossCausal = function(ecross_control_candidates = c(1,2,3),
                            ecross_moderate_candidates = c(1,2,3),
                            y, pihat, z, tgt, x_control, x_moderate,
                            pihatpred=0, zpred=0, tpred=0, xpred_control=matrix(0,0,0), xpred_moderate=matrix(0,0,0),
                            nburn=100, nsim=1000, ntree_control=200, ntree_moderate=50,
                            lambda=NULL, sigq=.9, sighat=NULL, nu=3,
                            base_control=.95, power_control=2,
                            base_moderate=.25, power_moderate=3,
                            sd_control=2*sd(y), sd_moderate=sd(y),
                            treatment_init = rep(1,length(unique(tgt))),
                            use_muscale=TRUE, use_tauscale=TRUE,
                            pihat_in_trt=FALSE,
                            probit=FALSE, yobs=NULL){
  #---------------------------------------------------------------------------
  # FUNCTION: Tunes the expected-number-of-crossings (ecross) hyperparameters
  #   of the tsbcf causal model by grid search, scoring each candidate pair
  #   with in-sample WAIC and applying a one-standard-error rule on a loess
  #   smooth of the WAIC surface.
  # ARGS: ecross_control_candidates / ecross_moderate_candidates are the
  #   numeric grids to search; all remaining arguments pass through to tsbcf().
  # RETURNS: list(ecross_control, ecross_moderate, waic_plot, waic_grid).
  # Review fixes: library() instead of require() (fail loudly on missing
  #   deps); scalar minimum of the smoothed WAIC so ties no longer break the
  #   cbind; no shadowing of stats::sd; scalar && in if() conditions.
  #---------------------------------------------------------------------------
  library(tidyverse)
  library(ggthemes)

  # Require at least 3 candidate values for at least one parameter.
  if(length(ecross_control_candidates) < 3 && length(ecross_moderate_candidates) < 3){
    stop('Try at least 3 candidate values of at least one of the parameters for tuning.')
  }

  # Full grid of (control, moderate) candidate pairs.
  ecross_candidates = cbind.data.frame(
    'ecross_control' = rep(ecross_control_candidates, each=length(ecross_moderate_candidates)),
    'ecross_moderate' = rep(ecross_moderate_candidates, times=length(ecross_control_candidates))
  )

  # Fit tsbcf at each grid point and record its in-sample WAIC.
  waic = rep(NA_real_, nrow(ecross_candidates))
  for(i in seq_len(nrow(ecross_candidates))){
    print(paste0('Iteration ', i, ' of ', nrow(ecross_candidates)))
    # Fit tsbcf model for this ecross candidate pair.
    fit = tsbcf(y, pihat, z, tgt, x_control, x_moderate,
                pihatpred, zpred, tpred,
                xpred_control=matrix(0,0,0), xpred_moderate=matrix(0,0,0),
                nburn, nsim, ntree_control, ntree_moderate,
                lambda, sigq, sighat, nu=nu,
                base_control, power_control,
                base_moderate, power_moderate,
                sd_control, sd_moderate,
                treatment_init,
                use_muscale, use_tauscale,
                ecross_control=ecross_candidates$ecross_control[i],
                ecross_moderate=ecross_candidates$ecross_moderate[i],
                pihat_in_trt,
                probit=probit, yobs=yobs, verbose=FALSE, mh=FALSE, save_inputs=FALSE)
    # In-sample WAIC for this fit.
    check = checkFit(y=y,
                     mcmcdraws = fit[["yhat"]],
                     sig = fit[["sigma"]],
                     probit=probit,
                     doWaic=TRUE,
                     yobs=yobs)
    waic[i] = check$waic
  }

  # Smooth WAIC over the grid; when only one parameter is actually being
  # tuned, smooth over that parameter alone.
  ec = ecross_candidates
  n_candidates_con = length(unique(ec$ecross_control))   # unique control candidates
  n_candidates_mod = length(unique(ec$ecross_moderate))  # unique moderate candidates
  if(n_candidates_con >= 3 && n_candidates_mod >= 3){    # both params tuned
    myfit = loess(waic ~ ec$ecross_control + ec$ecross_moderate, span=1.25)
  } else if(n_candidates_con >= 3){                      # only control tuned
    myfit = loess(waic ~ ec$ecross_control, span=1.25)
  } else{                                                # only moderate tuned
    myfit = loess(waic ~ ec$ecross_moderate, span=1.25)
  }

  # One-standard-error rule: accept grid points whose raw WAIC is within
  # sd(residuals) of the smoothed minimum; among those, prefer the pair with
  # the smallest squared norm (the least flexible model).
  resid_sd = sd(myfit$residuals)
  sdf = cbind.data.frame('x' = ecross_candidates, 'y' = myfit$fitted)
  ymin = min(sdf$y)  # scalar minimum of the smoothed surface (robust to ties)
  df = cbind.data.frame(ec, waic, y = ymin + resid_sd)  # df$y = WAIC threshold
  df$norm = df$ecross_control^2 + df$ecross_moderate^2
  ok = which(df$waic <= df$y)
  if(length(ok) > 0){
    exp_cross = df[ok, ]
    exp_cross = exp_cross[which(exp_cross$norm == min(exp_cross$norm)), ]
    exp_cross = exp_cross[, 1:2]
  } else{
    # No grid point within one SE of the minimum: fall back to the smallest
    # candidate value of each parameter.
    exp_cross = data.frame(matrix(0, nrow=1, ncol=2))
    colnames(exp_cross) = c('ecross_control','ecross_moderate')
    exp_cross$ecross_control = min(ecross_candidates$ecross_control)
    exp_cross$ecross_moderate = min(ecross_candidates$ecross_moderate)
  }

  # Diagnostic plot; the layout depends on which parameters were tuned.
  if(n_candidates_con >= 3 && n_candidates_mod >= 3){
    # Both tuned: one WAIC curve per ecross_moderate value; the optimal
    # moderate value is drawn thicker/dashed, optimal control marked in red.
    df$linesize = 1
    df$linesize[which(df$ecross_moderate == as.numeric(exp_cross$ecross_moderate))] = 2
    waicplt = ggplot(df, aes(x=ecross_control, y=waic, colour=factor(ecross_moderate),
                             linetype=factor(linesize), size=factor(linesize))) +
      geom_line() +
      geom_hline(aes(yintercept=y), colour='grey') +
      geom_vline(aes(xintercept=exp_cross$ecross_control), colour='red', linetype=6) +
      scale_colour_colorblind(name='Ec_moderate') +
      scale_linetype_manual(values=c(1,6), name='Optimal', labels=c('No','Yes')) +
      scale_size_manual(values=c(.8,1.2), name='Optimal', labels=c('No','Yes'), guide=FALSE)
  } else if(n_candidates_con >= 3){
    # Only control tuned.
    waicplt = ggplot(df, aes(x=ecross_control, y=waic)) +
      geom_line() +
      geom_hline(aes(yintercept=y), colour='grey') +
      geom_vline(aes(xintercept=exp_cross$ecross_control), colour='red', linetype=6) +
      scale_colour_colorblind(name='Ec_control')
  } else{
    # Only moderate tuned.
    waicplt = ggplot(df, aes(x=ecross_moderate, y=waic)) +
      geom_line() +
      geom_hline(aes(yintercept=y), colour='grey') +
      geom_vline(aes(xintercept=exp_cross$ecross_moderate), colour='red', linetype=6) +
      scale_colour_colorblind(name='Ec_moderate')
  }

  return(list('ecross_control' = exp_cross[1],
              'ecross_moderate' = exp_cross[2],
              'waic_plot' = waicplt,
              'waic_grid' = cbind.data.frame('ec' = ecross_candidates, waic)))
}
|
/chap06/Krisp_Kreme.R
|
no_license
|
leetschau/app-of-rlang-in-stats
|
R
| false
| false
| 1,261
|
r
| ||
library(ggplot2)
library(kernlab)
library(caret)
library(caTools)
library(gridExtra)
################################################ Objective ################################################
# To successfully classify handwritten digits (0-9) using pixel values
# Support Vector Machines will be applied
################################################# Loading data #############################################
mnist_train <- read.csv("mnist_train.csv", stringsAsFactors = F, header = F)
mnist_test <- read.csv("mnist_test.csv", stringsAsFactors = F, header = F)
View(mnist_train) # Data has no column names (interactive inspection only)
View(mnist_test) # Data has no column names (interactive inspection only)
# First column holds the digit label; name it explicitly in both sets.
names(mnist_test)[1] <- "label"
names(mnist_train)[1] <- "label"
################################## Data cleaning, preparation & understanding ##############################
#--------------------------------------------- Data cleaning ----------------------------------------------#
## Checking for missing values, unnecessary rows and columns
# headers and footers
head(mnist_test, 1) # no unnecessary headers
head(mnist_train, 1) # no unnecessary headers
tail(mnist_test, 1) # no unnecessary footers
tail(mnist_train, 1) # no unnecessary footers
# Duplicated rows
sum(duplicated(mnist_test)) # no duplicate rows
sum(duplicated(mnist_train)) # no duplicate rows
# Checking for NAs
sum(sapply(mnist_test, function(x) sum(is.na(x)))) # There are no missing values
sum(sapply(mnist_train, function(x) sum(is.na(x)))) # There are no missing values
#------------------------------------------- Data understanding -------------------------------------------#
# The MNIST database of handwritten digits has a training set of 60,000 examples,
# and a test set of 10,000 examples. It is a subset of a larger set available from NIST.
# The 784 columns apart from the label consist of 28*28 matrix describing the scanned image of the digits
# The digits have been size-normalized and centered in a fixed-size image
str(mnist_test) # all dependent variables are integers; 10000 observations, 785 variables
str(mnist_train) # all dependent variables are integers; 60000 observations, 785 variables
summary(mnist_test[ , 2:100]) # some columns seem to be containing only zeros, Pixel values go up to 255,
summary(mnist_train[ , 2:100]) # but some only go up to ~100, data needs to be scaled
#-------------------------------------------- Data preparation --------------------------------------------#
# Convert label variable into factor
mnist_train$label <- factor(mnist_train$label)
summary(mnist_train$label)
mnist_test$label <- factor(mnist_test$label)
summary(mnist_test$label)
# Sampling training dataset
dim(mnist_train) # computation time would be unacceptable for such a large dataset
set.seed(100)
sample_indices <- sample(1: nrow(mnist_train), 5000) # extracting subset of 5000 samples for modelling
train <- mnist_train[sample_indices, ]
# Scaling data
max(train[ ,2:ncol(train)]) # max pixel value is 255, let's use this to scale data
train[ , 2:ncol(train)] <- train[ , 2:ncol(train)]/255
test <- cbind(label = mnist_test[ ,1], mnist_test[ , 2:ncol(mnist_test)]/255)
#----------------------------------------- Exploratory Data Analysis --------------------------------------#
## Distribution of digits across all data sets
plot1 <- ggplot(mnist_train, aes(x = label, y = (..count..)/sum(..count..))) + geom_bar() + theme_light() +
  labs(y = "Relative frequency", title = "mnist_train dataset") +
  scale_y_continuous(labels=scales::percent, limits = c(0 , 0.15)) +
  geom_text(stat = "count",
            aes(label = scales:: percent((..count..)/sum(..count..)), vjust = -1))
plot2 <- ggplot(train, aes(x = label, y = (..count..)/sum(..count..))) + geom_bar() + theme_light() +
  labs(y = "Relative frequency", title = "train dataset") +
  scale_y_continuous(labels=scales::percent, limits = c(0 , 0.15)) +
  geom_text(stat = "count",
            aes(label = scales:: percent((..count..)/sum(..count..)), vjust = -1))
plot3 <- ggplot(test, aes(x = label, y = (..count..)/sum(..count..))) + geom_bar() + theme_light() +
  labs(y = "Relative frequency", title = "test dataset") +
  scale_y_continuous(labels=scales::percent, limits = c(0 , 0.15)) +
  geom_text(stat = "count",
            aes(label = scales:: percent((..count..)/sum(..count..)), vjust = -1))
grid.arrange(plot1, plot2, plot3, nrow = 3)
# Relative frequencies of the digits has been retained while sampling to create the reduced train data set
# Similar frequency in test dataset also observed
######################################### Model Building & Evaluation ######################################
#--------------------------------------------- Linear Kernel ----------------------------------------------#
## Linear kernel using default parameters
model1_linear <- ksvm(label ~ ., data = train, scaled = FALSE, kernel = "vanilladot", C = 1)
print(model1_linear)
eval1_linear <- predict(model1_linear, newdata = test, type = "response")
confusionMatrix(eval1_linear, test$label)
# Observations:
# Overall accuracy of 91.3%
# Specificities quite high > 99%
# Sensitivities good > 84%
## Linear kernel using stricter C
model2_linear <- ksvm(label ~ ., data = train, scaled = FALSE, kernel = "vanilladot", C = 10)
print(model2_linear)
eval2_linear <- predict(model2_linear, newdata = test, type = "response")
confusionMatrix(eval2_linear, test$label)
# Observations:
# Overall accuracy of 91%
# Model performance has slightly decreased, model may be overfitting
## Using cross validation to optimise C
grid_linear <- expand.grid(C= c(0.001, 0.1 ,1 ,10 ,100)) # defining range of C
fit.linear <- train(label ~ ., data = train, metric = "Accuracy", method = "svmLinear",
                    tuneGrid = grid_linear, preProcess = NULL,
                    trControl = trainControl(method = "cv", number = 5))
# printing results of 5-fold cross validation
print(fit.linear)
plot(fit.linear)
# Observations:
# Best accuracy of 92% at C = 0.1
# Higher values of C are overfitting and lower values are giving simple models
eval_cv_linear <- predict(fit.linear, newdata = test)
confusionMatrix(eval_cv_linear, test$label)
# Observations:
# Overall accuracy of 92.4%, slightly improved
# Specificities quite high > 99%
# Sensitivities > 86%, improved from model1 by making model more generic i.e. lower C
#--------------------------------------------- Radial Kernel ----------------------------------------------#
## Radial kernel using default parameters
model1_rbf <- ksvm(label ~ ., data = train, scaled = FALSE, kernel = "rbfdot", C = 1, kpar = "automatic")
print(model1_rbf)
eval1_rbf <- predict(model1_rbf, newdata = test, type = "response")
confusionMatrix(eval1_rbf, test$label)
# Observations:
# Overall accuracy of 95%
# Specificities quite high > 99%
# Sensitivities high > 92%
# Increase in overall accuracy and sensitivity from linear kernel using C = 1, sigma = 0.0107
# data seems to have non linearity to it
## Radial kernel with higher sigma
model2_rbf <- ksvm(label ~ ., data = train, scaled = FALSE, kernel = "rbfdot",
                   C = 1, kpar = list(sigma = 1))
print(model2_rbf)
eval2_rbf <- predict(model2_rbf, newdata = test, type = "response")
confusionMatrix(eval2_rbf, test$label)
# Observations:
# Accuracy drops to 11% and class wise results are very poor
# sigma = 1 is too much non linearity and the model is overfitting
## Using cross validation to optimise C and sigma
# defining ranges of C and sigma
grid_rbf = expand.grid(C= c(0.01, 0.1, 1, 5, 10), sigma = c(0.001, 0.01, 0.1, 1, 5))
# Using only 2 folds to optimise run time
fit.rbf <- train(label ~ ., data = train, metric = "Accuracy", method = "svmRadial",tuneGrid = grid_rbf,
                 trControl = trainControl(method = "cv", number = 2), preProcess = NULL)
# printing results of 2-fold cross validation
print(fit.rbf)
plot(fit.rbf)
# Observations:
# Best sigma value is ~ 0.01
# Higher sigma values are overfitting and lower sigma values are not capturing non linearity adequately
# Accuracy increases with C until 5 and then decreases again, can be further optimised
# Optimising C further
grid_rbf = expand.grid(C= c(1,2, 3, 4, 5, 6 ,7, 8, 9, 10), sigma = 0.01)
fit.rbf2 <- train(label ~ ., data = train, metric = "Accuracy", method = "svmRadial",tuneGrid = grid_rbf,
                  trControl = trainControl(method = "cv", number = 5), preProcess = NULL)
# printing results of cross validation
print(fit.rbf2)
plot(fit.rbf2)
eval_cv_rbf <- predict(fit.rbf2, newdata = test)
confusionMatrix(eval_cv_rbf, test$label)
# Observations:
# Accuracy is highest at C = 3 and sigma = 0.01
# Higher C values are overfitting and lower C values have too much bias
# Accuracy of 96%
# High Sensitivities > 92%
# Very High Specificities > 99%
#--------------------------------------------- Polynomial Kernel ----------------------------------------------#
## Polynomial kernel with degree 2, default scale and offset
model1_poly <- ksvm(label ~ ., data = train, kernel = "polydot", scaled = FALSE, C = 1,
                    kpar = list(degree = 2, scale = 1, offset = 1))
print(model1_poly)
eval1_poly <- predict(model1_poly, newdata = test)
confusionMatrix(eval1_poly, test$label)
# Observations
# Good accuracy of 95.24%
# High Sensitivities > 92% and specificities > 99%
# Similar performance to radial kernel
## Polynomial kernel with varied scale
model2_poly <- ksvm(label ~ ., data = train, kernel = "polydot", scaled = FALSE, C = 1,
                    kpar = list(degree = 2, scale = -2, offset = 1))
print(model2_poly)
eval2_poly <- predict(model2_poly, newdata = test)
confusionMatrix(eval2_poly, test$label)
# Observations
# Slight reduction in accuracy but similar performance
## Polynomial kernel with varied offset
model3_poly <- ksvm(label ~ ., data = train, kernel = "polydot", scaled = FALSE, C = 1,
                    kpar = list(degree = 2, scale = 1, offset = 10))
print(model3_poly)
eval3_poly <- predict(model3_poly, newdata = test)
confusionMatrix(eval3_poly, test$label)
# Observations
# similar performance as before, scale and offset seem to have little effect on performance
## Polynomial kernel with higher C
model4_poly <- ksvm(label ~ ., data = train, kernel = "polydot", scaled = FALSE, C = 3,
                    kpar = list(degree = 2, scale = 1, offset = 1))
print(model4_poly)
eval4_poly <- predict(model4_poly, newdata = test)
confusionMatrix(eval4_poly, test$label)
# Observations
# similar performance as before
## Grid search to optimise hyperparameters
grid_poly = expand.grid(C= c(0.01, 0.1, 1, 10), degree = c(1, 2, 3, 4, 5),
                        scale = c(-100, -10, -1, 1, 10, 100))
fit.poly <- train(label ~ ., data = train, metric = "Accuracy", method = "svmPoly",tuneGrid = grid_poly,
                  trControl = trainControl(method = "cv", number = 2), preProcess = NULL)
# printing results of cross validation
print(fit.poly)
plot(fit.poly)
eval_cv_poly <- predict(fit.poly, newdata = test)
confusionMatrix(eval_cv_poly, test$label)
# Observations:
# Best model obtained for C = 0.01, degree = 2, scale = 1
# as data has been scaled already scale = 1 is optimum
# C has little to no effect on performance, C = 0.01 generic model has been picked as optimum
# degrees higher than 2 are overfitting
# Accuracy of 95.24%, sensitivities > 92%, specificities > 99%
## Implementing optimised polynomial model
model5_poly <- ksvm(label ~ ., data = train, kernel = "polydot", scaled = FALSE, C = 0.01,
                    kpar = list(degree = 2, scale = 1, offset = 0.5))
print(model5_poly)
eval5_poly <- predict(model5_poly, newdata = test)
confusionMatrix(eval5_poly, test$label)
# Observations:
# offset of 0.5 used as independent variables are in the range of 0 to 1
# best accuracy of polynomial kernels 95.25%
################################################ Conclusion ################################################
# Final model
final_model = fit.rbf2
## SVM using RBF kernel (C = 3, sigma = 0.01) achieved highest accuracy in predicting digits
# reduced training data set of 5000 instances (extracted using random sampling) has been used
# distribution of the dependent variable (digits) has been preserved while sampling
# Model performance on validation data set of 10000 instances
# Accuracy = 95.46%
# Sensitivites > 92%
# Specificities > 99%
# Polynomial kernel (C = 0.01, degree = 2, scale = 1, offset = 0.5) also performed equally well
# performance metrics are only marginally lower than radial kernel
# Run time is better than that of radial kernel
|
/tvmarketing.R
|
no_license
|
shub005/Data-Science
|
R
| false
| false
| 13,247
|
r
|
library(ggplot2)
library(kernlab)
library(caret)
library(caTools)
library(gridExtra)
################################################ Objective ################################################
# To succesfully classify handwritten digits (0-9) using pixel values
# Support Vector Machines will be applied
################################################# Loading data #############################################
mnist_train <- read.csv("mnist_train.csv", stringsAsFactors = F, header = F)
mnist_test <- read.csv("mnist_test.csv", stringsAsFactors = F, header = F)
View(mnist_train) # Data has no column names
View(mnist_test) # Data has no column names
names(mnist_test)[1] <- "label"
names(mnist_train)[1] <- "label"
################################## Data cleaning, preparation & understanding ##############################
#--------------------------------------------- Data cleaning ----------------------------------------------#
## Checking for missing values, unnecessary rows and columns
# headers and footers
head(mnist_test, 1) # no unnecessary headers
head(mnist_train, 1) # no unnecessary headers
tail(mnist_test, 1) # no unnecessary footers
tail(mnist_train, 1) # no unnecessary footers
# Duplicated rows
sum(duplicated(mnist_test)) # no duplicate rows
sum(duplicated(mnist_train)) # no duplicate rows
# Checking for NAs
sum(sapply(mnist_test, function(x) sum(is.na(x)))) # There are no missing values
sum(sapply(mnist_train, function(x) sum(is.na(x)))) # There are no missing values
#------------------------------------------- Data understanding -------------------------------------------#
# The MNIST database of handwritten digits has a training set of 60,000 examples,
# and a test set of 10,000 examples. It is a subset of a larger set available from NIST.
# The 784 columns apart from the label consist of 28*28 matrix describing the scanned image of the digits
# The digits have been size-normalized and centered in a fixed-size image
str(mnist_test) # all dependant variables are integers, 60000 observations, 785 variables
str(mnist_train) # all dependant variables integers, 10000 observations, 785 variables
summary(mnist_test[ , 2:100]) # some columns seem to be containing only zeros, Pixel values go upto 255,
summary(mnist_train[ , 2:100]) # but some only go up to ~100, data needs to be scaled
#-------------------------------------------- Data preparation --------------------------------------------#
# Convert label variable into factor
mnist_train$label <- factor(mnist_train$label)
summary(mnist_train$label)
mnist_test$label <- factor(mnist_test$label)
summary(mnist_test$label)
# Sampling training dataset
dim(mnist_train) # computation time would be unnaceptable for such a large dataset
set.seed(100)
sample_indices <- sample(1: nrow(mnist_train), 5000) # extracting subset of 5000 samples for modelling
train <- mnist_train[sample_indices, ]
# Scaling data
max(train[ ,2:ncol(train)]) # max pixel value is 255, lets use this to scale data
train[ , 2:ncol(train)] <- train[ , 2:ncol(train)]/255
test <- cbind(label = mnist_test[ ,1], mnist_test[ , 2:ncol(mnist_test)]/255)
#----------------------------------------- Exploratory Data Analysis --------------------------------------#
## Distribution of digits across all data sets
plot1 <- ggplot(mnist_train, aes(x = label, y = (..count..)/sum(..count..))) + geom_bar() + theme_light() +
labs(y = "Relative frequency", title = "mnist_train dataset") +
scale_y_continuous(labels=scales::percent, limits = c(0 , 0.15)) +
geom_text(stat = "count",
aes(label = scales:: percent((..count..)/sum(..count..)), vjust = -1))
plot2 <- ggplot(train, aes(x = label, y = (..count..)/sum(..count..))) + geom_bar() + theme_light() +
labs(y = "Relative frequency", title = "train dataset") +
scale_y_continuous(labels=scales::percent, limits = c(0 , 0.15)) +
geom_text(stat = "count",
aes(label = scales:: percent((..count..)/sum(..count..)), vjust = -1))
plot3 <- ggplot(test, aes(x = label, y = (..count..)/sum(..count..))) + geom_bar() + theme_light() +
labs(y = "Relative frequency", title = "test dataset") +
scale_y_continuous(labels=scales::percent, limits = c(0 , 0.15)) +
geom_text(stat = "count",
aes(label = scales:: percent((..count..)/sum(..count..)), vjust = -1))
grid.arrange(plot1, plot2, plot3, nrow = 3)
# Relative frequencies of the digits has been retained while sampling to create the reduced train data set
# Similar frequency in test dataset also observed
######################################### Model Building & Evaluation ######################################
#--------------------------------------------- Linear Kernel ----------------------------------------------#
## Linear kernel using default parameters
model1_linear <- ksvm(label ~ ., data = train, scaled = FALSE, kernel = "vanilladot", C = 1)
print(model1_linear)
eval1_linear <- predict(model1_linear, newdata = test, type = "response")
confusionMatrix(eval1_linear, test$label)
# Observations:
# Overall accuracy of 91.3%
# Specificities quite high > 99%
# Sensitivities good > 84%
## Linear kernel using stricter C
model2_linear <- ksvm(label ~ ., data = train, scaled = FALSE, kernel = "vanilladot", C = 10)
print(model2_linear)
eval2_linear <- predict(model2_linear, newdata = test, type = "response")
confusionMatrix(eval2_linear, test$label)
# Observations:
# Overall accuracy of 91%
# Model performance has slightly decreased, model may be overfitting
## Using cross validation to optimise C
## (train/test data frames and earlier linear models are defined above this chunk)
grid_linear <- expand.grid(C= c(0.001, 0.1 ,1 ,10 ,100)) # defining range of C
fit.linear <- train(label ~ ., data = train, metric = "Accuracy", method = "svmLinear",
                    tuneGrid = grid_linear, preProcess = NULL,
                    trControl = trainControl(method = "cv", number = 5))
# printing results of 5 cross validation
print(fit.linear)
plot(fit.linear)
# Observations:
# Best accuracy of 92% at C = 0.1
# Higher values of C are overfitting and lower values are giving simple models
eval_cv_linear <- predict(fit.linear, newdata = test)
confusionMatrix(eval_cv_linear, test$label)
# Observations:
# Overall accuracy of 92.4%, slightly improved
# Specificities quite high > 99%
# Sensitivities > 86%, improved from model1 by making model more generic i.e. lower C
#--------------------------------------------- Radial Kernel ----------------------------------------------#
## Radial kernel using default parameters (kpar = "automatic" estimates sigma from the data)
model1_rbf <- ksvm(label ~ ., data = train, scaled = FALSE, kernel = "rbfdot", C = 1, kpar = "automatic")
print(model1_rbf)
eval1_rbf <- predict(model1_rbf, newdata = test, type = "response")
confusionMatrix(eval1_rbf, test$label)
# Observations:
# Overall accuracy of 95%
# Specificities quite high > 99%
# Sensitivities high > 92%
# Increase in overall accuracy and sensitivty from linear kernel using C = 1, sigma = 0.0107
# data seems to have non linearity to it
## Radial kernel with higher sigma
model2_rbf <- ksvm(label ~ ., data = train, scaled = FALSE, kernel = "rbfdot",
                   C = 1, kpar = list(sigma = 1))
print(model2_rbf)
eval2_rbf <- predict(model2_rbf, newdata = test, type = "response")
confusionMatrix(eval2_rbf, test$label)
# Observations:
# Accuracy drops to 11% and class wise results are very poor
# sigma = 1 is too much non linearity and the model is overfitting
## Using cross validation to optimise C and sigma
# defining ranges of C and sigma
grid_rbf = expand.grid(C= c(0.01, 0.1, 1, 5, 10), sigma = c(0.001, 0.01, 0.1, 1, 5))
# Using only 2 folds to optimise run time
fit.rbf <- train(label ~ ., data = train, metric = "Accuracy", method = "svmRadial",tuneGrid = grid_rbf,
                 trControl = trainControl(method = "cv", number = 2), preProcess = NULL)
# printing results of 2 cross validation
print(fit.rbf)
plot(fit.rbf)
# Observations:
# Best sigma value is ~ 0.01
# Higher sigma values are overfitting and lower sigma values are not capturing non linearity adequately
# Accuracy increases with C until 5 and then decreases again, can be further optimised
# Optimising C further with a finer grid around the best value, sigma fixed at 0.01
grid_rbf = expand.grid(C= c(1,2, 3, 4, 5, 6 ,7, 8, 9, 10), sigma = 0.01)
fit.rbf2 <- train(label ~ ., data = train, metric = "Accuracy", method = "svmRadial",tuneGrid = grid_rbf,
                  trControl = trainControl(method = "cv", number = 5), preProcess = NULL)
# printing results of cross validation
print(fit.rbf2)
plot(fit.rbf2)
eval_cv_rbf <- predict(fit.rbf2, newdata = test)
confusionMatrix(eval_cv_rbf, test$label)
# Observations:
# Accuracy is highest at C = 3 and sigma = 0.01
# Higher C values are overfitting and lower C values have too much bias
# Accuracy of 96%
# High Sensitivities > 92%
# Very High Specificities > 99%
#--------------------------------------------- Polynomial Kernel ----------------------------------------------#
## Polynomial kernel with degree 2, default scale and offset
model1_poly <- ksvm(label ~ ., data = train, kernel = "polydot", scaled = FALSE, C = 1,
                    kpar = list(degree = 2, scale = 1, offset = 1))
print(model1_poly)
eval1_poly <- predict(model1_poly, newdata = test)
confusionMatrix(eval1_poly, test$label)
# Observations
# Good accuracy of 95.24%
# High Sensitivities > 92% and specificities > 99%
# Similar performance to radial kernel
## Polynomial kernel with varied scale
model2_poly <- ksvm(label ~ ., data = train, kernel = "polydot", scaled = FALSE, C = 1,
                    kpar = list(degree = 2, scale = -2, offset = 1))
print(model2_poly)
eval2_poly <- predict(model2_poly, newdata = test)
confusionMatrix(eval2_poly, test$label)
# Observations
# Slight reduction in accuracy but similar performance
## Polynomial kernel with varied offset
model3_poly <- ksvm(label ~ ., data = train, kernel = "polydot", scaled = FALSE, C = 1,
                    kpar = list(degree = 2, scale = 1, offset = 10))
print(model3_poly)
eval3_poly <- predict(model3_poly, newdata = test)
confusionMatrix(eval3_poly, test$label)
# Observations
# similar performance as before, scale and offset seem to have little effect on performance
## Polynomial kernel with higher C
model4_poly <- ksvm(label ~ ., data = train, kernel = "polydot", scaled = FALSE, C = 3,
                    kpar = list(degree = 2, scale = 1, offset = 1))
print(model4_poly)
eval4_poly <- predict(model4_poly, newdata = test)
confusionMatrix(eval4_poly, test$label)
# Observations
# similar performance as before
## Grid search to optimise hyperparameters
grid_poly = expand.grid(C= c(0.01, 0.1, 1, 10), degree = c(1, 2, 3, 4, 5),
                        scale = c(-100, -10, -1, 1, 10, 100))
fit.poly <- train(label ~ ., data = train, metric = "Accuracy", method = "svmPoly",tuneGrid = grid_poly,
                  trControl = trainControl(method = "cv", number = 2), preProcess = NULL)
# printing results of cross validation
print(fit.poly)
plot(fit.poly)
eval_cv_poly <- predict(fit.poly, newdata = test)
confusionMatrix(eval_cv_poly, test$label)
# Observations:
# Best model obtained for C = 0.01, degree = 2, scale = 1
# as data has been scaled already scale = 1 is optimum
# C has little to no effect on performance, C = 0.01 generic model has been picked as optimum
# degrees higher than 2 are overfitting
# Accuracy of 95.24%, sensitivities > 92%, specificities > 99%
## Implementing optimised polynomial model
model5_poly <- ksvm(label ~ ., data = train, kernel = "polydot", scaled = FALSE, C = 0.01,
                    kpar = list(degree = 2, scale = 1, offset = 0.5))
print(model5_poly)
eval5_poly <- predict(model5_poly, newdata = test)
confusionMatrix(eval5_poly, test$label)
# Observations:
# offset of 0.5 used as independent variables are in the range of 0 to 1
# best accuracy of polynomial kernels 95.25%
################################################ Conclusion ################################################
# Final model
final_model = fit.rbf2
## SVM using RBF kernel (C = 3, sigma = 0.01) achieved highest accuracy in predicting digits
# reduced training data set of 5000 instances (extracted using random sampling) has been used
# distribution of the dependent variable (digits) has been preserved while sampling
# Model performance on validation data set of 10000 instances
# Accuracy = 95.46%
# Sensitivities > 92%
# Specificities > 99%
# Polynomial kernel (C = 0.01, degree = 2, scale = 1, offset = 0.5) also performed equally well
# performance metrics are only marginally lesser than radial kernel
# Run time is better than that of radial kernel
|
library(RISC)
library(ggplot2)
library(RColorBrewer)
library(irlba)
library(umap)
# Edit this to point at the local copy of the GSE111113 download.
PATH = "/Path to the data/GSE111113"
###################################################################################
### RISC raw ###
###################################################################################
# Expression table: first three columns are gene annotation, remaining columns are cells.
mat0 = read.table(file = paste0(PATH, '/GSE111113_Table_S1_FilterNormal10xExpMatrix.txt'), sep = '\t', header = T, stringsAsFactors = F)
mat1 = as.matrix(mat0[,-c(1:3)])
rownames(mat1) = mat0$gene_id
# Cell names encode the time point before the first underscore (e.g. "E16_...").
Group = sapply(colnames(mat1), function(x){strsplit(x, "_")[[1]][1]})
# Ensembl id -> gene symbol lookup, used for plotting marker genes below.
Symbol0 = mat0[,c(1, 3)]
colnames(Symbol0) = c('Ensembl', 'Symbol')
###################################################################################
### Uncorrected data ###
###################################################################################
dat0 = readscdata(count = mat1, cell = data.frame(Time = Group, row.names = colnames(mat1)), gene = data.frame(Symbol = rownames(mat1), row.names = rownames(mat1)))
# Basic QC filtering, then keep only the five time points analysed below.
dat0 = scFilter(dat0, min.UMI = 500, max.UMI = Inf, min.gene = 200, min.cell = 5)
cell0 = rownames(dat0@coldata)[dat0@coldata$Time %in% c('E16', 'E18', 'P4', 'Adu1', 'Adu2')]
dat0 = SubSet(dat0, cells = cell0)
dat0 = scNormalize(dat0)
dat0 = scDisperse(dat0)
length(dat0@vargene)
dat0 = scPCA(dat0)
dat0 = scUMAP(dat0)
# UMAP of the uncorrected (unintegrated) data, coloured by time point.
UMAPlot(dat0, colFactor = 'Time', Colors = brewer.pal(5, 'Spectral'))
###################################################################################
### RISC integration ###
###################################################################################
# Split the filtered object into one data set per time point.
cell0 = rownames(dat0@coldata)[dat0@coldata$Time == 'E16']
dat1 = SubSet(dat0, cells = cell0)
cell0 = rownames(dat0@coldata)[dat0@coldata$Time == 'E18']
dat2 = SubSet(dat0, cells = cell0)
cell0 = rownames(dat0@coldata)[dat0@coldata$Time == 'P4']
dat3 = SubSet(dat0, cells = cell0)
cell0 = rownames(dat0@coldata)[dat0@coldata$Time == 'Adu1']
dat4 = SubSet(dat0, cells = cell0)
cell0 = rownames(dat0@coldata)[dat0@coldata$Time == 'Adu2']
dat5 = SubSet(dat0, cells = cell0)
# Recompute the variable-gene set within each time point.
dat1 = scDisperse(dat1)
length(dat1@vargene)
dat2= scDisperse(dat2)
length(dat2@vargene)
dat3= scDisperse(dat3)
length(dat3@vargene)
dat4= scDisperse(dat4)
length(dat4@vargene)
dat5= scDisperse(dat5)
length(dat5@vargene)
###################################################################################
### RISC integration ###
###################################################################################
### Integration All
# Pre-computed variable-gene list supplied with the analysis.
var0 = read.table(file = paste0(PATH, "/var.tsv"), sep = "\t", header = F, stringsAsFactors = F)
var0 = var0$V1
# NOTE(review): list order puts adult samples first -- presumably the reference
# set for scMultiIntegrate; confirm against the RISC documentation.
dat.all = list(dat4, dat5, dat3, dat2, dat1)
InPlot(dat.all, var.gene = var0)
dat.all = scMultiIntegrate(dat.all, eigens = 15, var.gene = var0, add.Id = c('Adult1', 'Adult2', 'P4', 'E18', 'E16'), ncore = 4)
dat.all = scUMAP(dat.all, npc = 15, use = 'PLS')
dat.all@coldata$Set = factor(dat.all@coldata$Set, levels = c('E16', 'E18', 'P4', 'Adult1', 'Adult2'))
DimPlot(dat.all, slot = "cell.umap", colFactor = 'Set')
# Marker genes are looked up by Ensembl id (matrix rownames are gene_id).
UMAPlot(dat.all, genes = Symbol0$Ensembl[Symbol0$Symbol == 'Wfdc18'])
UMAPlot(dat.all, genes = Symbol0$Ensembl[Symbol0$Symbol == 'Sostdc1'])
## Integration Partial (P4 time point left out)
dat.par = list(dat4, dat5, dat2, dat1)
dat.par = scMultiIntegrate(dat.par, eigens = 20, var.gene = var0, add.Id = c('Adult1', 'Adult2', 'E18', 'E16'), ncore = 4)
dat.par = scUMAP(dat.par, npc = 20, use = 'PLS')
dat.par@coldata$Set = factor(dat.par@coldata$Set, levels = c('E16', 'E18', 'Adult1', 'Adult2'))
DimPlot(dat.par, slot = "cell.umap", colFactor = 'Set')
UMAPlot(dat.par, genes = Symbol0$Ensembl[Symbol0$Symbol == 'Wfdc18'])
UMAPlot(dat.par, genes = Symbol0$Ensembl[Symbol0$Symbol == 'Sostdc1'])
|
/RISC_Supplementary/GSE111113/GSE111113.R
|
no_license
|
phycomlab/RISC
|
R
| false
| false
| 3,696
|
r
|
library(RISC)
library(ggplot2)
library(RColorBrewer)
library(irlba)
library(umap)
# Edit this to point at the local copy of the GSE111113 download.
PATH = "/Path to the data/GSE111113"
###################################################################################
### RISC raw ###
###################################################################################
# Expression table: first three columns are gene annotation, remaining columns are cells.
mat0 = read.table(file = paste0(PATH, '/GSE111113_Table_S1_FilterNormal10xExpMatrix.txt'), sep = '\t', header = T, stringsAsFactors = F)
mat1 = as.matrix(mat0[,-c(1:3)])
rownames(mat1) = mat0$gene_id
# Cell names encode the time point before the first underscore (e.g. "E16_...").
Group = sapply(colnames(mat1), function(x){strsplit(x, "_")[[1]][1]})
# Ensembl id -> gene symbol lookup, used for plotting marker genes below.
Symbol0 = mat0[,c(1, 3)]
colnames(Symbol0) = c('Ensembl', 'Symbol')
###################################################################################
### Uncorrected data ###
###################################################################################
dat0 = readscdata(count = mat1, cell = data.frame(Time = Group, row.names = colnames(mat1)), gene = data.frame(Symbol = rownames(mat1), row.names = rownames(mat1)))
# Basic QC filtering, then keep only the five time points analysed below.
dat0 = scFilter(dat0, min.UMI = 500, max.UMI = Inf, min.gene = 200, min.cell = 5)
cell0 = rownames(dat0@coldata)[dat0@coldata$Time %in% c('E16', 'E18', 'P4', 'Adu1', 'Adu2')]
dat0 = SubSet(dat0, cells = cell0)
dat0 = scNormalize(dat0)
dat0 = scDisperse(dat0)
length(dat0@vargene)
dat0 = scPCA(dat0)
dat0 = scUMAP(dat0)
# UMAP of the uncorrected (unintegrated) data, coloured by time point.
UMAPlot(dat0, colFactor = 'Time', Colors = brewer.pal(5, 'Spectral'))
###################################################################################
### RISC integration ###
###################################################################################
# Split the filtered object into one data set per time point.
cell0 = rownames(dat0@coldata)[dat0@coldata$Time == 'E16']
dat1 = SubSet(dat0, cells = cell0)
cell0 = rownames(dat0@coldata)[dat0@coldata$Time == 'E18']
dat2 = SubSet(dat0, cells = cell0)
cell0 = rownames(dat0@coldata)[dat0@coldata$Time == 'P4']
dat3 = SubSet(dat0, cells = cell0)
cell0 = rownames(dat0@coldata)[dat0@coldata$Time == 'Adu1']
dat4 = SubSet(dat0, cells = cell0)
cell0 = rownames(dat0@coldata)[dat0@coldata$Time == 'Adu2']
dat5 = SubSet(dat0, cells = cell0)
# Recompute the variable-gene set within each time point.
dat1 = scDisperse(dat1)
length(dat1@vargene)
dat2= scDisperse(dat2)
length(dat2@vargene)
dat3= scDisperse(dat3)
length(dat3@vargene)
dat4= scDisperse(dat4)
length(dat4@vargene)
dat5= scDisperse(dat5)
length(dat5@vargene)
###################################################################################
### RISC integration ###
###################################################################################
### Integration All
# Pre-computed variable-gene list supplied with the analysis.
var0 = read.table(file = paste0(PATH, "/var.tsv"), sep = "\t", header = F, stringsAsFactors = F)
var0 = var0$V1
# NOTE(review): list order puts adult samples first -- presumably the reference
# set for scMultiIntegrate; confirm against the RISC documentation.
dat.all = list(dat4, dat5, dat3, dat2, dat1)
InPlot(dat.all, var.gene = var0)
dat.all = scMultiIntegrate(dat.all, eigens = 15, var.gene = var0, add.Id = c('Adult1', 'Adult2', 'P4', 'E18', 'E16'), ncore = 4)
dat.all = scUMAP(dat.all, npc = 15, use = 'PLS')
dat.all@coldata$Set = factor(dat.all@coldata$Set, levels = c('E16', 'E18', 'P4', 'Adult1', 'Adult2'))
DimPlot(dat.all, slot = "cell.umap", colFactor = 'Set')
# Marker genes are looked up by Ensembl id (matrix rownames are gene_id).
UMAPlot(dat.all, genes = Symbol0$Ensembl[Symbol0$Symbol == 'Wfdc18'])
UMAPlot(dat.all, genes = Symbol0$Ensembl[Symbol0$Symbol == 'Sostdc1'])
## Integration Partial (P4 time point left out)
dat.par = list(dat4, dat5, dat2, dat1)
dat.par = scMultiIntegrate(dat.par, eigens = 20, var.gene = var0, add.Id = c('Adult1', 'Adult2', 'E18', 'E16'), ncore = 4)
dat.par = scUMAP(dat.par, npc = 20, use = 'PLS')
dat.par@coldata$Set = factor(dat.par@coldata$Set, levels = c('E16', 'E18', 'Adult1', 'Adult2'))
DimPlot(dat.par, slot = "cell.umap", colFactor = 'Set')
UMAPlot(dat.par, genes = Symbol0$Ensembl[Symbol0$Symbol == 'Wfdc18'])
UMAPlot(dat.par, genes = Symbol0$Ensembl[Symbol0$Symbol == 'Sostdc1'])
|
#Convert list of names to grep commands. Or iterate through long names and replace with unique name (assign column of unique numbers for each name and replace each name with number reference. then in original data, just merge to assign names to numbers.)
library(rio)
library(tidyverse)
library(stringr)
library(stringi)

# Source data: full LDS scripture text plus the list of Savior names to locate.
scrip <- import("http://scriptures.nephi.org/downloads/lds-scriptures.csv.zip")
savnames <- import("https://byuistats.github.io/M335/data/BoM_SaviorNames.rds")

# Filter to the Book of Mormon.
scripfilter <- scrip %>%
  filter(volume_title == "Book of Mormon")

# Assign each name a unique id (generalized from the hard-coded c(1:111),
# which silently breaks if the name list changes length).
savfilter <- savnames %>%
  mutate(unique_id = seq_len(n()))

# Replace every non-letter with "~" so names can be matched with "~" as a
# word boundary; this avoids partial replacements such as "Many" -> "111y".
# BUGFIX: the original class "[^aA-zZ]" also preserved the ASCII punctuation
# [ \ ] ^ _ ` that sits between "Z" and "a"; "[^a-zA-Z]" is the intended class.
scripfilter$scripture_text <- gsub("[^a-zA-Z]", "~", scripfilter$scripture_text)
savfilter$name <- gsub(" ", "~", savfilter$name)
savfilter$name <- paste("~", savfilter$name, "~")

# Replace names in scriptures with unique number tokens like "!7!".
# Known limitation (see original note): "Man" still matches inside "Manasseh".
names <- unlist(savfilter$name)
names <- gsub(" ", "", names)
bob <- list()  # BUGFIX: `bob` was used below without ever being created
countvar <- 1
for (i in names) {
  unique_id <- paste0("!", countvar, "!")
  scripfilter$scripture_text <- gsub(i, unique_id, scripfilter$scripture_text)
  # Record where this name's token landed in each verse.
  ben <- str_locate_all(scripfilter$scripture_text, unique_id)
  bob[[i]] <- ben[1] #count?
  countvar <- countvar + 1
}

# Turn the "~" padding back into spaces (special characters no longer exist)
# and strip any trailing whitespace.
scripfilter$scripture_text <- gsub("~~", " ", scripfilter$scripture_text)
scripfilter$scripture_text <- gsub("~", " ", scripfilter$scripture_text)
scripfilter$scripture_text <- gsub("(\\s$)", "", scripfilter$scripture_text)

# Locate every inserted token. The pattern "![0-9]+!" is the same for every
# name, so compute it once instead of re-running it per name inside a loop
# (the original loop recomputed an identical result on each iteration).
token_locations <- str_locate_all(scripfilter$scripture_text, "![0-9]+!")
|
/Data_Wrangling_Viz/Case_Study_06/analysis/Scratch_Paper.R
|
no_license
|
McKayMDavis/Portfolio
|
R
| false
| false
| 1,946
|
r
|
#Convert list of names to grep commands. Or iterate through long names and replace with unique name (assign column of unique numbers for each name and replace each name with number reference. then in original data, just merge to assign names to numbers.)
library(rio)
library(tidyverse)
library(stringr)
library(stringi)

# Source data: full LDS scripture text plus the list of Savior names to locate.
scrip <- import("http://scriptures.nephi.org/downloads/lds-scriptures.csv.zip")
savnames <- import("https://byuistats.github.io/M335/data/BoM_SaviorNames.rds")

# Filter to the Book of Mormon.
scripfilter <- scrip %>%
  filter(volume_title == "Book of Mormon")

# Assign each name a unique id (generalized from the hard-coded c(1:111),
# which silently breaks if the name list changes length).
savfilter <- savnames %>%
  mutate(unique_id = seq_len(n()))

# Replace every non-letter with "~" so names can be matched with "~" as a
# word boundary; this avoids partial replacements such as "Many" -> "111y".
# BUGFIX: the original class "[^aA-zZ]" also preserved the ASCII punctuation
# [ \ ] ^ _ ` that sits between "Z" and "a"; "[^a-zA-Z]" is the intended class.
scripfilter$scripture_text <- gsub("[^a-zA-Z]", "~", scripfilter$scripture_text)
savfilter$name <- gsub(" ", "~", savfilter$name)
savfilter$name <- paste("~", savfilter$name, "~")

# Replace names in scriptures with unique number tokens like "!7!".
# Known limitation (see original note): "Man" still matches inside "Manasseh".
names <- unlist(savfilter$name)
names <- gsub(" ", "", names)
bob <- list()  # BUGFIX: `bob` was used below without ever being created
countvar <- 1
for (i in names) {
  unique_id <- paste0("!", countvar, "!")
  scripfilter$scripture_text <- gsub(i, unique_id, scripfilter$scripture_text)
  # Record where this name's token landed in each verse.
  ben <- str_locate_all(scripfilter$scripture_text, unique_id)
  bob[[i]] <- ben[1] #count?
  countvar <- countvar + 1
}

# Turn the "~" padding back into spaces (special characters no longer exist)
# and strip any trailing whitespace.
scripfilter$scripture_text <- gsub("~~", " ", scripfilter$scripture_text)
scripfilter$scripture_text <- gsub("~", " ", scripfilter$scripture_text)
scripfilter$scripture_text <- gsub("(\\s$)", "", scripfilter$scripture_text)

# Locate every inserted token. The pattern "![0-9]+!" is the same for every
# name, so compute it once instead of re-running it per name inside a loop
# (the original loop recomputed an identical result on each iteration).
token_locations <- str_locate_all(scripfilter$scripture_text, "![0-9]+!")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/optimpute.R
\name{rand_imputation_learner}
\alias{rand_imputation_learner}
\title{Learner for conducting random imputation}
\usage{
rand_imputation_learner(...)
}
\arguments{
\item{...}{Use keyword arguments to set parameters on the resulting learner.
Refer to the Julia documentation for available parameters.}
}
\description{
Julia Equivalent:
\href{https://docs.interpretable.ai/v3.1.1/OptImpute/reference/#IAI.RandImputationLearner}{\code{IAI.RandImputationLearner}}
}
\examples{
\dontrun{lnr <- iai::rand_imputation_learner()}
}
|
/man/rand_imputation_learner.Rd
|
no_license
|
cran/iai
|
R
| false
| true
| 613
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/optimpute.R
\name{rand_imputation_learner}
\alias{rand_imputation_learner}
\title{Learner for conducting random imputation}
\usage{
rand_imputation_learner(...)
}
\arguments{
\item{...}{Use keyword arguments to set parameters on the resulting learner.
Refer to the Julia documentation for available parameters.}
}
\description{
Julia Equivalent:
\href{https://docs.interpretable.ai/v3.1.1/OptImpute/reference/#IAI.RandImputationLearner}{\code{IAI.RandImputationLearner}}
}
\examples{
\dontrun{lnr <- iai::rand_imputation_learner()}
}
|
# Draw a Venn diagram of significant variants shared between variant callers.
# Usage: Rscript VennPlot.R <locations_file> <output_prefix>
args <- commandArgs(trailingOnly = TRUE)
library(VennDiagram)

# Input: two whitespace-separated columns -- variant identifier and caller name.
locfile <- read.table(file = args[1], header = FALSE)
names(locfile) <- c('sig', 'caller')
allcaller <- unique(locfile[[2]])

# One vector of variant ids per caller.
# BUGFIX: the original did `loclist[n] <- locfile[locfile$caller == ..., ]`,
# which assigns a 2-column data frame into a single list slot via `[<-`
# (a length-mismatch error/warning); `[[<-` with the id column is intended.
loclist <- vector("list", length(allcaller))
for (n in seq_along(allcaller)) {
  loclist[[n]] <- locfile$sig[locfile$caller == allcaller[n]]
}
names(loclist) <- allcaller

# BUGFIX: paste() inserted a space before "_snps_venn.png"; paste0() does not.
venn.diagram(loclist, height = 5000, width = 5200, resolution = 500, imagetype = "png", filename = paste0(args[2], "_snps_venn.png"))
|
/opt/RNASeqAnalysis/3_VarCalling/exec/VennPlot.R
|
no_license
|
YU-Zhejian/DST2_Group
|
R
| false
| false
| 495
|
r
|
# Draw a Venn diagram of significant variants shared between variant callers.
# Usage: Rscript VennPlot.R <locations_file> <output_prefix>
args <- commandArgs(trailingOnly = TRUE)
library(VennDiagram)

# Input: two whitespace-separated columns -- variant identifier and caller name.
locfile <- read.table(file = args[1], header = FALSE)
names(locfile) <- c('sig', 'caller')
allcaller <- unique(locfile[[2]])

# One vector of variant ids per caller.
# BUGFIX: the original did `loclist[n] <- locfile[locfile$caller == ..., ]`,
# which assigns a 2-column data frame into a single list slot via `[<-`
# (a length-mismatch error/warning); `[[<-` with the id column is intended.
loclist <- vector("list", length(allcaller))
for (n in seq_along(allcaller)) {
  loclist[[n]] <- locfile$sig[locfile$caller == allcaller[n]]
}
names(loclist) <- allcaller

# BUGFIX: paste() inserted a space before "_snps_venn.png"; paste0() does not.
venn.diagram(loclist, height = 5000, width = 5200, resolution = 500, imagetype = "png", filename = paste0(args[2], "_snps_venn.png"))
|
# Two-component Gaussian mixture model fit with JAGS (rjags).
dat = read.csv("C:/Dev/DataScience/R_doc/Bayes/mixture.csv", header=FALSE)
y = dat$V1
(n = length(y))
library("rjags")
# Model: each observation i belongs to latent class z[i] ~ Categorical(omega),
# with class means mu[1] < mu[2] (the truncation T(mu[1],) enforces the
# ordering, which makes the labels identifiable) and a shared precision.
# Everything inside the quoted string below is JAGS model code, not R.
mod_string = " model {
for (i in 1:length(y)) {
    y[i] ~ dnorm(mu[z[i]], prec)
    z[i] ~ dcat(omega)
}
mu[1] ~ dnorm(-1.0, 1.0/100.0)
mu[2] ~ dnorm(1.0, 1.0/100.0) T(mu[1],) # ensures mu[1] < mu[2]
prec ~ dgamma(1.0/2.0, 1.0*1.0/2.0)
sig = sqrt(1.0/prec)
omega ~ ddirich(c(1.0, 1.0))
} "
set.seed(11)
data_jags = list(y=y)
params = c("mu", "sig", "omega", "z[1]", "z[31]", "z[49]", "z[6]") # Select some z's to monitor
# Three chains, 1000 burn-in iterations, then 5000 sampling iterations.
mod = jags.model(textConnection(mod_string), data=data_jags, n.chains=3)
update(mod, 1e3)
mod_sim = coda.samples(model=mod,
                       variable.names=params,
                       n.iter=5e3)
# Stack the chains into a single mcmc object for density plots.
mod_csim = as.mcmc(do.call(rbind, mod_sim))
## convergence diagnostics
plot(mod_sim, ask=TRUE)
autocorr.diag(mod_sim)
effectiveSize(mod_sim)
## for the population parameters and the mixing weights
par(mfrow=c(3,2))
densplot(mod_csim[,c("mu[1]", "mu[2]", "omega[1]", "omega[2]", "sig")])
## for the z's (posterior class membership of selected observations)
par(mfrow=c(2,2))
densplot(mod_csim[,c("z[1]", "z[31]", "z[49]", "z[6]")])
|
/R/RDSProject/R/mixture_model.R
|
no_license
|
sadiagit/DataScience-R
|
R
| false
| false
| 1,170
|
r
|
# Two-component Gaussian mixture model fit with JAGS (rjags).
dat = read.csv("C:/Dev/DataScience/R_doc/Bayes/mixture.csv", header=FALSE)
y = dat$V1
(n = length(y))
library("rjags")
# Model: each observation i belongs to latent class z[i] ~ Categorical(omega),
# with class means mu[1] < mu[2] (the truncation T(mu[1],) enforces the
# ordering, which makes the labels identifiable) and a shared precision.
# Everything inside the quoted string below is JAGS model code, not R.
mod_string = " model {
for (i in 1:length(y)) {
    y[i] ~ dnorm(mu[z[i]], prec)
    z[i] ~ dcat(omega)
}
mu[1] ~ dnorm(-1.0, 1.0/100.0)
mu[2] ~ dnorm(1.0, 1.0/100.0) T(mu[1],) # ensures mu[1] < mu[2]
prec ~ dgamma(1.0/2.0, 1.0*1.0/2.0)
sig = sqrt(1.0/prec)
omega ~ ddirich(c(1.0, 1.0))
} "
set.seed(11)
data_jags = list(y=y)
params = c("mu", "sig", "omega", "z[1]", "z[31]", "z[49]", "z[6]") # Select some z's to monitor
# Three chains, 1000 burn-in iterations, then 5000 sampling iterations.
mod = jags.model(textConnection(mod_string), data=data_jags, n.chains=3)
update(mod, 1e3)
mod_sim = coda.samples(model=mod,
                       variable.names=params,
                       n.iter=5e3)
# Stack the chains into a single mcmc object for density plots.
mod_csim = as.mcmc(do.call(rbind, mod_sim))
## convergence diagnostics
plot(mod_sim, ask=TRUE)
autocorr.diag(mod_sim)
effectiveSize(mod_sim)
## for the population parameters and the mixing weights
par(mfrow=c(3,2))
densplot(mod_csim[,c("mu[1]", "mu[2]", "omega[1]", "omega[2]", "sig")])
## for the z's (posterior class membership of selected observations)
par(mfrow=c(2,2))
densplot(mod_csim[,c("z[1]", "z[31]", "z[49]", "z[6]")])
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{compute_force}
\alias{compute_force}
\title{Compute the force upon point \code{a} from point \code{b}.}
\usage{
compute_force(a, b, force = 1e-06)
}
\arguments{
\item{a}{A point like \code{c(x, y)}}
\item{b}{A point like \code{c(x, y)}}
\item{force}{Magnitude of the force (defaults to \code{1e-6})}
}
\description{
Compute the force upon point \code{a} from point \code{b}.
}
|
/man/compute_force.Rd
|
no_license
|
Gofer51/ggrepel
|
R
| false
| true
| 478
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{compute_force}
\alias{compute_force}
\title{Compute the force upon point \code{a} from point \code{b}.}
\usage{
compute_force(a, b, force = 1e-06)
}
\arguments{
\item{a}{A point like \code{c(x, y)}}
\item{b}{A point like \code{c(x, y)}}
\item{force}{Magnitude of the force (defaults to \code{1e-6})}
}
\description{
Compute the force upon point \code{a} from point \code{b}.
}
|
################################################################################
## Fitting, plotting, summarizing staggered synth
################################################################################
#' Fit staggered synth
#' @param form outcome ~ treatment
#' @param unit Name of unit column
#' @param time Name of time column
#' @param data Panel data as dataframe
#' @param n_leads How long past treatment effects should be estimated for, default is number of post treatment periods for last treated unit
#' @param n_lags Number of pre-treatment periods to balance, default is to balance all periods
#' @param nu Fraction of balance for individual balance
#' @param lambda Regularization hyperparameter, default = 0
#' @param fixedeff Whether to include a unit fixed effect, default FALSE
#' @param n_factors Number of factors for interactive fixed effects, setting to NULL fits with CV, default is 0
#' @param scm Whether to fit scm weights
#' @param time_cohort Whether to average synthetic controls into time cohorts
#' @param eps_abs Absolute error tolerance for osqp
#' @param eps_rel Relative error tolerance for osqp
#' @param verbose Whether to print logs for osqp
#' @param ... Extra arguments
#'
#' @return multisynth object that contains:
#'         \itemize{
#'          \item{"weights"}{weights matrix where each column is a set of weights for a treated unit}
#'          \item{"data"}{Panel data as matrices}
#'          \item{"imbalance"}{Matrix of treatment minus synthetic control for pre-treatment time periods, each column corresponds to a treated unit}
#'          \item{"global_l2"}{L2 imbalance for the pooled synthetic control}
#'          \item{"scaled_global_l2"}{L2 imbalance for the pooled synthetic control, scaled by the imbalance for unitform weights}
#'          \item{"ind_l2"}{Average L2 imbalance for the individual synthetic controls}
#'          \item{"scaled_ind_l2"}{Average L2 imbalance for the individual synthetic controls, scaled by the imbalance for unitform weights}
#'          \item{"n_leads", "n_lags"}{Number of post treatment outcomes (leads) and pre-treatment outcomes (lags) to include in the analysis}
#'          \item{"nu"}{Fraction of balance for individual balance}
#'          \item{"lambda"}{Regularization hyperparameter}
#'          \item{"scm"}{Whether to fit scm weights}
#'          \item{"grps"}{Time periods for treated units}
#'          \item{"y0hat"}{Pilot estimates of control outcomes}
#'          \item{"residuals"}{Difference between the observed outcomes and the pilot estimates}
#'          \item{"n_factors"}{Number of factors for interactive fixed effects}
#'         }
#' @export
multisynth <- function(form, unit, time, data,
                       n_leads=NULL, n_lags=NULL,
                       nu=NULL, lambda=0,
                       fixedeff = FALSE,
                       n_factors=0,
                       scm=TRUE,
                       time_cohort = FALSE,
                       eps_abs = 1e-4,
                       eps_rel = 1e-4,
                       verbose = FALSE, ...) {
    call_name <- match.call()
    form <- Formula::Formula(form)
    unit <- enquo(unit)
    time <- enquo(time)
    ## format data
    outcome <- terms(formula(form, rhs=1))[[2]]
    trt <- terms(formula(form, rhs=1))[[3]]
    wide <- format_data_stag(outcome, trt, unit, time, data)
    # force codes the fixed-effect structure passed down to the fitter:
    # 2 = time effects only, 3 = time + unit effects
    force <- if(fixedeff) 3 else 2
    # if n_leads is NULL set it to be the largest possible number of leads
    # for the last treated unit
    if(is.null(n_leads)) {
        n_leads <- ncol(wide$y)
    } else if(n_leads > max(apply(1-wide$mask, 1, sum)) + ncol(wide$y)) {
        n_leads <- max(apply(1-wide$mask, 1, sum)) + ncol(wide$y)
    }
    ## if n_lags is NULL set it to the largest number of pre-treatment periods
    if(is.null(n_lags)) {
        n_lags <- ncol(wide$X)
    } else if(n_lags > ncol(wide$X)) {
        n_lags <- ncol(wide$X)
    }
    long_df <- data[c(quo_name(unit), quo_name(time), as.character(trt), as.character(outcome))]
    msynth <- multisynth_formatted(wide = wide, relative = TRUE,
                                   n_leads = n_leads, n_lags = n_lags,
                                   nu = nu, lambda = lambda,
                                   force = force, n_factors = n_factors,
                                   scm = scm, time_cohort = time_cohort,
                                   time_w = FALSE, lambda_t = 0,
                                   fit_resids = TRUE, eps_abs = eps_abs,
                                   eps_rel = eps_rel, verbose = verbose, long_df = long_df, ...)
    # label the weight rows with the (sorted) unit identifiers
    units <- data %>% arrange(!!unit) %>% distinct(!!unit) %>% pull(!!unit)
    rownames(msynth$weights) <- units
    if(scm) {
        ## Get imbalance for uniform weights on raw data
        ## TODO: Get rid of this stupid hack of just fitting the weights again with big lambda
        unif <- multisynth_qp(X=wide$X, ## X=residuals[,1:ncol(wide$X)],
                              trt=wide$trt,
                              mask=wide$mask,
                              n_leads=n_leads,
                              n_lags=n_lags,
                              relative=TRUE,
                              nu=0, lambda=1e10,
                              time_cohort = time_cohort,
                              eps_rel = eps_rel,
                              eps_abs = eps_abs,
                              verbose = verbose)
        ## scaled global balance
        ## msynth$scaled_global_l2 <- msynth$global_l2 / sqrt(sum(unif$imbalance[,1]^2))
        msynth$scaled_global_l2 <- msynth$global_l2 / unif$global_l2
        ## balance for individual estimates
        ## msynth$scaled_ind_l2 <- msynth$ind_l2  / sqrt(sum(unif$imbalance[,-1]^2))
        msynth$scaled_ind_l2 <- msynth$ind_l2  / unif$ind_l2
    }
    msynth$call <- call_name
    return(msynth)
}
#' Internal function to fit staggered synth with formatted data
#' @param wide List containing data elements
#' @param relative Whether to compute balance by relative time
#' @param n_leads How long past treatment effects should be estimated for
#' @param n_lags Number of pre-treatment periods to balance, default is to balance all periods
#' @param nu Fraction of balance for individual balance
#' @param lambda Regularization hyperparameter, default = 0
#' @param force c(0,1,2,3) what type of fixed effects to include
#' @param n_factors Number of factors for interactive fixed effects, default does CV
#' @param scm Whether to fit scm weights
#' @param time_cohort Whether to average synthetic controls into time cohorts
#' @param time_w Whether to fit time weights
#' @param lambda_t Regularization for time regression
#' @param fit_resids Whether to fit SCM on the residuals or not
#' @param eps_abs Absolute error tolerance for osqp
#' @param eps_rel Relative error tolerance for osqp
#' @param verbose Whether to print logs for osqp
#' @param long_df A long dataframe with 4 columns in the order unit, time, trt, outcome
#' @param ... Extra arguments
#' @noRd
#' @return multisynth object
multisynth_formatted <- function(wide, relative=T, n_leads, n_lags,
                                 nu, lambda,
                                 force,
                                 n_factors,
                                 scm, time_cohort,
                                 time_w, lambda_t,
                                 fit_resids,
                                 eps_abs, eps_rel,
                                 verbose, long_df, ...) {
    ## average together treatment groups
    ## grps <- unique(wide$trt) %>% sort()
    # time_cohort: one synthetic control per distinct treatment time;
    # otherwise one per treated unit (finite trt = treated)
    if(time_cohort) {
        grps <- unique(wide$trt[is.finite(wide$trt)])
    } else {
        grps <- wide$trt[is.finite(wide$trt)]
    }
    J <- length(grps)
    ## fit outcome models -- the branches below pick the pilot model used to
    ## produce y0hat (predicted control outcomes) and the residuals to balance
    if(time_w) {
        # Autoregressive model
        out <- fit_time_reg(cbind(wide$X, wide$y), wide$trt,
                            n_leads, lambda_t, ...)
        y0hat <- out$y0hat
        residuals <- out$residuals
        params <- out$time_weights
    } else if(is.null(n_factors)) {
        # n_factors = NULL: choose the number of factors by cross validation
        out <- tryCatch({
            fit_gsynth_multi(long_df, cbind(wide$X, wide$y), wide$trt, force=force)
        }, error = function(error_condition) {
            stop("Cannot run CV because there are too few pre-treatment periods.")
        })
        y0hat <- out$y0hat
        params <- out$params
        n_factors <- ncol(params$factor)
        ## get residuals from outcome model
        residuals <- cbind(wide$X, wide$y) - y0hat
    } else if (n_factors != 0) {
        ## if number of factors is provided don't do CV
        out <- fit_gsynth_multi(long_df, cbind(wide$X, wide$y), wide$trt,
                                r=n_factors, CV=0, force=force)
        y0hat <- out$y0hat
        params <- out$params
        ## get residuals from outcome model
        residuals <- cbind(wide$X, wide$y) - y0hat
    } else if(force == 0 & n_factors == 0) {
        # if no fixed effects or factors, just take out
        # control averages at each time point
        # time fixed effects from pure controls
        pure_ctrl <- cbind(wide$X, wide$y)[!is.finite(wide$trt), , drop = F]
        y0hat <- matrix(colMeans(pure_ctrl),
                        nrow = nrow(wide$X), ncol = ncol(pure_ctrl),
                        byrow = T)
        residuals <- cbind(wide$X, wide$y) - y0hat
        params <- NULL
    } else {
        ## take out pre-treatment averages
        fullmask <- cbind(wide$mask, matrix(0, nrow=nrow(wide$mask),
                                            ncol=ncol(wide$y)))
        out <- fit_feff(cbind(wide$X, wide$y), wide$trt, fullmask, force)
        y0hat <- out$y0hat
        residuals <- out$residuals
        params <- NULL
    }
    ## balance the residuals -- bal_mat is restricted to the pre-treatment
    ## columns (1:ncol(wide$X)) since only those are balanced
    if(fit_resids) {
        if(time_w) {
            # fit scm on residuals after taking out unit fixed effects
            fullmask <- cbind(wide$mask, matrix(0, nrow=nrow(wide$mask),
                                                ncol=ncol(wide$y)))
            out <- fit_feff(cbind(wide$X, wide$y), wide$trt, fullmask, force)
            bal_mat <- lapply(out$residuals, function(x) x[,1:ncol(wide$X)])
        } else if(typeof(residuals) == "list") {
            bal_mat <- lapply(residuals, function(x) x[,1:ncol(wide$X)])
        } else {
            bal_mat <- residuals[,1:ncol(wide$X)]
        }
    } else {
        # if not balancing residuals, then take out control averages
        # for each time
        ctrl_avg <- matrix(colMeans(wide$X[!is.finite(wide$trt), , drop = F]),
                           nrow = nrow(wide$X), ncol = ncol(wide$X), byrow = T)
        bal_mat <- wide$X - ctrl_avg
        # NOTE(review): the assignment below immediately overwrites the
        # centered matrix computed above, making ctrl_avg dead code --
        # confirm which balance matrix (raw or centered) is intended here.
        bal_mat <- wide$X
    }
    if(scm) {
        ## if no nu value is provided, use default based on
        ## global and individual imbalance for no-pooling estimator
        if(is.null(nu)) {
            ## fit with nu = 0
            nu_fit <- multisynth_qp(X=bal_mat,
                                    trt=wide$trt,
                                    mask=wide$mask,
                                    n_leads=n_leads,
                                    n_lags=n_lags,
                                    relative=relative,
                                    nu=0, lambda=lambda,
                                    time_cohort = time_cohort,
                                    eps_rel = eps_rel,
                                    eps_abs = eps_abs,
                                    verbose = verbose)
            ## select nu by triangle inequality ratio
            # (global L2 imbalance over the summed individual L2 imbalances)
            glbl <- sqrt(sum(nu_fit$imbalance[,1]^2))
            ind <- sum(apply(nu_fit$imbalance[,-1, drop = F], 2, function(x) sqrt(sum(x^2))))
            nu <- glbl / ind
        }
        msynth <- multisynth_qp(X=bal_mat,
                                trt=wide$trt,
                                mask=wide$mask,
                                n_leads=n_leads,
                                n_lags=n_lags,
                                relative=relative,
                                nu=nu, lambda=lambda,
                                time_cohort = time_cohort,
                                eps_rel = eps_rel,
                                eps_abs = eps_abs,
                                verbose = verbose)
    } else {
        # no SCM weights requested: return zero weights and NA imbalances
        msynth <- list(weights = matrix(0, nrow = nrow(wide$X), ncol = J),
                       imbalance=NA,
                       global_l2=NA,
                       ind_l2=NA)
    }
    ## put in data and hyperparams
    msynth$data <- wide
    msynth$relative <- relative
    msynth$n_leads <- n_leads
    msynth$n_lags <- n_lags
    msynth$nu <- nu
    msynth$lambda <- lambda
    msynth$scm <- scm
    msynth$time_cohort <- time_cohort
    msynth$grps <- grps
    msynth$y0hat <- y0hat
    msynth$residuals <- residuals
    msynth$n_factors <- n_factors
    msynth$force <- force
    ## outcome model parameters
    msynth$params <- params
    # more arguments
    msynth$scm <- scm
    msynth$time_w <- time_w
    msynth$lambda_t <- lambda_t
    msynth$fit_resids <- fit_resids
    msynth$extra_pars <- c(list(eps_abs = eps_abs,
                                eps_rel = eps_rel,
                                verbose = verbose),
                           list(...))
    msynth$long_df <- long_df
    ##format output
    class(msynth) <- "multisynth"
    return(msynth)
}
#' Get prediction of average outcome under control or ATT
#'
#' Imputes the counterfactual (control) outcome path for each treated unit or
#' treatment-time cohort by combining the pilot outcome-model estimates
#' (`y0hat`) with the synthetic-control reweighting of the residuals, then
#' optionally differences against observed treated means to form ATT estimates.
#'
#' @param object Fit multisynth object
#' @param ... Optional arguments; recognized named arguments are `relative`
#'   (logical; index time relative to treatment, defaults to the value stored
#'   in the fit) and `att` (logical; return ATT estimates rather than imputed
#'   control means, default FALSE)
#'
#' @return Matrix of predicted post-treatment control outcomes for each treated unit
#'   (or ATT estimates when `att = TRUE`). The first column is the
#'   weighted average across treated units/cohorts; when `relative = TRUE`
#'   the final row holds the post-treatment average for each column.
#' @export
predict.multisynth <- function(object, ...) {
    ## extract optional arguments from ... (keeps the S3 generic signature)
    if ("relative" %in% names(list(...))) {
        relative <- list(...)$relative
    } else {
        relative <- NULL
    }
    if ("att" %in% names(list(...))) {
        att <- list(...)$att
    } else {
        att <- F
    }
    multisynth <- object
    time_cohort <- multisynth$time_cohort
    ## fall back to the relative-time setting stored at fit time
    if(is.null(relative)) {
        relative <- multisynth$relative
    }
    n_leads <- multisynth$n_leads
    d <- ncol(multisynth$data$X)      # number of pre-treatment periods
    n <- nrow(multisynth$data$X)      # number of units
    fulldat <- cbind(multisynth$data$X, multisynth$data$y)
    ttot <- ncol(fulldat)             # total number of time periods
    grps <- multisynth$grps           # treatment times (one per cohort/unit)
    J <- length(grps)
    ## which_t: row indices of treated units; a list per cohort when pooling
    ## by treatment time, otherwise an integer vector (one unit per entry)
    if(time_cohort) {
        which_t <- lapply(grps,
                          function(tj) (1:n)[multisynth$data$trt == tj])
        mask <- unique(multisynth$data$mask)
    } else {
        which_t <- (1:n)[is.finite(multisynth$data$trt)]
        mask <- multisynth$data$mask
    }
    ## cohort sizes; when which_t is an atomic vector, which_t[[j]] is a
    ## single index so n1[j] == 1 for every treated unit
    n1 <- sapply(1:J, function(j) length(which_t[[j]]))
    ## extend the pre-treatment mask with zeros over the post periods
    fullmask <- cbind(mask, matrix(0, nrow = J, ncol = (ttot - d)))
    ## estimate the post-treatment values to get att estimates
    mu1hat <- vapply(1:J,
                     function(j) colMeans(fulldat[which_t[[j]],
                                                  , drop=FALSE]),
                     numeric(ttot))
    ## get average outcome model estimates and reweight residuals;
    ## residuals/y0hat are lists when the pilot model is fit per-cohort
    if(typeof(multisynth$y0hat) == "list") {
        mu0hat <- vapply(1:J,
                         function(j) {
                             y0hat <- colMeans(multisynth$y0hat[[j]][which_t[[j]],
                                                                     , drop=FALSE])
                             ## add the weighted control residuals unless scm
                             ## was disabled (all-zero weights)
                             if(!all(multisynth$weights == 0)) {
                                 y0hat + t(multisynth$residuals[[j]]) %*%
                                     multisynth$weights[,j] /
                                     sum(multisynth$weights[,j])
                             } else {
                                 y0hat
                             }
                         }
                         , numeric(ttot)
                         )
    } else {
        mu0hat <- vapply(1:J,
                         function(j) {
                             y0hat <- colMeans(multisynth$y0hat[which_t[[j]],
                                                                , drop=FALSE])
                             if(!all(multisynth$weights == 0)) {
                                 y0hat + t(multisynth$residuals) %*%
                                     multisynth$weights[,j] /
                                     sum(multisynth$weights[,j])
                             } else {
                                 y0hat
                             }
                         }
                         , numeric(ttot)
                         )
    }
    ## ATT path: observed treated mean minus imputed control mean
    tauhat <- mu1hat - mu0hat
    ## re-index time if relative to treatment
    if(relative) {
        total_len <- min(d + n_leads, ttot + d - min(grps)) ## total length of predictions
        ## shift each column so treatment always occurs at position d,
        ## padding with NA; the appended final row is the post-period average
        mu0hat <- vapply(1:J,
                         function(j) {
                             vec <- c(rep(NA, d-grps[j]),
                                      mu0hat[1:grps[j],j],
                                      mu0hat[(grps[j]+1):(min(grps[j] + n_leads, ttot)), j])
                             ## last row is post-treatment average
                             c(vec,
                               rep(NA, total_len - length(vec)),
                               mean(mu0hat[(grps[j]+1):(min(grps[j] + n_leads, ttot)), j]))
                         },
                         numeric(total_len +1
                                 ))
        tauhat <- vapply(1:J,
                         function(j) {
                             vec <- c(rep(NA, d-grps[j]),
                                      tauhat[1:grps[j],j],
                                      tauhat[(grps[j]+1):(min(grps[j] + n_leads, ttot)), j])
                             ## last row is post-treatment average
                             c(vec,
                               rep(NA, total_len - length(vec)),
                               mean(tauhat[(grps[j]+1):(min(grps[j] + n_leads, ttot)), j]))
                         },
                         numeric(total_len +1
                                 ))
        ## get the overall average estimate (size-weighted over non-NA cells)
        avg <- apply(mu0hat, 1, function(z) sum(n1 * z, na.rm=T) / sum(n1 * !is.na(z)))
        mu0hat <- cbind(avg, mu0hat)
        avg <- apply(tauhat, 1, function(z) sum(n1 * z, na.rm=T) / sum(n1 * !is.na(z)))
        tauhat <- cbind(avg, tauhat)
    } else {
        ## remove all estimates for t > T_j + n_leads
        vapply(1:J,
               function(j) c(mu0hat[1:min(grps[j]+n_leads, ttot),j],
                             rep(NA, max(0, ttot-(grps[j] + n_leads)))),
               numeric(ttot)) -> mu0hat
        vapply(1:J,
               function(j) c(tauhat[1:min(grps[j]+n_leads, ttot),j],
                             rep(NA, max(0, ttot-(grps[j] + n_leads)))),
               numeric(ttot)) -> tauhat
        ## only average currently treated units: blend the pre-treatment
        ## (mask == 1) and post-treatment (mask == 0) weighted averages
        avg1 <- rowSums(t(fullmask) * mu0hat * n1) /
            rowSums(t(fullmask) * n1)
        avg2 <- rowSums(t(1-fullmask) * mu0hat * n1) /
            rowSums(t(1-fullmask) * n1)
        avg <- replace_na(avg1, 0) * apply(fullmask, 2, min) +
            replace_na(avg2,0) * apply(1-fullmask, 2, max)
        cbind(avg, mu0hat) -> mu0hat
        ## only average currently treated units
        avg1 <- rowSums(t(fullmask) * tauhat * n1) /
            rowSums(t(fullmask) * n1)
        avg2 <- rowSums(t(1-fullmask) * tauhat * n1) /
            rowSums(t(1-fullmask) * n1)
        avg <- replace_na(avg1, 0) * apply(fullmask, 2, min) +
            replace_na(avg2,0) * apply(1 - fullmask, 2, max)
        cbind(avg, tauhat) -> tauhat
    }
    if(att) {
        return(tauhat)
    } else {
        return(mu0hat)
    }
}
#' Print function for multisynth
#'
#' Echoes the original call and the overall post-treatment average ATT.
#'
#' @param x multisynth object
#' @param ... Optional arguments
#' @export
print.multisynth <- function(x, ...) {
    obj <- x
    ## echo the call, formatted the same way lm does
    call_str <- paste(deparse(obj$call), sep = "\n", collapse = "\n")
    cat("\nCall:\n", call_str, "\n\n", sep = "")
    ## first column of the ATT predictions is the cross-unit average;
    ## its final entry is the overall post-treatment average ATT
    avg_column <- predict(obj, att = TRUE)[, 1]
    overall_att <- avg_column[length(avg_column)]
    cat(paste("Average ATT Estimate: ",
              format(round(mean(overall_att), 3), nsmall = 3),
              "\n\n", sep = ""))
}
#' Plot function for multisynth
#'
#' Thin wrapper that summarizes the fit and delegates to the summary plot
#' method.
#'
#' @importFrom graphics plot
#' @param x Augsynth object to be plotted
#' @param ... Optional arguments; recognizes `se` (default TRUE), `levels`
#'   (default NULL = all levels), and `jackknife` (default TRUE)
#' @export
plot.multisynth <- function(x, ...) {
    dots <- list(...)
    ## optional arguments with their defaults
    se <- if ("se" %in% names(dots)) dots$se else TRUE
    levels <- if ("levels" %in% names(dots)) dots$levels else NULL
    jackknife <- if ("jackknife" %in% names(dots)) dots$jackknife else TRUE
    ## build the summary (with or without jackknife SEs) and plot it
    plot(summary(x, jackknife = jackknife), levels, se)
}
#' Plug-in standard errors for a multisynth fit (internal)
#'
#' Estimates standard errors for the imputed counterfactual means from the
#' weighted control residuals: a treated-unit variance term plus a
#' control-reweighting variance term.
#'
#' @param multisynth Fit multisynth object
#' @param relative Whether to index time relative to treatment; defaults to
#'   the setting stored in the fit
#' @return Matrix of standard errors; first column is the overall (weighted
#'   average) standard error, remaining columns are per-cohort/unit
compute_se <- function(multisynth, relative=NULL) {
    ## get info from the multisynth object
    if(is.null(relative)) {
        relative <- multisynth$relative
    }
    n_leads <- multisynth$n_leads
    d <- ncol(multisynth$data$X)      # number of pre-treatment periods
    fulldat <- cbind(multisynth$data$X, multisynth$data$y)
    ttot <- ncol(fulldat)             # total number of time periods
    J <- length(multisynth$grps)
    ## treated-cohort sizes, one count per distinct treatment time
    n1 <- multisynth$data$trt[is.finite(multisynth$data$trt)] %>%
        table() %>% as.numeric()
    grps <- multisynth$grps
    fullmask <- cbind(multisynth$data$mask, matrix(0, nrow=J, ncol=(ttot-d)))
    ## use weighted control residuals to estimate variance for treated units;
    ## residuals are a list when the pilot model was fit per cohort
    if(typeof(multisynth$residuals) == "list") {
        trt_var <- vapply(1:J,
                          function(j) {
                              colSums(multisynth$residuals[[j]]^2 * multisynth$weights[,j]) / n1[j]
                          },
                          numeric(ttot))
        ## standard error estimate of imputed counterfactual mean
        ## from control residuals and weights
        ctrl_var <- vapply(1:J,
                           function(j) colSums(multisynth$residuals[[j]]^2 * multisynth$weights[,j]^2),
                           numeric(ttot))
    } else {
        trt_var <- vapply(1:J,
                          function(j) {
                              colSums(multisynth$residuals^2 * multisynth$weights[,j]) / n1[j]
                          },
                          numeric(ttot))
        ## standard error estimate of imputed counterfactual mean
        ## from control residuals and weights
        ctrl_var <- vapply(1:J,
                           function(j) colSums(multisynth$residuals^2 * multisynth$weights[,j]^2),
                           numeric(ttot))
    }
    ## standard error
    se <- sqrt(trt_var + ctrl_var)
    ## re-index time if relative to treatment
    if(relative) {
        total_len <- min(d + n_leads, ttot + d - min(grps)) ## total length of predictions
        ## shift each column so treatment occurs at position d, NA-padded
        se <- vapply(1:J,
                     function(j) {
                         vec <- c(rep(NA, d-grps[j]),
                                  se[1:grps[j],j],
                                  se[(grps[j]+1):(min(grps[j] + n_leads, ttot)), j])
                         c(vec, rep(NA, total_len - length(vec)))
                     },
                     numeric(total_len))
        ## get the overall standard error estimate
        avg_se <- apply(se, 1, function(z) sqrt(sum(n1^2 * z^2, na.rm=T)) / sum(n1 * !is.na(z)))
        se <- cbind(avg_se, se)
    } else {
        ## remove all estimates for t > T_j + n_leads
        ## NOTE(review): the truncated matrix is assigned to `tauhat` and is
        ## never used afterwards; `se` below is still the untruncated matrix.
        ## This looks like it was meant to be `-> se` — confirm intent before
        ## changing (left as-is to preserve behavior).
        vapply(1:J,
               function(j) c(se[1:min(grps[j]+n_leads, ttot),j],
                             rep(NA, max(0, ttot-(grps[j] + n_leads)))),
               numeric(ttot)) -> tauhat
        ## only average currently treated units
        avg1 <- sqrt(rowSums(t(fullmask) * se^2 * n1^2)) /
            rowSums(t(fullmask) * n1)
        avg2 <- sqrt(rowSums(t(1-fullmask) * se^2 * n1^2)) /
            rowSums(t(1-fullmask) * n1)
        avg_se <- replace_na(avg1, 0) * apply(fullmask, 2, min) +
            replace_na(avg2,0) * apply(1-fullmask, 2, max)
        se <- cbind(avg_se, se)
    }
    return(se)
}
#' Summary function for multisynth
#' @param object multisynth object
#' @param ... Optional arguments; recognizes `jackknife` (logical, default
#'   TRUE) — whether to compute jackknife standard errors (otherwise the
#'   Std.Error column is NA)
#'
#' @return summary.multisynth object that contains:
#' \itemize{
#'          \item{"att"}{Dataframe with ATT estimates, standard errors for each treated unit}
#'          \item{"global_l2"}{L2 imbalance for the pooled synthetic control}
#'          \item{"scaled_global_l2"}{L2 imbalance for the pooled synthetic control, scaled by the imbalance for unitform weights}
#'          \item{"ind_l2"}{Average L2 imbalance for the individual synthetic controls}
#'          \item{"scaled_ind_l2"}{Average L2 imbalance for the individual synthetic controls, scaled by the imbalance for unitform weights}
#'         \item{"n_leads", "n_lags"}{Number of post treatment outcomes (leads) and pre-treatment outcomes (lags) to include in the analysis}
#' }
#' @export
summary.multisynth <- function(object, ...) {
    ## optional jackknife flag from ...
    if ("jackknife" %in% names(list(...))) {
        jackknife <- list(...)$jackknife
    } else {
        jackknife <- T
    }
    multisynth <- object
    ## summaries are always reported in time-relative-to-treatment form
    relative <- T
    n_leads <- multisynth$n_leads
    d <- ncol(multisynth$data$X)      # number of pre-treatment periods
    n <- nrow(multisynth$data$X)      # number of units
    ttot <- d + ncol(multisynth$data$y)
    trt <- multisynth$data$trt
    time_cohort <- multisynth$time_cohort
    ## treated groups: one entry per treatment time (cohorts) or per unit
    if(time_cohort) {
        grps <- unique(trt[is.finite(trt)])
        which_t <- lapply(grps, function(tj) (1:n)[trt == tj])
    } else {
        grps <- trt[is.finite(trt)]
        which_t <- (1:n)[is.finite(trt)]
    }
    # grps <- unique(multisynth$data$trt) %>% sort()
    J <- length(grps)
    # which_t <- (1:n)[is.finite(multisynth$data$trt)]
    times <- multisynth$data$time
    summ <- list()
    ## post treatment estimate for each group and overall
    ## NOTE(review): `relative` is passed positionally here, but
    ## predict.multisynth only inspects *named* arguments in `...`, so this
    ## positional value is silently ignored and the fit's stored setting is
    ## used instead — confirm whether `relative = relative` was intended
    att <- predict(multisynth, relative, att=T)
    if(jackknife) {
        se <- jackknife_se_multi(multisynth, relative)
    } else {
        # se <- compute_se(multisynth, relative)
        se <- matrix(NA, nrow(att), ncol(att))
    }
    if(relative) {
        ## prepend a relative-time index column; the trailing NA row holds
        ## the post-treatment average
        att <- data.frame(cbind(c(-(d-1):min(n_leads, ttot-min(grps)), NA),
                                att))
        if(time_cohort) {
            col_names <- c("Time", "Average",
                           as.character(times[grps + 1]))
        } else {
            col_names <- c("Time", "Average",
                           as.character(multisynth$data$units[which_t]))
        }
        names(att) <- col_names
        ## reshape to long (Time, Level, Estimate)
        att %>% gather(Level, Estimate, -Time) %>%
            rename("Time"=Time) %>%
            mutate(Time=Time-1) -> att
        se <- data.frame(cbind(c(-(d-1):min(n_leads, ttot-min(grps)), NA),
                               se))
        names(se) <- col_names
        se %>% gather(Level, Std.Error, -Time) %>%
            rename("Time"=Time) %>%
            mutate(Time=Time-1)-> se
    } else {
        att <- data.frame(cbind(times, att))
        names(att) <- c("Time", "Average", times[grps[1:J]])
        att %>% gather(Level, Estimate, -Time) -> att
        se <- data.frame(cbind(times, se))
        names(se) <- c("Time", "Average", times[grps[1:J]])
        se %>% gather(Level, Std.Error, -Time) -> se
    }
    ## combine estimates and standard errors into one long dataframe
    summ$att <- inner_join(att, se, by = c("Time", "Level"))
    summ$relative <- relative
    summ$grps <- grps
    summ$call <- multisynth$call
    summ$global_l2 <- multisynth$global_l2
    summ$scaled_global_l2 <- multisynth$scaled_global_l2
    summ$ind_l2 <- multisynth$ind_l2
    summ$scaled_ind_l2 <- multisynth$scaled_ind_l2
    summ$n_leads <- multisynth$n_leads
    summ$n_lags <- multisynth$n_lags
    class(summ) <- "summary.multisynth"
    return(summ)
}
#' Print function for summary function for multisynth
#'
#' Prints the call, the overall post-treatment ATT estimate with its standard
#' error, the global/individual L2 imbalance diagnostics, and the per-period
#' ATT table for the requested level.
#'
#' @param x summary object
#' @param ... Optional arguments; recognizes `level` — the treated
#'   unit/cohort to report (default "Average")
#' @export
print.summary.multisynth <- function(x, ...) {
    ## optional level argument from ...
    if ("level" %in% names(list(...))) {
        level <- list(...)$level
    } else {
        level <- "Average"
    }
    summ <- x
    ## straight from lm
    cat("\nCall:\n", paste(deparse(summ$call), sep="\n", collapse="\n"), "\n\n", sep="")
    ## earliest non-average level; NOTE(review): `min()` on a character
    ## vector uses lexicographic order — presumably levels sort correctly
    ## as strings here, verify for non-numeric unit labels
    first_lvl <- summ$att %>% filter(Level != "Average") %>% pull(Level) %>% min()
    ## get ATT estimates for treatment level, post treatment
    if(summ$relative) {
        summ$att %>%
            filter(Time >= 0, Level==level) %>%
            rename("Time Since Treatment"=Time) -> att_est
    } else if(level == "Average") {
        summ$att %>% filter(Time > first_lvl, Level=="Average") -> att_est
    } else {
        summ$att %>% filter(Time > level, Level==level) -> att_est
    }
    ## the NA-Time row holds the post-treatment average estimate/SE
    cat(paste("Average ATT Estimate (Std. Error): ",
              summ$att %>%
              filter(Level == level, is.na(Time)) %>%
              pull(Estimate) %>%
              round(3) %>% format(nsmall=3),
              " (",
              summ$att %>%
              filter(Level == level, is.na(Time)) %>%
              pull(Std.Error) %>%
              round(3) %>% format(nsmall=3),
              ")\n\n", sep=""))
    ## imbalance diagnostics for pooled and individual weights
    cat(paste("Global L2 Imbalance: ",
              format(round(summ$global_l2,3), nsmall=3), "\n",
              "Scaled Global L2 Imbalance: ",
              format(round(summ$scaled_global_l2,3), nsmall=3), "\n",
              "Percent improvement from uniform global weights: ",
              format(round(1-summ$scaled_global_l2,3)*100), "\n\n",
              "Individual L2 Imbalance: ",
              format(round(summ$ind_l2,3), nsmall=3), "\n",
              "Scaled Individual L2 Imbalance: ",
              format(round(summ$scaled_ind_l2,3), nsmall=3), "\n",
              "Percent improvement from uniform individual weights: ",
              format(round(1-summ$scaled_ind_l2,3)*100), "\t",
              "\n\n",
              sep=""))
    print(att_est, row.names=F)
}
#' Plot function for summary function for multisynth
#'
#' Draws the ATT estimate paths for the selected levels, labeling each path
#' at its last estimated time point, with the cross-unit average visually
#' emphasized and optional +/- 2 SE error bands.
#'
#' @importFrom ggplot2 aes
#'
#' @param x summary object
#' @param ... Optional arguments; recognizes `se` (logical, draw error
#'   bands, default TRUE) and `levels` (character vector of levels to plot,
#'   default NULL = all levels)
#' @export
plot.summary.multisynth <- function(x, ...) {
    ## optional arguments from ...
    if ("se" %in% names(list(...))) {
        se <- list(...)$se
    } else {
        se <- T
    }
    if ("levels" %in% names(list(...))) {
        levels <- list(...)$levels
    } else {
        levels <- NULL
    }
    summ <- x
    ## get the last time period for each level (used to place path labels)
    summ$att %>%
        filter(!is.na(Estimate),
               Time >= -summ$n_lags,
               Time <= summ$n_leads) %>%
        group_by(Level) %>%
        summarise(last_time=max(Time)) -> last_times
    if(is.null(levels)) levels <- unique(summ$att$Level)
    ## label only the last point of each path; `is_avg` drives the
    ## color/alpha emphasis of the "Average" path (the (a %in% b) * (x == y)
    ## product is a numeric AND over the logical vectors)
    summ$att %>% inner_join(last_times) %>%
        filter(Level %in% levels) %>%
        mutate(label=ifelse(Time == last_time, Level, NA),
               is_avg = ifelse(("Average" %in% levels) * (Level == "Average"),
                               "A", "B")) %>%
        ggplot2::ggplot(ggplot2::aes(x=Time, y=Estimate,
                                     group=Level,
                                     color=is_avg,
                                     alpha=is_avg)) +
        ggplot2::geom_line(size=1) +
        ggplot2::geom_point(size=1) +
        ggrepel::geom_label_repel(ggplot2::aes(label=label),
                                  nudge_x=1, na.rm=T) +
        ggplot2::geom_hline(yintercept=0, lty=2) -> p
    ## mark treatment time: a single vertical line at 0 in relative time,
    ## otherwise one line per treated level at its treatment time
    if(summ$relative) {
        p <- p + ggplot2::geom_vline(xintercept=0, lty=2) +
            ggplot2::xlab("Time Relative to Treatment")
    } else {
        p <- p + ggplot2::geom_vline(aes(xintercept=as.numeric(Level)),
                                     lty=2, alpha=0.5,
                                     summ$att %>% filter(Level != "Average"))
    }
    ## add ses
    if(se) {
        max_time <- max(summ$att$Time, na.rm = T)
        ## single post period -> error bars; otherwise shaded ribbon
        if(max_time == 0) {
            error_plt <- ggplot2::geom_errorbar
            clr <- "black"
            alph <- 1
        } else {
            error_plt <- ggplot2::geom_ribbon
            clr <- NA
            alph <- 0.2
        }
        if("Average" %in% levels) {
            p <- p + error_plt(
                         ggplot2::aes(ymin=Estimate-2*Std.Error,
                                      ymax=Estimate+2*Std.Error),
                         alpha = alph, color=clr,
                         data = summ$att %>%
                             filter(Level == "Average",
                                    Time >= 0))
        } else {
            ## NOTE(review): `data = . %>% filter(...)` relies on ggplot2
            ## accepting a function/magrittr-lambda as the layer data,
            ## applied to the plot's own data — verify with the pinned
            ## ggplot2 version
            p <- p + error_plt(
                         ggplot2::aes(ymin=Estimate-2*Std.Error,
                                      ymax=Estimate+2*Std.Error),
                         data = . %>% filter(Time >= 0),
                         alpha = alph, color = clr)
        }
    }
    ## de-emphasize non-average paths and hide the helper legends
    p <- p + ggplot2::scale_alpha_manual(values=c(1, 0.5)) +
        ggplot2::scale_color_manual(values=c("#333333", "#818181")) +
        ggplot2::guides(alpha=F, color=F) +
        ggplot2::theme_bw()
    return(p)
}
|
/R/multisynth_class.R
|
permissive
|
speedtriple955/augsynth
|
R
| false
| false
| 33,081
|
r
|
################################################################################
## Fitting, plotting, summarizing staggered synth
################################################################################
#' Fit staggered synth
#' @param form outcome ~ treatment
#' @param unit Name of unit column
#' @param time Name of time column
#' @param data Panel data as dataframe
#' @param n_leads How long past treatment effects should be estimated for, default is number of post treatment periods for last treated unit
#' @param n_lags Number of pre-treatment periods to balance, default is to balance all periods
#' @param nu Fraction of balance for individual balance
#' @param lambda Regularization hyperparameter, default = 0
#' @param fixedeff Whether to include a unit fixed effect, default F
#' @param n_factors Number of factors for interactive fixed effects, setting to NULL fits with CV, default is 0
#' @param scm Whether to fit scm weights
#' @param time_cohort Whether to average synthetic controls into time cohorts
#' @param eps_abs Absolute error tolerance for osqp
#' @param eps_rel Relative error tolerance for osqp
#' @param verbose Whether to print logs for osqp
#' @param ... Extra arguments
#'
#' @return multisynth object that contains:
#' \itemize{
#' \item{"weights"}{weights matrix where each column is a set of weights for a treated unit}
#' \item{"data"}{Panel data as matrices}
#' \item{"imbalance"}{Matrix of treatment minus synthetic control for pre-treatment time periods, each column corresponds to a treated unit}
#' \item{"global_l2"}{L2 imbalance for the pooled synthetic control}
#' \item{"scaled_global_l2"}{L2 imbalance for the pooled synthetic control, scaled by the imbalance for unitform weights}
#' \item{"ind_l2"}{Average L2 imbalance for the individual synthetic controls}
#' \item{"scaled_ind_l2"}{Average L2 imbalance for the individual synthetic controls, scaled by the imbalance for unitform weights}
#' \item{"n_leads", "n_lags"}{Number of post treatment outcomes (leads) and pre-treatment outcomes (lags) to include in the analysis}
#' \item{"nu"}{Fraction of balance for individual balance}
#' \item{"lambda"}{Regularization hyperparameter}
#' \item{"scm"}{Whether to fit scm weights}
#' \item{"grps"}{Time periods for treated units}
#' \item{"y0hat"}{Pilot estimates of control outcomes}
#' \item{"residuals"}{Difference between the observed outcomes and the pilot estimates}
#' \item{"n_factors"}{Number of factors for interactive fixed effects}
#' }
#' @export
multisynth <- function(form, unit, time, data,
                       n_leads=NULL, n_lags=NULL,
                       nu=NULL, lambda=0,
                       fixedeff = FALSE,
                       n_factors=0,
                       scm=T,
                       time_cohort = F,
                       eps_abs = 1e-4,
                       eps_rel = 1e-4,
                       verbose = FALSE, ...) {
    ## record the call for print/summary output
    call_name <- match.call()
    form <- Formula::Formula(form)
    ## capture the unit/time column names as quosures for tidy evaluation
    unit <- enquo(unit)
    time <- enquo(time)
    ## format data: extract outcome (LHS) and treatment (RHS) symbols from
    ## the formula and reshape the panel into wide matrices
    outcome <- terms(formula(form, rhs=1))[[2]]
    trt <- terms(formula(form, rhs=1))[[3]]
    wide <- format_data_stag(outcome, trt, unit, time, data)
    ## fixed-effect code passed downstream: 3 = unit + time, 2 = time only
    force <- if(fixedeff) 3 else 2
    # if n_leads is NULL set it to be the largest possible number of leads
    # for the last treated unit
    if(is.null(n_leads)) {
        n_leads <- ncol(wide$y)
    } else if(n_leads > max(apply(1-wide$mask, 1, sum)) + ncol(wide$y)) {
        n_leads <- max(apply(1-wide$mask, 1, sum)) + ncol(wide$y)
    }
    ## if n_lags is NULL set it to the largest number of pre-treatment periods
    if(is.null(n_lags)) {
        n_lags <- ncol(wide$X)
    } else if(n_lags > ncol(wide$X)) {
        n_lags <- ncol(wide$X)
    }
    ## keep a long-format copy (unit, time, trt, outcome) for the
    ## interactive-fixed-effects pilot model
    long_df <- data[c(quo_name(unit), quo_name(time), as.character(trt), as.character(outcome))]
    msynth <- multisynth_formatted(wide = wide, relative = T,
                                   n_leads = n_leads, n_lags = n_lags,
                                   nu = nu, lambda = lambda,
                                   force = force, n_factors = n_factors,
                                   scm = scm, time_cohort = time_cohort,
                                   time_w = F, lambda_t = 0,
                                   fit_resids = TRUE, eps_abs = eps_abs,
                                   eps_rel = eps_rel, verbose = verbose, long_df = long_df, ...)
    ## label weight rows with the sorted unit identifiers
    units <- data %>% arrange(!!unit) %>% distinct(!!unit) %>% pull(!!unit)
    rownames(msynth$weights) <- units
    if(scm) {
        ## Get imbalance for uniform weights on raw data
        ## TODO: Get rid of this stupid hack of just fitting the weights again with big lambda
        ## (a huge lambda drives the QP solution to uniform weights)
        unif <- multisynth_qp(X=wide$X, ## X=residuals[,1:ncol(wide$X)],
                              trt=wide$trt,
                              mask=wide$mask,
                              n_leads=n_leads,
                              n_lags=n_lags,
                              relative=T,
                              nu=0, lambda=1e10,
                              time_cohort = time_cohort,
                              eps_rel = eps_rel,
                              eps_abs = eps_abs,
                              verbose = verbose)
        ## scaled global balance
        ## msynth$scaled_global_l2 <- msynth$global_l2 / sqrt(sum(unif$imbalance[,1]^2))
        msynth$scaled_global_l2 <- msynth$global_l2 / unif$global_l2
        ## balance for individual estimates
        ## msynth$scaled_ind_l2 <- msynth$ind_l2 / sqrt(sum(unif$imbalance[,-1]^2))
        msynth$scaled_ind_l2 <- msynth$ind_l2 / unif$ind_l2
    }
    msynth$call <- call_name
    return(msynth)
}
#' Internal funciton to fit staggered synth with formatted data
#' @param wide List containing data elements
#' @param relative Whether to compute balance by relative time
#' @param n_leads How long past treatment effects should be estimated for
#' @param n_lags Number of pre-treatment periods to balance, default is to balance all periods
#' @param nu Fraction of balance for individual balance
#' @param lambda Regularization hyperparameter, default = 0
#' @param force c(0,1,2,3) what type of fixed effects to include
#' @param n_factors Number of factors for interactive fixed effects, default does CV
#' @param scm Whether to fit scm weights
#' @param time_cohort Whether to average synthetic controls into time cohorts
#' @param time_w Whether to fit time weights
#' @param lambda_t Regularization for time regression
#' @param fit_resids Whether to fit SCM on the residuals or not
#' @param eps_abs Absolute error tolerance for osqp
#' @param eps_rel Relative error tolerance for osqp
#' @param verbose Whether to print logs for osqp
#' @param long_df A long dataframe with 4 columns in the order unit, time, trt, outcome
#' @param ... Extra arguments
#' @noRd
#' @return multisynth object
multisynth_formatted <- function(wide, relative=T, n_leads, n_lags,
                                 nu, lambda,
                                 force,
                                 n_factors,
                                 scm, time_cohort,
                                 time_w, lambda_t,
                                 fit_resids,
                                 eps_abs, eps_rel,
                                 verbose, long_df, ...) {
    ## average together treatment groups
    ## grps <- unique(wide$trt) %>% sort()
    ## grps has one entry per treatment time (cohorts) or per treated unit
    if(time_cohort) {
        grps <- unique(wide$trt[is.finite(wide$trt)])
    } else {
        grps <- wide$trt[is.finite(wide$trt)]
    }
    J <- length(grps)
    ## fit outcome models: choose the pilot model for y0hat/residuals
    if(time_w) {
        # Autoregressive model
        out <- fit_time_reg(cbind(wide$X, wide$y), wide$trt,
                            n_leads, lambda_t, ...)
        y0hat <- out$y0hat
        residuals <- out$residuals
        params <- out$time_weights
    } else if(is.null(n_factors)) {
        ## n_factors = NULL -> choose the factor rank by cross validation
        out <- tryCatch({
            fit_gsynth_multi(long_df, cbind(wide$X, wide$y), wide$trt, force=force)
        }, error = function(error_condition) {
            stop("Cannot run CV because there are too few pre-treatment periods.")
        })
        y0hat <- out$y0hat
        params <- out$params
        n_factors <- ncol(params$factor)
        ## get residuals from outcome model
        residuals <- cbind(wide$X, wide$y) - y0hat
    } else if (n_factors != 0) {
        ## if number of factors is provided don't do CV
        out <- fit_gsynth_multi(long_df, cbind(wide$X, wide$y), wide$trt,
                                r=n_factors, CV=0, force=force)
        y0hat <- out$y0hat
        params <- out$params
        ## get residuals from outcome model
        residuals <- cbind(wide$X, wide$y) - y0hat
    } else if(force == 0 & n_factors == 0) {
        # if no fixed effects or factors, just take out
        # control averages at each time point
        # time fixed effects from pure controls
        pure_ctrl <- cbind(wide$X, wide$y)[!is.finite(wide$trt), , drop = F]
        y0hat <- matrix(colMeans(pure_ctrl),
                        nrow = nrow(wide$X), ncol = ncol(pure_ctrl),
                        byrow = T)
        residuals <- cbind(wide$X, wide$y) - y0hat
        params <- NULL
    } else {
        ## take out pre-treatment averages (fixed effects only)
        fullmask <- cbind(wide$mask, matrix(0, nrow=nrow(wide$mask),
                                            ncol=ncol(wide$y)))
        out <- fit_feff(cbind(wide$X, wide$y), wide$trt, fullmask, force)
        y0hat <- out$y0hat
        residuals <- out$residuals
        params <- NULL
    }
    ## balance the residuals: bal_mat holds the pre-treatment matrix the
    ## synthetic-control QP will balance
    if(fit_resids) {
        if(time_w) {
            # fit scm on residuals after taking out unit fixed effects
            fullmask <- cbind(wide$mask, matrix(0, nrow=nrow(wide$mask),
                                                ncol=ncol(wide$y)))
            out <- fit_feff(cbind(wide$X, wide$y), wide$trt, fullmask, force)
            bal_mat <- lapply(out$residuals, function(x) x[,1:ncol(wide$X)])
        } else if(typeof(residuals) == "list") {
            bal_mat <- lapply(residuals, function(x) x[,1:ncol(wide$X)])
        } else {
            bal_mat <- residuals[,1:ncol(wide$X)]
        }
    } else {
        # if not balancing residuals, then take out control averages
        # for each time
        ## NOTE(review): the centered matrix below is immediately overwritten
        ## by `bal_mat <- wide$X`, so the *raw* pre-treatment outcomes are
        ## balanced, not the centered ones — confirm whether the overwrite is
        ## intentional or a debugging leftover (left as-is to preserve
        ## behavior)
        ctrl_avg <- matrix(colMeans(wide$X[!is.finite(wide$trt), , drop = F]),
                           nrow = nrow(wide$X), ncol = ncol(wide$X), byrow = T)
        bal_mat <- wide$X - ctrl_avg
        bal_mat <- wide$X
    }
    if(scm) {
        ## if no nu value is provided, use default based on
        ## global and individual imbalance for no-pooling estimator
        if(is.null(nu)) {
            ## fit with nu = 0
            nu_fit <- multisynth_qp(X=bal_mat,
                                    trt=wide$trt,
                                    mask=wide$mask,
                                    n_leads=n_leads,
                                    n_lags=n_lags,
                                    relative=relative,
                                    nu=0, lambda=lambda,
                                    time_cohort = time_cohort,
                                    eps_rel = eps_rel,
                                    eps_abs = eps_abs,
                                    verbose = verbose)
            ## select nu by triangle inequality ratio
            ## (global imbalance norm over summed per-unit imbalance norms)
            glbl <- sqrt(sum(nu_fit$imbalance[,1]^2))
            ind <- sum(apply(nu_fit$imbalance[,-1, drop = F], 2, function(x) sqrt(sum(x^2))))
            nu <- glbl / ind
        }
        msynth <- multisynth_qp(X=bal_mat,
                                trt=wide$trt,
                                mask=wide$mask,
                                n_leads=n_leads,
                                n_lags=n_lags,
                                relative=relative,
                                nu=nu, lambda=lambda,
                                time_cohort = time_cohort,
                                eps_rel = eps_rel,
                                eps_abs = eps_abs,
                                verbose = verbose)
    } else {
        ## scm disabled: all-zero weights signal "no reweighting" downstream
        msynth <- list(weights = matrix(0, nrow = nrow(wide$X), ncol = J),
                       imbalance=NA,
                       global_l2=NA,
                       ind_l2=NA)
    }
    ## put in data and hyperparams
    msynth$data <- wide
    msynth$relative <- relative
    msynth$n_leads <- n_leads
    msynth$n_lags <- n_lags
    msynth$nu <- nu
    msynth$lambda <- lambda
    msynth$scm <- scm
    msynth$time_cohort <- time_cohort
    msynth$grps <- grps
    msynth$y0hat <- y0hat
    msynth$residuals <- residuals
    msynth$n_factors <- n_factors
    msynth$force <- force
    ## outcome model parameters
    msynth$params <- params
    # more arguments
    msynth$scm <- scm
    msynth$time_w <- time_w
    msynth$lambda_t <- lambda_t
    msynth$fit_resids <- fit_resids
    msynth$extra_pars <- c(list(eps_abs = eps_abs,
                                eps_rel = eps_rel,
                                verbose = verbose),
                           list(...))
    msynth$long_df <- long_df
    ##format output
    class(msynth) <- "multisynth"
    return(msynth)
}
#' Get prediction of average outcome under control or ATT
#'
#' Imputes the counterfactual (control) outcome path for each treated unit or
#' treatment-time cohort by combining the pilot outcome-model estimates
#' (`y0hat`) with the synthetic-control reweighting of the residuals, then
#' optionally differences against observed treated means to form ATT estimates.
#'
#' @param object Fit multisynth object
#' @param ... Optional arguments; recognized named arguments are `relative`
#'   (logical; index time relative to treatment, defaults to the value stored
#'   in the fit) and `att` (logical; return ATT estimates rather than imputed
#'   control means, default FALSE)
#'
#' @return Matrix of predicted post-treatment control outcomes for each treated unit
#'   (or ATT estimates when `att = TRUE`). The first column is the
#'   weighted average across treated units/cohorts; when `relative = TRUE`
#'   the final row holds the post-treatment average for each column.
#' @export
predict.multisynth <- function(object, ...) {
    ## extract optional arguments from ... (keeps the S3 generic signature)
    if ("relative" %in% names(list(...))) {
        relative <- list(...)$relative
    } else {
        relative <- NULL
    }
    if ("att" %in% names(list(...))) {
        att <- list(...)$att
    } else {
        att <- F
    }
    multisynth <- object
    time_cohort <- multisynth$time_cohort
    ## fall back to the relative-time setting stored at fit time
    if(is.null(relative)) {
        relative <- multisynth$relative
    }
    n_leads <- multisynth$n_leads
    d <- ncol(multisynth$data$X)      # number of pre-treatment periods
    n <- nrow(multisynth$data$X)      # number of units
    fulldat <- cbind(multisynth$data$X, multisynth$data$y)
    ttot <- ncol(fulldat)             # total number of time periods
    grps <- multisynth$grps           # treatment times (one per cohort/unit)
    J <- length(grps)
    ## which_t: row indices of treated units; a list per cohort when pooling
    ## by treatment time, otherwise an integer vector (one unit per entry)
    if(time_cohort) {
        which_t <- lapply(grps,
                          function(tj) (1:n)[multisynth$data$trt == tj])
        mask <- unique(multisynth$data$mask)
    } else {
        which_t <- (1:n)[is.finite(multisynth$data$trt)]
        mask <- multisynth$data$mask
    }
    ## cohort sizes; when which_t is an atomic vector, which_t[[j]] is a
    ## single index so n1[j] == 1 for every treated unit
    n1 <- sapply(1:J, function(j) length(which_t[[j]]))
    ## extend the pre-treatment mask with zeros over the post periods
    fullmask <- cbind(mask, matrix(0, nrow = J, ncol = (ttot - d)))
    ## estimate the post-treatment values to get att estimates
    mu1hat <- vapply(1:J,
                     function(j) colMeans(fulldat[which_t[[j]],
                                                  , drop=FALSE]),
                     numeric(ttot))
    ## get average outcome model estimates and reweight residuals;
    ## residuals/y0hat are lists when the pilot model is fit per-cohort
    if(typeof(multisynth$y0hat) == "list") {
        mu0hat <- vapply(1:J,
                         function(j) {
                             y0hat <- colMeans(multisynth$y0hat[[j]][which_t[[j]],
                                                                     , drop=FALSE])
                             ## add the weighted control residuals unless scm
                             ## was disabled (all-zero weights)
                             if(!all(multisynth$weights == 0)) {
                                 y0hat + t(multisynth$residuals[[j]]) %*%
                                     multisynth$weights[,j] /
                                     sum(multisynth$weights[,j])
                             } else {
                                 y0hat
                             }
                         }
                         , numeric(ttot)
                         )
    } else {
        mu0hat <- vapply(1:J,
                         function(j) {
                             y0hat <- colMeans(multisynth$y0hat[which_t[[j]],
                                                                , drop=FALSE])
                             if(!all(multisynth$weights == 0)) {
                                 y0hat + t(multisynth$residuals) %*%
                                     multisynth$weights[,j] /
                                     sum(multisynth$weights[,j])
                             } else {
                                 y0hat
                             }
                         }
                         , numeric(ttot)
                         )
    }
    ## ATT path: observed treated mean minus imputed control mean
    tauhat <- mu1hat - mu0hat
    ## re-index time if relative to treatment
    if(relative) {
        total_len <- min(d + n_leads, ttot + d - min(grps)) ## total length of predictions
        ## shift each column so treatment always occurs at position d,
        ## padding with NA; the appended final row is the post-period average
        mu0hat <- vapply(1:J,
                         function(j) {
                             vec <- c(rep(NA, d-grps[j]),
                                      mu0hat[1:grps[j],j],
                                      mu0hat[(grps[j]+1):(min(grps[j] + n_leads, ttot)), j])
                             ## last row is post-treatment average
                             c(vec,
                               rep(NA, total_len - length(vec)),
                               mean(mu0hat[(grps[j]+1):(min(grps[j] + n_leads, ttot)), j]))
                         },
                         numeric(total_len +1
                                 ))
        tauhat <- vapply(1:J,
                         function(j) {
                             vec <- c(rep(NA, d-grps[j]),
                                      tauhat[1:grps[j],j],
                                      tauhat[(grps[j]+1):(min(grps[j] + n_leads, ttot)), j])
                             ## last row is post-treatment average
                             c(vec,
                               rep(NA, total_len - length(vec)),
                               mean(tauhat[(grps[j]+1):(min(grps[j] + n_leads, ttot)), j]))
                         },
                         numeric(total_len +1
                                 ))
        ## get the overall average estimate (size-weighted over non-NA cells)
        avg <- apply(mu0hat, 1, function(z) sum(n1 * z, na.rm=T) / sum(n1 * !is.na(z)))
        mu0hat <- cbind(avg, mu0hat)
        avg <- apply(tauhat, 1, function(z) sum(n1 * z, na.rm=T) / sum(n1 * !is.na(z)))
        tauhat <- cbind(avg, tauhat)
    } else {
        ## remove all estimates for t > T_j + n_leads
        vapply(1:J,
               function(j) c(mu0hat[1:min(grps[j]+n_leads, ttot),j],
                             rep(NA, max(0, ttot-(grps[j] + n_leads)))),
               numeric(ttot)) -> mu0hat
        vapply(1:J,
               function(j) c(tauhat[1:min(grps[j]+n_leads, ttot),j],
                             rep(NA, max(0, ttot-(grps[j] + n_leads)))),
               numeric(ttot)) -> tauhat
        ## only average currently treated units: blend the pre-treatment
        ## (mask == 1) and post-treatment (mask == 0) weighted averages
        avg1 <- rowSums(t(fullmask) * mu0hat * n1) /
            rowSums(t(fullmask) * n1)
        avg2 <- rowSums(t(1-fullmask) * mu0hat * n1) /
            rowSums(t(1-fullmask) * n1)
        avg <- replace_na(avg1, 0) * apply(fullmask, 2, min) +
            replace_na(avg2,0) * apply(1-fullmask, 2, max)
        cbind(avg, mu0hat) -> mu0hat
        ## only average currently treated units
        avg1 <- rowSums(t(fullmask) * tauhat * n1) /
            rowSums(t(fullmask) * n1)
        avg2 <- rowSums(t(1-fullmask) * tauhat * n1) /
            rowSums(t(1-fullmask) * n1)
        avg <- replace_na(avg1, 0) * apply(fullmask, 2, min) +
            replace_na(avg2,0) * apply(1 - fullmask, 2, max)
        cbind(avg, tauhat) -> tauhat
    }
    if(att) {
        return(tauhat)
    } else {
        return(mu0hat)
    }
}
#' Print function for multisynth
#'
#' Echoes the original call and the overall post-treatment average ATT.
#'
#' @param x multisynth object
#' @param ... Optional arguments
#' @export
print.multisynth <- function(x, ...) {
    obj <- x
    ## echo the call, formatted the same way lm does
    call_str <- paste(deparse(obj$call), sep = "\n", collapse = "\n")
    cat("\nCall:\n", call_str, "\n\n", sep = "")
    ## first column of the ATT predictions is the cross-unit average;
    ## its final entry is the overall post-treatment average ATT
    avg_column <- predict(obj, att = TRUE)[, 1]
    overall_att <- avg_column[length(avg_column)]
    cat(paste("Average ATT Estimate: ",
              format(round(mean(overall_att), 3), nsmall = 3),
              "\n\n", sep = ""))
}
#' Plot function for multisynth
#'
#' Thin wrapper that summarizes the fit and delegates to the summary plot
#' method.
#'
#' @importFrom graphics plot
#' @param x Augsynth object to be plotted
#' @param ... Optional arguments; recognizes `se` (default TRUE), `levels`
#'   (default NULL = all levels), and `jackknife` (default TRUE)
#' @export
plot.multisynth <- function(x, ...) {
    dots <- list(...)
    ## optional arguments with their defaults
    se <- if ("se" %in% names(dots)) dots$se else TRUE
    levels <- if ("levels" %in% names(dots)) dots$levels else NULL
    jackknife <- if ("jackknife" %in% names(dots)) dots$jackknife else TRUE
    ## build the summary (with or without jackknife SEs) and plot it
    plot(summary(x, jackknife = jackknife), levels, se)
}
#' Plug-in standard errors for a multisynth fit (internal)
#'
#' Estimates standard errors for the imputed counterfactual means from the
#' weighted control residuals: a treated-unit variance term plus a
#' control-reweighting variance term.
#'
#' @param multisynth Fit multisynth object
#' @param relative Whether to index time relative to treatment; defaults to
#'   the setting stored in the fit
#' @return Matrix of standard errors; first column is the overall (weighted
#'   average) standard error, remaining columns are per-cohort/unit
compute_se <- function(multisynth, relative=NULL) {
    ## get info from the multisynth object
    if(is.null(relative)) {
        relative <- multisynth$relative
    }
    n_leads <- multisynth$n_leads
    d <- ncol(multisynth$data$X)      # number of pre-treatment periods
    fulldat <- cbind(multisynth$data$X, multisynth$data$y)
    ttot <- ncol(fulldat)             # total number of time periods
    J <- length(multisynth$grps)
    ## treated-cohort sizes, one count per distinct treatment time
    n1 <- multisynth$data$trt[is.finite(multisynth$data$trt)] %>%
        table() %>% as.numeric()
    grps <- multisynth$grps
    fullmask <- cbind(multisynth$data$mask, matrix(0, nrow=J, ncol=(ttot-d)))
    ## use weighted control residuals to estimate variance for treated units;
    ## residuals are a list when the pilot model was fit per cohort
    if(typeof(multisynth$residuals) == "list") {
        trt_var <- vapply(1:J,
                          function(j) {
                              colSums(multisynth$residuals[[j]]^2 * multisynth$weights[,j]) / n1[j]
                          },
                          numeric(ttot))
        ## standard error estimate of imputed counterfactual mean
        ## from control residuals and weights
        ctrl_var <- vapply(1:J,
                           function(j) colSums(multisynth$residuals[[j]]^2 * multisynth$weights[,j]^2),
                           numeric(ttot))
    } else {
        trt_var <- vapply(1:J,
                          function(j) {
                              colSums(multisynth$residuals^2 * multisynth$weights[,j]) / n1[j]
                          },
                          numeric(ttot))
        ## standard error estimate of imputed counterfactual mean
        ## from control residuals and weights
        ctrl_var <- vapply(1:J,
                           function(j) colSums(multisynth$residuals^2 * multisynth$weights[,j]^2),
                           numeric(ttot))
    }
    ## standard error
    se <- sqrt(trt_var + ctrl_var)
    ## re-index time if relative to treatment
    if(relative) {
        total_len <- min(d + n_leads, ttot + d - min(grps)) ## total length of predictions
        ## shift each column so treatment occurs at position d, NA-padded
        se <- vapply(1:J,
                     function(j) {
                         vec <- c(rep(NA, d-grps[j]),
                                  se[1:grps[j],j],
                                  se[(grps[j]+1):(min(grps[j] + n_leads, ttot)), j])
                         c(vec, rep(NA, total_len - length(vec)))
                     },
                     numeric(total_len))
        ## get the overall standard error estimate
        avg_se <- apply(se, 1, function(z) sqrt(sum(n1^2 * z^2, na.rm=T)) / sum(n1 * !is.na(z)))
        se <- cbind(avg_se, se)
    } else {
        ## remove all estimates for t > T_j + n_leads
        ## NOTE(review): the truncated matrix is assigned to `tauhat` and is
        ## never used afterwards; `se` below is still the untruncated matrix.
        ## This looks like it was meant to be `-> se` — confirm intent before
        ## changing (left as-is to preserve behavior).
        vapply(1:J,
               function(j) c(se[1:min(grps[j]+n_leads, ttot),j],
                             rep(NA, max(0, ttot-(grps[j] + n_leads)))),
               numeric(ttot)) -> tauhat
        ## only average currently treated units
        avg1 <- sqrt(rowSums(t(fullmask) * se^2 * n1^2)) /
            rowSums(t(fullmask) * n1)
        avg2 <- sqrt(rowSums(t(1-fullmask) * se^2 * n1^2)) /
            rowSums(t(1-fullmask) * n1)
        avg_se <- replace_na(avg1, 0) * apply(fullmask, 2, min) +
            replace_na(avg2,0) * apply(1-fullmask, 2, max)
        se <- cbind(avg_se, se)
    }
    return(se)
}
#' Summary function for multisynth
#' @param object multisynth object
#' @param ... Optional arguments; `jackknife` (logical, default `TRUE`)
#'   controls whether jackknife standard errors are computed
#'
#' @return summary.multisynth object that contains:
#' \itemize{
#'          \item{"att"}{Dataframe with ATT estimates, standard errors for each treated unit}
#'          \item{"global_l2"}{L2 imbalance for the pooled synthetic control}
#'          \item{"scaled_global_l2"}{L2 imbalance for the pooled synthetic control, scaled by the imbalance for uniform weights}
#'          \item{"ind_l2"}{Average L2 imbalance for the individual synthetic controls}
#'          \item{"scaled_ind_l2"}{Average L2 imbalance for the individual synthetic controls, scaled by the imbalance for uniform weights}
#'          \item{"n_leads", "n_lags"}{Number of post treatment outcomes (leads) and pre-treatment outcomes (lags) to include in the analysis}
#' }
#' @export
summary.multisynth <- function(object, ...) {
    ## use TRUE/FALSE literals rather than T/F (T and F are reassignable)
    if ("jackknife" %in% names(list(...))) {
        jackknife <- list(...)$jackknife
    } else {
        jackknife <- TRUE
    }
    multisynth <- object
    relative <- TRUE
    n_leads <- multisynth$n_leads
    ## d = number of pre-treatment periods, n = number of units,
    ## ttot = total number of time periods
    d <- ncol(multisynth$data$X)
    n <- nrow(multisynth$data$X)
    ttot <- d + ncol(multisynth$data$y)
    trt <- multisynth$data$trt
    time_cohort <- multisynth$time_cohort
    ## group treated units either by treatment-time cohort or individually
    if(time_cohort) {
        grps <- unique(trt[is.finite(trt)])
        which_t <- lapply(grps, function(tj) (1:n)[trt == tj])
    } else {
        grps <- trt[is.finite(trt)]
        which_t <- (1:n)[is.finite(trt)]
    }
    J <- length(grps)
    times <- multisynth$data$time
    summ <- list()
    ## post treatment estimate for each group and overall
    att <- predict(multisynth, relative, att = TRUE)
    if(jackknife) {
        se <- jackknife_se_multi(multisynth, relative)
    } else {
        ## standard errors not requested: fill with NA placeholders
        se <- matrix(NA, nrow(att), ncol(att))
    }
    if(relative) {
        ## index time relative to treatment; the final NA row carries the
        ## average post-treatment effect
        att <- data.frame(cbind(c(-(d-1):min(n_leads, ttot-min(grps)), NA),
                                att))
        if(time_cohort) {
            col_names <- c("Time", "Average",
                           as.character(times[grps + 1]))
        } else {
            col_names <- c("Time", "Average",
                           as.character(multisynth$data$units[which_t]))
        }
        names(att) <- col_names
        att %>% gather(Level, Estimate, -Time) %>%
            rename("Time"=Time) %>%
            mutate(Time=Time-1) -> att
        se <- data.frame(cbind(c(-(d-1):min(n_leads, ttot-min(grps)), NA),
                               se))
        names(se) <- col_names
        se %>% gather(Level, Std.Error, -Time) %>%
            rename("Time"=Time) %>%
            mutate(Time=Time-1) -> se
    } else {
        ## keep calendar time
        att <- data.frame(cbind(times, att))
        names(att) <- c("Time", "Average", times[grps[1:J]])
        att %>% gather(Level, Estimate, -Time) -> att
        se <- data.frame(cbind(times, se))
        names(se) <- c("Time", "Average", times[grps[1:J]])
        se %>% gather(Level, Std.Error, -Time) -> se
    }
    ## merge point estimates with their standard errors
    summ$att <- inner_join(att, se, by = c("Time", "Level"))
    summ$relative <- relative
    summ$grps <- grps
    summ$call <- multisynth$call
    summ$global_l2 <- multisynth$global_l2
    summ$scaled_global_l2 <- multisynth$scaled_global_l2
    summ$ind_l2 <- multisynth$ind_l2
    summ$scaled_ind_l2 <- multisynth$scaled_ind_l2
    summ$n_leads <- multisynth$n_leads
    summ$n_lags <- multisynth$n_lags
    class(summ) <- "summary.multisynth"
    return(summ)
}
#' Print function for summary function for multisynth
#' @param x summary.multisynth object (as returned by summary.multisynth)
#' @param ... Optional arguments; `level` selects which treated unit/cohort
#'   to print estimates for (defaults to "Average", the pooled estimate)
#' @export
print.summary.multisynth <- function(x, ...) {
    ## which treatment level to report (default: the overall average)
    if ("level" %in% names(list(...))) {
        level <- list(...)$level
    } else {
        level <- "Average"
    }
    summ <- x
    ## straight from lm: echo the original call
    cat("\nCall:\n", paste(deparse(summ$call), sep="\n", collapse="\n"), "\n\n", sep="")
    ## earliest non-average level; used as the cutoff when printing in
    ## calendar time
    first_lvl <- summ$att %>% filter(Level != "Average") %>% pull(Level) %>% min()
    ## get ATT estimates for treatment level, post treatment
    if(summ$relative) {
        summ$att %>%
            filter(Time >= 0, Level==level) %>%
            rename("Time Since Treatment"=Time) -> att_est
    } else if(level == "Average") {
        summ$att %>% filter(Time > first_lvl, Level=="Average") -> att_est
    } else {
        summ$att %>% filter(Time > level, Level==level) -> att_est
    }
    ## the overall ATT is stored in the row with NA Time
    cat(paste("Average ATT Estimate (Std. Error): ",
              summ$att %>%
                filter(Level == level, is.na(Time)) %>%
                pull(Estimate) %>%
                round(3) %>% format(nsmall=3),
              " (",
              summ$att %>%
                filter(Level == level, is.na(Time)) %>%
                pull(Std.Error) %>%
                round(3) %>% format(nsmall=3),
              ")\n\n", sep=""))
    ## report global (pooled) and individual imbalance diagnostics
    cat(paste("Global L2 Imbalance: ",
              format(round(summ$global_l2,3), nsmall=3), "\n",
              "Scaled Global L2 Imbalance: ",
              format(round(summ$scaled_global_l2,3), nsmall=3), "\n",
              "Percent improvement from uniform global weights: ",
              format(round(1-summ$scaled_global_l2,3)*100), "\n\n",
              "Individual L2 Imbalance: ",
              format(round(summ$ind_l2,3), nsmall=3), "\n",
              "Scaled Individual L2 Imbalance: ",
              format(round(summ$scaled_ind_l2,3), nsmall=3), "\n",
              "Percent improvement from uniform individual weights: ",
              format(round(1-summ$scaled_ind_l2,3)*100), "\t",
              "\n\n",
              sep=""))
    print(att_est, row.names=F)
}
#' Plot function for summary function for multisynth
#' @importFrom ggplot2 aes
#'
#' @param x summary.multisynth object
#' @param ... Optional arguments: `se` (logical, default TRUE) to draw
#'   +/- 2 standard error bands; `levels` (character vector) to restrict
#'   which treated units/cohorts are plotted
#' @export
plot.summary.multisynth <- function(x, ...) {
    ## use TRUE/FALSE rather than the reassignable T/F
    if ("se" %in% names(list(...))) {
        se <- list(...)$se
    } else {
        se <- TRUE
    }
    if ("levels" %in% names(list(...))) {
        levels <- list(...)$levels
    } else {
        levels <- NULL
    }
    summ <- x
    ## get the last time period for each level (used to position the labels)
    summ$att %>%
        filter(!is.na(Estimate),
               Time >= -summ$n_lags,
               Time <= summ$n_leads) %>%
        group_by(Level) %>%
        summarise(last_time=max(Time)) -> last_times
    if(is.null(levels)) levels <- unique(summ$att$Level)
    ## one line per level; the "Average" series is visually emphasized
    summ$att %>% inner_join(last_times) %>%
        filter(Level %in% levels) %>%
        mutate(label=ifelse(Time == last_time, Level, NA),
               is_avg = ifelse(("Average" %in% levels) * (Level == "Average"),
                               "A", "B")) %>%
        ggplot2::ggplot(ggplot2::aes(x=Time, y=Estimate,
                                     group=Level,
                                     color=is_avg,
                                     alpha=is_avg)) +
        ggplot2::geom_line(size=1) +
        ggplot2::geom_point(size=1) +
        ggrepel::geom_label_repel(ggplot2::aes(label=label),
                                  nudge_x=1, na.rm=TRUE) +
        ggplot2::geom_hline(yintercept=0, lty=2) -> p
    if(summ$relative) {
        p <- p + ggplot2::geom_vline(xintercept=0, lty=2) +
            ggplot2::xlab("Time Relative to Treatment")
    } else {
        ## mark each unit's own treatment time in calendar-time plots
        p <- p + ggplot2::geom_vline(aes(xintercept=as.numeric(Level)),
                                     lty=2, alpha=0.5,
                                     summ$att %>% filter(Level != "Average"))
    }
    ## add standard error bands (error bars when only one post period)
    if(se) {
        max_time <- max(summ$att$Time, na.rm = TRUE)
        if(max_time == 0) {
            error_plt <- ggplot2::geom_errorbar
            clr <- "black"
            alph <- 1
        } else {
            error_plt <- ggplot2::geom_ribbon
            clr <- NA
            alph <- 0.2
        }
        if("Average" %in% levels) {
            p <- p + error_plt(
                         ggplot2::aes(ymin=Estimate-2*Std.Error,
                                      ymax=Estimate+2*Std.Error),
                         alpha = alph, color=clr,
                         data = summ$att %>%
                             filter(Level == "Average",
                                    Time >= 0))
        } else {
            p <- p + error_plt(
                         ggplot2::aes(ymin=Estimate-2*Std.Error,
                                      ymax=Estimate+2*Std.Error),
                         data = . %>% filter(Time >= 0),
                         alpha = alph, color = clr)
        }
    }
    p <- p + ggplot2::scale_alpha_manual(values=c(1, 0.5)) +
        ggplot2::scale_color_manual(values=c("#333333", "#818181")) +
        ggplot2::guides(alpha=FALSE, color=FALSE) +
        ggplot2::theme_bw()
    return(p)
}
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 61063
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 61062
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 61062
c
c Input Parameter (command line, file):
c input filename QBFLIB/Sauer-Reimer/ITC99/b15_PR_4_90.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 20573
c no.of clauses 61063
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 61062
c
c QBFLIB/Sauer-Reimer/ITC99/b15_PR_4_90.qdimacs 20573 61063 E1 [1] 0 331 20036 61062 RED
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Sauer-Reimer/ITC99/b15_PR_4_90/b15_PR_4_90.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false
| false
| 719
|
r
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 61063
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 61062
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 61062
c
c Input Parameter (command line, file):
c input filename QBFLIB/Sauer-Reimer/ITC99/b15_PR_4_90.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 20573
c no.of clauses 61063
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 61062
c
c QBFLIB/Sauer-Reimer/ITC99/b15_PR_4_90.qdimacs 20573 61063 E1 [1] 0 331 20036 61062 RED
|
## demonstration of all summary functions
## (requires the spatstat package and its built-in point pattern datasets;
##  plotting margins are changed repeatedly via par() and restored at the end)
opa <- par(mfrow=c(1,1),
           mar=c(0,0,1,0)+0.2)
## Ripley's K-function
plot(swedishpines)
par(mar=c(4,4,2,1)+0.2)
plot(Kest(swedishpines))
## Besag's transformation
plot(Lest(swedishpines))
## pair correlation function
plot(pcf(swedishpines))
par(mfrow=c(2,3),
    mar=c(0,0,1,0)+0.2)
## Showing the utility of the K-function
plot(cells)
plot(nztrees)
plot(redwood)
par(mar=c(4,4,2,1)+0.2)
plot(Kest(cells))
plot(Kest(nztrees))
plot(Kest(redwood))
## Showing the utility of the pair correlation function
par(mar=c(0,0,1,0)+0.2)
plot(cells)
plot(nztrees)
plot(redwood)
par(mar=c(4,4,2,1)+0.2)
plot(pcf(cells))
plot(pcf(nztrees))
plot(pcf(redwood))
##
par(mfrow=c(1,1))
## Analogues for inhomogeneous patterns
## Reweighted K-function
plot(japanesepines)
fit <- ppm(japanesepines, ~polynom(x,y,2))
plot(predict(fit))
plot(Kinhom(japanesepines, fit))
plot(pcfinhom(japanesepines, fit))
plot(Linhom(japanesepines))
## Rescaled K-function
plot(unmark(bronzefilter))
plot(Kscaled(bronzefilter))
fit <- ppm(unmark(bronzefilter), ~x)
plot(predict(fit))
plot(unmark(bronzefilter), add=TRUE)
plot(Kscaled(bronzefilter, fit))
plot(Lscaled(bronzefilter, fit))
## Local indicators of spatial association
plot(localL(swedishpines))
plot(localK(swedishpines))
## anisotropic
plot(Ksector(redwood, 0, 90))
plot(Rf <- pairorient(redwood, 0.05, 0.15))
rose(Rf, main="Rose diagram of pair orientation distribution")
plot(deriv(Rf, spar=0.6, Dperiodic=TRUE))
rose(nnorient(redwood))
##
par(mfrow=c(2,3),
    mar=rep(0.2, 4))
## Empty space function F
plot(cells)
plot(nztrees)
plot(redwood)
par(mar=c(4,4,2,1)+0.2)
plot(Fest(cells))
plot(Fest(nztrees))
plot(Fest(redwood))
## Nearest neighbour distance function G
par(mar=rep(0.2, 4))
plot(cells)
plot(nztrees)
plot(redwood)
par(mar=c(4,4,2,1)+0.2)
plot(Gest(cells))
plot(Gest(nztrees))
plot(Gest(redwood))
## J-function
par(mar=rep(0.2, 4))
plot(cells)
plot(nztrees)
plot(redwood)
par(mar=c(4,4,2,1)+0.2)
plot(Jest(cells))
plot(Jest(nztrees))
plot(Jest(redwood))
par(mfrow=c(1,1),
    mar=c(4,4,2,1)+0.2)
## versions for inhomogeneous patterns
plot(Finhom(japanesepines))
plot(Ginhom(japanesepines))
plot(Jinhom(japanesepines))
## Display F,G,J,K
plot(allstats(swedishpines))
## Multitype patterns
plot(amacrine)
plot(Kcross(amacrine))
plot(Kdot(amacrine))
## logical masks selecting the two mark types of the amacrine pattern
I <- (marks(amacrine) == "on")
J <- (marks(amacrine) == "off")
plot(Kmulti(amacrine, I, J))
plot(alltypes(amacrine, "K"))
plot(Lcross(amacrine))
plot(Ldot(amacrine))
plot(pcfcross(amacrine))
plot(pcfdot(amacrine))
plot(pcfmulti(amacrine, I, J))
plot(Gcross(amacrine))
plot(Gdot(amacrine))
plot(Gmulti(amacrine, I, J))
plot(alltypes(amacrine, "G"))
plot(Jcross(amacrine))
plot(Jdot(amacrine))
plot(Jmulti(amacrine,I,J))
plot(alltypes(amacrine, "J"))
plot(alltypes(amacrine, "F"))
plot(Iest(amacrine))
plot(markconnect(amacrine))
## Multitype, inhomogeneous
plot(Kcross.inhom(amacrine))
plot(Kdot.inhom(amacrine))
plot(Kmulti.inhom(amacrine, I, J))
plot(Lcross.inhom(amacrine))
plot(Ldot.inhom(amacrine))
plot(pcfcross.inhom(amacrine))
plot(pcfdot.inhom(amacrine))
plot(pcfmulti.inhom(amacrine, I, J))
## Numerical marks
plot(markcorr(longleaf))
plot(markvario(longleaf))
plot(Emark(longleaf))
plot(Vmark(longleaf))
## Linear networks
plot(chicago)
plot(linearK(chicago))
plot(linearKcross(chicago))
plot(linearKdot(chicago))
plot(linearpcf(chicago))
plot(linearpcfcross(chicago))
plot(linearpcfdot(chicago))
## intensity estimates needed by the inhomogeneous linear-network statistics
lam <- rep(intensity(unmark(chicago)), npoints(chicago))
A <- split(chicago)$assault
B <- split(chicago)$burglary
lamA <- rep(intensity(A), npoints(A))
lamB <- rep(intensity(B), npoints(B))
plot(linearKinhom(chicago, lam))
plot(linearKcross.inhom(chicago, "assault", "burglary", lamA, lamB))
plot(linearKdot.inhom(chicago, "assault", lamA, lam))
plot(linearpcfinhom(chicago, lam))
plot(linearpcfcross.inhom(chicago, "assault", "burglary", lamA, lamB))
plot(linearpcfdot.inhom(chicago, "assault", lamA, lam))
plot(linearmarkconnect(chicago))
plot(linearmarkequal(chicago))
## tidy up: remove temporaries and restore the original par() settings
rm(I,J,fit)
par(opa)
|
/demo/sumfun.R
|
no_license
|
rubak/spatstat
|
R
| false
| false
| 4,074
|
r
|
## demonstration of all summary functions
opa <- par(mfrow=c(1,1),
mar=c(0,0,1,0)+0.2)
## Ripley's K-function
plot(swedishpines)
par(mar=c(4,4,2,1)+0.2)
plot(Kest(swedishpines))
## Besag's transformation
plot(Lest(swedishpines))
## pair correlation function
plot(pcf(swedishpines))
par(mfrow=c(2,3),
mar=c(0,0,1,0)+0.2)
## Showing the utility of the K-function
plot(cells)
plot(nztrees)
plot(redwood)
par(mar=c(4,4,2,1)+0.2)
plot(Kest(cells))
plot(Kest(nztrees))
plot(Kest(redwood))
## Showing the utility of the pair correlation function
par(mar=c(0,0,1,0)+0.2)
plot(cells)
plot(nztrees)
plot(redwood)
par(mar=c(4,4,2,1)+0.2)
plot(pcf(cells))
plot(pcf(nztrees))
plot(pcf(redwood))
##
par(mfrow=c(1,1))
## Analogues for inhomogeneous patterns
## Reweighted K-function
plot(japanesepines)
fit <- ppm(japanesepines, ~polynom(x,y,2))
plot(predict(fit))
plot(Kinhom(japanesepines, fit))
plot(pcfinhom(japanesepines, fit))
plot(Linhom(japanesepines))
## Rescaled K-function
plot(unmark(bronzefilter))
plot(Kscaled(bronzefilter))
fit <- ppm(unmark(bronzefilter), ~x)
plot(predict(fit))
plot(unmark(bronzefilter), add=TRUE)
plot(Kscaled(bronzefilter, fit))
plot(Lscaled(bronzefilter, fit))
## Local indicators of spatial association
plot(localL(swedishpines))
plot(localK(swedishpines))
## anisotropic
plot(Ksector(redwood, 0, 90))
plot(Rf <- pairorient(redwood, 0.05, 0.15))
rose(Rf, main="Rose diagram of pair orientation distribution")
plot(deriv(Rf, spar=0.6, Dperiodic=TRUE))
rose(nnorient(redwood))
##
par(mfrow=c(2,3),
mar=rep(0.2, 4))
## Empty space function F
plot(cells)
plot(nztrees)
plot(redwood)
par(mar=c(4,4,2,1)+0.2)
plot(Fest(cells))
plot(Fest(nztrees))
plot(Fest(redwood))
## Nearest neighbour distance function G
par(mar=rep(0.2, 4))
plot(cells)
plot(nztrees)
plot(redwood)
par(mar=c(4,4,2,1)+0.2)
plot(Gest(cells))
plot(Gest(nztrees))
plot(Gest(redwood))
## J-function
par(mar=rep(0.2, 4))
plot(cells)
plot(nztrees)
plot(redwood)
par(mar=c(4,4,2,1)+0.2)
plot(Jest(cells))
plot(Jest(nztrees))
plot(Jest(redwood))
par(mfrow=c(1,1),
mar=c(4,4,2,1)+0.2)
## versions for inhomogeneous patterns
plot(Finhom(japanesepines))
plot(Ginhom(japanesepines))
plot(Jinhom(japanesepines))
## Display F,G,J,K
plot(allstats(swedishpines))
## Multitype patterns
plot(amacrine)
plot(Kcross(amacrine))
plot(Kdot(amacrine))
I <- (marks(amacrine) == "on")
J <- (marks(amacrine) == "off")
plot(Kmulti(amacrine, I, J))
plot(alltypes(amacrine, "K"))
plot(Lcross(amacrine))
plot(Ldot(amacrine))
plot(pcfcross(amacrine))
plot(pcfdot(amacrine))
plot(pcfmulti(amacrine, I, J))
plot(Gcross(amacrine))
plot(Gdot(amacrine))
plot(Gmulti(amacrine, I, J))
plot(alltypes(amacrine, "G"))
plot(Jcross(amacrine))
plot(Jdot(amacrine))
plot(Jmulti(amacrine,I,J))
plot(alltypes(amacrine, "J"))
plot(alltypes(amacrine, "F"))
plot(Iest(amacrine))
plot(markconnect(amacrine))
## Multitype, inhomogeneous
plot(Kcross.inhom(amacrine))
plot(Kdot.inhom(amacrine))
plot(Kmulti.inhom(amacrine, I, J))
plot(Lcross.inhom(amacrine))
plot(Ldot.inhom(amacrine))
plot(pcfcross.inhom(amacrine))
plot(pcfdot.inhom(amacrine))
plot(pcfmulti.inhom(amacrine, I, J))
## Numerical marks
plot(markcorr(longleaf))
plot(markvario(longleaf))
plot(Emark(longleaf))
plot(Vmark(longleaf))
## Linear networks
plot(chicago)
plot(linearK(chicago))
plot(linearKcross(chicago))
plot(linearKdot(chicago))
plot(linearpcf(chicago))
plot(linearpcfcross(chicago))
plot(linearpcfdot(chicago))
lam <- rep(intensity(unmark(chicago)), npoints(chicago))
A <- split(chicago)$assault
B <- split(chicago)$burglary
lamA <- rep(intensity(A), npoints(A))
lamB <- rep(intensity(B), npoints(B))
plot(linearKinhom(chicago, lam))
plot(linearKcross.inhom(chicago, "assault", "burglary", lamA, lamB))
plot(linearKdot.inhom(chicago, "assault", lamA, lam))
plot(linearpcfinhom(chicago, lam))
plot(linearpcfcross.inhom(chicago, "assault", "burglary", lamA, lamB))
plot(linearpcfdot.inhom(chicago, "assault", lamA, lam))
plot(linearmarkconnect(chicago))
plot(linearmarkequal(chicago))
rm(I,J,fit)
par(opa)
|
# Sven Chilton and Nathan Helm-Burger
# Signal Data Science Cohort 3
# Multinomial classification of speed-dating participants' careers from
# rating/interest features, using glmnet.
setwd('~/GitHub/signal-work/rscripts')
library(dplyr)
library(glmnet)
library(corrplot)
df = read.csv('../data/speeddating-aggregated.csv')
# Find the 4 most common careers
most_common_career_counts = sort(table(df[['career_c']]), decreasing = TRUE)[1:4]
most_common_career_codes = as.numeric(names(most_common_career_counts))
# lookup table mapping career code (1-17) -> career name
careers = c("Lawyer",
            "Academic",
            "Psychologist",
            "Doctor",
            "Engineer",
            "Creative",
            "Business",
            "RealEstate",
            "IntRelations",
            "Undecided",
            "SocialWork",
            "Speech",
            "Politics",
            "Athletics",
            "Other",
            "Journalism",
            "Architecture")
most_common_careers = careers[most_common_career_codes]
# Filter the df by these 4 careers
df = df[df[,'career_c'] %in% most_common_career_codes,]
# Extract the features
features = dplyr::select(df, attr_o:amb_o, sports:yoga)
# Extract the target, in this case, the filtered career_c column
target = df[['career_c']]
# Train a multinomial classification model with glmnet()
lambda = 0
# NOTE(review): scale(target) turns the categorical career code into a
# continuous z-score; glmnet's multinomial family expects a factor/class
# vector — presumably plain `target` was intended. Confirm before reuse.
multi_fit = glmnet(scale(features), scale(target), family="multinomial")
# NOTE(review): s=0 is outside glmnet's default lambda path, so coef()/
# predict() interpolate/extrapolate; fitting with lambda=0 explicitly would
# be cleaner. Confirm intent.
multi_fit_coeffs = coef(multi_fit, s=lambda)
multi_fit_preds = data.frame(predict(multi_fit, scale(features), s=lambda))
colnames(multi_fit_preds) = careers[c(1,2,6,7)]
# Convert the log-odds ratios in a given row of the prediction df
# to probabilities (softmax over the row)
probabilities = function(preds, rownum) {
  logodds = preds[rownum,]
  return(exp(logodds)/sum(exp(logodds)))
}
# Express our predictions in terms of probabilities
multi_fit_probs = data.frame(matrix(nrow=nrow(multi_fit_preds),
                                    ncol=ncol(multi_fit_preds)))
colnames(multi_fit_probs) = colnames(multi_fit_preds)
for (rownum in 1:nrow(multi_fit_preds)) {
  multi_fit_probs[rownum,] = probabilities(multi_fit_preds, rownum)
}
View(multi_fit_probs)
# Plot the coefficients with corrplot()
multi_fit_coeffs_mat = as.matrix(do.call(cbind, multi_fit_coeffs))
# drop the intercept row so only feature coefficients are plotted
multi_fit_coeffs_mat = tail(multi_fit_coeffs_mat, -1)
colnames(multi_fit_coeffs_mat) = careers[c(1,2,6,7)]
corrplot(multi_fit_coeffs_mat, is.corr=FALSE)
max(multi_fit_coeffs_mat)
# Just for giggles, let's visualize the correlations among
# the features
corrplot(cor(features))
|
/rscripts/Multinomial-Speed-Dating.R
|
no_license
|
svenchilton/signal-work
|
R
| false
| false
| 2,431
|
r
|
# Sven Chilton and Nathan Helm-Burger
# Signal Data Science Cohort 3
setwd('~/GitHub/signal-work/rscripts')
library(dplyr)
library(glmnet)
library(corrplot)
df = read.csv('../data/speeddating-aggregated.csv')
# Find the 4 most common careers
most_common_career_counts = sort(table(df[['career_c']]), decreasing = TRUE)[1:4]
most_common_career_codes = as.numeric(names(most_common_career_counts))
careers = c("Lawyer",
"Academic",
"Psychologist",
"Doctor",
"Engineer",
"Creative",
"Business",
"RealEstate",
"IntRelations",
"Undecided",
"SocialWork",
"Speech",
"Politics",
"Athletics",
"Other",
"Journalism",
"Architecture")
most_common_careers = careers[most_common_career_codes]
# Filter the df by these 4 careers
df = df[df[,'career_c'] %in% most_common_career_codes,]
# Extract the features
features = dplyr::select(df, attr_o:amb_o, sports:yoga)
# Extract the target, in this case, the filtered career_c column
target = df[['career_c']]
# Train a multinomial classification model with glmnet()
lambda = 0
multi_fit = glmnet(scale(features), scale(target), family="multinomial")
multi_fit_coeffs = coef(multi_fit, s=lambda)
multi_fit_preds = data.frame(predict(multi_fit, scale(features), s=lambda))
colnames(multi_fit_preds) = careers[c(1,2,6,7)]
# Convert the log-odds ratios in a given row of the prediction df
# to probabilities
probabilities = function(preds, rownum) {
logodds = preds[rownum,]
return(exp(logodds)/sum(exp(logodds)))
}
# Express our predictions in terms of probabilities
multi_fit_probs = data.frame(matrix(nrow=nrow(multi_fit_preds),
ncol=ncol(multi_fit_preds)))
colnames(multi_fit_probs) = colnames(multi_fit_preds)
for (rownum in 1:nrow(multi_fit_preds)) {
multi_fit_probs[rownum,] = probabilities(multi_fit_preds, rownum)
}
View(multi_fit_probs)
# Plot the coefficients with corrplot()
multi_fit_coeffs_mat = as.matrix(do.call(cbind, multi_fit_coeffs))
multi_fit_coeffs_mat = tail(multi_fit_coeffs_mat, -1)
colnames(multi_fit_coeffs_mat) = careers[c(1,2,6,7)]
corrplot(multi_fit_coeffs_mat, is.corr=FALSE)
max(multi_fit_coeffs_mat)
# Just for giggles, let's visualize the correlations among
# the features
corrplot(cor(features))
|
library(supclust)
### Name: plot.wilma
### Title: 2-Dimensional Visualization of Wilma's Output
### Aliases: plot.wilma
### Keywords: classif cluster
### ** Examples
## Running the examples of Wilma's help page
## (defines `fit`, a wilma object, in the current environment without echoing)
example(wilma, echo = FALSE)
plot(fit)
|
/data/genthat_extracted_code/supclust/examples/plot.wilma.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 260
|
r
|
library(supclust)
### Name: plot.wilma
### Title: 2-Dimensional Visualization of Wilma's Output
### Aliases: plot.wilma
### Keywords: classif cluster
### ** Examples
## Running the examples of Wilma's help page
example(wilma, echo = FALSE)
plot(fit)
|
## Return the rows of a data frame surrounding each detected action.
##
## An "action" is a time step at which the state variable `action` increases
## by at least `threshold` relative to the previous time step. For each
## detected action the function extracts a window of rows around it and
## stacks the windows, tagging each with an ActionNr column.
##
## Args:
##   dfData: data frame with one row per time step.
##   action: numeric vector with the device state (length == nrow(dfData)).
##   threshold: minimum increase counted as an action.
##   linesBefore, linesAfter: number of rows to keep before/after each action.
##   filterCloseActions: if 1, actions in consecutive time steps are collapsed
##     to the first one (e.g. a blind movement that spans several minutes).
##
## Returns: data frame of stacked windows with an added ActionNr column.
fctGetStateTransitions <- function(dfData, action, threshold = 5, linesBefore=15, linesAfter = 5, filterCloseActions = 1){
  ## change relative to the previous time step (first entry is NA)
  actionShifted <- c(NA, action[-length(action)])
  transAction <- round(action - actionShifted, 2)
  ## get all lines above threshold
  actionLines <- which(transAction >= threshold)
  ## no actions detected: return an empty result with the expected columns
  ## (the original code errored here)
  if (length(actionLines) == 0) {
    out <- dfData[0, , drop = FALSE]
    out$ActionNr <- integer(0)
    return(out)
  }
  ## drop actions in consecutive time steps, keeping the first of each run.
  ## Removing elements only widens gaps, so a single pass over the original
  ## positions is equivalent to the iterate-until-stable loop. The length
  ## guard also fixes a bug: with exactly one action the old loop evaluated
  ## `if (NA)` and crashed.
  if (filterCloseActions == 1 && length(actionLines) > 1) {
    actionLines <- actionLines[c(TRUE, diff(actionLines) != 1)]
  }
  ## clamp window bounds to valid row indices (1..length(action));
  ## the original used 0 as the lower bound and relied on R silently
  ## dropping a 0 index
  startLines <- pmax(actionLines - linesBefore, 1)
  endLines <- pmin(actionLines + linesAfter, length(action))
  ## collect the windows in a preallocated list and bind once,
  ## instead of growing with rbind() inside the loop (O(n^2))
  windows <- vector("list", length(actionLines))
  for (i in seq_along(actionLines)) {
    dfSub <- dfData[startLines[i]:endLines[i], , drop = FALSE]
    dfSub$ActionNr <- i
    windows[[i]] <- dfSub
  }
  do.call(rbind, windows)
} # end of function
### usage (not running!)
##dfData: a data frame with at least one column containing the state of a device (e.g. blinds)
#action <- dfData$Action # the column with the state of the device to be analysed (e.g. blinds)
#threshold <- 5 # threshold of change in state variable to be considered as action (e.g. .5 for binary variables)
#linesBefore <- 5 # number of lines to be returned before the detected action
#linesAfter <- 5 # number of lines to be returned after the detected action
#filterCloseActions <- 1 ### 1 = actions happening in consecutive timesteps will be erased (e.g. one blind action could appear in two to three timesteps in data due to length of blind movement being longer than 1 minute)
### call function
# dfActionsBefAft <- fctGetStateTransitions(dfData, action, .5, 15, 5, 0)
|
/R/fctGetStateTransitions.r
|
no_license
|
marcelschweiker/OB_r
|
R
| false
| false
| 2,283
|
r
|
## function that returns lines of a given dataframe before and after a detected action
fctGetStateTransitions <- function(dfData, action, threshold = 5, linesBefore=15, linesAfter = 5, filterCloseActions = 1){
actionShifted <- action
actionShifted <- append(actionShifted, NA, after=0)
actionShifted <- actionShifted[-length(actionShifted)]
transAction <- round(action - actionShifted,2)
#get all lines above threshold
actionLines <- which(transAction >= threshold)
# delete actions happening in consecutive timesteps
if(filterCloseActions == 1){
diffLines <- 1
while(diffLines > 0){
lenLinesPre <- length(actionLines)
k <- 1
listClose <- NA
for(i in 1:(length(actionLines)-1)){
if(actionLines[i+1]-actionLines[i]==1){
listClose[k] <- i+1
k <- k+1
}
}
if(!is.na(listClose[1])){
actionLines <- actionLines[-listClose]
}
lenLinesPost <- length(actionLines)
diffLines <- lenLinesPre-lenLinesPost
}
} # end if filterCloseActions
# calculate line number before and after action to be considered
# do not use lines below first line of data.frame or beyond line number
startLines <- ifelse(actionLines - linesBefore <0, 0, actionLines - linesBefore)
endLines <- ifelse(actionLines + linesAfter > length(action), length(action), actionLines + linesAfter)
for(i in 1:length(actionLines)){
dfSub <- dfData[startLines[i]:endLines[i],]
dfSub$ActionNr <- i
if(i == 1){
dfX <- dfSub
} else {
dfX <- rbind(dfX, dfSub)
}
}
# returning dfX
dfX
} # end of function
### usage (not running!)
##dfData: a data frame with at least one column containing the state of a device (e.g. blinds)
#action <- dfData$Action # the column with the state of the device to be analysed (e.g. blinds)
#threshold <- 5 # threshold of change in state variable to be considered as action (e.g. .5 for binary variables)
#linesBefore <- 5 # number of lines to be returned before the detected action
#linesAfter <- 5 # number of lines to be returned after the detected action
#filterCloseActions <- 1 ### 1 = actions happening in consecutive timesteps will be erased (e.g. one blind action could appear in two to three timesteps in data due to length of blind movement being longer than 1 minute)
### call function
# dfActionsBefAft <- fctGetStateTransitions(dfData, action, .5, 15, 5, 0)
|
## Build the top-level Shiny server function for the vivid app.
## Returns a function(input, output, session) suitable for shinyApp().
vivid_server <- function(){
  function(input, output, session) {
    ## start consuming messages from the child R process and attach the
    ## remote-R link to this session
    .globals$vivid_server$child_queue$consumer$start()
    .globals$remote_r$set_session(session)
    ## per-session document state
    session$userData$docs <- list()
    session$userData$r_markdown <- list()
    session$userData$r_output <- list()
    server_documents(input, output, session)
    server_rstudio(input, output, session)
    observeEvent(input$interrupt_r, {
      interrupt_r()
    })
    ## register the server hooks for each built-in gizmo (id -> hook name)
    gizmo_hooks <- c(gizmo_test = "helloworld",
                     gizdata = "gizdata",
                     wrangle_data = "wrangle_data",
                     scatter_3d = "scatter_3d",
                     menu_insert_markdown_block = "markdown",
                     dynamicui = "dynamicui")
    for (gizmo_id in names(gizmo_hooks)) {
      add_gizmo_server_hook(input, output, session, gizmo_id, gizmo_hooks[[gizmo_id]])
    }
    make_menu()
    ## open a fresh untitled document when the session starts
    set_active_document(add_new_document("Untitled"))
  }
}
## Launch the vivid application.
##
## Args:
##   child_process: if TRUE (default), run the app in a separate child
##     R process; otherwise run it in this process.
##   ...: forwarded to launch_vivid_child_server()/launch_vivid().
vivid <- function(child_process=TRUE, ...){
  if (child_process) {
    return(launch_vivid_child_server(...))
  }
  ## wire up the parent<->child IPC queues and the remote-R link,
  ## then start consuming parent-queue messages in the global env
  .globals$vivid_server$parent_queue <- ipc::queue()
  .globals$vivid_server$child_queue <- ipc::queue()
  .globals$remote_r <- QueueLinkedR$new(parent_queue(), child_queue())
  .globals$vivid_server$parent_queue$consumer$start(env=.GlobalEnv)
  launch_vivid(...)
}
## Construct the vivid UI and server and hand them to shiny::runApp().
## `...` is forwarded to runApp() (port, host, launch.browser, ...).
launch_vivid <- function(...){
  runApp(shinyApp(vivid_ui(), vivid_server()), ...)
}
## Show a modal confirmation dialog with cancel/OK buttons.
##
## Args:
##   ...: content passed through to modalDialog().
##   title: dialog title.
##   callback: currently unused — the button observers are not wired up,
##     so no callback is invoked on click.
##   button_labels: labels for the two footer buttons, cancel first.
##   session: shiny session in which to show the modal.
confirmDialog <- function(..., title="Message", callback=NULL, button_labels=c("Cancel","OK"), session = getDefaultReactiveDomain()){
  ## namespace the button ids with a fresh uuid so concurrent dialogs
  ## cannot collide on input ids
  ns <- NS(gen_uuid())
  dialog_footer <- tagList(
    actionButton(ns("cancel"), button_labels[1]),
    actionButton(ns("ok"), button_labels[2])
  )
  showModal(
    modalDialog(..., title=title, easyClose=FALSE, footer=dialog_footer),
    session=session
  )
}
|
/R/main-server.R
|
no_license
|
fellstat/vivid
|
R
| false
| false
| 2,261
|
r
|
vivid_server <- function(){
server <- function(input, output, session) {
.globals$vivid_server$child_queue$consumer$start()
.globals$remote_r$set_session(session)
session$userData$docs <- list()
session$userData$r_markdown <- list()
session$userData$r_output <- list()
server_documents(input, output, session)
server_rstudio(input, output, session)
observeEvent(input$interrupt_r, {
interrupt_r()
})
add_gizmo_server_hook(input, output, session, "gizmo_test","helloworld")
add_gizmo_server_hook(input, output, session, "gizdata","gizdata")
add_gizmo_server_hook(input, output, session, "wrangle_data","wrangle_data")
add_gizmo_server_hook(input, output, session, "scatter_3d","scatter_3d")
add_gizmo_server_hook(input, output, session, "menu_insert_markdown_block","markdown")
add_gizmo_server_hook(input, output, session, "dynamicui","dynamicui")
make_menu()
did <- add_new_document("Untitled")
set_active_document(did)
}
server
}
vivid <- function(child_process=TRUE, ...){
if(child_process)
return(launch_vivid_child_server(...))
.globals$vivid_server$parent_queue <- ipc::queue()
.globals$vivid_server$child_queue <- ipc::queue()
.globals$remote_r <- QueueLinkedR$new(parent_queue(), child_queue())
.globals$vivid_server$parent_queue$consumer$start(env=.GlobalEnv)
launch_vivid(...)
}
launch_vivid <- function(...){
ui <- vivid_ui()
server <- vivid_server()
runApp(shinyApp(ui, server), ...)
}
confirmDialog <- function(..., title="Message", callback=NULL, button_labels=c("Cancel","OK"), session = getDefaultReactiveDomain()){
uuid <- gen_uuid()
ns <- NS(uuid)
modal <- modalDialog(..., title=title, easyClose=FALSE, footer=tagList(
actionButton(ns("cancel"), button_labels[1]),
actionButton(ns("ok"), button_labels[2])
))
# if(!is.null(callback)){
# observeEvent(session$input[[ns("cancel")]], {
# callback(button_labels[1])
# #removeModal(session=session)
# },
# domain=session)
# observeEvent(session$input[[ns("cancel")]], {
# callback(button_labels[2])
# #removeModal(session=session)
# },
# domain=session)
# }
showModal(modal, session=session)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ModellingServer.R
\name{ModellingServer}
\alias{ModellingServer}
\title{ModellingServer}
\usage{
ModellingServer(input, output)
}
\arguments{
\item{input}{Shiny server input object.}

\item{output}{Shiny server output object.}
}
\value{
output
}
\description{
Creates and exports the server for the 'modelling' part of the app
}
|
/babyShiny/man/ModellingServer.Rd
|
no_license
|
TomHSY/babyShiny
|
R
| false
| true
| 338
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ModellingServer.R
\name{ModellingServer}
\alias{ModellingServer}
\title{ModellingServer}
\usage{
ModellingServer(input, output)
}
\arguments{
\item{input}{Shiny server input object.}

\item{output}{Shiny server output object.}
}
\value{
output
}
\description{
Creates and exports the server for the 'modelling' part of the app
}
|
# This mini-project is based on the K-Means exercise from 'R in Action'
# Go here for the original blog post and solutions
# http://www.r-bloggers.com/k-means-clustering-from-r-in-action/
# Exercise 0: Install these packages if you don't have them already
install.packages(c("cluster", "rattle.data","NbClust"))
# Now load the data and look at the first few rows
data(wine, package="rattle.data")
head(wine)
# Exercise 1: Remove the first column from the data and scale
# it using the scale() function
df <- scale(wine[-1])
# Now we'd like to cluster the data using K-Means.
# How do we decide how many clusters to use if you don't know that already?
# We'll try two methods.
# Method 1: A plot of the total within-groups sums of squares against the
# number of clusters in a K-means solution can be helpful. A bend in the
# graph can suggest the appropriate number of clusters.
# (fits k-means for k = 1..nc and plots the within-cluster sum of squares)
wssplot <- function(data, nc=15, seed=1234){
	      wss <- (nrow(data)-1)*sum(apply(data,2,var))
              for (i in 2:nc){
		        set.seed(seed)
	                wss[i] <- sum(kmeans(data, centers=i)$withinss)}
	      plot(1:nc, wss, type="b", xlab="Number of Clusters",
	                        ylab="Within groups sum of squares")
	   }
wssplot(df)
# Exercise 2:
#   * How many clusters does this method suggest?
#   * Why does this method work? What's the intuition behind it?
#   * Look at the code for wssplot() and figure out how it works
# Solution - From cluster 1 to 3 there is a heavy drop and later it becomes
# steady, so 3 clusters will be good
# Method 2: Use the NbClust library, which runs many experiments
# and gives a distribution of potential number of clusters.
library(NbClust)
set.seed(1234)
nc <- NbClust(df, min.nc=2, max.nc=15, method="kmeans")
barplot(table(nc$Best.n[1,]),
	          xlab="Numer of Clusters", ylab="Number of Criteria",
		            main="Number of Clusters Chosen by 26 Criteria")
# Exercise 3: How many clusters does this method suggest?
# No of clusters can be 3
# Exercise 4: Once you've picked the number of clusters, run k-means
# using this number of clusters. Output the result of calling kmeans()
# into a variable fit.km
set.seed(1234)
fit.km <- kmeans(df,3,nstart=25)
fit.km$size
# Now we want to evaluate how well this clustering does.
fit.km$centers
aggregate(wine[-1], by=list(cluster=fit.km$cluster), mean)
# Exercise 5: using the table() function, show how the clusters in fit.km$clusters
# compares to the actual wine types in wine$Type. Would you consider this a good
# clustering?
ct.km <- table(wine$Type, fit.km$cluster)
ct.km
install.packages("flexclust")
library(flexclust)
randIndex(ct.km)
## The adjusted Rand index measures agreement between partitions
## (-1 = none, 1 = perfect). Here it is 0.89, so that's good
# Exercise 6:
# * Visualize these clusters using function clusplot() from the cluster library
# * Would you consider this a good clustering?
library(cluster)
clusplot(df,fit.km$cluster)
/Machine Learning/1505932415_clustering.R
|
no_license
|
annapooranic/Springboard---Foundations-of-Data-Science
|
R
| false
| false
| 2,975
|
r
|
# NOTE(review): this is a byte-identical duplicate of the k-means exercise
# script that appears earlier in this file; kept as-is.
# This mini-project is based on the K-Means exercise from 'R in Action'
# Go here for the original blog post and solutions
# http://www.r-bloggers.com/k-means-clustering-from-r-in-action/
# Exercise 0: Install these packages if you don't have them already
# NOTE(review): unconditional install.packages() re-downloads packages on
# every run; a requireNamespace() guard would be preferable.
install.packages(c("cluster", "rattle.data","NbClust"))
# Now load the data and look at the first few rows
data(wine, package="rattle.data")
head(wine)
# Exercise 1: Remove the first column from the data and scale
# it using the scale() function
df <- scale(wine[-1])
# Now we'd like to cluster the data using K-Means.
# How do we decide how many clusters to use if you don't know that already?
# We'll try two methods.
# Method 1: A plot of the total within-groups sums of squares against the
# number of clusters in a K-means solution can be helpful. A bend in the
# graph can suggest the appropriate number of clusters.
wssplot <- function(data, nc=15, seed=1234){
  # wss[1] is the total variance around the grand mean; wss grows by
  # assignment inside the loop (no preallocation).
  wss <- (nrow(data)-1)*sum(apply(data,2,var))
  for (i in 2:nc){
    set.seed(seed)
    wss[i] <- sum(kmeans(data, centers=i)$withinss)}
  plot(1:nc, wss, type="b", xlab="Number of Clusters",
       ylab="Within groups sum of squares")
}
wssplot(df)
# Exercise 2:
# * How many clusters does this method suggest?
# * Why does this method work? What's the intuition behind it?
# * Look at the code for wssplot() and figure out how it works
#Solution - From cluster 1 to 3 there is a heavy drop and later it became steady so cluster
#3 will e good
# Method 2: Use the NbClust library, which runs many experiments
# and gives a distribution of potential number of clusters.
library(NbClust)
set.seed(1234)
nc <- NbClust(df, min.nc=2, max.nc=15, method="kmeans")
# NOTE(review): "Numer" in the xlab below is a typo in a runtime string;
# left unchanged here because this edit is documentation-only.
barplot(table(nc$Best.n[1,]),
        xlab="Numer of Clusters", ylab="Number of Criteria",
        main="Number of Clusters Chosen by 26 Criteria")
# Exercise 3: How many clusters does this method suggest?
# No of clusters can be 3
# Exercise 4: Once you've picked the number of clusters, run k-means
# using this number of clusters. Output the result of calling kmeans()
# into a variable fit.km
set.seed(1234)
fit.km <- kmeans(df,3,nstart=25)
fit.km$size
# Now we want to evaluate how well this clustering does.
fit.km$centers
aggregate(wine[-1], by=list(cluster=fit.km$cluster), mean)
# Exercise 5: using the table() function, show how the clusters in fit.km$clusters
# compares to the actual wine types in wine$Type. Would you consider this a good
# clustering?
ct.km <- table(wine$Type, fit.km$cluster)
ct.km
install.packages("flexclust")
library(flexclust)
randIndex(ct.km)
##The Randindex provides agreement between partitions. Here it is 0.89 so thats good
# Exercise 6:
# * Visualize these clusters using function clusplot() from the cluster library
# * Would you consider this a good clustering?
library(cluster)
clusplot(df,fit.km$cluster)
|
/man/hbmr.Rd
|
no_license
|
cran/BMRV
|
R
| false
| false
| 4,459
|
rd
| ||
# Dependencies for the songs explorer Shiny app.
library(shiny)
library(ggplot2)
library(dplyr)
# Expects songs.csv next to the app; the UI and server below reference
# columns year, artistname, energy, and the timbre_* features
# (assumed present in the CSV -- confirm against the data file).
songs <- read.csv("songs.csv")
# UI for the songs explorer: pick a year range, a y-axis variable and an
# artist; the main panel shows a plot plus the filtered rows as a table.
ui <- fluidPage(
  titlePanel("Songs dataset", windowTitle = "Songs dataset"),
  sidebarLayout(
    sidebarPanel(
      # Year bounds 1990-2010, generated instead of a hand-typed vector of
      # 21 string literals (same values as before).
      selectInput("FromID", "From", choices = as.character(1990:2010)),
      selectInput("ToID", "To", choices = as.character(1990:2010)),
      selectInput("VariableID", "Parameter on Y-axis",
                  choices = c("timesignature_confidence", "loudness", "tempo", "tempo_confidence", "key",
                              "key_confidence", "energy", "pitch", "timbre_0_min", "timbre_0_max",
                              "timbre_1_min", "timbre_1_max", "timbre_2_min", "timbre_2_max", "timbre_3_min",
                              "timbre_3_max", "timbre_4_min", "timbre_4_max", "timbre_5_min", "timbre_5_max",
                              "timbre_6_min", "timbre_6_max", "timbre_7_min", "timbre_7_max",
                              "timbre_8_min", "timbre_8_max", "timbre_9_min", "timbre_9_max", "timbre_10_min",
                              "timbre_10_max", "timbre_11_min", "timbre_11_max", "Top10")),
      # The original hard-coded ~550 artist names that mirror the dataset;
      # derive the list from songs.csv instead so it cannot drift out of
      # sync with the data.
      # NOTE(review): the list is now sorted alphabetically rather than in
      # the original hand-written order -- confirm this is acceptable.
      selectInput("artistInput", "artistname",
                  choices = sort(unique(as.character(songs$artistname))),
                  selected = "Y&T")
    ),
    mainPanel(
      plotOutput("coolplot"),
      br(), br(),
      tableOutput("results")
    )
  )
)
library(ggplot2)
library(dplyr)

# Server: filter `songs` to the chosen year range and artist, then render
# a scatter plot of the selected variable and a table of the matching rows.
server <- function(input, output) {
  # Reactive subset of `songs`, re-evaluated whenever a control changes.
  filtered <- reactive({
    songs %>%
      filter(
        # selectInput values arrive as strings; compare numerically instead
        # of relying on character coercion of `year`.
        year >= as.integer(input$FromID),
        year <= as.integer(input$ToID),
        artistname == input$artistInput
      )
  })

  output$coolplot <- renderPlot({
    # Bug fix: the y-axis was hard-coded to `energy`, so the
    # "Parameter on Y-axis" selector (input$VariableID) was silently
    # ignored; look the chosen column up via the .data pronoun instead.
    ggplot(filtered(), aes(x = year, y = .data[[input$VariableID]])) +
      geom_point() +
      labs(y = input$VariableID)
  })

  output$results <- renderTable({
    filtered()
  })
}

shinyApp(ui = ui, server = server)
|
/app.R
|
no_license
|
vrsh/ShinyApp2
|
R
| false
| false
| 91,189
|
r
|
library(shiny)
library(ggplot2)
library(dplyr)
songs <- read.csv("songs.csv")
ui <- fluidPage(
titlePanel("Songs dataset", windowTitle = "Songs dataset"),
sidebarLayout(
sidebarPanel(
selectInput("FromID", "From",
choices = c("1990", "1991", "1992", "1993", "1994", "1995", "1996",
"1997", "1998", "1999", "2000", "2001", "2002", "2003",
"2004", "2005", "2006", "2007", "2008", "2009", "2010")),
selectInput("ToID", "To",
choices = c("1990", "1991", "1992", "1993", "1994", "1995", "1996",
"1997", "1998", "1999", "2000", "2001", "2002", "2003",
"2004", "2005", "2006", "2007", "2008", "2009", "2010")),
selectInput("VariableID", "Parameter on Y-axis",
choices = c("timesignature_confidence", "loudness", "tempo", "tempo_confidence", "key",
"key_confidence", "energy", "pitch", "timbre_0_min", "timbre_0_max",
"timbre_1_min", "timbre_1_max", "timbre_2_min", "timbre_2_max", "timbre_3_min",
"timbre_3_max", "timbre_4_min", "timbre_4_max", "timbre_5_min", "timbre_5_max",
"timbre_6_min", "timbre_6_max", "timbre_7_min", "timbre_7_max",
"timbre_8_min", "timbre_8_max", "timbre_9_min", "timbre_9_max", "timbre_10_min",
"timbre_10_max", "timbre_11_min", "timbre_11_max", "Top10")),
selectInput("artistInput", "artistname",
choices = c( "A Day to Remember",
"Adam Lambert",
"Analog Rebellion",
"Artists For Haiti" ,
"B.o.B",
"Britney Spears",
"Butch Walker",
"David Guetta",
"Dimmu Borgir",
"DragonForce",
"Drake" ,
"Drowning Pool",
"Eels",
"Emily Osment",
"Eminem",
"Enrique Iglesias" ,
"Far*East Movement",
"Fear Factory",
"Flo Rida" ,
"Glee Cast",
"Iyaz",
"Jason Derulo" ,
"Jay Sean",
"Jay-Z + Alicia Keys",
"Jay-Z + Mr. Hudson",
"Justin Bieber",
"Kashmir" ,
"Kate Nash",
"Katy Perry" ,
"Ke$ha",
"Kevin Rudolf" ,
"La Roux" ,
"Lady Antebellum",
"Lady Gaga",
"Lifehouse" ,
"Lil Scrappy",
"Lil Wayne",
"Ludacris" ,
"Manafest" ,
"Mike Posner" ,
"Miley Cyrus" ,
"Nadine" ,
"October Tide" ,
"Outlawz" ,
"Overkill" ,
"Owl City" ,
"Reba" ,
"Rihanna" ,
"Sara Bareilles" ,
"Shearwater" ,
"Shout Out Louds" ,
"Super Junior" ,
"Superchunk" ,
"Taio Cruz" ,
"Taylor Swift" ,
"The Bird and the Bee" ,
"The Black Eyed Peas" ,
"The Magnetic Fields" ,
"Travie McCoy" ,
"Trey Songz" ,
"Usher" ,
"Various Artists" ,
"Young Money" ,
"Athlete" ,
"Band of Skulls" ,
"Beyonce" ,
"Carrie Underwood" ,
"Cheryl Cole" ,
"Ciara" ,
"Cobra Starship" ,
"Coldplay" ,
"Collective Soul" ,
"Creed" ,
"Crooked X" ,
"Family Force 5" ,
"Flight of the Conchords" ,
"Hannah Montana" ,
"Hit the Lights" ,
"Ingrid Michaelson" ,
"Jamie Foxx" ,
"Jason DeRulo" ,
"Jason Mraz" ,
"Jay-Z" ,
"Jeremih" ,
"Jordin Sparks" ,
"Julien-K" ,
"Kanye West" ,
"Kelly Clarkson" ,
"Keri Hilson" ,
"Kid Cudi" ,
"Kings Of Leon" ,
"Kris Allen" ,
"Lady GaGa" ,
"Linkin Park" ,
"Manchester Orchestra" ,
"Mariah Carey" ,
"Merzbow" ,
"Mike Jones" ,
"MxPx" ,
"Nek" ,
"Newton Faulkner" ,
"Noah and the Whale" ,
"Pilot Speed" ,
"Pitbull" ,
"Powerman 5000" ,
"Rancid" ,
"Red Light Company" ,
"Saosin" ,
"Seabird" ,
"Sean Kingston" ,
"Shinedown" ,
"Silverstein" ,
"Slaughterhouse" ,
"Soulja Boy Tell 'em" ,
"Sugar Ray" ,
"T.I." ,
"The All-American Rejects" ,
"The Audition" ,
"The Fray" ,
"The Juan MacLean" ,
"Third Eye Blind" ,
"Umphrey's McGee" ,
"Various artists" ,
"VOTA" ,
"Whitney Houston" ,
"Woods" ,
"Yngwie Malmsteen" ,
"Akon" ,
"Alicia Keys" ,
"Anastacia" ,
"Andrea Bocelli" ,
"Black Kids" ,
"Brooks & Dunn" ,
"Buckcherry" ,
"Chris Brown" ,
"Christina Aguilera" ,
"Colbie Caillat" ,
"Danity Kane" ,
"David Archuleta" ,
"David Cook" ,
"Demi Lovato & Joe Jonas" ,
"Disfear" ,
"Dr. Dog" ,
"eMC" ,
"Estelle" ,
"Europe" ,
"Fergie" ,
"Finger Eleven" ,
"Fireflight" ,
"Flogging Molly" ,
"Haste the Day" ,
"Jay-Z & T.I." ,
"Jesse McCartney" ,
"John Mellencamp" ,
"Jonas Brothers" ,
"Joseph Williams" ,
"Kalmah" ,
"Kardinal Offishall" ,
"Ken Block" ,
"Lenny Kravitz" ,
"Leona Lewis" ,
"Lil Mama" ,
"Lupe Fiasco" ,
"M.I.A." ,
"Madonna" ,
"Metro Station" ,
"Ministry and Co-Conspirators" ,
"Nada Surf" ,
"Natasha Bedingfield" ,
"Ne-Yo" ,
"Nickelback" ,
"Pink" ,
"Plants and Animals" ,
"Plies" ,
"Ray J & Yung Berg" ,
"Richard Marx" ,
"Shwayze" ,
"Snoop Dogg" ,
"Soulja Boy" ,
"Strapping Young Lad" ,
"T-Pain" ,
"Taproot" ,
"Thao with the Get Down Stay Down" ,
"The Game" ,
"The Pete Best Band" ,
"The Pussycat Dolls" ,
"The Smashing Pumpkins" ,
"This Is Hell" ,
"Thursday" ,
"Timbaland" ,
"Trapt" ,
"Webbie" ,
"Yael Naim" ,
"50 Cent" ,
"Against Me!" ,
"Aly & AJ" ,
"Amy Winehouse" ,
"Another Animal" ,
"Aqueduct" ,
"August Burns Red" ,
"Avenged Sevenfold" ,
"Avril Lavigne" ,
"Baby Bash" ,
"Beyonce & Shakira" ,
"Blaqk Audio" ,
"Bone Thugs-N-Harmony" ,
"Bone Thugs-n-Harmony" ,
"Bow Wow" ,
"Bright Eyes" ,
"Carmen Rasmusen" ,
"Charlotte Hatherley" ,
"Chrisette Michele" ,
"Crime Mob" ,
"Dan Deacon" ,
"Dashboard Confessional" ,
"Daughtry" ,
"Deerhoof" ,
"Diddy" ,
"Dixie Chicks" ,
"Dying Fetus" ,
"Epica" ,
"Fabolous" ,
"Gorilla Zoe" ,
"Gwen Stefani" ,
"Gym Class Heroes" ,
"Hinder" ,
"Hurricane Chris" ,
"J. Holiday" ,
"Jason Aldean" ,
"Jennifer Lopez" ,
"Jim Jones" ,
"Justin Timberlake" ,
"Kenna" ,
"Keyshia Cole" ,
"Les Savy Fav" ,
"Lloyd" ,
"Maroon 5" ,
"McFly" ,
"Menomena" ,
"Mims" ,
"My Chemical Romance" ,
"Nelly Furtado" ,
"No Angels" ,
"Of Montreal" ,
"Oysterband" ,
"Plain White Ts" ,
"Poison" ,
"Poison the Well" ,
"Puscifer" ,
"Relient K" ,
"Rich Boy" ,
"Rihanna & Sean Paul" ,
"Scorpions" ,
"Shop Boyz" ,
"Sick Puppies" ,
"Silverchair" ,
"So They Say" ,
"Soilwork" ,
"Submersed" ,
"The Cliks" ,
"The Cult" ,
"The Flower Kings" ,
"The Operation M.D." ,
"The Police" ,
"The Wildhearts" ,
"Tori Amos" ,
"Unk" ,
"Visions of Atlantis" ,
"will.i.am" ,
"Wooden Stars" ,
"X-Clan" ,
"Ace Frehley" ,
"Angels & Airwaves" ,
"Boards of Canada" ,
"Brian McKnight" ,
"Bubba Sparxxx" ,
"Built to Spill" ,
"Cascada" ,
"Cassie" ,
"Cat Power" ,
"Cellador" ,
"Chamillionaire" ,
"Chingy" ,
"Comets on Fire" ,
"D4L" ,
"Daniel Powter" ,
"Dave Burrell" ,
"DMC" ,
"Donavon Frankenreiter" ,
"Duels" ,
"Evanescence" ,
"Field Mob" ,
"G. Love" ,
"Gnarls Barkley" ,
"Gossip" ,
"House of Lords" ,
"India.Arie" ,
"Isobel Campbell" ,
"James Blunt" ,
"Jessi Colter" ,
"Jibbs" ,
"Joe Satriani" ,
"John Mayer" ,
"JoJo" ,
"Juelz Santana" ,
"LeAnn Rimes" ,
"Lil Jon" ,
"LL Cool J" ,
"Los Lonely Boys" ,
"Lyfe Jennings" ,
"Mary J. Blige" ,
"Miss Kittin" ,
"Nelly" ,
"Nick Lachey" ,
"NOFX" ,
"Panic! At The Disco" ,
"Paul Stanley" ,
"Rascal Flatts" ,
"Red Hot Chili Peppers" ,
"Sean Paul" ,
"Shakira" ,
"Snow Patrol" ,
"Sparks" ,
"Styles P" ,
"Taylor Hicks" ,
"The Blow" ,
"The Red Jumpsuit Apparatus" ,
"The Residents" ,
"The Zutons" ,
"Therapy?" ,
"Valencia" ,
"Weird Al Yankovic" ,
"Willie Nile" ,
"Young Dro" ,
"Yung Joc" ,
"Aerosmith" ,
"American Hi-Fi" ,
"Amerie" ,
"Blockhead" ,
"Bo Bice" ,
"Bobby Valentino" ,
"Bruce Dickinson" ,
"Caribou" ,
"D.H.T." ,
"David Banner" ,
"Destinys Child" ,
"Erasure" ,
"Exodus" ,
"Fat Joe" ,
"Fort Minor" ,
"Frankie J" ,
"Gavin DeGraw" ,
"Green Day" ,
"Imogen Heap" ,
"Ja Rule" ,
"John Lennon" ,
"Kaiser Chiefs" ,
"Lagwagon" ,
"Life of Agony" ,
"Lil Jon & The East Side Boyz" ,
"Limp Bizkit" ,
"Lindsay Lohan" ,
"Liz Phair" ,
"Mario" ,
"Missy Elliott" ,
"Pretty Ricky" ,
"Reggie and the Full Effect" ,
"Rev. Run" ,
"Rob Thomas" ,
"Roger Miret and the Disasters" ,
"Sarah Hudson" ,
"Spin Doctors" ,
"The New Pornographers" ,
"The Rolling Stones" ,
"Tom Vek" ,
"Transplants" ,
"Usher And Alicia Keys" ,
"Utada" ,
"Violent Femmes" ,
"Weezer" ,
"Wilco" ,
"Will Smith" ,
"Young Jeezy" ,
"Ashlee Simpson" ,
"Autolux" ,
"Avoid One Thing" ,
"Beenie Man" ,
"Ben Harper" ,
"Ben Kweller" ,
"Bob Dylan" ,
"Brad Mehldau" ,
"Calexico" ,
"Candy Butchers" ,
"Cassidy" ,
"Christina Milian" ,
"Clay Aiken" ,
"D12" ,
"Dave Matthews Band" ,
"Dead Kennedys" ,
"Dogs Die in Hot Cars" ,
"Fantasia" ,
"George Harrison" ,
"Hoobastank" ,
"In Flames" ,
"J-Kwon" ,
"Jagged Edge" ,
"Jimmy Eat World" ,
"Juvenile" ,
"Kelis" ,
"Kevin Lyttle" ,
"Kidz Bop Kids" ,
"Le Tigre" ,
"Lil Flip" ,
"Lloyd Banks" ,
"Lostprophets" ,
"Mad Caddies" ,
"Manic Street Preachers" ,
"Mario Winans" ,
"Marissa Nadler" ,
"Nina Sky" ,
"No Doubt" ,
"OutKast" ,
"Petey Pablo" ,
"Rogue Wave" ,
"Ron Sexsmith" ,
"Ruben Studdard" ,
"Rufus Wainwright" ,
"Rush" ,
"Saliva" ,
"Sarah Harmer" ,
"Steve Earle" ,
"Terror Squad" ,
"The Carpenters" ,
"The Hold Steady" ,
"The Killers" ,
"The Vines" ,
"Trans-Siberian Orchestra" ,
"Trick Daddy" ,
"Twista" ,
"U2" ,
"Usher & Alicia Keys" ,
"Ying Yang Twins" ,
"Zero 7" ,
"A Static Lullaby" ,
"American Idol Finalists" ,
"Anthrax" ,
"Ashanti" ,
"Ben Folds" ,
"Bettie Serveert" ,
"Black Eyed Peas" ,
"Boo-Yaa T.R.I.B.E." ,
"Busta Rhymes & Mariah Carey" ,
"Carbon Leaf" ,
"Cex" ,
"Craig Morgan" ,
"Cult of Luna" ,
"Daft Punk" ,
"Eagles" ,
"Eisley" ,
"Erykah Badu" ,
"Ginuwine" ,
"Guster" ,
"Harry Connick" ,
"Kid Rock" ,
"Killah Priest" ,
"Kurt Elling" ,
"Lene Marlin" ,
"Lil Kim" ,
"Mark Owen" ,
"matchbox twenty" ,
"Meat Loaf" ,
"Melanie C" ,
"Michael Bubl_" ,
"Monica" ,
"Neal Morse" ,
"Nivea" ,
"O.A.R." ,
"Pharrell" ,
"Prefuse 73" ,
"R. Kelly" ,
"Rob Zombie" ,
"Ryan Malcolm" ,
"Santana" ,
"Spiritualized" ,
"The Jayhawks" ,
"Triumph" ,
"Tyrese" ,
"Uncle Kracker" ,
"Wellwater Conspiracy" ,
"Year of the Rabbit" ,
"YoungBloodZ" ,
"Aaliyah" ,
"Ace Troubleshooter" ,
"Aimee Mann" ,
"Angie Martinez" ,
"Ash" ,
"Audioslave" ,
"Black Sabbath" ,
"Brandy" ,
"Buckethead" ,
"Camron" ,
"Chad Kroeger" ,
"Chris Isaak" ,
"Craig David" ,
"Crazy Town" ,
"Daniel Bedingfield" ,
"Dave Davies" ,
"DJ Sammy & Yanou" ,
"Dream Theater" ,
"Entombed" ,
"Eve" , "Faith Hill" ,
"Hilary Duff" ,
"Jaheim" ,
"King Crimson" ,
"Kylie Minogue" ,
"Leonard Cohen" ,
"Lionel Richie" ,
"Lock Up" ,
"Michelle Branch" ,
"N.O.R.E." ,
"Napalm Death" ,
"P. Diddy" ,
"P. Diddy & Ginuwine" ,
"Phantom Planet" ,
"Puddle Of Mudd" ,
"Sam Roberts" ,
"Soulfly" ,
"Styles" ,
"The Calling" ,
"The Chemical Brothers" ,
"The Exploited" ,
"The Polyphonic Spree" ,
"The Streets" ,
"The Tragically Hip" ,
"They Might Be Giants" ,
"Treble Charger" ,
"Truth Hurts" ,
"Tweet" ,
"Vanessa Carlton" ,
"Voodoo Glow Skulls" ,
"Agnostic Front" ,
"Blu Cantrell" ,
"Buddy Guy" ,
"Case" ,
"City High" ,
"David Byrne" ,
"Daz Dillinger" ,
"Debelah Morgan" ,
"Dido" ,
"Dream" ,
"Edens Crush" ,
"Embrace" ,
"Enya" ,
"Evergrey" ,
"Incubus" ,
"Janet" ,
"Kool and the Gang" ,
"Kurupt" ,
"Lil Romeo" ,
"Mercury Rev" ,
"Michael Jackson" ,
"Moloko" ,
"Mystic" ,
"O-Town" ,
"Ocean Colour Scene" ,
"Ryan Adams" ,
"S Club 7" ,
"Shaggy" ,
"Staind" ,
"Stone Temple Pilots" ,
"t.A.T.u." ,
"Tamia" ,
"The Ex" ,
"Train" ,
"Trick Pony" ,
"Witchery" ,
"98 Degrees" ,
"Arch Enemy" ,
"Backstreet Boys" ,
"Baha Men" ,
"Blaque" ,
"Blink-182" ,
"Catch 22" ,
"Chris Rea" ,
"Chumbawamba" ,
"David Coverdale" ,
"Deftones" ,
"Donell Jones" ,
"Eiffel 65" ,
"Elastica" ,
"Five" ,
"Hanson" ,
"Jessica Simpson" ,
"Joe" ,
"Kenny G" ,
"Lonestar" ,
"Macy Gray" ,
"Marc Anthony" ,
"Melvins" ,
"Montell Jordan" ,
"Next" ,
"Nine Days" ,
"Old Mans Child" ,
"Poe" ,
"Rickie Lee Jones" ,
"Ruff Endz" ,
"Samantha Mumba" ,
"Savage Garden" ,
"Sisqo" ,
"Sonique" ,
"soulDecision" ,
"The Gathering" ,
"The Presidents of the United States of America" ,
"Vertical Horizon" ,
"Wyclef Jean" ,
"Yes" ,
"Alex Lloyd" ,
"Big Sugar" ,
"Bis" ,
"Black Label Society" ,
"Blur" ,
"Case " ,
"Cher" ,
"Chevelle" ,
"Divine" ,
"Dokken" ,
"Dr. Dooom" ,
"Eagle-Eye Cherry" ,
"Edguy" ,
"Faith Evans" ,
"Goldfinger" ,
"Guns N' Roses" ,
"Handsome Boy Modeling School" ,
"Hypocrisy" ,
"Jeff Beck" ,
"Jesse Powell" ,
"Jewel" ,
"Joey McIntyre" ,
"Jordan Knight" ,
"JT Money" ,
"Kevon Edmonds" ,
"KMFDM" ,
"Lauryn Hill" ,
"Led Zeppelin" ,
"Len" ,
"Lou Bega" ,
"Maxwell" ,
"Naughty By Nature" ,
"P.O.D." ,
"Paul McCartney" ,
"Pearl Jam" ,
"Phish" ,
"Ricky Martin" ,
"Sarah McLachlan" ,
"Shania Twain" ,
"Shawn Mullins" ,
"Silkk the Shocker" ,
"Sinergy" ,
"Sixpence None The Richer" ,
"Smash Mouth" ,
"Smog" ,
"Static-X" ,
"Texas" ,
"Tim McGraw" ,
"TLC" ,
"Total" ,
"Various" ,
"Vice Squad" ,
"Ace of Base" ,
"Air" ,
"Alkaline Trio" ,
"All Saints" ,
"Ani DiFranco" ,
"Bane" ,
"Barenaked Ladies" ,
"Bizzy Bone" ,
"Boyz II Men" ,
"Brandy " ,
"Brandy & Monica" ,
"Bruce Hornsby" ,
"Busta Rhymes" ,
"B_la Fleck and the Flecktones" ,
"Celine Dion" ,
"Deana Carter" ,
"Deborah Cox" ,
"Deep Purple" ,
"Dru Hill" ,
"Edwin McCain" ,
"Eric Clapton" ,
"Fuel" ,
"Fun Lovin Criminals" ,
"Goo Goo Dolls" ,
"Hole" ,
"Inoj" ,
"Jennifer Paige" ,
"Jerry Cantrell" ,
"K-Ci " ,
"K-Ci & JoJo" ,
"K.P. & Envyi" ,
"Kristin Hersh" ,
"Lord Tariq & Peter Gunz" ,
"LSG" ,
"Marcy Playground" ,
"Mase" ,
"Monifah" ,
"Montell Jordan Feat. Master P & Silkk The Shocker",
"Nicole" ,
"Plastilina Mosh" ,
"Public Announcement" ,
"Queen Latifah" ,
"Robyn" ,
"RZA" ,
"Spice Girls" ,
"Sylk-E. Fyne" ,
"Tatyana Ali" ,
"The Bouncing Souls" ,
"Uncle Sam" ,
"Van Halen" ,
"Voices Of Theory" ,
"Willie Nelson" ,
"Xscape" ,
"Zebrahead" ,
"Allure" ,
"Anal Cunt" ,
"Aqua" ,
"Az Yet" ,
"Babyface" ,
"BLACKstreet (" ,
"Blonde Redhead" ,
"Blues Traveler" ,
"Changing Faces" ,
"Cornershop" ,
"Foxy Brown" ,
"Great Big Sea" ,
"Keith Sweat" ,
"Mark Morrison" ,
"Meredith Brooks" ,
"Merril Bainbridge" ,
"Millencolin" ,
"No Use for a Name" ,
"Old 97's" ,
"Pat Benatar" ,
"Paula Cole" ,
"Puff Daddy & Faith Evans" ,
"Reel Big Fish" ,
"Ric Ocasek" ,
"Robert Miles" ,
"Rome" ,
"Sheryl Crow" ,
"Simple Minds" ,
"Somethin' For The People" ,
"The Apples in Stereo" ,
"The Beach Boys" ,
"The Jam" ,
"The Notorious B.I.G." ,
"The Verve Pipe" ,
"Third Day" ,
"Toni Braxton" ,
"Billy Bragg" ,
"Butthole Surfers" ,
"Catherine Wheel" ,
"D'Angelo" ,
"Dark Tranquillity" ,
"Deep Blue Something" ,
"Donna Lewis" ,
"Everything But The Girl" ,
"George Michael" ,
"Jennifer Love Hewitt" ,
"La Bouche" ,
"Mariah Carey & Boyz II Men" ,
"Metallica" ,
"New Edition" ,
"Nirvana" ,
"No Mercy" ,
"Oasis" ,
"Prince" ,
"Quad City DJ's" ,
"Shawn Colvin" ,
"SWV" ,
"The Beatles" ,
"The Tony Rich Project" ,
"Tracy Chapman" ,
"Travis Tritt" ,
"Warrant" ,
"Adina Howard" ,
"Alanis Morissette" ,
"All-4-One" ,
"Atari Teenage Riot" ,
"BLACKstreet" ,
"Blessid Union Of Souls" ,
"Bon Jovi" ,
"Brownstone" ,
"Bryan Adams" ,
"Carly Simon" ,
"Deep Forest" ,
"Del Amitri" ,
"Dionne Farris" ,
"Dishwalla" ,
"Energy Orchard" ,
"Everclear" ,
"Groove Theory" ,
"Hootie & The Blowfish" ,
"Janet Jackson" ,
"Joan Osborne" ,
"Jon B." ,
"Luniz" ,
"Melissa Etheridge" ,
"Method Man" ,
"MoKenStef" ,
"Natalie Merchant" ,
"Nicki French" ,
"Real McCoy" ,
"Sophie B. Hawkins" ,
"Soul For Real" ,
"Swans" ,
"Take That" ,
"Teenage Fanclub" ,
"The Innocence Mission" ,
"The Tea Party" ,
"Ace Of Base" ,
"Barry Manilow" ,
"Beck" ,
"Boston" ,
"Boz Scaggs" ,
"Cake" ,
"Coolio" ,
"Craig Mack" ,
"Crash Test Dummies" ,
"Da Brat" ,
"Des'ree" ,
"Domino" ,
"DRS" ,
"Enigma" ,
"Immature" ,
"Jon Secada" ,
"King's X" ,
"Luther Vandross " ,
"Mayhem" ,
"Michael Bolton" ,
"Sade" ,
"Salt-N-Pepa" ,
"Sugar" ,
"Tag Team" ,
"Talisman" ,
"Tevin Campbell" ,
"The Cranberries" ,
"The Sea and Cake" ,
"Warren G" ,
"Arrested Development" ,
"Billy Joel" ,
"Bobby Brown" ,
"Bruce Springsteen" ,
"Dr. Dre" ,
"Duran Duran" ,
"Firehose" ,
"Gabrielle" ,
"George Strait" ,
"H-Town" ,
"Haddaway" ,
"Heart" ,
"Heavy D & the Boyz" ,
"Infectious Grooves" ,
"Inner Circle" ,
"James" ,
"Jodeci" ,
"Morphine" ,
"Oleta Adams" ,
"Onyx" ,
"Orchestral Manoeuvres in the Dark" ,
"P.M. Dawn" ,
"Paperboy" ,
"Pennywise" ,
"Pete Townshend" ,
"Prince And The New Power Generation" ,
"Robin S." ,
"Rod Stewart" ,
"Shai" ,
"Silk" ,
"Snap" ,
"Snow" ,
"Soul Asylum" ,
"The Cure" ,
"The Roots" ,
"Toby Keith" ,
"Tony! Toni! Ton_!" ,
"various artists" ,
"Wreckx-N-Effect" ,
"Zhane" ,
"Amy Grant" ,
"Atlantic Starr" ,
"Billy Ray Cyrus" ,
"CeCe Peniston" ,
"Color Me Badd" ,
"Elton John" ,
"En Vogue" ,
"Firehouse" ,
"Genesis" ,
"House Of Pain" ,
"Iced Earth" ,
"Joe Public" ,
"Kris Kross" ,
"Marky Mark " ,
"Michelle Shocked" ,
"Mint Condition" ,
"Mr. Big" ,
"Neil Young" ,
"Paula Abdul" ,
"Pavement" ,
"Queen" ,
"Roy Orbison" ,
"Selena" ,
"Shanice" ,
"Sir Mix-A-Lot" ,
"Sister Souljah" ,
"Suicidal Tendencies" ,
"Technotronic" ,
"The Cover Girls" ,
"The Dead Milkmen" ,
"The Heights" ,
"The Lemonheads" ,
"The Mighty Mighty Bosstones" ,
"Tom Cochrane" ,
"Ugly Kid Joe" ,
"Vanessa Williams" ,
"Aaron Neville" ,
"Another Bad Creation" ,
"Babes in Toyland" ,
"Bette Midler" ,
"Black Box" ,
"Bonnie Raitt" ,
"C+C Music Factory" ,
"Cathedral" ,
"Cathy Dennis" ,
"Chesney Hawkes" ,
"Corina" ,
"Curtis Stigers" ,
"Damn Yankees" ,
"Divinyls" ,
"EMF" ,
"Enuff Znuff" ,
"Gerardo" ,
"Gloria Estefan" ,
"Hi-Five" ,
"Huey Lewis and the News" ,
"INXS" ,
"Jesus Jones" ,
"Karyn White" ,
"Londonbeat" ,
"Luis Miguel" ,
"Luther Vandross" ,
"Martika" ,
"Michael W. Smith" ,
"Monster Magnet" ,
"Natural Selection" ,
"Nelson" ,
"Nia Peeples" ,
"Queensryche" ,
"R.E.M." ,
"Ralph Tresvant" ,
"Rick Astley" ,
"Roxette" ,
"Seal" ,
"Spirit of the West" ,
"Stevie B" ,
"Sting" ,
"Styx" ,
"Surface" ,
"Tara Kemp" ,
"Tesla" ,
"The KLF" ,
"Throwing Muses" ,
"Timmy T." ,
"Tom Petty and the Heartbreakers" ,
"Tracie Spencer" ,
"UB40" ,
"Vanilla Ice" ,
"Wilson Phillips" ,
"AC/DC" ,
"After 7" ,
"Al B. Sure!" ,
"Alannah Myles" ,
"Alias" ,
"Bad English" ,
"Bell Biv Devoe" ,
"Biz Markie" ,
"Calloway" ,
"Candyman" ,
"Cannibal Corpse" ,
"Chicago" ,
"Death" ,
"Death Angel" ,
"Deee-Lite" ,
"Depeche Mode" ,
"Dino" ,
"Expose" ,
"Extreme" ,
"Faith No More" ,
"Ian Gillan" ,
"Jody Watley" ,
"Johnny Gill" ,
"Judas Priest" ,
"Kiss" ,
"Linear" ,
"Lisa Stansfield" ,
"Los Lobos" ,
"Lou Gramm" ,
"Mark Lanegan" ,
"Maxi Priest" ,
"Milli Vanilli" ,
"Motley Crue" ,
"New Kids On The Block" ,
"Paul Young" ,
"Pebbles" ,
"Phil Collins" ,
"Ramones" ,
"Seduction" ,
"Skid Row" ,
"Skinny Puppy" ,
"Soul II Soul" ,
"Sweet Sensation" ,
"Taylor Dayne" ,
"The B-52 s" ,
"The Las" ,
"The Mission" ,
"The Stranglers" ,
"The Time" ,
"Tom Petty" ,
"Tony Toni Tone",
"Tyler Collins",
"XTC",
"Y&T"),
selected = "Y&T")
),
mainPanel(
plotOutput("coolplot"),
br(), br(),
tableOutput("results")
)
)
)
# Plotting and data-manipulation dependencies used by the server logic below.
# NOTE(review): these library() calls sit mid-file, between the UI and server
# definitions; consider moving them to the top of the script — confirm nothing
# earlier depends on them being attached later.
library(ggplot2)
library(dplyr)
# Shiny server: reacts to the year-range inputs (FromID/ToID) and the artist
# selector, producing a year-vs-energy scatter plot and a table of the
# matching rows from `songs` (defined earlier in the script).
server <- function(input, output) {

  # Reactive subset of `songs`: rows for the selected artist whose release
  # year falls in the inclusive window [input$FromID, input$ToID].
  matching_songs <- reactive({
    songs %>%
      filter(
        year >= input$FromID,
        year <= input$ToID,
        artistname == input$artistInput
      )
  })

  # Scatter plot of track energy over time for the filtered songs.
  output$coolplot <- renderPlot({
    ggplot(matching_songs(), aes(year, energy)) +
      geom_point()
  })

  # Raw table of the filtered rows, displayed beneath the plot.
  output$results <- renderTable({
    matching_songs()
  })
}
shinyApp(ui = ui, server = server)
# (removed: non-R text residue appended by a dataset-viewer export; the stray
# "|" and prose lines made the script unparseable)