content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
#!/applications/R/R-4.0.0/bin/Rscript
# author: Andy Tock
# contact: ajt200@cam.ac.uk
# date: 21.01.2021
# Calculate and plot metaprofiles of CEN180 SNVs relative to the CEN180 consensus
# (CEN180 windowed means and 95% confidence intervals, CIs)
# for all CEN180 sequences and randomly positioned loci
# Usage:
# /applications/R/R-4.0.0/bin/Rscript CEN180_CENgap_CENAthila_CENsoloLTR_SNVs_v_CEN180consensus_7metaprofiles.R 'Chr1,Chr2,Chr3,Chr4,Chr5' 180 2000 2000 2kb 10 10 10bp 10bp '0.02,0.96'
#chrName <- unlist(strsplit("Chr1,Chr2,Chr3,Chr4,Chr5",
# split = ","))
#bodyLength <- 180
#Athila_bodyLength <- 2000
#upstream <- 2000
#downstream <- 2000
#flankName <- "2kb"
#binSize <- 10
#Athila_binSize <- 10
#binName <- "10bp"
#Athila_binName <- "10bp"
## top left
#legendPos <- as.numeric(unlist(strsplit("0.02,0.96",
# split = ",")))
## top centre
#legendPos <- as.numeric(unlist(strsplit("0.38,0.96",
# split = ",")))
## top right
#legendPos <- as.numeric(unlist(strsplit("0.75,0.96",
# split = ",")))
## bottom left
#legendPos <- as.numeric(unlist(strsplit("0.02,0.40",
# split = ",")))
# Parse positional command-line arguments (see the Usage line in the header)
args <- commandArgs(trailingOnly = T)
# Chromosomes to analyse, e.g. "Chr1,Chr2,Chr3,Chr4,Chr5" -> character vector
chrName <- unlist(strsplit(args[1],
split = ","))
# Length (bp) of the CEN180 consensus body
bodyLength <- as.numeric(args[2])
# Length (bp) of the CENgap/CENAthila/CENsoloLTR body
Athila_bodyLength <- as.numeric(args[3])
upstream <- as.numeric(args[4])
# NOTE(review): downstream deliberately reuses args[4] so the downstream
# flank always equals the upstream flank (the Usage line supplies a single
# flank size and legendPos lands at args[10]) -- confirm intended
downstream <- as.numeric(args[4])
# Flank-size label used in input/output file names (e.g. "2kb")
flankName <- args[5]
# Window (bin) size in bp for CEN180 profiles
binSize <- as.numeric(args[6])
# Window (bin) size in bp for gap/Athila/soloLTR profiles
Athila_binSize <- as.numeric(args[7])
binName <- args[8]
Athila_binName <- args[9]
# Normalised panel coordinates (x, y) for the manually drawn legend text
legendPos <- as.numeric(unlist(strsplit(args[10],
split = ",")))
# Variant categories exactly as they appear in the input matrix file names
ChIPNames <- c("all", "SNV", "indel", "insertion", "deletion",
               "transition", "transversion")
# One matrix directory per category (all categories share the same directory)
ChIPDirs <- rep("/home/ajt200/analysis/nanopore/T2T_Col/SNVs_v_CEN180consensus/CEN180profiles/matrices/",
                length(ChIPNames))
# Human-readable category labels used in plot legends and titles
ChIPNamesPlot <- c("All", "SNVs", "Indels", "Insertions", "Deletions",
                   "Transitions", "Transversions")
# One line/ribbon colour per variant category
ChIPColours <- c("navy", "blue", "deepskyblue", "green2", "darkgreen",
                 "deeppink", "darkorange2")
# y-axis title shared by all panels
yLabPlot <- "CEN180 consensus variants"
options(stringsAsFactors = F)
library(parallel)
library(tidyr)
library(dplyr)
library(ggplot2)
library(ggthemes)
library(grid)
library(gridExtra)
library(extrafont)
#extrafont::loadfonts()
# Per-chromosome-set output directory, e.g. "Chr1_Chr2_Chr3_Chr4_Chr5/plots/"
outDir <- paste0(paste0(chrName, collapse = "_"), "/")
plotDir <- paste0(outDir, "plots/")
# Create the output directories with base R instead of shelling out to
# "[ -d ... ] || mkdir -p ..." (portable, and safe for paths that would
# need shell quoting); recursive = TRUE creates outDir along with plotDir
dir.create(plotDir, recursive = TRUE, showWarnings = FALSE)
# Build panel-title prefixes: the full five-chromosome set is labelled
# "All"; any subset is labelled with its comma-separated chromosome names
titlePrefix <- if (length(chrName) == 5) {
  "All"
} else {
  paste0(chrName, collapse = ",")
}
featureNamePlot <- paste(titlePrefix, "CEN180")
ranLocNamePlot <- paste(titlePrefix, "CENranLoc")
gapNamePlot <- paste(titlePrefix, "CENgap")
AthilaNamePlot <- paste(titlePrefix, "CENAthila")
soloLTRNamePlot <- paste(titlePrefix, "CENsoloLTR")
# Define feature start and end labels for plotting
featureStartLab <- "Start"
featureEndLab <- "End"
## ChIP
# feature
# For each variant category (x) and chromosome (y), read the per-CEN180
# coverage matrix: one row per CEN180 sequence, one column per window.
# Columns 101:318 trim the flanks down to 1 kb either side of the body
# (assumes a 2kb flank with 10-bp bins, i.e. 418 columns total and a
# 218-column 1kb/180bp/1kb span -- these indices are hard-coded, so
# verify them if flankName/binSize/bodyLength change).
ChIP_featureMats <- mclapply(seq_along(ChIPNames), function(x) {
lapply(seq_along(chrName), function(y) {
as.matrix(read.table(paste0(ChIPDirs[x],
ChIPNames[x],
"_CEN180_consensus_variants_MappedOn_T2T_Col_around_CEN180_in_",
chrName[y], "_matrix_bin", binSize, "bp_flank", flankName, ".tab"),
header = T))[,101:318]
})
}, mc.cores = length(ChIPNames))
# If features from multiple chromosomes are to be analysed,
# concatenate the corresponding feature coverage matrices
# (row-bind the per-chromosome matrices into one matrix per category)
ChIP_featureMats <- mclapply(seq_along(ChIP_featureMats), function(x) {
if(length(chrName) > 1) {
do.call(rbind, ChIP_featureMats[[x]])
} else {
ChIP_featureMats[[x]][[1]]
}
}, mc.cores = length(ChIP_featureMats))
## ChIP
# ranLoc
# Same as the feature matrices above, but for randomly positioned loci
# ("CENranLoc" files); columns 101:318 again trim the flanks to 1 kb
# (hard-coded indices -- verify for non-default flank/bin settings)
ChIP_ranLocMats <- mclapply(seq_along(ChIPNames), function(x) {
lapply(seq_along(chrName), function(y) {
as.matrix(read.table(paste0(ChIPDirs[x],
ChIPNames[x],
"_CEN180_consensus_variants_MappedOn_T2T_Col_around_CEN180_in_",
chrName[y], "_CENranLoc_matrix_bin", binSize, "bp_flank", flankName, ".tab"),
header = T))[,101:318]
})
}, mc.cores = length(ChIPNames))
# If ranLocs from multiple chromosomes are to be analysed,
# concatenate the corresponding ranLoc coverage matrices
ChIP_ranLocMats <- mclapply(seq_along(ChIP_ranLocMats), function(x) {
if(length(chrName) > 1) {
do.call(rbind, ChIP_ranLocMats[[x]])
} else {
ChIP_ranLocMats[[x]][[1]]
}
}, mc.cores = length(ChIP_ranLocMats))
## ChIP
# gap
# Per-CENgap coverage matrices; unlike the CEN180 matrices above, the full
# flank (flankName, e.g. 2kb) is retained -- no column trimming here
ChIP_gapMats <- mclapply(seq_along(ChIPNames), function(x) {
lapply(seq_along(chrName), function(y) {
as.matrix(read.table(paste0(ChIPDirs[x],
ChIPNames[x],
"_CEN180_consensus_variants_MappedOn_T2T_Col_around_CENgap_in_",
chrName[y], "_matrix_bin", binSize, "bp_flank", flankName, ".tab"),
header = T))
})
}, mc.cores = length(ChIPNames))
# If gaps from multiple chromosomes are to be analysed,
# concatenate the corresponding gap coverage matrices
ChIP_gapMats <- mclapply(seq_along(ChIP_gapMats), function(x) {
if(length(chrName) > 1) {
do.call(rbind, ChIP_gapMats[[x]])
} else {
ChIP_gapMats[[x]][[1]]
}
}, mc.cores = length(ChIP_gapMats))
## ChIP
# Athila
# Per-CENAthila coverage matrices; full flanks retained (no trimming)
ChIP_AthilaMats <- mclapply(seq_along(ChIPNames), function(x) {
lapply(seq_along(chrName), function(y) {
as.matrix(read.table(paste0(ChIPDirs[x],
ChIPNames[x],
"_CEN180_consensus_variants_MappedOn_T2T_Col_around_CENAthila_in_",
chrName[y], "_matrix_bin", binSize, "bp_flank", flankName, ".tab"),
header = T))
})
}, mc.cores = length(ChIPNames))
# If Athilas from multiple chromosomes are to be analysed,
# concatenate the corresponding Athila coverage matrices
ChIP_AthilaMats <- mclapply(seq_along(ChIP_AthilaMats), function(x) {
if(length(chrName) > 1) {
do.call(rbind, ChIP_AthilaMats[[x]])
} else {
ChIP_AthilaMats[[x]][[1]]
}
}, mc.cores = length(ChIP_AthilaMats))
## ChIP
# soloLTR
# Per-CENsoloLTR coverage matrices; full flanks retained (no trimming).
# Only Chr1, Chr4 and Chr5 are loaded (presumably the only chromosomes
# with centromeric solo LTRs -- confirm), so the inner list can be
# SHORTER than chrName, or empty if chrName contains none of these.
ChIP_soloLTRMats <- mclapply(seq_along(ChIPNames), function(x) {
lapply(which(chrName %in% c("Chr1", "Chr4", "Chr5")), function(y) {
as.matrix(read.table(paste0(ChIPDirs[x],
ChIPNames[x],
"_CEN180_consensus_variants_MappedOn_T2T_Col_around_CENsoloLTR_in_",
chrName[y], "_matrix_bin", binSize, "bp_flank", flankName, ".tab"),
header = T))
})
}, mc.cores = length(ChIPNames))
# If soloLTRs from multiple chromosomes are to be analysed,
# concatenate the corresponding soloLTR coverage matrices.
# FIX: test the length of the loaded soloLTR list itself rather than
# length(chrName) -- solo LTRs are only loaded for Chr1/Chr4/Chr5, so the
# list can be shorter than chrName; the old length(chrName) > 1 test could
# call do.call(rbind, ...) on an empty list (yielding NULL) or hit a
# subscript-out-of-bounds error when chrName contained none of Chr1/4/5.
ChIP_soloLTRMats <- mclapply(seq_along(ChIP_soloLTRMats), function(x) {
  if(length(ChIP_soloLTRMats[[x]]) > 1) {
    do.call(rbind, ChIP_soloLTRMats[[x]])
  } else {
    # NOTE(review): still errors if the list is empty (no Chr1/4/5 in
    # chrName); downstream colnames assignment would fail anyway -- the
    # script assumes at least one of Chr1/4/5 is requested
    ChIP_soloLTRMats[[x]][[1]]
  }
}, mc.cores = length(ChIP_soloLTRMats))
# ChIP
# Add column names
# Windows are labelled "u" (upstream flank), "t" (feature body) and
# "d" (downstream flank). CEN180/ranLoc matrices were trimmed to 1 kb
# flanks, hence the (upstream-1000)/(downstream-1000) arithmetic; the
# gap/Athila/soloLTR matrices keep the full flank and use Athila_binSize.
# Assumes flank lengths and bodyLength divide evenly by the bin sizes
# (true for the documented defaults) -- otherwise the label counts will
# not match the matrix column counts. Also assumes each soloLTR element
# is a matrix (see the soloLTR loading note above).
for(x in seq_along(ChIP_featureMats)) {
colnames(ChIP_featureMats[[x]]) <- c(paste0("u", 1:((upstream-1000)/binSize)),
paste0("t", (((upstream-1000)/binSize)+1):(((upstream-1000)+bodyLength)/binSize)),
paste0("d", ((((upstream-1000)+bodyLength)/binSize)+1):((((upstream-1000)+bodyLength)/binSize)+((downstream-1000)/binSize))))
colnames(ChIP_ranLocMats[[x]]) <- c(paste0("u", 1:((upstream-1000)/binSize)),
paste0("t", (((upstream-1000)/binSize)+1):(((upstream-1000)+bodyLength)/binSize)),
paste0("d", ((((upstream-1000)+bodyLength)/binSize)+1):((((upstream-1000)+bodyLength)/binSize)+((downstream-1000)/binSize))))
colnames(ChIP_gapMats[[x]]) <- c(paste0("u", 1:(upstream/Athila_binSize)),
paste0("t", ((upstream/Athila_binSize)+1):((upstream+Athila_bodyLength)/Athila_binSize)),
paste0("d", (((upstream+Athila_bodyLength)/Athila_binSize)+1):(((upstream+Athila_bodyLength)/Athila_binSize)+(downstream/Athila_binSize))))
colnames(ChIP_AthilaMats[[x]]) <- c(paste0("u", 1:(upstream/Athila_binSize)),
paste0("t", ((upstream/Athila_binSize)+1):((upstream+Athila_bodyLength)/Athila_binSize)),
paste0("d", (((upstream+Athila_bodyLength)/Athila_binSize)+1):(((upstream+Athila_bodyLength)/Athila_binSize)+(downstream/Athila_binSize))))
colnames(ChIP_soloLTRMats[[x]]) <- c(paste0("u", 1:(upstream/Athila_binSize)),
paste0("t", ((upstream/Athila_binSize)+1):((upstream+Athila_bodyLength)/Athila_binSize)),
paste0("d", (((upstream+Athila_bodyLength)/Athila_binSize)+1):(((upstream+Athila_bodyLength)/Athila_binSize)+(downstream/Athila_binSize))))
}
# Bundle the five locus-class matrices for each variant category into one
# list so downstream steps can iterate over a single structure.
# Element order: 1 = CEN180 features, 2 = random loci (ranLoc),
# 3 = CENgaps, 4 = CENAthila, 5 = CENsoloLTR.
ChIP_mats <- mclapply(seq_along(ChIP_featureMats), function(k) {
  list(ChIP_featureMats[[k]],
       ChIP_ranLocMats[[k]],
       ChIP_gapMats[[k]],
       ChIP_AthilaMats[[k]],
       ChIP_soloLTRMats[[k]])
}, mc.cores = length(ChIP_featureMats))
# Transpose matrix and convert into dataframe
# in which first column is window name
# (after transposition: one row per window, one column per locus)
wideDFfeature_list_ChIP <- mclapply(seq_along(ChIP_mats), function(x) {
lapply(seq_along(ChIP_mats[[x]]), function(y) {
data.frame(window = colnames(ChIP_mats[[x]][[y]]),
t(ChIP_mats[[x]][[y]]))
})
}, mc.cores = length(ChIP_mats))
# Convert into tidy data.frame (long format)
# NOTE(review): tidyr::gather() is superseded by pivot_longer(); gather()
# is retained here to preserve the exact row ordering of the long output
tidyDFfeature_list_ChIP <- mclapply(seq_along(wideDFfeature_list_ChIP), function(x) {
lapply(seq_along(ChIP_mats[[x]]), function(y) {
gather(data = wideDFfeature_list_ChIP[[x]][[y]],
key = feature,
value = coverage,
-window)
})
}, mc.cores = length(wideDFfeature_list_ChIP))
# Order levels of factor "window" so that sequential levels
# correspond to sequential windows
# (otherwise factor() would sort levels alphabetically: u1, u10, u100, ...)
for(x in seq_along(tidyDFfeature_list_ChIP)) {
for(y in seq_along(ChIP_mats[[x]])) {
tidyDFfeature_list_ChIP[[x]][[y]]$window <- factor(tidyDFfeature_list_ChIP[[x]][[y]]$window,
levels = as.character(wideDFfeature_list_ChIP[[x]][[y]]$window))
}
}
# Create summary data.frame in which each row corresponds to a window (Column 1),
# Column2 is the number of coverage values (features) per window,
# Column3 is the mean of coverage values per window,
# Column4 is the standard deviation of coverage values per window,
# Column5 is the standard error of the mean of coverage values per window,
# Column6 is the lower bound of the 95% confidence interval, and
# Column7 is the upper bound of the 95% confidence interval
summaryDFfeature_list_ChIP <- mclapply(seq_along(tidyDFfeature_list_ChIP), function(x) {
lapply(seq_along(ChIP_mats[[x]]), function(y) {
data.frame(window = as.character(wideDFfeature_list_ChIP[[x]][[y]]$window),
n = tapply(X = tidyDFfeature_list_ChIP[[x]][[y]]$coverage,
INDEX = tidyDFfeature_list_ChIP[[x]][[y]]$window,
FUN = length),
mean = tapply(X = tidyDFfeature_list_ChIP[[x]][[y]]$coverage,
INDEX = tidyDFfeature_list_ChIP[[x]][[y]]$window,
FUN = mean,
na.rm = TRUE),
sd = tapply(X = tidyDFfeature_list_ChIP[[x]][[y]]$coverage,
INDEX = tidyDFfeature_list_ChIP[[x]][[y]]$window,
FUN = sd,
na.rm = TRUE))
})
}, mc.cores = length(tidyDFfeature_list_ChIP))
# Re-impose sequential window ordering, add a numeric window index (winNo,
# used as the plotting x-axis), and derive SEM and 95% CI bounds per window
for(x in seq_along(summaryDFfeature_list_ChIP)) {
for(y in seq_along(ChIP_mats[[x]])) {
summaryDFfeature_list_ChIP[[x]][[y]]$window <- factor(summaryDFfeature_list_ChIP[[x]][[y]]$window,
levels = as.character(wideDFfeature_list_ChIP[[x]][[y]]$window))
summaryDFfeature_list_ChIP[[x]][[y]]$winNo <- factor(1:dim(summaryDFfeature_list_ChIP[[x]][[y]])[1])
# NOTE(review): SEM is computed as sd/sqrt(n-1) rather than the usual
# sd/sqrt(n) -- confirm this is intentional
summaryDFfeature_list_ChIP[[x]][[y]]$sem <- summaryDFfeature_list_ChIP[[x]][[y]]$sd/sqrt(summaryDFfeature_list_ChIP[[x]][[y]]$n-1)
# 95% CI from the t-distribution with n-1 degrees of freedom
summaryDFfeature_list_ChIP[[x]][[y]]$CI_lower <- summaryDFfeature_list_ChIP[[x]][[y]]$mean -
qt(0.975, df = summaryDFfeature_list_ChIP[[x]][[y]]$n-1)*summaryDFfeature_list_ChIP[[x]][[y]]$sem
summaryDFfeature_list_ChIP[[x]][[y]]$CI_upper <- summaryDFfeature_list_ChIP[[x]][[y]]$mean +
qt(0.975, df = summaryDFfeature_list_ChIP[[x]][[y]]$n-1)*summaryDFfeature_list_ChIP[[x]][[y]]$sem
}
}
# Collapse summaryDFfeature_list_ChIP (variant categories x locus classes)
# into one data.frame per locus class, stacking all variant categories and
# labelling each with its plot name via the "libName" column.
# Locus-class order matches ChIP_mats: 1 = feature, 2 = ranLoc, 3 = gap,
# 4 = Athila, 5 = soloLTR.
summaryDFfeature_ChIP <- lapply(seq_len(5), function(k) {
  perClass <- lapply(summaryDFfeature_list_ChIP, function(cat) cat[[k]])
  names(perClass) <- ChIPNamesPlot
  bind_rows(perClass, .id = "libName")
})
# Fix the libName factor level order so colours and legend entries are
# assigned consistently across all panels
for (k in seq_along(summaryDFfeature_ChIP)) {
  summaryDFfeature_ChIP[[k]]$libName <- factor(summaryDFfeature_ChIP[[k]]$libName,
                                               levels = ChIPNamesPlot)
}
# Define y-axis limits shared by all five panels: span the extreme 95% CI
# bounds across every locus class so the panels are directly comparable
ymin_ChIP <- min(unlist(lapply(summaryDFfeature_ChIP, function(d) d$CI_lower)))
ymax_ChIP <- max(unlist(lapply(summaryDFfeature_ChIP, function(d) d$CI_upper)))
# Define legend labels: one coloured text grob per variant category,
# stacked vertically from legendPos downwards (0.06 normalised units per
# row); these are drawn manually with annotation_custom() rather than via
# a ggplot legend
legendLabs <- lapply(seq_along(ChIPNamesPlot), function(k) {
  grobTree(textGrob(bquote(.(ChIPNamesPlot[k])),
                    x = legendPos[1],
                    y = legendPos[2] - ((k - 1) * 0.06),
                    just = "left",
                    gp = gpar(col = ChIPColours[k], fontsize = 18)))
})
# Plot average profiles with 95% CI ribbon
## feature
# Panel 1: CEN180 metaprofile -- one mean line + CI ribbon per variant
# category, x-axis = window index (winNo), shared y-limits across panels
summaryDFfeature <- summaryDFfeature_ChIP[[1]]
ggObj1_combined_ChIP <- ggplot(data = summaryDFfeature,
mapping = aes(x = winNo,
y = mean,
group = libName)
) +
geom_line(data = summaryDFfeature,
mapping = aes(colour = libName),
size = 1) +
scale_colour_manual(values = ChIPColours) +
geom_ribbon(data = summaryDFfeature,
mapping = aes(ymin = CI_lower,
ymax = CI_upper,
fill = libName),
alpha = 0.4) +
scale_fill_manual(values = ChIPColours) +
scale_y_continuous(limits = c(ymin_ChIP, ymax_ChIP),
labels = function(x) sprintf("%6.3f", x)) +
# x-axis breaks at flank start, body start, body end and flank end;
# dim(...)[1]/length(ChIPNames) = number of windows per category.
# The "1kb" labels are hard-coded because the CEN180 matrices were
# trimmed to 1 kb flanks at load time (columns 101:318)
scale_x_discrete(breaks = c(1,
((upstream-1000)/binSize)+1,
(dim(summaryDFfeature_ChIP[[1]])[1]/length(ChIPNames))-((downstream-1000)/binSize),
dim(summaryDFfeature_ChIP[[1]])[1]/length(ChIPNames)),
labels = c(paste0("-", "1kb"),
featureStartLab,
featureEndLab,
paste0("+", "1kb"))) +
geom_vline(xintercept = c(((upstream-1000)/binSize)+1,
(dim(summaryDFfeature_ChIP[[1]])[1]/length(ChIPNames))-((downstream-1000)/binSize)),
linetype = "dashed",
size = 1) +
labs(x = "",
y = bquote(.(yLabPlot))) +
theme_bw() +
theme(
axis.ticks = element_line(size = 1.0, colour = "black"),
axis.ticks.length = unit(0.25, "cm"),
axis.text.x = element_text(size = 22, colour = "black"),
axis.text.y = element_text(size = 18, colour = "black", family = "Luxi Mono"),
axis.title = element_text(size = 30, colour = "black"),
legend.position = "none",
panel.grid = element_blank(),
panel.border = element_rect(size = 3.5, colour = "black"),
panel.background = element_blank(),
plot.margin = unit(c(0.3,1.2,0.0,0.3), "cm"),
plot.title = element_text(hjust = 0.5, size = 30)) +
# Title shows the locus-class name and n; n is constant across windows,
# so n[1] is the number of CEN180 sequences in the first window
ggtitle(bquote(.(featureNamePlot) ~ "(" * italic("n") ~ "=" ~
.(prettyNum(summaryDFfeature$n[1],
big.mark = ",", trim = T)) *
")"))
## ranLoc
# Panel 2: random-loci metaprofile, styled identically to Panel 1; this is
# the only panel carrying the manually drawn legend (one annotation_custom
# text grob per variant category, positioned by legendPos)
summaryDFfeature <- summaryDFfeature_ChIP[[2]]
ggObj2_combined_ChIP <- ggplot(data = summaryDFfeature,
mapping = aes(x = winNo,
y = mean,
group = libName)
) +
geom_line(data = summaryDFfeature,
mapping = aes(colour = libName),
size = 1) +
scale_colour_manual(values = ChIPColours) +
geom_ribbon(data = summaryDFfeature,
mapping = aes(ymin = CI_lower,
ymax = CI_upper,
fill = libName),
alpha = 0.4) +
scale_fill_manual(values = ChIPColours) +
scale_y_continuous(limits = c(ymin_ChIP, ymax_ChIP),
labels = function(x) sprintf("%6.3f", x)) +
# "1kb" labels hard-coded to match the 1 kb flank trim at load time
scale_x_discrete(breaks = c(1,
((upstream-1000)/binSize)+1,
(dim(summaryDFfeature_ChIP[[2]])[1]/length(ChIPNames))-((downstream-1000)/binSize),
dim(summaryDFfeature_ChIP[[2]])[1]/length(ChIPNames)),
labels = c(paste0("-", "1kb"),
featureStartLab,
featureEndLab,
paste0("+", "1kb"))) +
geom_vline(xintercept = c(((upstream-1000)/binSize)+1,
(dim(summaryDFfeature_ChIP[[2]])[1]/length(ChIPNames))-((downstream-1000)/binSize)),
linetype = "dashed",
size = 1) +
labs(x = "",
y = bquote(.(yLabPlot))) +
annotation_custom(legendLabs[[1]]) +
annotation_custom(legendLabs[[2]]) +
annotation_custom(legendLabs[[3]]) +
annotation_custom(legendLabs[[4]]) +
annotation_custom(legendLabs[[5]]) +
annotation_custom(legendLabs[[6]]) +
annotation_custom(legendLabs[[7]]) +
theme_bw() +
theme(
axis.ticks = element_line(size = 1.0, colour = "black"),
axis.ticks.length = unit(0.25, "cm"),
axis.text.x = element_text(size = 22, colour = "black"),
axis.text.y = element_text(size = 18, colour = "black", family = "Luxi Mono"),
axis.title = element_text(size = 30, colour = "black"),
legend.position = "none",
panel.grid = element_blank(),
panel.border = element_rect(size = 3.5, colour = "black"),
panel.background = element_blank(),
plot.margin = unit(c(0.3,1.2,0.0,0.3), "cm"),
plot.title = element_text(hjust = 0.5, size = 30)) +
ggtitle(bquote(.(ranLocNamePlot) ~ "(" * italic("n") ~ "=" ~
.(prettyNum(summaryDFfeature$n[1],
big.mark = ",", trim = T)) *
")"))
## gap
# Panel 3: CENgap metaprofile; uses the full flank (flankName labels) and
# Athila_binSize, since the gap matrices were not trimmed at load time
summaryDFfeature <- summaryDFfeature_ChIP[[3]]
ggObj3_combined_ChIP <- ggplot(data = summaryDFfeature,
mapping = aes(x = winNo,
y = mean,
group = libName)
) +
geom_line(data = summaryDFfeature,
mapping = aes(colour = libName),
size = 1) +
scale_colour_manual(values = ChIPColours) +
geom_ribbon(data = summaryDFfeature,
mapping = aes(ymin = CI_lower,
ymax = CI_upper,
fill = libName),
alpha = 0.4) +
scale_fill_manual(values = ChIPColours) +
scale_y_continuous(limits = c(ymin_ChIP, ymax_ChIP),
labels = function(x) sprintf("%6.3f", x)) +
scale_x_discrete(breaks = c(1,
(upstream/Athila_binSize)+1,
(dim(summaryDFfeature_ChIP[[3]])[1]/length(ChIPNames))-(downstream/Athila_binSize),
dim(summaryDFfeature_ChIP[[3]])[1]/length(ChIPNames)),
labels = c(paste0("-", flankName),
featureStartLab,
featureEndLab,
paste0("+", flankName))) +
geom_vline(xintercept = c((upstream/Athila_binSize)+1,
(dim(summaryDFfeature_ChIP[[3]])[1]/length(ChIPNames))-(downstream/Athila_binSize)),
linetype = "dashed",
size = 1) +
labs(x = "",
y = bquote(.(yLabPlot))) +
theme_bw() +
theme(
axis.ticks = element_line(size = 1.0, colour = "black"),
axis.ticks.length = unit(0.25, "cm"),
axis.text.x = element_text(size = 22, colour = "black"),
axis.text.y = element_text(size = 18, colour = "black", family = "Luxi Mono"),
axis.title = element_text(size = 30, colour = "black"),
legend.position = "none",
panel.grid = element_blank(),
panel.border = element_rect(size = 3.5, colour = "black"),
panel.background = element_blank(),
plot.margin = unit(c(0.3,1.2,0.0,0.3), "cm"),
plot.title = element_text(hjust = 0.5, size = 30)) +
ggtitle(bquote(.(gapNamePlot) ~ "(" * italic("n") ~ "=" ~
.(prettyNum(summaryDFfeature$n[1],
big.mark = ",", trim = T)) *
")"))
## Athila
# Panel 4: CENAthila metaprofile; same styling as Panel 3 (full flanks,
# Athila_binSize bins)
summaryDFfeature <- summaryDFfeature_ChIP[[4]]
ggObj4_combined_ChIP <- ggplot(data = summaryDFfeature,
mapping = aes(x = winNo,
y = mean,
group = libName)
) +
geom_line(data = summaryDFfeature,
mapping = aes(colour = libName),
size = 1) +
scale_colour_manual(values = ChIPColours) +
geom_ribbon(data = summaryDFfeature,
mapping = aes(ymin = CI_lower,
ymax = CI_upper,
fill = libName),
alpha = 0.4) +
scale_fill_manual(values = ChIPColours) +
scale_y_continuous(limits = c(ymin_ChIP, ymax_ChIP),
labels = function(x) sprintf("%6.3f", x)) +
scale_x_discrete(breaks = c(1,
(upstream/Athila_binSize)+1,
(dim(summaryDFfeature_ChIP[[4]])[1]/length(ChIPNames))-(downstream/Athila_binSize),
dim(summaryDFfeature_ChIP[[4]])[1]/length(ChIPNames)),
labels = c(paste0("-", flankName),
featureStartLab,
featureEndLab,
paste0("+", flankName))) +
geom_vline(xintercept = c((upstream/Athila_binSize)+1,
(dim(summaryDFfeature_ChIP[[4]])[1]/length(ChIPNames))-(downstream/Athila_binSize)),
linetype = "dashed",
size = 1) +
labs(x = "",
y = bquote(.(yLabPlot))) +
theme_bw() +
theme(
axis.ticks = element_line(size = 1.0, colour = "black"),
axis.ticks.length = unit(0.25, "cm"),
axis.text.x = element_text(size = 22, colour = "black"),
axis.text.y = element_text(size = 18, colour = "black", family = "Luxi Mono"),
axis.title = element_text(size = 30, colour = "black"),
legend.position = "none",
panel.grid = element_blank(),
panel.border = element_rect(size = 3.5, colour = "black"),
panel.background = element_blank(),
plot.margin = unit(c(0.3,1.2,0.0,0.3), "cm"),
plot.title = element_text(hjust = 0.5, size = 30)) +
ggtitle(bquote(.(AthilaNamePlot) ~ "(" * italic("n") ~ "=" ~
.(prettyNum(summaryDFfeature$n[1],
big.mark = ",", trim = T)) *
")"))
## soloLTR
# Panel 5: CENsoloLTR metaprofile; same styling as Panels 3-4 (full
# flanks, Athila_binSize bins)
summaryDFfeature <- summaryDFfeature_ChIP[[5]]
ggObj5_combined_ChIP <- ggplot(data = summaryDFfeature,
mapping = aes(x = winNo,
y = mean,
group = libName)
) +
geom_line(data = summaryDFfeature,
mapping = aes(colour = libName),
size = 1) +
scale_colour_manual(values = ChIPColours) +
geom_ribbon(data = summaryDFfeature,
mapping = aes(ymin = CI_lower,
ymax = CI_upper,
fill = libName),
alpha = 0.4) +
scale_fill_manual(values = ChIPColours) +
scale_y_continuous(limits = c(ymin_ChIP, ymax_ChIP),
labels = function(x) sprintf("%6.3f", x)) +
scale_x_discrete(breaks = c(1,
(upstream/Athila_binSize)+1,
(dim(summaryDFfeature_ChIP[[5]])[1]/length(ChIPNames))-(downstream/Athila_binSize),
dim(summaryDFfeature_ChIP[[5]])[1]/length(ChIPNames)),
labels = c(paste0("-", flankName),
featureStartLab,
featureEndLab,
paste0("+", flankName))) +
geom_vline(xintercept = c((upstream/Athila_binSize)+1,
(dim(summaryDFfeature_ChIP[[5]])[1]/length(ChIPNames))-(downstream/Athila_binSize)),
linetype = "dashed",
size = 1) +
labs(x = "",
y = bquote(.(yLabPlot))) +
theme_bw() +
theme(
axis.ticks = element_line(size = 1.0, colour = "black"),
axis.ticks.length = unit(0.25, "cm"),
axis.text.x = element_text(size = 22, colour = "black"),
axis.text.y = element_text(size = 18, colour = "black", family = "Luxi Mono"),
axis.title = element_text(size = 30, colour = "black"),
legend.position = "none",
panel.grid = element_blank(),
panel.border = element_rect(size = 3.5, colour = "black"),
panel.background = element_blank(),
plot.margin = unit(c(0.3,1.2,0.0,0.3), "cm"),
plot.title = element_text(hjust = 0.5, size = 30)) +
ggtitle(bquote(.(soloLTRNamePlot) ~ "(" * italic("n") ~ "=" ~
.(prettyNum(summaryDFfeature$n[1],
big.mark = ",", trim = T)) *
")"))
# Arrange the five panels side by side in a single row
# (a 1x5 layout matrix is equivalent to the original cbind(1, 2, 3, 4, 5))
ggObjGA_combined <- grid.arrange(grobs = list(ggObj1_combined_ChIP,
                                              ggObj2_combined_ChIP,
                                              ggObj3_combined_ChIP,
                                              ggObj4_combined_ChIP,
                                              ggObj5_combined_ChIP),
                                 layout_matrix = matrix(1:5, nrow = 1))
# Write the combined figure to plotDir; width scales with the panel count
ggsave(paste0(plotDir,
              "CEN180_variants_vs_CEN180_consensus_",
              "_avgProfiles_around",
              "_CEN180_CENranLoc_CENgap_CENAthila_CENsoloLTR_in_T2T_Col_",
              paste0(chrName, collapse = "_"), ".pdf"),
       plot = ggObjGA_combined,
       height = 6.5, width = 7 * 5, limitsize = FALSE)
| /CEN180_in_T2T_Col/CEN180_CENgap_CENAthila_CENsoloLTR_SNVs_v_CEN180consensus_7metaprofiles.R | no_license | ajtock/repeats | R | false | false | 29,478 | r | #!/applications/R/R-4.0.0/bin/Rscript
# author: Andy Tock
# contact: ajt200@cam.ac.uk
# date: 21.01.2021
# Calculate and plot metaprofiles CEN180 SNVs relative to CEN180 consensus
# (CEN180 windowed means and 95% confidence intervals, CIs)
# for all CEN180 sequences and randomly positioned loci
# Usage:
# /applications/R/R-4.0.0/bin/Rscript CEN180_CENgap_CENAthila_CENsoloLTR_SNVs_v_CEN180consensus_7metaprofiles.R 'Chr1,Chr2,Chr3,Chr4,Chr5' 180 2000 2000 2kb 10 10 10bp 10bp '0.02,0.96'
#chrName <- unlist(strsplit("Chr1,Chr2,Chr3,Chr4,Chr5",
# split = ","))
#bodyLength <- 180
#Athila_bodyLength <- 2000
#upstream <- 2000
#downstream <- 2000
#flankName <- "2kb"
#binSize <- 10
#Athila_binSize <- 10
#binName <- "10bp"
#Athila_binName <- "10bp"
## top left
#legendPos <- as.numeric(unlist(strsplit("0.02,0.96",
# split = ",")))
## top centre
#legendPos <- as.numeric(unlist(strsplit("0.38,0.96",
# split = ",")))
## top right
#legendPos <- as.numeric(unlist(strsplit("0.75,0.96",
# split = ",")))
## bottom left
#legendPos <- as.numeric(unlist(strsplit("0.02,0.40",
# split = ",")))
args <- commandArgs(trailingOnly = T)
chrName <- unlist(strsplit(args[1],
split = ","))
bodyLength <- as.numeric(args[2])
Athila_bodyLength <- as.numeric(args[3])
upstream <- as.numeric(args[4])
downstream <- as.numeric(args[4])
flankName <- args[5]
binSize <- as.numeric(args[6])
Athila_binSize <- as.numeric(args[7])
binName <- args[8]
Athila_binName <- args[9]
legendPos <- as.numeric(unlist(strsplit(args[10],
split = ",")))
ChIPNames <- c("all",
"SNV",
"indel",
"insertion",
"deletion",
"transition",
"transversion")
ChIPDirs <- rep("/home/ajt200/analysis/nanopore/T2T_Col/SNVs_v_CEN180consensus/CEN180profiles/matrices/",
length(ChIPNames))
ChIPNamesPlot <- c("All",
"SNVs",
"Indels",
"Insertions",
"Deletions",
"Transitions",
"Transversions")
ChIPColours <- c("navy",
"blue",
"deepskyblue",
"green2",
"darkgreen",
"deeppink",
"darkorange2")
yLabPlot <- "CEN180 consensus variants"
# Keep character columns as characters when reading matrices
options(stringsAsFactors = F)
library(parallel)
library(tidyr)
library(dplyr)
library(ggplot2)
library(ggthemes)
library(grid)
library(gridExtra)
library(extrafont)
#extrafont::loadfonts()
# Output directories for results and plots, named after the chromosome
# selection (e.g. "Chr1_Chr2/plots/")
outDir <- paste0(paste0(chrName, collapse = "_"), "/")
plotDir <- paste0(outDir, "plots/")
# Create the directories in R rather than shelling out to `mkdir -p`:
# portable, and safe for paths the shell would mis-parse.
# showWarnings = FALSE makes this a no-op when they already exist;
# recursive = TRUE creates outDir along with plotDir.
dir.create(outDir, showWarnings = FALSE, recursive = TRUE)
dir.create(plotDir, showWarnings = FALSE, recursive = TRUE)
# Panel titles for each locus class. When all five chromosomes are
# analysed the titles are prefixed with "All"; otherwise with the
# comma-separated chromosome selection (e.g. "Chr1,Chr2").
chrLabel <- if (length(chrName) == 5) {
  "All"
} else {
  paste0(chrName, collapse = ",")
}
featureNamePlot <- paste(chrLabel, "CEN180")
ranLocNamePlot <- paste(chrLabel, "CENranLoc")
gapNamePlot <- paste(chrLabel, "CENgap")
AthilaNamePlot <- paste(chrLabel, "CENAthila")
soloLTRNamePlot <- paste(chrLabel, "CENsoloLTR")
# Labels for the feature start and end positions on the x axis
featureStartLab <- "Start"
featureEndLab <- "End"
## ChIP
# feature
# Read the per-chromosome windowed variant matrices around CEN180 loci,
# one matrix per (variant class, chromosome). Columns 101:318 restrict
# the stored 2-kb flanks to 1-kb flanks plus the 18 body bins
# (100 + 18 + 100 = 218 columns at the default 10-bp bins) -- the
# colnames block below uses (upstream-1000)/binSize accordingly.
ChIP_featureMats <- mclapply(seq_along(ChIPNames), function(x) {
lapply(seq_along(chrName), function(y) {
as.matrix(read.table(paste0(ChIPDirs[x],
ChIPNames[x],
"_CEN180_consensus_variants_MappedOn_T2T_Col_around_CEN180_in_",
chrName[y], "_matrix_bin", binSize, "bp_flank", flankName, ".tab"),
header = T))[,101:318]
})
}, mc.cores = length(ChIPNames))
# If features from multiple chromosomes are to be analysed,
# concatenate the corresponding feature coverage matrices
# (rows = loci, columns = windows)
ChIP_featureMats <- mclapply(seq_along(ChIP_featureMats), function(x) {
if(length(chrName) > 1) {
do.call(rbind, ChIP_featureMats[[x]])
} else {
ChIP_featureMats[[x]][[1]]
}
}, mc.cores = length(ChIP_featureMats))
## ChIP
# ranLoc
# Same as the CEN180 feature matrices above, but for randomly positioned
# control loci ("CENranLoc"); the same 101:318 column window (1-kb flanks
# plus body at default bin settings) is applied.
ChIP_ranLocMats <- mclapply(seq_along(ChIPNames), function(x) {
lapply(seq_along(chrName), function(y) {
as.matrix(read.table(paste0(ChIPDirs[x],
ChIPNames[x],
"_CEN180_consensus_variants_MappedOn_T2T_Col_around_CEN180_in_",
chrName[y], "_CENranLoc_matrix_bin", binSize, "bp_flank", flankName, ".tab"),
header = T))[,101:318]
})
}, mc.cores = length(ChIPNames))
# If ranLocs from multiple chromosomes are to be analysed,
# concatenate the corresponding ranLoc coverage matrices
ChIP_ranLocMats <- mclapply(seq_along(ChIP_ranLocMats), function(x) {
if(length(chrName) > 1) {
do.call(rbind, ChIP_ranLocMats[[x]])
} else {
ChIP_ranLocMats[[x]][[1]]
}
}, mc.cores = length(ChIP_ranLocMats))
## ChIP
# gap
# Windowed variant matrices around CENgap loci. Unlike the CEN180 and
# ranLoc matrices, the full matrix (complete flanks + body) is kept --
# no column subsetting.
ChIP_gapMats <- mclapply(seq_along(ChIPNames), function(x) {
lapply(seq_along(chrName), function(y) {
as.matrix(read.table(paste0(ChIPDirs[x],
ChIPNames[x],
"_CEN180_consensus_variants_MappedOn_T2T_Col_around_CENgap_in_",
chrName[y], "_matrix_bin", binSize, "bp_flank", flankName, ".tab"),
header = T))
})
}, mc.cores = length(ChIPNames))
# If gaps from multiple chromosomes are to be analysed,
# concatenate the corresponding gap coverage matrices
ChIP_gapMats <- mclapply(seq_along(ChIP_gapMats), function(x) {
if(length(chrName) > 1) {
do.call(rbind, ChIP_gapMats[[x]])
} else {
ChIP_gapMats[[x]][[1]]
}
}, mc.cores = length(ChIP_gapMats))
## ChIP
# Athila
# Windowed variant matrices around CENAthila loci (full flank/body
# matrix; no column subsetting).
ChIP_AthilaMats <- mclapply(seq_along(ChIPNames), function(x) {
lapply(seq_along(chrName), function(y) {
as.matrix(read.table(paste0(ChIPDirs[x],
ChIPNames[x],
"_CEN180_consensus_variants_MappedOn_T2T_Col_around_CENAthila_in_",
chrName[y], "_matrix_bin", binSize, "bp_flank", flankName, ".tab"),
header = T))
})
}, mc.cores = length(ChIPNames))
# If Athilas from multiple chromosomes are to be analysed,
# concatenate the corresponding Athila coverage matrices
ChIP_AthilaMats <- mclapply(seq_along(ChIP_AthilaMats), function(x) {
if(length(chrName) > 1) {
do.call(rbind, ChIP_AthilaMats[[x]])
} else {
ChIP_AthilaMats[[x]][[1]]
}
}, mc.cores = length(ChIP_AthilaMats))
## ChIP
# soloLTR
# Windowed variant matrices around CENsoloLTR loci. Solo LTRs are only
# present on Chr1, Chr4 and Chr5, so matrices are loaded only for the
# selected chromosomes in that set (full flank/body matrix; no column
# subsetting).
ChIP_soloLTRMats <- mclapply(seq_along(ChIPNames), function(x) {
lapply(which(chrName %in% c("Chr1", "Chr4", "Chr5")), function(y) {
as.matrix(read.table(paste0(ChIPDirs[x],
ChIPNames[x],
"_CEN180_consensus_variants_MappedOn_T2T_Col_around_CENsoloLTR_in_",
chrName[y], "_matrix_bin", binSize, "bp_flank", flankName, ".tab"),
header = T))
})
}, mc.cores = length(ChIPNames))
# If soloLTRs from multiple chromosomes are to be analysed,
# concatenate the corresponding soloLTR coverage matrices.
# BUG FIX: the decision previously tested length(chrName) > 1, but the
# soloLTR list can be shorter than chrName (only Chr1/Chr4/Chr5 carry
# solo LTRs), so [[1]] failed when a single non-soloLTR chromosome was
# requested and rbind could silently mis-handle a one-element list.
# Decide on the length of the matrix list itself instead.
ChIP_soloLTRMats <- mclapply(seq_along(ChIP_soloLTRMats), function(x) {
if(length(ChIP_soloLTRMats[[x]]) > 1) {
do.call(rbind, ChIP_soloLTRMats[[x]])
} else if(length(ChIP_soloLTRMats[[x]]) == 1) {
ChIP_soloLTRMats[[x]][[1]]
} else {
# No soloLTR-bearing chromosome selected; downstream code that
# assumes soloLTR data will need to handle NULL
NULL
}
}, mc.cores = length(ChIP_soloLTRMats))
# ChIP
# Add column names
# Windows are labelled u<i> (upstream flank), t<i> (feature body) and
# d<i> (downstream flank), with a single running index across the three
# segments. The CEN180/ranLoc matrices were trimmed to 1-kb flanks when
# read (columns 101:318), hence the (upstream-1000)/(downstream-1000)
# terms; the gap/Athila/soloLTR matrices keep full flanks at
# Athila_binSize resolution.
for(x in seq_along(ChIP_featureMats)) {
colnames(ChIP_featureMats[[x]]) <- c(paste0("u", 1:((upstream-1000)/binSize)),
paste0("t", (((upstream-1000)/binSize)+1):(((upstream-1000)+bodyLength)/binSize)),
paste0("d", ((((upstream-1000)+bodyLength)/binSize)+1):((((upstream-1000)+bodyLength)/binSize)+((downstream-1000)/binSize))))
colnames(ChIP_ranLocMats[[x]]) <- c(paste0("u", 1:((upstream-1000)/binSize)),
paste0("t", (((upstream-1000)/binSize)+1):(((upstream-1000)+bodyLength)/binSize)),
paste0("d", ((((upstream-1000)+bodyLength)/binSize)+1):((((upstream-1000)+bodyLength)/binSize)+((downstream-1000)/binSize))))
colnames(ChIP_gapMats[[x]]) <- c(paste0("u", 1:(upstream/Athila_binSize)),
paste0("t", ((upstream/Athila_binSize)+1):((upstream+Athila_bodyLength)/Athila_binSize)),
paste0("d", (((upstream+Athila_bodyLength)/Athila_binSize)+1):(((upstream+Athila_bodyLength)/Athila_binSize)+(downstream/Athila_binSize))))
colnames(ChIP_AthilaMats[[x]]) <- c(paste0("u", 1:(upstream/Athila_binSize)),
paste0("t", ((upstream/Athila_binSize)+1):((upstream+Athila_bodyLength)/Athila_binSize)),
paste0("d", (((upstream+Athila_bodyLength)/Athila_binSize)+1):(((upstream+Athila_bodyLength)/Athila_binSize)+(downstream/Athila_binSize))))
colnames(ChIP_soloLTRMats[[x]]) <- c(paste0("u", 1:(upstream/Athila_binSize)),
paste0("t", ((upstream/Athila_binSize)+1):((upstream+Athila_bodyLength)/Athila_binSize)),
paste0("d", (((upstream+Athila_bodyLength)/Athila_binSize)+1):(((upstream+Athila_bodyLength)/Athila_binSize)+(downstream/Athila_binSize))))
}
# Bundle, per variant class, the five coverage matrices in a fixed
# order: 1 = CEN180 features, 2 = random loci, 3 = CENgaps,
# 4 = CENAthilas, 5 = CENsoloLTRs. Map() pairs the lists element-wise,
# yielding one list of five matrices per variant class.
ChIP_mats <- Map(list,
                 ChIP_featureMats,
                 ChIP_ranLocMats,
                 ChIP_gapMats,
                 ChIP_AthilaMats,
                 ChIP_soloLTRMats)
# Transpose matrix and convert into dataframe
# in which first column is window name
# (rows become windows, remaining columns become one column per locus)
wideDFfeature_list_ChIP <- mclapply(seq_along(ChIP_mats), function(x) {
lapply(seq_along(ChIP_mats[[x]]), function(y) {
data.frame(window = colnames(ChIP_mats[[x]][[y]]),
t(ChIP_mats[[x]][[y]]))
})
}, mc.cores = length(ChIP_mats))
# Convert into tidy data.frame (long format):
# one row per (window, locus) pair with its coverage value
tidyDFfeature_list_ChIP <- mclapply(seq_along(wideDFfeature_list_ChIP), function(x) {
lapply(seq_along(ChIP_mats[[x]]), function(y) {
gather(data = wideDFfeature_list_ChIP[[x]][[y]],
key = feature,
value = coverage,
-window)
})
}, mc.cores = length(wideDFfeature_list_ChIP))
# Order the levels of the "window" factor so that sequential levels
# follow the left-to-right order of profile windows (the long-format
# conversion otherwise leaves them in alphabetical order).
for (lib_idx in seq_along(tidyDFfeature_list_ChIP)) {
  for (locus_idx in seq_along(ChIP_mats[[lib_idx]])) {
    ordered_windows <- as.character(wideDFfeature_list_ChIP[[lib_idx]][[locus_idx]]$window)
    tidyDFfeature_list_ChIP[[lib_idx]][[locus_idx]]$window <-
      factor(tidyDFfeature_list_ChIP[[lib_idx]][[locus_idx]]$window,
             levels = ordered_windows)
  }
}
# Create summary data.frame in which each row corresponds to a window (Column 1),
# Column2 is the number of coverage values (features) per window,
# Column3 is the mean of coverage values per window,
# Column4 is the standard deviation of coverage values per window,
# Column5 is the standard error of the mean of coverage values per window,
# Column6 is the lower bound of the 95% confidence interval, and
# Column7 is the upper bound of the 95% confidence interval
# (sem, CI_lower and CI_upper are added by the loop that follows)
summaryDFfeature_list_ChIP <- mclapply(seq_along(tidyDFfeature_list_ChIP), function(x) {
lapply(seq_along(ChIP_mats[[x]]), function(y) {
data.frame(window = as.character(wideDFfeature_list_ChIP[[x]][[y]]$window),
n = tapply(X = tidyDFfeature_list_ChIP[[x]][[y]]$coverage,
INDEX = tidyDFfeature_list_ChIP[[x]][[y]]$window,
FUN = length),
mean = tapply(X = tidyDFfeature_list_ChIP[[x]][[y]]$coverage,
INDEX = tidyDFfeature_list_ChIP[[x]][[y]]$window,
FUN = mean,
na.rm = TRUE),
sd = tapply(X = tidyDFfeature_list_ChIP[[x]][[y]]$coverage,
INDEX = tidyDFfeature_list_ChIP[[x]][[y]]$window,
FUN = sd,
na.rm = TRUE))
})
}, mc.cores = length(tidyDFfeature_list_ChIP))
# Restore window ordering, then add a numeric window index (winNo),
# the standard error of the mean (sem) and t-based 95% confidence
# interval bounds per window.
for(x in seq_along(summaryDFfeature_list_ChIP)) {
for(y in seq_along(ChIP_mats[[x]])) {
summaryDFfeature_list_ChIP[[x]][[y]]$window <- factor(summaryDFfeature_list_ChIP[[x]][[y]]$window,
levels = as.character(wideDFfeature_list_ChIP[[x]][[y]]$window))
summaryDFfeature_list_ChIP[[x]][[y]]$winNo <- factor(1:dim(summaryDFfeature_list_ChIP[[x]][[y]])[1])
# NOTE(review): SEM is computed as sd/sqrt(n-1) rather than the more
# common sd/sqrt(n) -- confirm this is intentional
summaryDFfeature_list_ChIP[[x]][[y]]$sem <- summaryDFfeature_list_ChIP[[x]][[y]]$sd/sqrt(summaryDFfeature_list_ChIP[[x]][[y]]$n-1)
summaryDFfeature_list_ChIP[[x]][[y]]$CI_lower <- summaryDFfeature_list_ChIP[[x]][[y]]$mean -
qt(0.975, df = summaryDFfeature_list_ChIP[[x]][[y]]$n-1)*summaryDFfeature_list_ChIP[[x]][[y]]$sem
summaryDFfeature_list_ChIP[[x]][[y]]$CI_upper <- summaryDFfeature_list_ChIP[[x]][[y]]$mean +
qt(0.975, df = summaryDFfeature_list_ChIP[[x]][[y]]$n-1)*summaryDFfeature_list_ChIP[[x]][[y]]$sem
}
}
# Collapse the nested per-variant-class summaries into one data.frame
# per locus class (1 = CEN180, 2 = ranLoc, 3 = gap, 4 = Athila,
# 5 = soloLTR), each holding every variant class's meta-profile for
# plotting. The libName id column records the variant class.
extract_locus_class <- function(class_idx) {
  per_variant <- lapply(summaryDFfeature_list_ChIP, function(lst) lst[[class_idx]])
  names(per_variant) <- ChIPNamesPlot
  per_variant
}
featureTmp <- extract_locus_class(1)
ranLocTmp <- extract_locus_class(2)
gapTmp <- extract_locus_class(3)
AthilaTmp <- extract_locus_class(4)
soloLTRTmp <- extract_locus_class(5)
summaryDFfeature_ChIP <- lapply(list(featureTmp,
                                     ranLocTmp,
                                     gapTmp,
                                     AthilaTmp,
                                     soloLTRTmp),
                                function(tmp) bind_rows(tmp, .id = "libName"))
# Fix the variant-class plotting order
for (locus_idx in seq_along(summaryDFfeature_ChIP)) {
  summaryDFfeature_ChIP[[locus_idx]]$libName <-
    factor(summaryDFfeature_ChIP[[locus_idx]]$libName,
           levels = ChIPNamesPlot)
}
# y-axis limits shared by all five panels: the extreme 95% CI bounds
# across every locus class
ymin_ChIP <- min(unlist(lapply(summaryDFfeature_ChIP, function(d) d$CI_lower)))
ymax_ChIP <- max(unlist(lapply(summaryDFfeature_ChIP, function(d) d$CI_upper)))
# In-panel legend: one coloured text grob per variant class, stacked
# vertically downwards from legendPos in steps of 0.06 panel units
legendLabs <- lapply(seq_along(ChIPNamesPlot), function(lib_idx) {
  grobTree(textGrob(bquote(.(ChIPNamesPlot[lib_idx])),
                    x = legendPos[1],
                    y = legendPos[2] - ((lib_idx - 1) * 0.06),
                    just = "left",
                    gp = gpar(col = ChIPColours[lib_idx], fontsize = 18)))
})
# Plot average profiles with 95% CI ribbon
# Each panel draws one mean line + CI ribbon per variant class. The
# x-axis break positions divide the summary row count by the number of
# variant classes to recover the number of windows per profile; the
# CEN180/ranLoc panels use (upstream-1000)/binSize because their
# matrices were trimmed to 1-kb flanks on input.
## feature
summaryDFfeature <- summaryDFfeature_ChIP[[1]]
ggObj1_combined_ChIP <- ggplot(data = summaryDFfeature,
mapping = aes(x = winNo,
y = mean,
group = libName)
) +
geom_line(data = summaryDFfeature,
mapping = aes(colour = libName),
size = 1) +
scale_colour_manual(values = ChIPColours) +
geom_ribbon(data = summaryDFfeature,
mapping = aes(ymin = CI_lower,
ymax = CI_upper,
fill = libName),
alpha = 0.4) +
scale_fill_manual(values = ChIPColours) +
scale_y_continuous(limits = c(ymin_ChIP, ymax_ChIP),
labels = function(x) sprintf("%6.3f", x)) +
scale_x_discrete(breaks = c(1,
((upstream-1000)/binSize)+1,
(dim(summaryDFfeature_ChIP[[1]])[1]/length(ChIPNames))-((downstream-1000)/binSize),
dim(summaryDFfeature_ChIP[[1]])[1]/length(ChIPNames)),
labels = c(paste0("-", "1kb"),
featureStartLab,
featureEndLab,
paste0("+", "1kb"))) +
geom_vline(xintercept = c(((upstream-1000)/binSize)+1,
(dim(summaryDFfeature_ChIP[[1]])[1]/length(ChIPNames))-((downstream-1000)/binSize)),
linetype = "dashed",
size = 1) +
labs(x = "",
y = bquote(.(yLabPlot))) +
theme_bw() +
theme(
axis.ticks = element_line(size = 1.0, colour = "black"),
axis.ticks.length = unit(0.25, "cm"),
axis.text.x = element_text(size = 22, colour = "black"),
axis.text.y = element_text(size = 18, colour = "black", family = "Luxi Mono"),
axis.title = element_text(size = 30, colour = "black"),
legend.position = "none",
panel.grid = element_blank(),
panel.border = element_rect(size = 3.5, colour = "black"),
panel.background = element_blank(),
plot.margin = unit(c(0.3,1.2,0.0,0.3), "cm"),
plot.title = element_text(hjust = 0.5, size = 30)) +
ggtitle(bquote(.(featureNamePlot) ~ "(" * italic("n") ~ "=" ~
.(prettyNum(summaryDFfeature$n[1],
big.mark = ",", trim = T)) *
")"))
## ranLoc
# The random-loci panel additionally carries the in-panel colour legend
# (one annotation_custom() per variant-class grob).
summaryDFfeature <- summaryDFfeature_ChIP[[2]]
ggObj2_combined_ChIP <- ggplot(data = summaryDFfeature,
mapping = aes(x = winNo,
y = mean,
group = libName)
) +
geom_line(data = summaryDFfeature,
mapping = aes(colour = libName),
size = 1) +
scale_colour_manual(values = ChIPColours) +
geom_ribbon(data = summaryDFfeature,
mapping = aes(ymin = CI_lower,
ymax = CI_upper,
fill = libName),
alpha = 0.4) +
scale_fill_manual(values = ChIPColours) +
scale_y_continuous(limits = c(ymin_ChIP, ymax_ChIP),
labels = function(x) sprintf("%6.3f", x)) +
scale_x_discrete(breaks = c(1,
((upstream-1000)/binSize)+1,
(dim(summaryDFfeature_ChIP[[2]])[1]/length(ChIPNames))-((downstream-1000)/binSize),
dim(summaryDFfeature_ChIP[[2]])[1]/length(ChIPNames)),
labels = c(paste0("-", "1kb"),
featureStartLab,
featureEndLab,
paste0("+", "1kb"))) +
geom_vline(xintercept = c(((upstream-1000)/binSize)+1,
(dim(summaryDFfeature_ChIP[[2]])[1]/length(ChIPNames))-((downstream-1000)/binSize)),
linetype = "dashed",
size = 1) +
labs(x = "",
y = bquote(.(yLabPlot))) +
annotation_custom(legendLabs[[1]]) +
annotation_custom(legendLabs[[2]]) +
annotation_custom(legendLabs[[3]]) +
annotation_custom(legendLabs[[4]]) +
annotation_custom(legendLabs[[5]]) +
annotation_custom(legendLabs[[6]]) +
annotation_custom(legendLabs[[7]]) +
theme_bw() +
theme(
axis.ticks = element_line(size = 1.0, colour = "black"),
axis.ticks.length = unit(0.25, "cm"),
axis.text.x = element_text(size = 22, colour = "black"),
axis.text.y = element_text(size = 18, colour = "black", family = "Luxi Mono"),
axis.title = element_text(size = 30, colour = "black"),
legend.position = "none",
panel.grid = element_blank(),
panel.border = element_rect(size = 3.5, colour = "black"),
panel.background = element_blank(),
plot.margin = unit(c(0.3,1.2,0.0,0.3), "cm"),
plot.title = element_text(hjust = 0.5, size = 30)) +
ggtitle(bquote(.(ranLocNamePlot) ~ "(" * italic("n") ~ "=" ~
.(prettyNum(summaryDFfeature$n[1],
big.mark = ",", trim = T)) *
")"))
## gap
# The gap/Athila/soloLTR panels use full 2-kb flanks at Athila_binSize
# resolution, so the break arithmetic differs from panels 1-2.
summaryDFfeature <- summaryDFfeature_ChIP[[3]]
ggObj3_combined_ChIP <- ggplot(data = summaryDFfeature,
mapping = aes(x = winNo,
y = mean,
group = libName)
) +
geom_line(data = summaryDFfeature,
mapping = aes(colour = libName),
size = 1) +
scale_colour_manual(values = ChIPColours) +
geom_ribbon(data = summaryDFfeature,
mapping = aes(ymin = CI_lower,
ymax = CI_upper,
fill = libName),
alpha = 0.4) +
scale_fill_manual(values = ChIPColours) +
scale_y_continuous(limits = c(ymin_ChIP, ymax_ChIP),
labels = function(x) sprintf("%6.3f", x)) +
scale_x_discrete(breaks = c(1,
(upstream/Athila_binSize)+1,
(dim(summaryDFfeature_ChIP[[3]])[1]/length(ChIPNames))-(downstream/Athila_binSize),
dim(summaryDFfeature_ChIP[[3]])[1]/length(ChIPNames)),
labels = c(paste0("-", flankName),
featureStartLab,
featureEndLab,
paste0("+", flankName))) +
geom_vline(xintercept = c((upstream/Athila_binSize)+1,
(dim(summaryDFfeature_ChIP[[3]])[1]/length(ChIPNames))-(downstream/Athila_binSize)),
linetype = "dashed",
size = 1) +
labs(x = "",
y = bquote(.(yLabPlot))) +
theme_bw() +
theme(
axis.ticks = element_line(size = 1.0, colour = "black"),
axis.ticks.length = unit(0.25, "cm"),
axis.text.x = element_text(size = 22, colour = "black"),
axis.text.y = element_text(size = 18, colour = "black", family = "Luxi Mono"),
axis.title = element_text(size = 30, colour = "black"),
legend.position = "none",
panel.grid = element_blank(),
panel.border = element_rect(size = 3.5, colour = "black"),
panel.background = element_blank(),
plot.margin = unit(c(0.3,1.2,0.0,0.3), "cm"),
plot.title = element_text(hjust = 0.5, size = 30)) +
ggtitle(bquote(.(gapNamePlot) ~ "(" * italic("n") ~ "=" ~
.(prettyNum(summaryDFfeature$n[1],
big.mark = ",", trim = T)) *
")"))
## Athila
summaryDFfeature <- summaryDFfeature_ChIP[[4]]
ggObj4_combined_ChIP <- ggplot(data = summaryDFfeature,
mapping = aes(x = winNo,
y = mean,
group = libName)
) +
geom_line(data = summaryDFfeature,
mapping = aes(colour = libName),
size = 1) +
scale_colour_manual(values = ChIPColours) +
geom_ribbon(data = summaryDFfeature,
mapping = aes(ymin = CI_lower,
ymax = CI_upper,
fill = libName),
alpha = 0.4) +
scale_fill_manual(values = ChIPColours) +
scale_y_continuous(limits = c(ymin_ChIP, ymax_ChIP),
labels = function(x) sprintf("%6.3f", x)) +
scale_x_discrete(breaks = c(1,
(upstream/Athila_binSize)+1,
(dim(summaryDFfeature_ChIP[[4]])[1]/length(ChIPNames))-(downstream/Athila_binSize),
dim(summaryDFfeature_ChIP[[4]])[1]/length(ChIPNames)),
labels = c(paste0("-", flankName),
featureStartLab,
featureEndLab,
paste0("+", flankName))) +
geom_vline(xintercept = c((upstream/Athila_binSize)+1,
(dim(summaryDFfeature_ChIP[[4]])[1]/length(ChIPNames))-(downstream/Athila_binSize)),
linetype = "dashed",
size = 1) +
labs(x = "",
y = bquote(.(yLabPlot))) +
theme_bw() +
theme(
axis.ticks = element_line(size = 1.0, colour = "black"),
axis.ticks.length = unit(0.25, "cm"),
axis.text.x = element_text(size = 22, colour = "black"),
axis.text.y = element_text(size = 18, colour = "black", family = "Luxi Mono"),
axis.title = element_text(size = 30, colour = "black"),
legend.position = "none",
panel.grid = element_blank(),
panel.border = element_rect(size = 3.5, colour = "black"),
panel.background = element_blank(),
plot.margin = unit(c(0.3,1.2,0.0,0.3), "cm"),
plot.title = element_text(hjust = 0.5, size = 30)) +
ggtitle(bquote(.(AthilaNamePlot) ~ "(" * italic("n") ~ "=" ~
.(prettyNum(summaryDFfeature$n[1],
big.mark = ",", trim = T)) *
")"))
## soloLTR
summaryDFfeature <- summaryDFfeature_ChIP[[5]]
ggObj5_combined_ChIP <- ggplot(data = summaryDFfeature,
mapping = aes(x = winNo,
y = mean,
group = libName)
) +
geom_line(data = summaryDFfeature,
mapping = aes(colour = libName),
size = 1) +
scale_colour_manual(values = ChIPColours) +
geom_ribbon(data = summaryDFfeature,
mapping = aes(ymin = CI_lower,
ymax = CI_upper,
fill = libName),
alpha = 0.4) +
scale_fill_manual(values = ChIPColours) +
scale_y_continuous(limits = c(ymin_ChIP, ymax_ChIP),
labels = function(x) sprintf("%6.3f", x)) +
scale_x_discrete(breaks = c(1,
(upstream/Athila_binSize)+1,
(dim(summaryDFfeature_ChIP[[5]])[1]/length(ChIPNames))-(downstream/Athila_binSize),
dim(summaryDFfeature_ChIP[[5]])[1]/length(ChIPNames)),
labels = c(paste0("-", flankName),
featureStartLab,
featureEndLab,
paste0("+", flankName))) +
geom_vline(xintercept = c((upstream/Athila_binSize)+1,
(dim(summaryDFfeature_ChIP[[5]])[1]/length(ChIPNames))-(downstream/Athila_binSize)),
linetype = "dashed",
size = 1) +
labs(x = "",
y = bquote(.(yLabPlot))) +
theme_bw() +
theme(
axis.ticks = element_line(size = 1.0, colour = "black"),
axis.ticks.length = unit(0.25, "cm"),
axis.text.x = element_text(size = 22, colour = "black"),
axis.text.y = element_text(size = 18, colour = "black", family = "Luxi Mono"),
axis.title = element_text(size = 30, colour = "black"),
legend.position = "none",
panel.grid = element_blank(),
panel.border = element_rect(size = 3.5, colour = "black"),
panel.background = element_blank(),
plot.margin = unit(c(0.3,1.2,0.0,0.3), "cm"),
plot.title = element_text(hjust = 0.5, size = 30)) +
ggtitle(bquote(.(soloLTRNamePlot) ~ "(" * italic("n") ~ "=" ~
.(prettyNum(summaryDFfeature$n[1],
big.mark = ",", trim = T)) *
")"))
# Arrange the five panels side by side (one column each) and save as a
# single wide PDF under plotDir
ggObjGA_combined <- grid.arrange(grobs = list(
ggObj1_combined_ChIP,
ggObj2_combined_ChIP,
ggObj3_combined_ChIP,
ggObj4_combined_ChIP,
ggObj5_combined_ChIP
),
layout_matrix = cbind(
1,
2,
3,
4,
5
))
ggsave(paste0(plotDir,
"CEN180_variants_vs_CEN180_consensus_",
# paste0(ChIPNames, collapse = "_"),
"_avgProfiles_around",
"_CEN180_CENranLoc_CENgap_CENAthila_CENsoloLTR_in_T2T_Col_",
paste0(chrName, collapse = "_"), ".pdf"),
plot = ggObjGA_combined,
height = 6.5, width = 7*5, limitsize = FALSE)
|
# Auto-extracted example code from the tidystats package's report.Rd
# help page; unrelated to the analysis scripts in this file.
library(tidystats)
### Name: report
### Title: Report function
### Aliases: report
### ** Examples
# Read in a list of results
results <- read_stats(system.file("results.csv", package = "tidystats"))
# Set the list as the default list
options(tidystats_list = results)
# Example: t-test
report("t_test_one_sample")
report("t_test_welch")
# Example: correlation
report("correlation_pearson")
report("correlation_spearman")
# Example: ANOVA
report("aov_two_way", term = "condition")
report("aov_two_way", term = "sex")
# Example: Linear models
report("lm_simple", term = "conditionmortality salience")
report("lm_simple", term_nr = 2)
report("lm_simple", group = "model")
| /data/genthat_extracted_code/tidystats/examples/report.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 683 | r | library(tidystats)
# Duplicate copy of the auto-extracted tidystats report.Rd example
# (dataset-join artifact); assumes library(tidystats) is already loaded.
### Name: report
### Title: Report function
### Aliases: report
### ** Examples
# Read in a list of results
results <- read_stats(system.file("results.csv", package = "tidystats"))
# Set the list as the default list
options(tidystats_list = results)
# Example: t-test
report("t_test_one_sample")
report("t_test_welch")
# Example: correlation
report("correlation_pearson")
report("correlation_spearman")
# Example: ANOVA
report("aov_two_way", term = "condition")
report("aov_two_way", term = "sex")
# Example: Linear models
report("lm_simple", term = "conditionmortality salience")
report("lm_simple", term_nr = 2)
report("lm_simple", group = "model")
|
# Analysis of ARM results
library(ggplot2)
library(gridExtra)
library(ggpubr)
#library(wesanderson)
library("RColorBrewer")
### CONSTANTS ###
# NOTE(review): machine-specific absolute path, and setwd() in a script
# is discouraged -- consider passing the results directory as an
# argument or using here::here()
workspace = "/Users/marcosmr/tmp/ARM_resources/EVALUATION/results"
setwd(workspace)
# Plot colours: color1 = baseline (orange), color2 = recommender (dark
# blue); color3 is defined but unused in the visible code
color1 <- "#DB6D00"
color2 <- "#070092"
color3 <- "#ffff99" # yellow
# Result CSVs for the four train/test combinations, free-text values
file_NCBItoNCBI <- paste(workspace, "/free_text/results_trainNCBI_testNCBI_2018-04-16_08_57_20.csv", sep="")
file_NCBItoEBI <- paste(workspace, "/free_text/results_trainNCBI_testEBI_2018-04-16_19_11_51.csv", sep="")
file_EBItoEBI <- paste(workspace, "/free_text/results_trainEBI_testEBI_2018-04-16_23_59_03.csv", sep="")
file_EBItoNCBI <- paste(workspace, "/free_text/results_trainEBI_testNCBI_2018-04-17_06_53_04.csv", sep="")
# Result CSVs for ontology-annotated values
file_NCBItoNCBI_annotated <- paste(workspace, "/annotated/results_trainNCBI_testNCBI_annotated_2018-04-17_09_13_57.csv", sep="")
file_NCBItoEBI_annotated <- paste(workspace, "/annotated/results_trainNCBI_testEBI_annotated_2018-04-17_18_06_15.csv", sep="")
file_EBItoEBI_annotated <- paste(workspace, "/annotated/results_trainEBI_testEBI_annotated_2018-04-17_20_37_23.csv", sep="")
file_EBItoNCBI_annotated <- paste(workspace, "/annotated/results_trainEBI_testNCBI_annotated_2018-04-17_22_43_26.csv", sep="")
# Result CSVs for annotated values with cross-repository mappings
# NOTE(review): the EBItoEBI path below ends in ".csv.csv" -- verify the
# file name on disk
file_NCBItoNCBI_annotated_mappings <- paste(workspace, "/annotated_mappings/results_trainNCBI_testNCBI_annotated_mappings_2018-04-18_05_30_09.csv", sep="")
file_NCBItoEBI_annotated_mappings <- paste(workspace, "/annotated_mappings/results_trainNCBI_testEBI_annotated_mappings_2018-04-18_07_40_04.csv", sep="")
file_EBItoEBI_annotated_mappings <- paste(workspace, "/annotated_mappings/results_trainEBI_testEBI_annotated_mappings_2018-04-18_03_31_33.csv.csv", sep="")
file_EBItoNCBI_annotated_mappings <- paste(workspace, "/annotated_mappings/results_trainEBI_testNCBI_annotated_mappings_2018-04-18_01_11_03.csv", sep="")
### The following inputs are just for testing
# file_NCBItoNCBI <- paste(workspace, "/mini/results_trainNCBI_testNCBI_2018-04-16_08_57_20.csv", sep="")
# file_NCBItoEBI <- paste(workspace, "/mini/results_trainNCBI_testEBI_2018-04-16_19_11_51.csv", sep="")
# file_EBItoEBI <- paste(workspace, "/mini/results_trainEBI_testEBI_2018-04-16_23_59_03.csv", sep="")
# file_EBItoNCBI <- paste(workspace, "/mini/results_trainEBI_testNCBI_2018-04-17_06_53_04.csv", sep="")
#
# file_NCBItoNCBI_annotated <- paste(workspace, "/mini/results_trainNCBI_testNCBI_annotated_2018-04-17_09_13_57.csv", sep="")
# file_NCBItoEBI_annotated <- paste(workspace, "/mini/results_trainNCBI_testEBI_annotated_2018-04-17_18_06_15.csv", sep="")
# file_EBItoEBI_annotated <- paste(workspace, "/mini/results_trainEBI_testEBI_annotated_2018-04-17_20_37_23.csv", sep="")
# file_EBItoNCBI_annotated <- paste(workspace, "/mini/results_trainEBI_testNCBI_annotated_2018-04-17_22_43_26.csv", sep="")
#
# file_NCBItoNCBI_annotated_mappings <- paste(workspace, "/mini/results_trainNCBI_testNCBI_annotated_mappings_2018-04-18_05_30_09.csv", sep="")
# file_NCBItoEBI_annotated_mappings <- paste(workspace, "/mini/results_trainEBI_testEBI_annotated_mappings_2018-04-18_03_31_33.csv", sep="")
# file_EBItoEBI_annotated_mappings <- paste(workspace, "/mini/results_trainEBI_testEBI_annotated_mappings_2018-04-18_03_31_33.csv", sep="")
# file_EBItoNCBI_annotated_mappings <- paste(workspace, "/mini/results_trainEBI_testNCBI_annotated_mappings_2018-04-18_01_11_03.csv", sep="")
# Aggregation by no_populated_fields
# Mean reciprocal rank (MRR) aggregated by number of populated fields.
#
# df: evaluation results with columns 'populated_fields_size' and
#     'experiment', plus one reciprocal-rank column per method.
# reciprocal_rank_vr_column: name of the recommender's reciprocal-rank column.
# reciprocal_rank_baseline_column: name of the baseline's reciprocal-rank column.
# max_fields: rows with this many or more populated fields are dropped;
#     the default of 5 preserves the previously hard-coded cut-off.
#
# Returns a data.frame with columns no_populated_fields, mrr, method,
# experiment (recommender rows first, then baseline rows).
aggregate_data_1 <- function(df, reciprocal_rank_vr_column, reciprocal_rank_baseline_column, max_fields = 5) {
  # Shared aggregation: mean reciprocal rank per no_populated_fields,
  # labelled with the method that produced the ranks
  mrr_by_size <- function(rank_column, method_label) {
    agg <- aggregate(list(mrr = df[[rank_column]]),
                     by = list(no_populated_fields = df$populated_fields_size),
                     FUN = mean)
    agg$method <- method_label
    agg
  }
  agg_final <- rbind(mrr_by_size(reciprocal_rank_vr_column, "recommender"),
                     mrr_by_size(reciprocal_rank_baseline_column, "baseline"))
  # Keep only sparsely populated records
  agg_final <- agg_final[agg_final$no_populated_fields < max_fields, ]
  agg_final$experiment <- df$experiment[1]
  agg_final
}
# Aggregation by no_populated_fields
# Mean reciprocal rank aggregated by number of populated fields, with
# configurable method labels and an optional baseline.
#
# df: evaluation results (columns 'populated_fields_size', 'experiment',
#     plus reciprocal-rank columns).
# reciprocal_rank_vr_column: recommender reciprocal-rank column name.
# reciprocal_rank_baseline_column: baseline reciprocal-rank column name,
#     or NULL to aggregate the recommender only.
# recom_method_name / baseline_method_name: labels written to the
#     'method' column of the output.
aggregate_data_1_2 <- function(df, reciprocal_rank_vr_column, reciprocal_rank_baseline_column, recom_method_name="recommender", baseline_method_name="baseline") {
  # aggregation for the recommender method
  agg1 <- aggregate(list(mrr = df[[reciprocal_rank_vr_column]]),
                    by = list(no_populated_fields = df$populated_fields_size),
                    FUN = mean)
  agg1$method <- recom_method_name
  if (!is.null(reciprocal_rank_baseline_column)) {
    # aggregation for the baseline method
    agg2 <- aggregate(list(mrr = df[[reciprocal_rank_baseline_column]]),
                      by = list(no_populated_fields = df$populated_fields_size),
                      FUN = mean)
    # BUG FIX: the baseline label was hard-coded to "baseline", silently
    # ignoring the baseline_method_name argument; use the argument (its
    # default preserves the old behaviour)
    agg2$method <- baseline_method_name
    # final aggregation
    agg_final <- rbind(agg1, agg2)
  } else {
    agg_final <- agg1
  }
  # Limit it to no_populated_fields < 5
  agg_final <- agg_final[agg_final$no_populated_fields < 5, ]
  agg_final$experiment <- df$experiment[1]
  agg_final
}
# Aggregation by target_field and no_populated_fields
# Mean reciprocal rank aggregated by target field and by number of
# populated fields, for the recommender and the baseline.
#
# df: evaluation results with columns 'target_field',
#     'populated_fields_size' and 'experiment', plus the two
#     reciprocal-rank columns named by the remaining arguments.
#
# Returns a data.frame with columns field, no_populated_fields, mrr,
# method, experiment, limited to rows with fewer than 5 populated
# fields (recommender rows first, then baseline rows).
aggregate_data_2 <- function(df, reciprocal_rank_vr_column, reciprocal_rank_baseline_column) {
  # Mean reciprocal rank per (target field, populated-field count) for
  # one ranking column, tagged with its method label
  summarise_by_field <- function(rank_column, method_label) {
    agg <- aggregate(list(mrr = df[[rank_column]]),
                     by = list(field = df$target_field,
                               no_populated_fields = df$populated_fields_size),
                     FUN = mean)
    agg$method <- method_label
    agg
  }
  combined <- rbind(summarise_by_field(reciprocal_rank_vr_column, "recommender"),
                    summarise_by_field(reciprocal_rank_baseline_column, "baseline"))
  # Keep only sparsely populated records (< 5 populated fields)
  combined <- combined[combined$no_populated_fields < 5, ]
  combined$experiment <- df$experiment[1]
  combined
}
# Generate MRR plot (Recommender vs Baseline)
# generate_plot <- function(df, title="title"){
# plot <- ggplot(data=df, aes(x=no_populated_fields, y=mrr, group=method, colour=method)) +
# geom_line() + geom_point() + geom_text(aes(label=sprintf("%0.2f", round(mrr, digits = 2))), vjust=2, show.legend = FALSE) +
# ylim(0,1) + ggtitle(title) + xlab("No. populated fields") + ylab("Mean Reciprocal Rank")
# # + scale_color_brewer(palette="Dark2")
# return(plot)
# }
# Line plot of mean reciprocal rank vs number of populated fields, one
# solid coloured line per method (recommender vs baseline), with point
# labels rounded to two decimals.
# df must have columns no_populated_fields, mrr and method; relies on
# the global colour constants color1/color2.
generate_plot <- function(df, title="title"){
plot <- ggplot(data=df, aes(x=no_populated_fields, y=mrr, group=method, colour=method)) +
geom_line(aes(linetype=method), size=0.7) +
scale_linetype_manual(values=c("solid", "solid")) +
scale_color_manual(values=c(color1, color2)) +
geom_point() + geom_text(size=2.5, aes(label=sprintf("%0.2f", round(mrr, digits = 2))), vjust=2, show.legend = FALSE) +
ylim(0,1) + ggtitle(title) + xlab("No. populated fields") + ylab("Mean Reciprocal Rank") +
theme(text = element_text(size=8))
# + scale_color_brewer(palette="Dark2")
return(plot)
}
# Variant of generate_plot for comparing up to three methods drawn in
# the same colour (color2) but distinguished by line type
# (dotted/dashed/solid).
# df must have columns no_populated_fields, mrr and method.
generate_plot_2 <- function(df, title="title"){
plot <- ggplot(data=df, aes(x=no_populated_fields, y=mrr, group=method, colour=method)) +
geom_line(aes(linetype=method), size=0.7) +
scale_linetype_manual(values=c("dotted", "dashed", "solid")) +
scale_color_manual(values=c(color2, color2, color2)) +
geom_point() + geom_text(size=2.5, aes(label=sprintf("%0.2f", round(mrr, digits = 2))), vjust=2, show.legend = FALSE) +
ylim(0,1) + ggtitle(title) + xlab("No. populated fields") + ylab("Mean Reciprocal Rank") +
theme(text = element_text(size=8))
# + scale_color_brewer(palette="Dark2")
return(plot)
}
# Generate MRR plot (Recommender vs Baseline) per field
# generate_plot_field <- function(df, title="title"){
# plot <- ggplot(data=df, aes(x=field, y=mrr, fill=method)) + geom_bar(stat="identity", position=position_dodge()) +
# ylim(0,1) + ggtitle(title) + xlab("Field") + ylab("Mean Reciprocal Rank")
# return(plot)
# }
# Grouped bar chart of mean reciprocal rank per target field, with one
# bar per method (dodged side by side); colours come from the global
# constants color1/color2.
# df must have columns field, mrr and method.
generate_plot_field <- function(df, title="title"){
plot <- ggplot(data=df, aes(x=field, y=mrr, fill=method)) + geom_bar(stat="identity", position=position_dodge()) +
scale_fill_manual(values=c(color1, color2)) +
ylim(0,1) + ggtitle(title) + xlab("Field") + ylab("Mean Reciprocal Rank") +
theme(text = element_text(size=8))
return(plot)
}
setClass("EvaluationSet", representation(datasets = "vector", description = "character"))
# Generate and export the two comparison figures for one EvaluationSet:
#   1) MRR of recommender vs baseline, by number of populated fields
#      (2x2 grid, one panel per train/test combination).
#   2) MRR of recommender vs baseline, by target field (2x2 grid).
# Side effects: prints each figure to the active graphics device and copies it
# to a timestamped PDF in the current working directory.
#
# evaluation_set:                  EvaluationSet holding the four result CSV
#                                  paths (NCBItoNCBI, NCBItoEBI, EBItoEBI,
#                                  EBItoNCBI — in that order).
# reciprocal_rank_vr_column:       name of the recommender reciprocal-rank column.
# reciprocal_rank_baseline_column: name of the baseline reciprocal-rank column.
generate_all_plots <- function(evaluation_set, reciprocal_rank_vr_column, reciprocal_rank_baseline_column) {
  # Copy the figure currently on screen to "<prefix>_<description>_<timestamp>.pdf".
  export_current_plot <- function(prefix, description) {
    file_name <- paste0(prefix, "_", gsub(" ", "_", description), "_",
                        format(Sys.time(), "%Y_%m_%d_%H_%M_%S"), ".pdf")
    dev.copy(pdf, file_name)
    dev.off()
  }
  data_NCBItoNCBI <- read.csv(evaluation_set@datasets[1])
  data_NCBItoEBI <- read.csv(evaluation_set@datasets[2])
  data_EBItoEBI <- read.csv(evaluation_set@datasets[3])
  data_EBItoNCBI <- read.csv(evaluation_set@datasets[4])
  description <- evaluation_set@description
  # Remove the 'treatment' field from the analysis.
  # NOTE(review): only the NCBItoNCBI dataset is filtered — confirm whether the
  # other three experiments should exclude 'treatment' as well.
  data_NCBItoNCBI <- data_NCBItoNCBI[data_NCBItoNCBI$target_field != "treatment", ]
  data_NCBItoNCBI$experiment <- "NCBItoNCBI"
  data_NCBItoEBI$experiment <- "NCBItoEBI"
  data_EBItoEBI$experiment <- "EBItoEBI"
  data_EBItoNCBI$experiment <- "EBItoNCBI"
  # 1) Recommender vs Baseline, by number of populated fields (2x2 grid)
  p1 <- generate_plot(aggregate_data_1(data_NCBItoNCBI, reciprocal_rank_vr_column, reciprocal_rank_baseline_column), "Training: NCBI; Testing: NCBI")
  p2 <- generate_plot(aggregate_data_1(data_NCBItoEBI, reciprocal_rank_vr_column, reciprocal_rank_baseline_column), "Training: NCBI; Testing: EBI")
  p3 <- generate_plot(aggregate_data_1(data_EBItoEBI, reciprocal_rank_vr_column, reciprocal_rank_baseline_column), "Training: EBI; Testing: EBI")
  p4 <- generate_plot(aggregate_data_1(data_EBItoNCBI, reciprocal_rank_vr_column, reciprocal_rank_baseline_column), "Training: EBI; Testing: NCBI")
  fig1 <- ggarrange(p1, p2, p3, p4, ncol = 2, nrow = 2, common.legend = TRUE, legend = "bottom")
  desc_text <- paste0("Metadata Recommender vs Baseline (", description, ")")
  fig1_annotated <- annotate_figure(fig1, top = text_grob(label = desc_text, color = "black", face = "bold", size = 11))
  print(fig1_annotated)
  export_current_plot("plot1", description)
  # 2) Recommender vs Baseline, per target field (2x2 grid)
  p1 <- generate_plot_field(aggregate_data_2(data_NCBItoNCBI, reciprocal_rank_vr_column, reciprocal_rank_baseline_column), "Training: NCBI; Testing: NCBI")
  p2 <- generate_plot_field(aggregate_data_2(data_NCBItoEBI, reciprocal_rank_vr_column, reciprocal_rank_baseline_column), "Training: NCBI; Testing: EBI")
  p3 <- generate_plot_field(aggregate_data_2(data_EBItoEBI, reciprocal_rank_vr_column, reciprocal_rank_baseline_column), "Training: EBI; Testing: EBI")
  p4 <- generate_plot_field(aggregate_data_2(data_EBItoNCBI, reciprocal_rank_vr_column, reciprocal_rank_baseline_column), "Training: EBI; Testing: NCBI")
  fig2 <- ggarrange(p1, p2, p3, p4, ncol = 2, nrow = 2, common.legend = TRUE, legend = "bottom")
  desc_text <- paste0("Metadata Recommender vs Baseline by field (", description, ")")
  fig2_annotated <- annotate_figure(fig2, top = text_grob(label = desc_text, color = "black", face = "bold", size = 11))
  print(fig2_annotated)
  export_current_plot("plot2", description)
}
# Generate and export the three-way comparison figure overlaying the free-text,
# annotated, and annotated+mappings recommender results, one panel per
# train/test combination (2x2 grid).
# Side effects: prints the figure and writes it to a timestamped PDF.
#
# evaluation_set1/2/3:             EvaluationSets for free text, annotated, and
#                                  annotated+mappings runs respectively.
# reciprocal_rank_vr_column:       name of the recommender reciprocal-rank column.
# reciprocal_rank_baseline_column: unused here (baseline series are not drawn);
#                                  kept for signature compatibility with
#                                  generate_all_plots.
generate_all_plots_overlapped <- function(evaluation_set1, evaluation_set2, evaluation_set3, reciprocal_rank_vr_column, reciprocal_rank_baseline_column) {
  # Read the four experiment CSVs of one EvaluationSet and tag each data frame
  # with its experiment label. Returns a named list of data frames.
  load_experiments <- function(evaluation_set) {
    labels <- c("NCBItoNCBI", "NCBItoEBI", "EBItoEBI", "EBItoNCBI")
    experiments <- list()
    for (i in seq_along(labels)) {
      df <- read.csv(evaluation_set@datasets[i])
      df$experiment <- labels[i]
      experiments[[labels[i]]] <- df
    }
    experiments
  }
  # Aggregate one experiment from the three sets into a single long-format
  # data frame, with a distinct method label per set (no baseline series).
  combine_experiment <- function(df1, df2, df3) {
    rbind(
      aggregate_data_1_2(df1, reciprocal_rank_vr_column, NULL, recom_method_name = "recommender", baseline_method_name = "baseline"),
      aggregate_data_1_2(df2, reciprocal_rank_vr_column, NULL, recom_method_name = "recommender_annotated", baseline_method_name = "baseline_annotated"),
      aggregate_data_1_2(df3, reciprocal_rank_vr_column, NULL, recom_method_name = "recommender_annotated_mappings", baseline_method_name = "baseline_annotated_mappings")
    )
  }
  set1 <- load_experiments(evaluation_set1)
  set2 <- load_experiments(evaluation_set2)
  set3 <- load_experiments(evaluation_set3)
  data_p1 <- combine_experiment(set1$NCBItoNCBI, set2$NCBItoNCBI, set3$NCBItoNCBI)
  data_p2 <- combine_experiment(set1$NCBItoEBI, set2$NCBItoEBI, set3$NCBItoEBI)
  data_p3 <- combine_experiment(set1$EBItoEBI, set2$EBItoEBI, set3$EBItoEBI)
  data_p4 <- combine_experiment(set1$EBItoNCBI, set2$EBItoNCBI, set3$EBItoNCBI)
  # Recommender (text vs annotated vs annotated_mappings) 2x2 plots
  p1 <- generate_plot_2(data_p1, "Training: NCBI; Testing: NCBI")
  p2 <- generate_plot_2(data_p2, "Training: NCBI; Testing: EBI")
  p3 <- generate_plot_2(data_p3, "Training: EBI; Testing: EBI")
  p4 <- generate_plot_2(data_p4, "Training: EBI; Testing: NCBI")
  fig1 <- ggarrange(p1, p2, p3, p4, ncol = 2, nrow = 2, common.legend = TRUE, legend = "bottom")
  # 'description' feeds only the exported file name; the on-figure title is fixed.
  description <- paste0(evaluation_set1@description, " vs ", evaluation_set2@description)
  desc_text <- "Metadata Recommender (text vs annotated vs annotated_mappings)"
  fig1_annotated <- annotate_figure(fig1, top = text_grob(label = desc_text, color = "black", face = "bold", size = 11))
  print(fig1_annotated)
  # Export plot
  dev.copy(pdf, paste0("plot3_", gsub(" ", "_", description), "_", format(Sys.time(), "%Y_%m_%d_%H_%M_%S"), ".pdf"))
  dev.off()
}
### MAIN BODY ###
# Build the three evaluation sets (free text, annotated, annotated+mappings),
# each pointing at its four train/test result CSVs.
evaluation_set_1 <- new("EvaluationSet", datasets=c(file_NCBItoNCBI, file_NCBItoEBI, file_EBItoEBI, file_EBItoNCBI), description="free text")
evaluation_set_2 <- new("EvaluationSet", datasets=c(file_NCBItoNCBI_annotated, file_NCBItoEBI_annotated, file_EBItoEBI_annotated, file_EBItoNCBI_annotated), description="annotated")
evaluation_set_3 <- new("EvaluationSet", datasets=c(file_NCBItoNCBI_annotated_mappings, file_NCBItoEBI_annotated_mappings, file_EBItoEBI_annotated_mappings, file_EBItoNCBI_annotated_mappings), description="annotated-mappings")
evaluation_sets = c(evaluation_set_1, evaluation_set_2, evaluation_set_3)
# NOTE(review): the next line overrides the previous one, so per-set plots are
# generated for evaluation_set_1 only — presumably a debugging leftover; remove
# it to process all three sets.
evaluation_sets = c(evaluation_set_1)
# Per-set figures (plot1/plot2 PDFs), using the top-5 reciprocal-rank columns.
for (evaluation_set in evaluation_sets){
generate_all_plots(evaluation_set, 'RR_top5_vr', 'RR_top5_baseline')
}
# Three-way overlay figure (plot3 PDF) across all evaluation sets.
generate_all_plots_overlapped(evaluation_set_1, evaluation_set_2, evaluation_set_3, 'RR_top5_vr', 'RR_top5_baseline')
################################
# hist(data_NCBItoNCBI$populated_fields_size)
# hist(data_NCBItoEBI$populated_fields_size)
# ---- (removed dataset-join artifact: metadata row for
#       scripts/python/archive/2018/ARM_evaluation/R_scripts/arm_analysis-v4.R) ----
# Analysis of ARM results
library(ggplot2)
library(gridExtra)
library(ggpubr)
#library(wesanderson)
library("RColorBrewer")
### CONSTANTS ###
# Root directory holding the evaluation result CSVs; exported PDFs are written
# here as well (via the setwd below).
workspace <- "/Users/marcosmr/tmp/ARM_resources/EVALUATION/results"
setwd(workspace)
# Series colours shared by all plots.
color1 <- "#DB6D00"
color2 <- "#070092"
color3 <- "#ffff99" # yellow
# Evaluation results: free-text metadata values.
file_NCBItoNCBI <- paste0(workspace, "/free_text/results_trainNCBI_testNCBI_2018-04-16_08_57_20.csv")
file_NCBItoEBI <- paste0(workspace, "/free_text/results_trainNCBI_testEBI_2018-04-16_19_11_51.csv")
file_EBItoEBI <- paste0(workspace, "/free_text/results_trainEBI_testEBI_2018-04-16_23_59_03.csv")
file_EBItoNCBI <- paste0(workspace, "/free_text/results_trainEBI_testNCBI_2018-04-17_06_53_04.csv")
# Evaluation results: ontology-annotated metadata values.
file_NCBItoNCBI_annotated <- paste0(workspace, "/annotated/results_trainNCBI_testNCBI_annotated_2018-04-17_09_13_57.csv")
file_NCBItoEBI_annotated <- paste0(workspace, "/annotated/results_trainNCBI_testEBI_annotated_2018-04-17_18_06_15.csv")
file_EBItoEBI_annotated <- paste0(workspace, "/annotated/results_trainEBI_testEBI_annotated_2018-04-17_20_37_23.csv")
file_EBItoNCBI_annotated <- paste0(workspace, "/annotated/results_trainEBI_testNCBI_annotated_2018-04-17_22_43_26.csv")
# Evaluation results: annotated values plus cross-ontology mappings.
file_NCBItoNCBI_annotated_mappings <- paste0(workspace, "/annotated_mappings/results_trainNCBI_testNCBI_annotated_mappings_2018-04-18_05_30_09.csv")
file_NCBItoEBI_annotated_mappings <- paste0(workspace, "/annotated_mappings/results_trainNCBI_testEBI_annotated_mappings_2018-04-18_07_40_04.csv")
# NOTE(review): the double ".csv.csv" extension below looks like a typo in the
# actual file name — verify against the file on disk before "fixing" it.
file_EBItoEBI_annotated_mappings <- paste0(workspace, "/annotated_mappings/results_trainEBI_testEBI_annotated_mappings_2018-04-18_03_31_33.csv.csv")
file_EBItoNCBI_annotated_mappings <- paste0(workspace, "/annotated_mappings/results_trainEBI_testNCBI_annotated_mappings_2018-04-18_01_11_03.csv")
### The following inputs are just for testing
# file_NCBItoNCBI <- paste(workspace, "/mini/results_trainNCBI_testNCBI_2018-04-16_08_57_20.csv", sep="")
# file_NCBItoEBI <- paste(workspace, "/mini/results_trainNCBI_testEBI_2018-04-16_19_11_51.csv", sep="")
# file_EBItoEBI <- paste(workspace, "/mini/results_trainEBI_testEBI_2018-04-16_23_59_03.csv", sep="")
# file_EBItoNCBI <- paste(workspace, "/mini/results_trainEBI_testNCBI_2018-04-17_06_53_04.csv", sep="")
#
# file_NCBItoNCBI_annotated <- paste(workspace, "/mini/results_trainNCBI_testNCBI_annotated_2018-04-17_09_13_57.csv", sep="")
# file_NCBItoEBI_annotated <- paste(workspace, "/mini/results_trainNCBI_testEBI_annotated_2018-04-17_18_06_15.csv", sep="")
# file_EBItoEBI_annotated <- paste(workspace, "/mini/results_trainEBI_testEBI_annotated_2018-04-17_20_37_23.csv", sep="")
# file_EBItoNCBI_annotated <- paste(workspace, "/mini/results_trainEBI_testNCBI_annotated_2018-04-17_22_43_26.csv", sep="")
#
# file_NCBItoNCBI_annotated_mappings <- paste(workspace, "/mini/results_trainNCBI_testNCBI_annotated_mappings_2018-04-18_05_30_09.csv", sep="")
# file_NCBItoEBI_annotated_mappings <- paste(workspace, "/mini/results_trainEBI_testEBI_annotated_mappings_2018-04-18_03_31_33.csv", sep="")
# file_EBItoEBI_annotated_mappings <- paste(workspace, "/mini/results_trainEBI_testEBI_annotated_mappings_2018-04-18_03_31_33.csv", sep="")
# file_EBItoNCBI_annotated_mappings <- paste(workspace, "/mini/results_trainEBI_testNCBI_annotated_mappings_2018-04-18_01_11_03.csv", sep="")
### FUNCTION DEFINITIONS ###
# Mean reciprocal rank (MRR) of recommender and baseline, aggregated by the
# number of fields that were populated when the recommendation was made.
#
# df:                              result data frame; must contain the two
#                                  reciprocal-rank columns plus
#                                  'populated_fields_size' and 'experiment'.
# reciprocal_rank_vr_column:       name of the recommender reciprocal-rank column.
# reciprocal_rank_baseline_column: name of the baseline reciprocal-rank column.
# max_populated_fields:            keep only rows with fewer populated fields
#                                  than this bound (default 5, the historical
#                                  hard-coded cutoff — backward compatible).
# Returns a long-format data frame: no_populated_fields, mrr, method, experiment.
aggregate_data_1 <- function(df, reciprocal_rank_vr_column, reciprocal_rank_baseline_column, max_populated_fields = 5) {
  # MRR per group for the 'recommender' method
  agg1 <- aggregate(list(mrr = df[[reciprocal_rank_vr_column]]),
                    by = list(no_populated_fields = df$populated_fields_size), FUN = mean)
  agg1$method <- "recommender"
  # MRR per group for the 'baseline' method
  agg2 <- aggregate(list(mrr = df[[reciprocal_rank_baseline_column]]),
                    by = list(no_populated_fields = df$populated_fields_size), FUN = mean)
  agg2$method <- "baseline"
  agg_final <- rbind(agg1, agg2)
  # Restrict to sparsely-populated records.
  agg_final <- agg_final[agg_final$no_populated_fields < max_populated_fields, ]
  agg_final$experiment <- df$experiment[1]
  return(agg_final)
}
# Mean reciprocal rank aggregated by number of populated fields, with
# configurable method labels (used by the overlapped three-way comparison).
#
# df:                              result data frame with 'populated_fields_size'
#                                  and 'experiment' columns.
# reciprocal_rank_vr_column:       recommender reciprocal-rank column name.
# reciprocal_rank_baseline_column: baseline reciprocal-rank column name, or NULL
#                                  to skip the baseline aggregation entirely.
# recom_method_name:               label stored in 'method' for recommender rows.
# baseline_method_name:            label stored in 'method' for baseline rows.
# Returns a long-format data frame: no_populated_fields, mrr, method, experiment.
aggregate_data_1_2 <- function(df, reciprocal_rank_vr_column, reciprocal_rank_baseline_column, recom_method_name="recommender", baseline_method_name="baseline") {
  # MRR per group for the recommender method
  agg1 <- aggregate(list(mrr = df[[reciprocal_rank_vr_column]]),
                    by = list(no_populated_fields = df$populated_fields_size), FUN = mean)
  agg1$method <- recom_method_name
  if (!is.null(reciprocal_rank_baseline_column)) {
    # MRR per group for the baseline method
    agg2 <- aggregate(list(mrr = df[[reciprocal_rank_baseline_column]]),
                      by = list(no_populated_fields = df$populated_fields_size), FUN = mean)
    # BUG FIX: this label was hard-coded to "baseline", silently ignoring the
    # baseline_method_name argument.
    agg2$method <- baseline_method_name
    agg_final <- rbind(agg1, agg2)
  } else {
    agg_final <- agg1
  }
  # Restrict to sparsely-populated records (< 5 fields).
  agg_final <- agg_final[agg_final$no_populated_fields < 5, ]
  agg_final$experiment <- df$experiment[1]
  return(agg_final)
}
# MRR per (target field, populated-field count) for both methods, restricted
# to records with fewer than 5 populated fields.
# Returns a long-format data frame: field, no_populated_fields, mrr, method,
# experiment.
aggregate_data_2 <- function(df, reciprocal_rank_vr_column, reciprocal_rank_baseline_column) {
  grouping <- list(field = df$target_field,
                   no_populated_fields = df$populated_fields_size)
  # Group-wise MRR for one reciprocal-rank column.
  mrr_by_group <- function(column_name) {
    aggregate(list(mrr = df[[column_name]]), by = grouping, FUN = mean)
  }
  recommender_rows <- mrr_by_group(reciprocal_rank_vr_column)
  recommender_rows$method <- "recommender"
  baseline_rows <- mrr_by_group(reciprocal_rank_baseline_column)
  baseline_rows$method <- "baseline"
  combined <- rbind(recommender_rows, baseline_rows)
  combined <- combined[combined$no_populated_fields < 5, ]
  combined$experiment <- df$experiment[1]
  combined
}
# Generate MRR plot (Recommender vs Baseline)
# generate_plot <- function(df, title="title"){
# plot <- ggplot(data=df, aes(x=no_populated_fields, y=mrr, group=method, colour=method)) +
# geom_line() + geom_point() + geom_text(aes(label=sprintf("%0.2f", round(mrr, digits = 2))), vjust=2, show.legend = FALSE) +
# ylim(0,1) + ggtitle(title) + xlab("No. populated fields") + ylab("Mean Reciprocal Rank")
# # + scale_color_brewer(palette="Dark2")
# return(plot)
# }
# Line plot of MRR vs. number of populated fields, one solid line per method
# (recommender vs baseline). Line colours come from the globals color1/color2,
# matched positionally to the two methods. Returns a ggplot object (not
# printed).
generate_plot <- function(df, title="title"){
plot <- ggplot(data=df, aes(x=no_populated_fields, y=mrr, group=method, colour=method)) +
geom_line(aes(linetype=method), size=0.7) +
scale_linetype_manual(values=c("solid", "solid")) +
scale_color_manual(values=c(color1, color2)) +
# Label every point with its MRR rounded to two decimals.
geom_point() + geom_text(size=2.5, aes(label=sprintf("%0.2f", round(mrr, digits = 2))), vjust=2, show.legend = FALSE) +
ylim(0,1) + ggtitle(title) + xlab("No. populated fields") + ylab("Mean Reciprocal Rank") +
theme(text = element_text(size=8))
# + scale_color_brewer(palette="Dark2")
return(plot)
}
# Line plot of MRR vs. number of populated fields for the three-way comparison
# (free text / annotated / annotated+mappings). All series share colour color2
# and are told apart by linetype (dotted/dashed/solid, matched positionally to
# the three methods). Returns a ggplot object (not printed).
generate_plot_2 <- function(df, title = "title") {
  point_labels <- geom_text(size = 2.5,
                            aes(label = sprintf("%0.2f", round(mrr, digits = 2))),
                            vjust = 2, show.legend = FALSE)
  ggplot(data = df, aes(x = no_populated_fields, y = mrr, group = method, colour = method)) +
    geom_line(aes(linetype = method), size = 0.7) +
    scale_linetype_manual(values = c("dotted", "dashed", "solid")) +
    scale_color_manual(values = c(color2, color2, color2)) +
    geom_point() +
    point_labels +
    ylim(0, 1) +
    ggtitle(title) +
    xlab("No. populated fields") +
    ylab("Mean Reciprocal Rank") +
    theme(text = element_text(size = 8))
}
# Generate MRR plot (Recommender vs Baseline) per field
# generate_plot_field <- function(df, title="title"){
# plot <- ggplot(data=df, aes(x=field, y=mrr, fill=method)) + geom_bar(stat="identity", position=position_dodge()) +
# ylim(0,1) + ggtitle(title) + xlab("Field") + ylab("Mean Reciprocal Rank")
# return(plot)
# }
# Dodged bar chart of MRR per target field (one bar per method).
# Fill colours come from the globals color1/color2, matched positionally to
# the methods. Returns a ggplot object (not printed).
generate_plot_field <- function(df, title="title"){
plot <- ggplot(data=df, aes(x=field, y=mrr, fill=method)) + geom_bar(stat="identity", position=position_dodge()) +
scale_fill_manual(values=c(color1, color2)) +
ylim(0,1) + ggtitle(title) + xlab("Field") + ylab("Mean Reciprocal Rank") +
theme(text = element_text(size=8))
return(plot)
}
# S4 container for one evaluation run: 'datasets' holds the four result CSV
# paths (order: NCBItoNCBI, NCBItoEBI, EBItoEBI, EBItoNCBI) and 'description'
# is a human-readable label used in plot titles and exported file names.
setClass("EvaluationSet", representation(datasets = "vector", description = "character"))
# Generate and export the two comparison figures for one EvaluationSet:
#   1) MRR of recommender vs baseline, by number of populated fields
#      (2x2 grid, one panel per train/test combination).
#   2) MRR of recommender vs baseline, by target field (2x2 grid).
# Side effects: prints each figure to the active graphics device and copies it
# to a timestamped PDF in the current working directory.
#
# evaluation_set:                  EvaluationSet holding the four result CSV
#                                  paths (NCBItoNCBI, NCBItoEBI, EBItoEBI,
#                                  EBItoNCBI — in that order).
# reciprocal_rank_vr_column:       name of the recommender reciprocal-rank column.
# reciprocal_rank_baseline_column: name of the baseline reciprocal-rank column.
generate_all_plots <- function(evaluation_set, reciprocal_rank_vr_column, reciprocal_rank_baseline_column) {
  # Copy the figure currently on screen to "<prefix>_<description>_<timestamp>.pdf".
  export_current_plot <- function(prefix, description) {
    file_name <- paste0(prefix, "_", gsub(" ", "_", description), "_",
                        format(Sys.time(), "%Y_%m_%d_%H_%M_%S"), ".pdf")
    dev.copy(pdf, file_name)
    dev.off()
  }
  data_NCBItoNCBI <- read.csv(evaluation_set@datasets[1])
  data_NCBItoEBI <- read.csv(evaluation_set@datasets[2])
  data_EBItoEBI <- read.csv(evaluation_set@datasets[3])
  data_EBItoNCBI <- read.csv(evaluation_set@datasets[4])
  description <- evaluation_set@description
  # Remove the 'treatment' field from the analysis.
  # NOTE(review): only the NCBItoNCBI dataset is filtered — confirm whether the
  # other three experiments should exclude 'treatment' as well.
  data_NCBItoNCBI <- data_NCBItoNCBI[data_NCBItoNCBI$target_field != "treatment", ]
  data_NCBItoNCBI$experiment <- "NCBItoNCBI"
  data_NCBItoEBI$experiment <- "NCBItoEBI"
  data_EBItoEBI$experiment <- "EBItoEBI"
  data_EBItoNCBI$experiment <- "EBItoNCBI"
  # 1) Recommender vs Baseline, by number of populated fields (2x2 grid)
  p1 <- generate_plot(aggregate_data_1(data_NCBItoNCBI, reciprocal_rank_vr_column, reciprocal_rank_baseline_column), "Training: NCBI; Testing: NCBI")
  p2 <- generate_plot(aggregate_data_1(data_NCBItoEBI, reciprocal_rank_vr_column, reciprocal_rank_baseline_column), "Training: NCBI; Testing: EBI")
  p3 <- generate_plot(aggregate_data_1(data_EBItoEBI, reciprocal_rank_vr_column, reciprocal_rank_baseline_column), "Training: EBI; Testing: EBI")
  p4 <- generate_plot(aggregate_data_1(data_EBItoNCBI, reciprocal_rank_vr_column, reciprocal_rank_baseline_column), "Training: EBI; Testing: NCBI")
  fig1 <- ggarrange(p1, p2, p3, p4, ncol = 2, nrow = 2, common.legend = TRUE, legend = "bottom")
  desc_text <- paste0("Metadata Recommender vs Baseline (", description, ")")
  fig1_annotated <- annotate_figure(fig1, top = text_grob(label = desc_text, color = "black", face = "bold", size = 11))
  print(fig1_annotated)
  export_current_plot("plot1", description)
  # 2) Recommender vs Baseline, per target field (2x2 grid)
  p1 <- generate_plot_field(aggregate_data_2(data_NCBItoNCBI, reciprocal_rank_vr_column, reciprocal_rank_baseline_column), "Training: NCBI; Testing: NCBI")
  p2 <- generate_plot_field(aggregate_data_2(data_NCBItoEBI, reciprocal_rank_vr_column, reciprocal_rank_baseline_column), "Training: NCBI; Testing: EBI")
  p3 <- generate_plot_field(aggregate_data_2(data_EBItoEBI, reciprocal_rank_vr_column, reciprocal_rank_baseline_column), "Training: EBI; Testing: EBI")
  p4 <- generate_plot_field(aggregate_data_2(data_EBItoNCBI, reciprocal_rank_vr_column, reciprocal_rank_baseline_column), "Training: EBI; Testing: NCBI")
  fig2 <- ggarrange(p1, p2, p3, p4, ncol = 2, nrow = 2, common.legend = TRUE, legend = "bottom")
  desc_text <- paste0("Metadata Recommender vs Baseline by field (", description, ")")
  fig2_annotated <- annotate_figure(fig2, top = text_grob(label = desc_text, color = "black", face = "bold", size = 11))
  print(fig2_annotated)
  export_current_plot("plot2", description)
}
# Generate and export the three-way comparison figure overlaying the free-text,
# annotated, and annotated+mappings recommender results, one panel per
# train/test combination (2x2 grid).
# Side effects: prints the figure and writes it to a timestamped PDF.
#
# evaluation_set1/2/3:             EvaluationSets for free text, annotated, and
#                                  annotated+mappings runs respectively.
# reciprocal_rank_vr_column:       name of the recommender reciprocal-rank column.
# reciprocal_rank_baseline_column: unused here (baseline series are not drawn);
#                                  kept for signature compatibility with
#                                  generate_all_plots.
generate_all_plots_overlapped <- function(evaluation_set1, evaluation_set2, evaluation_set3, reciprocal_rank_vr_column, reciprocal_rank_baseline_column) {
  # Read the four experiment CSVs of one EvaluationSet and tag each data frame
  # with its experiment label. Returns a named list of data frames.
  load_experiments <- function(evaluation_set) {
    labels <- c("NCBItoNCBI", "NCBItoEBI", "EBItoEBI", "EBItoNCBI")
    experiments <- list()
    for (i in seq_along(labels)) {
      df <- read.csv(evaluation_set@datasets[i])
      df$experiment <- labels[i]
      experiments[[labels[i]]] <- df
    }
    experiments
  }
  # Aggregate one experiment from the three sets into a single long-format
  # data frame, with a distinct method label per set (no baseline series).
  combine_experiment <- function(df1, df2, df3) {
    rbind(
      aggregate_data_1_2(df1, reciprocal_rank_vr_column, NULL, recom_method_name = "recommender", baseline_method_name = "baseline"),
      aggregate_data_1_2(df2, reciprocal_rank_vr_column, NULL, recom_method_name = "recommender_annotated", baseline_method_name = "baseline_annotated"),
      aggregate_data_1_2(df3, reciprocal_rank_vr_column, NULL, recom_method_name = "recommender_annotated_mappings", baseline_method_name = "baseline_annotated_mappings")
    )
  }
  set1 <- load_experiments(evaluation_set1)
  set2 <- load_experiments(evaluation_set2)
  set3 <- load_experiments(evaluation_set3)
  data_p1 <- combine_experiment(set1$NCBItoNCBI, set2$NCBItoNCBI, set3$NCBItoNCBI)
  data_p2 <- combine_experiment(set1$NCBItoEBI, set2$NCBItoEBI, set3$NCBItoEBI)
  data_p3 <- combine_experiment(set1$EBItoEBI, set2$EBItoEBI, set3$EBItoEBI)
  data_p4 <- combine_experiment(set1$EBItoNCBI, set2$EBItoNCBI, set3$EBItoNCBI)
  # Recommender (text vs annotated vs annotated_mappings) 2x2 plots
  p1 <- generate_plot_2(data_p1, "Training: NCBI; Testing: NCBI")
  p2 <- generate_plot_2(data_p2, "Training: NCBI; Testing: EBI")
  p3 <- generate_plot_2(data_p3, "Training: EBI; Testing: EBI")
  p4 <- generate_plot_2(data_p4, "Training: EBI; Testing: NCBI")
  fig1 <- ggarrange(p1, p2, p3, p4, ncol = 2, nrow = 2, common.legend = TRUE, legend = "bottom")
  # 'description' feeds only the exported file name; the on-figure title is fixed.
  description <- paste0(evaluation_set1@description, " vs ", evaluation_set2@description)
  desc_text <- "Metadata Recommender (text vs annotated vs annotated_mappings)"
  fig1_annotated <- annotate_figure(fig1, top = text_grob(label = desc_text, color = "black", face = "bold", size = 11))
  print(fig1_annotated)
  # Export plot
  dev.copy(pdf, paste0("plot3_", gsub(" ", "_", description), "_", format(Sys.time(), "%Y_%m_%d_%H_%M_%S"), ".pdf"))
  dev.off()
}
### MAIN BODY ###
# Build the three evaluation sets (free text, annotated, annotated+mappings),
# each pointing at its four train/test result CSVs.
evaluation_set_1 <- new("EvaluationSet", datasets=c(file_NCBItoNCBI, file_NCBItoEBI, file_EBItoEBI, file_EBItoNCBI), description="free text")
evaluation_set_2 <- new("EvaluationSet", datasets=c(file_NCBItoNCBI_annotated, file_NCBItoEBI_annotated, file_EBItoEBI_annotated, file_EBItoNCBI_annotated), description="annotated")
evaluation_set_3 <- new("EvaluationSet", datasets=c(file_NCBItoNCBI_annotated_mappings, file_NCBItoEBI_annotated_mappings, file_EBItoEBI_annotated_mappings, file_EBItoNCBI_annotated_mappings), description="annotated-mappings")
evaluation_sets = c(evaluation_set_1, evaluation_set_2, evaluation_set_3)
# NOTE(review): the next line overrides the previous one, so per-set plots are
# generated for evaluation_set_1 only — presumably a debugging leftover; remove
# it to process all three sets.
evaluation_sets = c(evaluation_set_1)
# Per-set figures (plot1/plot2 PDFs), using the top-5 reciprocal-rank columns.
for (evaluation_set in evaluation_sets){
generate_all_plots(evaluation_set, 'RR_top5_vr', 'RR_top5_baseline')
}
# Three-way overlay figure (plot3 PDF) across all evaluation sets.
generate_all_plots_overlapped(evaluation_set_1, evaluation_set_2, evaluation_set_3, 'RR_top5_vr', 'RR_top5_baseline')
################################
# hist(data_NCBItoNCBI$populated_fields_size)
# hist(data_NCBItoEBI$populated_fields_size)
# ---- (removed dataset-join artifact; next section originates from
#       RnBeads' RnBeadRawSet-class.R) ----
########################################################################################################################
## RnBeadRawSet-class.R
## created: 2013-xx-xx
## creator: Pavlo Lutsik
## ---------------------------------------------------------------------------------------------------------------------
## RnBeadRawSet class definition.
########################################################################################################################
## ---------------------------------------------------------------------------------------------------------------------
## CLASS DEFINITIONS
## ---------------------------------------------------------------------------------------------------------------------
#' RnBeadRawSet-class
#'
#' Main class for storing HumanMethylation microarray data which includes intensity information
#'
#' @section Slots:
#' \describe{
#' \item{\code{pheno}}{Phenotypic data.}
#' \item{\code{M}}{\code{matrix} of intensities for the probes measuring the abundance of methylated molecules.}
#' \item{\code{U}}{\code{matrix} of intensities for the probes measuring the abundance of unmethylated molecules.}
#' \item{\code{M0}}{\code{matrix} of "out-of-band" intensities for the probes measuring the abundance of methylated molecules.}
#' \item{\code{U0}}{\code{matrix} of "out-of-band" intensities for the probes measuring the abundance of unmethylated molecules.}
#' \item{\code{bead.counts.M}}{\code{matrix} of bead counts per probe.}
#' \item{\code{bead.counts.U}}{\code{matrix} of bead counts per probe.}
#' }
#'
#' @section Methods and Functions:
#' \describe{
#' \item{\code{samples}}{Gets the identifiers of all samples in the dataset.}
#' \item{\code{\link[=M,RnBeadRawSet-method]{M}}}{Get the matrix of intensities for the probes measuring the abundance of methylated molecules.}
#' \item{\code{\link[=U,RnBeadRawSet-method]{U}}}{Get the matrix of intensities for the probes measuring the abundance of unmethylated molecules.}
#' \item{\code{\link{intensities.by.color}}}{Get probe intensities in each color channel.}
#' }
#'
#' @name RnBeadRawSet-class
#' @rdname RnBeadRawSet-class
#' @author Pavlo Lutsik
#' @exportClass RnBeadRawSet
#' @include RnBeadSet-class.R
# S4 class for raw HumanMethylation data: extends RnBeadSet with
# methylated/unmethylated probe intensities (M/U), optional "out-of-band"
# intensities (M0/U0) and optional per-probe bead counts. The slot class
# union "matrixOrffOrNULL" (declared elsewhere in the package) admits
# in-memory matrices, disk-backed ff matrices, or NULL.
setClass("RnBeadRawSet",
representation(M="matrixOrffOrNULL",
U="matrixOrffOrNULL",
M0="matrixOrffOrNULL",
U0="matrixOrffOrNULL",
bead.counts.M="matrixOrffOrNULL",
bead.counts.U="matrixOrffOrNULL"
),
contains="RnBeadSet",
# Defaults: M and U are empty matrices; the optional out-of-band and
# bead-count slots default to NULL (absent).
prototype(#pheno=data.frame(),
#betas=matrix(),
#meth.sites=matrix(),
#pval.sites=NULL,
#pval.regions=NULL,
#qc=NULL,
#status=NULL,
M=matrix(),
U=matrix(),
M0=NULL,
U0=NULL,
bead.counts.M=NULL,
bead.counts.U=NULL
),
package = "RnBeads"
)
RNBRAWSET.SLOTNAMES<-c("M","U","M0","U0","bead.counts.M", "bead.counts.U")
########################################################################################################################
## initialize.RnBeadRawSet
##
## Direct slot filling.
##
## #docType methods
## #rdname RnBeadRawSet-class
## Initializer for RnBeadRawSet. Fills the intensity-specific slots directly
## and delegates every inherited slot (pheno, sites, betas, p-values,
## coverage, QC, status) to the RnBeadSet initializer via callNextMethod().
setMethod("initialize", "RnBeadRawSet",
function(.Object,
pheno = data.frame(),
sites = matrix(ncol=0, nrow=0),
meth.sites = matrix(ncol=0, nrow=0),
M = matrix(ncol=0, nrow=0),
U = matrix(ncol=0, nrow=0),
M0 = NULL,
U0 = NULL,
bead.counts.M = NULL,
bead.counts.U = NULL,
covg.sites = NULL,
pval.sites = NULL,
qc = NULL,
target="probes450",
status=list(normalized=FALSE, background=FALSE, disk.dump=FALSE)
) {
## Slots introduced by this class
.Object@target <- target
.Object@M <- M
.Object@U <- U
.Object@M0 <- M0
.Object@U0 <- U0
.Object@bead.counts.M <- bead.counts.M
.Object@bead.counts.U <- bead.counts.U
## Parent class handles the remaining slots and returns .Object
callNextMethod(.Object,
pheno=pheno,
sites=sites,
meth.sites=meth.sites,
pval.sites=pval.sites,
covg.sites=covg.sites,
target=target,
status=status,
qc=qc)
})
########################################################################################################################
#' Wrapper function RnBeadRawSet
#'
#' @param pheno Phenotypic data.
#' @param probes \code{character} vector of Infinium(R) probe identifiers
#' @param M Matrix of intensities for the probes measuring the abundance of methylated molecules
#' @param U Matrix of intensities for the probes measuring the abundance of unmethylated molecules
#' @param M0 Matrix of "out-of-band" intensities for the probes measuring the abundance of methylated molecules
#' @param U0 Matrix of "out-of-band" intensities for the probes measuring the abundance of unmethylated molecules
#' @param bead.counts.M Matrix of bead counts per probe.
#' @param bead.counts.U Matrix of bead counts per probe.
#' @param p.values Matrix of detection p-values.
#' @param qc ...
#' @param platform \code{character} singleton specifying the microarray platform: \code{"450k"} corresponds to HumanMethylation450 microarray, and \code{"27k"} stands for HumanMethylation27.
#' @param region.types A \code{character} vector specifying the region types, for which the methylation information will be summarized.
#' @param beta.offset A regularization constant which is added to the denominator at beta-value calculation
#' @param summarize.bead.counts If \code{TRUE} the coverage slot is filled by summarizing the \code{bead.counts.M} and \code{bead.counts.U} matrices. For type I probes the summarization is done using \code{min} operation, while for type II probes the bead counts should be identical in both supplied matrices
#' @param summarize.regions ...
#' @param useff If \code{TRUE} the data matrices will be stored as \code{ff} objects
#' @param ffcleanup If \code{TRUE} and disk dumping has been enabled the data of the input \code{ff} objects will be deleted
#'
#' @return an object of class RnBeadRawSet
#'
#' @name RnBeadRawSet
#' @rdname RnBeadRawSet-class
#' @aliases initialize,RnBeadRawSet-method
#' @export
RnBeadRawSet<-function(
pheno,
probes,
M,
U,
M0=NULL,
U0=NULL,
bead.counts.M = NULL,
bead.counts.U = NULL,
p.values=NULL,
qc = NULL,
platform = "450k",
beta.offset=100,
summarize.bead.counts=TRUE,
summarize.regions=TRUE,
region.types = rnb.region.types.for.analysis("hg19"),
useff=rnb.getOption("disk.dump.big.matrices"),
ffcleanup=FALSE){
## ---- argument validation --------------------------------------------------
if(missing(pheno)){
stop("argument pheno should be supplied")
}
## When no probe identifiers are given, fall back to the row names of the
## intensity matrices (M first, then U).
if(missing(probes)){
if(!is.null(rownames(M))){
probes<-rownames(M)
}else if(!is.null(rownames(U))){
probes<-rownames(U)
}else{
stop("If probes are not supplied, M or U should have probe identifiers as row names")
}
}
if(!is.data.frame(pheno)){
stop("invalid value for pheno: should be a data frame")
}
if(!any(c('matrix', 'ff_matrix') %in% class(M))){
stop("invalid value for M: should be a matrix or an ff_matrix")
}
if(!any(c('matrix', 'ff_matrix') %in% class(U))){
stop("invalid value for U: should be a matrix or an ff_matrix")
}
if(!is.null(p.values) && !any(c('matrix', 'ff_matrix') %in% class(p.values))){
stop("invalid value for p.values: should be a matrix or an ff_matrix")
}
if(!is.null(bead.counts.M) && !any(c('matrix', 'ff_matrix') %in% class(bead.counts.M))){
stop("invalid value for bead.counts.M: should be a matrix or an ff_matrix")
}
if(!is.null(bead.counts.U) && !any(c('matrix', 'ff_matrix') %in% class(bead.counts.U))){
stop("invalid value for bead.counts.U: should be a matrix or an ff_matrix")
}
if(!is.null(qc)){
if(!is.list(qc)){
stop("invalid value for qc: should be a list")
}
}
if(!is.character(platform) || length(platform)!=1L){
stop("invalid value for platform: should be a character of length one")
}
if(!is.character(region.types)){
stop("invalid value for region types: should be a character vector")
}
if(!is.numeric(beta.offset) || length(beta.offset)!=1L || beta.offset<0){
stop("invalid value for beta.offset: should be a positive numeric of length one")
}
if(!is.logical(summarize.regions) || length(summarize.regions)!=1L){
stop("invalid value for summarize.regions: should be a logical of length one")
}
if(!is.logical(summarize.bead.counts) || length(summarize.bead.counts)!=1L){
stop("invalid value for summarize.bead.counts: should be a logical of length one")
}
if(!is.logical(useff) || length(useff)!=1L){
stop("invalid value for useff: should be a logical of length one")
}
## ---- platform -> annotation target ----------------------------------------
if (platform =="EPIC") {
target <- "probesEPIC"
assembly <- "hg19"
}else if (platform =="450k") {
target <- "probes450"
assembly <- "hg19"
} else if(platform == "27k"){
target <- "probes27"
assembly <- "hg19"
}else{
rnb.error("Invalid value for platform")
}
## ---- match probes to the annotation and align all matrices ----------------
res<-match.probes2annotation(probes, target, assembly)
sites<-res[[1]]
site.ids<-res[[2]]
fullmatch<-res[[3]]
M<-prepare.slot.matrix(M, useff=useff, full.match=fullmatch, subset=site.ids, cleanup=ffcleanup)
U<-prepare.slot.matrix(U, useff=useff, full.match=fullmatch, subset=site.ids, cleanup=ffcleanup)
if(!is.null(M0)){
M0<-prepare.slot.matrix(M0, useff=useff, full.match=fullmatch, subset=site.ids, cleanup=ffcleanup)
}
if(!is.null(U0)){
U0<-prepare.slot.matrix(U0, useff=useff, full.match=fullmatch, subset=site.ids, cleanup=ffcleanup)
}
if(!is.null(bead.counts.M)){
bead.counts.M<-prepare.slot.matrix(bead.counts.M, useff=useff, full.match=fullmatch, subset=site.ids, cleanup=ffcleanup)
}
if(!is.null(bead.counts.U)){
bead.counts.U<-prepare.slot.matrix(bead.counts.U, useff=useff, full.match=fullmatch, subset=site.ids, cleanup=ffcleanup)
}
if(!is.null(p.values)){
p.values<-prepare.slot.matrix(p.values, useff=useff, full.match=fullmatch, subset=site.ids, cleanup=ffcleanup)
}
## Row order now follows the annotation, so probe row names are redundant
rownames(M)<-NULL
rownames(U)<-NULL
if(!is.null(M0)){
rownames(M0)<-NULL
}
if(!is.null(U0)){
rownames(U0)<-NULL
}
if(!is.null(bead.counts.M)){
rownames(bead.counts.M)<-NULL
}
if(!is.null(bead.counts.U)){
rownames(bead.counts.U)<-NULL
}
## ---- derive coverage and beta values --------------------------------------
## NB: the logical argument summarize.bead.counts shadows the function of the
## same name; R resolves the call below to the function (non-function bindings
## are skipped in call position).
if(!is.null(bead.counts.M) && !is.null(bead.counts.U) && summarize.bead.counts){
bead.counts<-summarize.bead.counts(bead.counts.M[,,drop=FALSE],bead.counts.U[,,drop=FALSE])
}else{
bead.counts<-NULL
}
betas<-beta.value(M[,,drop=FALSE],U[,,drop=FALSE], beta.offset)
if(useff){
if(!is.null(bead.counts)){
bead.counts<-convert.to.ff.matrix.tmp(bead.counts)
}
}
if(useff){
betas<-convert.to.ff.matrix.tmp(betas)
}
## ---- assemble the object --------------------------------------------------
status<-list()
status[["normalized"]]<-"none"
status[["background"]]<-"none"
status[["disk.dump"]]<-useff
object<-new("RnBeadRawSet",
pheno=pheno,
sites=sites,
meth.sites=betas,
M=M,
U=U,
M0=M0,
U0=U0,
bead.counts.M=bead.counts.M,
bead.counts.U=bead.counts.U,
covg.sites=bead.counts,
pval.sites=p.values,
qc=qc,
target=target,
status=status
)
## Optionally summarize methylation over the requested region types
## (again, the logical flag shadows the summarize.regions function).
if(summarize.regions){
for (region.type in region.types) {
if (region.type %in% rnb.region.types("hg19")) {
object <- summarize.regions(object, region.type)
}
}
}
return(object)
}
########################################################################################################################
## FIXME: placeholder validity method -- no constraints are enforced yet
validRnBeadRawSetObject <- function(object) {
TRUE
}
setValidity("RnBeadRawSet", method = validRnBeadRawSetObject)
########################################################################################################################
## Reuse the generic RnBeadSet display routine for printing RnBeadRawSet objects.
setMethod("show", "RnBeadRawSet", rnb.show.rnbeadset)
########################################################################################################################
#' as("MethyLumiSet", "RnBeadRawSet")
#'
#' Convert a \code{\linkS4class{MethyLumiSet}} object to \code{\linkS4class{RnBeadRawSet}}
#'
#' @name as.RnBeadRawSet
setAs("MethyLumiSet", "RnBeadRawSet",
function(from, to){
if(!inherits(from,"MethyLumiSet")){
## Report the class of the offending argument. (The original message
## referenced a non-existent variable `methylumi.set`.)
stop("not a MethyLumiSet object, got: ", paste(class(from), collapse=", "))
}
## Extract pheno table, probe ids, p-values and QC in RnBeadSet layout
m.data <- MethyLumiSet2RnBeadSet(from)
## Locate the bead-count assay elements; naming differs between
## methylumi versions / import routes.
if("methylated.N" %in% ls(from@assayData) && "unmethylated.N" %in% ls(from@assayData) ){
meth.N.element<-"methylated.N"
umeth.N.element<-"unmethylated.N"
}else if("Avg_NBEADS_A" %in% ls(from@assayData) && "Avg_NBEADS_B" %in% ls(from@assayData)){
meth.N.element<-"Avg_NBEADS_B"
umeth.N.element<-"Avg_NBEADS_A"
}else{
meth.N.element<-NULL
umeth.N.element<-NULL
}
## Locate the "out-of-band" intensity elements, if present
if("methylated.OOB" %in% ls(from@assayData) && "unmethylated.OOB" %in% ls(from@assayData)){
meth.oob.element<-"methylated.OOB"
umeth.oob.element<-"unmethylated.OOB"
}else{
meth.oob.element<-NULL
umeth.oob.element<-NULL
}
## Map the annotation string to the RnBeads platform identifier.
## Fail early on unsupported platforms; previously `platform` was left
## undefined here, producing an obscure "object not found" error below.
if(annotation(from)=="IlluminaMethylationEPIC"){
platform="EPIC"
}else if(annotation(from)=="IlluminaHumanMethylation450k"){
platform="450k"
}else if(annotation(from)=="IlluminaHumanMethylation27k"){
platform="27k"
}else{
stop("unsupported microarray platform annotation: ", annotation(from))
}
object<-RnBeadRawSet(
pheno=m.data$pheno,
probes=m.data$probes,
M=methylated(from),
U=unmethylated(from),
p.values=m.data$p.values,
M0=if(!is.null(meth.oob.element)) get(meth.oob.element,from@assayData) else NULL,
U0=if(!is.null(umeth.oob.element)) get(umeth.oob.element,from@assayData) else NULL,
bead.counts.M=if(!is.null(meth.N.element)) get(meth.N.element,from@assayData) else NULL,
bead.counts.U=if(!is.null(umeth.N.element)) get(umeth.N.element,from@assayData) else NULL,
platform=platform
)
if ("qc" %in% names(m.data)) {
qc(object)<-m.data[["qc"]]
}
object
})
########################################################################################################################
#' as("RnBeadRawSet", "MethyLumiSet")
#'
#' Convert a \code{\linkS4class{RnBeadRawSet}} object to \code{\linkS4class{MethyLumiSet}}
#'
#' @name as.RnBeadRawSet
setAs("RnBeadRawSet","MethyLumiSet",
function(from, to){
if(!inherits(from,"RnBeadRawSet")){
## Report the class of the offending argument. (The original message
## referenced a non-existent variable `methylumi.set`.)
stop("not a RnBeadRawSet object, got: ", paste(class(from), collapse=", "))
}
## Populate the assayData environment expected by MethyLumiSet;
## optional slots are only transferred when present.
assd<-new.env()
assign("betas", meth(from), envir=assd)
assign("pvals", dpval(from), envir=assd)
assign("methylated", M(from), envir=assd)
assign("unmethylated", U(from), envir=assd)
if(!is.null(M0(from))){
assign("methylated.OOB", M0(from), envir=assd)
}
if(!is.null(U0(from))){
assign("unmethylated.OOB", U0(from), envir=assd)
}
if(!is.null(bead.counts.M(from))){
assign("methylated.N", bead.counts.M(from), envir=assd)
}
if(!is.null(bead.counts.U(from))){
assign("unmethylated.N", bead.counts.U(from), envir=assd)
}
pd<-pheno(from)
rownames(pd)<-colnames(meth(from))
mset<-new(to, assd, as(pd, "AnnotatedDataFrame"))
rm(assd)
## Transfer the probe annotation (color channel and identifiers)
ann<-annotation(from, add.names=TRUE)
featureData(mset)<-as(data.frame(COLOR_CHANNEL=ann$Color, rownames=rownames(ann)), "AnnotatedDataFrame")
featureNames(mset)<-ann[["ID"]]
## Transfer the control-probe QC information, if available
if (!is.null(qc(from))){
assd<-new.env()
assign("methylated", qc(from)$Cy3, envir=assd)
assign("unmethylated", qc(from)$Cy5, envir=assd)
mset@QC<-new("MethyLumiQC", assd)
if(from@target == "probesEPIC"){
probeIDs<-rnb.get.annotation("controlsEPIC")[,"Target"]
## TODO remove this after annotation has been fixed
index<-rnb.update.controlsEPIC.enrich(rnb.get.annotation("controlsEPIC"))[,"Index"]
probeIDs<-paste(probeIDs, index, sep=".")
}else if(from@target == "probes450"){
probeIDs<-rnb.get.annotation("controls450")[,"Target"]
probeIDs<-paste(probeIDs, unlist(sapply(table(probeIDs)[unique(probeIDs)], seq, from=1 )), sep=".")
}else if(from@target == "probes27"){
probeIDs<-rnb.get.annotation("controls27")[,"Name"]
}
featureData(mset@QC)<-as(data.frame(Address=rownames(qc(from)$Cy3), rownames=probeIDs), "AnnotatedDataFrame")
featureNames(mset@QC)<-probeIDs
if(from@target == "probesEPIC"){
annotation(mset@QC) <- "IlluminaMethylationEPIC"
}else if(from@target == "probes450"){
annotation(mset@QC) <- "IlluminaHumanMethylation450k"
}else if(from@target == "probes27"){
## fixed: this branch previously set annotation(mset) instead of
## annotation(mset@QC), unlike the EPIC and 450k branches above
annotation(mset@QC) <- "IlluminaHumanMethylation27k"
}
}
## Annotation of the full dataset
if(from@target == "probesEPIC"){
annotation(mset) <- "IlluminaMethylationEPIC"
}else if(from@target == "probes450"){
annotation(mset) <- "IlluminaHumanMethylation450k"
}else if(from@target == "probes27"){
annotation(mset) <- "IlluminaHumanMethylation27k"
}
mset
})
########################################################################################################################
## ---------------------------------------------------------------------------------------------------------------------
## ACCESSORS
## ---------------------------------------------------------------------------------------------------------------------
if(!isGeneric("M")) setGeneric('M',
function(object, ...) standardGeneric('M'))
#' M-methods
#'
#' Extract raw methylated probe intensity from an object of \code{RnBeadRawSet} class.
#'
#' @param object Dataset of interest.
#' @param row.names Flag indicating whether the resulting matrix will be assigned row names
#'
#' @return \code{matrix} of the methylated probe intensities
#'
#' @rdname M-methods
#' @docType methods
#' @export
#' @aliases M
#' @aliases M,RnBeadRawSet-method
#' @examples
#' \donttest{
#' library(RnBeads.hg19)
#' data(small.example.object)
#' M.intensity<-M(rnb.set.example)
#' head(M.intensity)
#' }
#'
setMethod("M", signature(object="RnBeadRawSet"),
function(object, row.names=FALSE){
## Delegate to the shared extraction helper, which handles ff-backed
## matrices and attaches probe identifiers as row names when requested.
get.dataset.matrix(object, "sites", row.names, object@M, object@meth.regions)
})
if(!isGeneric("U")) setGeneric('U',
function(object, ...) standardGeneric('U'))
########################################################################################################################
#' U-methods
#'
#' Extract raw unmethylated probe intensity from an object of \code{RnBeadRawSet} class.
#'
#' @param object Dataset of interest.
#' @param row.names Flag indicating whether the resulting matrix will be assigned row names
#'
#' @return \code{matrix} of the unmethylated probe intensities
#'
#' @rdname U-methods
#' @docType methods
#' @export
#' @aliases U
#' @aliases U,RnBeadRawSet-method
#' @examples
#' \donttest{
#' library(RnBeads.hg19)
#' data(small.example.object)
#' U.intensity<-U(rnb.set.example)
#' head(U.intensity)
#' }
setMethod("U", signature(object="RnBeadRawSet"),
function(object, row.names=FALSE){
## Delegate to the shared extraction helper, which handles ff-backed
## matrices and attaches probe identifiers as row names when requested.
get.dataset.matrix(object, "sites", row.names, object@U, object@meth.regions)
})
########################################################################################################################
## Accessor for the "out-of-band" methylated intensities (M0 slot).
## Guarded like the M/U generics above so an existing generic is not clobbered.
if(!isGeneric("M0")) setGeneric('M0',
function(object, ...) standardGeneric('M0'))
setMethod("M0", signature(object="RnBeadRawSet"),
function(object, row.names=FALSE){
get.dataset.matrix(object, "sites", row.names, object@M0, object@meth.regions)
})
########################################################################################################################
## Accessor for the "out-of-band" unmethylated intensities (U0 slot).
## Guarded like the M/U generics above so an existing generic is not clobbered.
if(!isGeneric("U0")) setGeneric('U0',
function(object, ...) standardGeneric('U0'))
setMethod("U0", signature(object="RnBeadRawSet"),
function(object, row.names=FALSE){
get.dataset.matrix(object, "sites", row.names, object@U0, object@meth.regions)
})
########################################################################################################################
## Accessor for the methylated-channel bead counts.
## Guarded like the M/U generics above so an existing generic is not clobbered.
if(!isGeneric("bead.counts.M")) setGeneric('bead.counts.M',
function(object, ...) standardGeneric('bead.counts.M'))
setMethod("bead.counts.M", signature(object="RnBeadRawSet"),
function(object, row.names=FALSE){
get.dataset.matrix(object, "sites", row.names, object@bead.counts.M, object@meth.regions)
})
########################################################################################################################
## Accessor for the unmethylated-channel bead counts.
## Guarded like the M/U generics above so an existing generic is not clobbered.
if(!isGeneric("bead.counts.U")) setGeneric('bead.counts.U',
function(object, ...) standardGeneric('bead.counts.U'))
setMethod("bead.counts.U", signature(object="RnBeadRawSet"),
function(object, row.names=FALSE){
get.dataset.matrix(object, "sites", row.names, object@bead.counts.U, object@meth.regions)
})
## ---------------------------------------------------------------------------------------------------------------------
## MODIFIERS
## ---------------------------------------------------------------------------------------------------------------------
setGeneric('M<-',
function(object, value) standardGeneric('M<-'))
## Replacement method for the methylated intensity matrix.
## A replacement method must return the modified object; the original
## implementation fell through and returned `value` (the value of the slot
## assignment), so `M(x) <- m` replaced the whole object with the matrix.
setMethod("M<-", signature(object="RnBeadRawSet", value="matrixOrffOrNULL"),
function(object, value){
if(object@status$disk.dump){
# delete(object@M)
object@M<-convert.to.ff.matrix.tmp(value)
}else{
object@M<-value
}
object
})
########################################################################################################################
setGeneric('U<-',
function(object, value) standardGeneric('U<-'))
## Replacement method for the unmethylated intensity matrix.
## Fixed to return the modified object (the original returned `value`).
setMethod("U<-", signature(object="RnBeadRawSet", value="matrixOrffOrNULL"),
function(object, value){
if(object@status$disk.dump){
# delete(object@U)
object@U<-convert.to.ff.matrix.tmp(value)
}else{
object@U<-value
}
object
})
########################################################################################################################
setGeneric('M0<-',
function(object, value) standardGeneric('M0<-'))
## Replacement method for the out-of-band methylated intensities.
## Fixed to return the modified object (the original returned `value`).
setMethod("M0<-", signature(object="RnBeadRawSet", value="matrixOrffOrNULL"),
function(object, value){
if(object@status$disk.dump){
# delete(object@M0)
object@M0<-convert.to.ff.matrix.tmp(value)
}else{
object@M0<-value
}
object
})
########################################################################################################################
setGeneric('U0<-',
function(object, value) standardGeneric('U0<-'))
## Replacement method for the out-of-band unmethylated intensities.
## Fixed to return the modified object (the original returned `value`).
setMethod("U0<-", signature(object="RnBeadRawSet", value="matrixOrffOrNULL"),
function(object, value){
if(object@status$disk.dump){
# delete(object@U0)
object@U0<-convert.to.ff.matrix.tmp(value)
}else{
object@U0<-value
}
object
})
########################################################################################################################
setGeneric('bead.counts.M<-',
function(object, value) standardGeneric('bead.counts.M<-'))
## Replacement method for the methylated-channel bead counts.
## Fixed to return the modified object (the original returned `value`).
setMethod("bead.counts.M<-", signature(object="RnBeadRawSet", value="matrixOrffOrNULL"),
function(object, value){
if(object@status$disk.dump){
# delete(object@bead.counts.M)
object@bead.counts.M<-convert.to.ff.matrix.tmp(value)
}else{
object@bead.counts.M<-value
}
object
})
########################################################################################################################
setGeneric('bead.counts.U<-',
function(object, value) standardGeneric('bead.counts.U<-'))
## Replacement method for the unmethylated-channel bead counts.
## Fixed to return the modified object (the original returned `value`).
setMethod("bead.counts.U<-", signature(object="RnBeadRawSet", value="matrixOrffOrNULL"),
function(object, value){
if(object@status$disk.dump){
# delete(object@bead.counts.U)
object@bead.counts.U<-convert.to.ff.matrix.tmp(value)
}else{
object@bead.counts.U<-value
}
object
})
########################################################################################################################
if (!isGeneric("remove.sites")) {
setGeneric("remove.sites", function(object, probelist, verbose = TRUE) standardGeneric("remove.sites"))
}
#' @rdname remove.sites-methods
#' @aliases remove.sites,RnBeadRawSet-method
#' @docType methods
#' @export
setMethod("remove.sites", signature(object = "RnBeadRawSet"),
function(object, probelist, verbose = TRUE) {
## Translate the supplied probe identifiers/indices into row indices
inds <- get.i.vector(probelist, rownames(object@meth.sites))
if (length(inds) != 0) {
## Drop the corresponding rows from every non-NULL intensity slot
for(sl in RNBRAWSET.SLOTNAMES){
if(!is.null(slot(object,sl))){
if(!is.null(object@status) && object@status$disk.dump){
## disk-dumped data: subset in memory, optionally delete the old
## ff file, then re-dump the result as a fresh ff matrix
new.matrix<-slot(object,sl)[-inds,,drop=FALSE]
if(isTRUE(object@status$discard.ff.matrices)){
delete(slot(object,sl))
}
slot(object,sl)<-convert.to.ff.matrix.tmp(new.matrix)
rm(new.matrix); rnb.cleanMem()
}else{
slot(object,sl)<-slot(object,sl)[-inds,,drop=FALSE]
}
}
}
}
## Parent method removes the sites from the inherited slots;
## callNextMethod() re-dispatches with the current (modified) arguments.
callNextMethod()
}
)
########################################################################################################################
if (!isGeneric("remove.samples")) {
setGeneric("remove.samples", function(object, samplelist) standardGeneric("remove.samples"))
}
#' @rdname remove.samples-methods
#' @aliases remove.samples,RnBeadRawSet-method
#' @docType methods
#' @export
setMethod("remove.samples", signature(object = "RnBeadRawSet"),
function(object, samplelist) {
## Translate the supplied sample identifiers/indices into column indices
inds <- get.i.vector(samplelist, samples(object))
if (length(inds) != 0) {
## Drop the corresponding columns from every non-NULL intensity slot
for(sl in RNBRAWSET.SLOTNAMES){
if(!is.null(slot(object,sl))){
if(!is.null(object@status) && object@status$disk.dump){
## disk-dumped data: subset in memory, optionally delete the old
## ff file, then re-dump the result as a fresh ff matrix
new.matrix<-slot(object,sl)[,-inds, drop=FALSE]
if(isTRUE(object@status$discard.ff.matrices)){
delete(slot(object,sl))
}
slot(object,sl)<-convert.to.ff.matrix.tmp(new.matrix)
rm(new.matrix); rnb.cleanMem()
}else{
slot(object,sl)<-slot(object,sl)[,-inds, drop=FALSE]
}
}
}
}
## Parent method removes the samples from the inherited slots
callNextMethod()
}
)
#######################################################################################################################
#if (!isGeneric("update.meth")) {
setGeneric("update.meth", function(object) standardGeneric("update.meth"))
#}
##
## update.meth
##
## Recompute the beta-value matrix (meth.sites) from the current M and U
## intensities, e.g. after normalization or background subtraction has
## modified them. Uses the default regularization offset of beta.value().
##
## param object RnBeadRawSet object
##
## return Updated RnBeadRawSet object
##
setMethod("update.meth", signature(object="RnBeadRawSet"),
function(object){
if(object@status$disk.dump){
## materialize the ff matrices ([,]) before recomputing, then re-dump
object@meth.sites<-convert.to.ff.matrix.tmp(beta.value(object@M[,], object@U[,]))
}else{
object@meth.sites<-beta.value(object@M, object@U)
}
return(object)
})
#######################################################################################################################
## save, load and destroy
## Persist each disk-dumped (ff) intensity matrix to <path>/rnb.<slot>.*;
## in-memory slots are left to the parent class method.
setMethod("save.matrices", signature(object="RnBeadRawSet", path="character"),
function(object, path){
if(!is.null(object@status) && object@status$disk.dump){
for(sl in RNBRAWSET.SLOTNAMES){
if(!is.null(slot(object,sl))){
if("ff" %in% class(slot(object,sl))){
## ffsave requires the object to be saved under a plain name
## ("ffmatrix"), which load.matrices below retrieves again
ffmatrix<-slot(object,sl)
ffsave(ffmatrix, file=file.path(path, paste("rnb", sl, sep=".")),
rootpath=getOption('fftempdir'))
rm(ffmatrix)
}
}
}
}
## Parent method stores the inherited matrices
callNextMethod(object, path)
})
#######################################################################################################################
## Restore the disk-dumped intensity matrices written by save.matrices;
## a slot is only reloaded when both its .RData and .ffData files exist.
setMethod("load.matrices", signature(object="RnBeadRawSet", path="character"),
function(object, path, temp.dir=tempdir()){
slot.names <- RNBRAWSET.SLOTNAMES
for(sl in slot.names){
if(!is.null(slot(object, sl))){
if(paste("rnb",sl,"RData", sep=".") %in% list.files(path) &&
paste("rnb",sl,"ffData", sep=".") %in% list.files(path)){
## ffload restores the object under the name "ffmatrix"
## (see save.matrices); load into a scratch environment
load_env<-new.env()
suppressMessages(ffload(file=file.path(path, paste("rnb", sl, sep=".")),
envir=load_env,rootpath=getOption("fftempdir")))
slot(object, sl)<-get("ffmatrix", envir=load_env)
rm(load_env)
}
}
}
## Parent method restores the inherited matrices
callNextMethod(object=object, path=path, temp.dir=temp.dir)
})
#######################################################################################################################
#' @rdname destroy-methods
#' @aliases destroy,RnBeadRawSet-method
#' @docType methods
#' @export
setMethod("destroy", signature(object="RnBeadRawSet"),
function(object){
## Remove the on-disk ff files backing the intensity slots, then let the
## parent class method dispose of the inherited matrices
if(object@status$disk.dump){
for(sl in RNBRAWSET.SLOTNAMES){
if(!is.null(slot(object,sl))){
delete(slot(object, sl))
}
}
}
callNextMethod()
}
)
## ---------------------------------------------------------------------------------------------------------------------
## HELPER ROUTINES
## ---------------------------------------------------------------------------------------------------------------------
## Compute beta values (methylation fractions) from intensity matrices:
## beta = M / (M + U + offset). The offset regularizes low-intensity probes.
beta.value <- function(M, U, offset = 100) {
M / (M + U + offset)
}
#######################################################################################################################
## Compute M-values: the log2 ratio of (offset-regularized) methylated to
## unmethylated intensities.
m.value <- function(M, U, offset = 100) {
log2((M + offset) / (U + offset))
}
#######################################################################################################################
#' intensities.by.color
#'
#' Rearranges information from "M" and "U" slots of a RnBeadRawSet object by color channel.
#'
#' @param raw.set RnBeadRawSet object
#' @param address.rownames if \code{TRUE} the rows of the returned matrices are named with the corresponding Illumina probe addresses
#' @param add.oob if \code{TRUE} the "out-of-band" intensities are included
#' @param add.controls if \code{TRUE} the control probe intensities are included
#' @param add.missing if \code{TRUE} the rows for the probes missing in \code{raw.set} are imputed with \code{NA} values
#'
#' @return a \code{list} with elements \code{Cy3} and \code{Cy5} containing average bead intensities
#' measured for each probe in the green and red channels, respectively
#'
#' @author Pavlo Lutsik
intensities.by.color<-function(raw.set,
address.rownames=TRUE,
add.oob=TRUE,
add.controls=TRUE,
add.missing=TRUE
){
## NOTE(review): only the 450k manifest is supported here, regardless of
## raw.set@target -- confirm callers restrict this to 450k datasets
if(!require("IlluminaHumanMethylation450kmanifest")){
rnb.error("IlluminaHumanMethylation450kmanifest should be installed")
}
## Probe-id-named intensity matrices
Mmatrix<-M(raw.set, row.names=TRUE)
Umatrix<-U(raw.set, row.names=TRUE)
if(add.oob){
M0matrix<-M0(raw.set, row.names=TRUE)
U0matrix<-U0(raw.set, row.names=TRUE)
}
pinfos <- annotation(raw.set, add.names=TRUE)
## Pad the matrices with NA rows for annotated probes absent from raw.set,
## so the output covers the full platform annotation
if(add.missing){
full.ann<-rnb.annotation2data.frame(rnb.get.annotation(raw.set@target))
ann.missing<-full.ann[!rownames(full.ann)%in%rownames(pinfos),]
pinfos<-rbind(pinfos, ann.missing[,colnames(full.ann)])
filler<-matrix(NA_real_, nrow=nrow(ann.missing), ncol=length(samples(raw.set)))
rownames(filler)<-rownames(ann.missing)
Mmatrix<-rbind(Mmatrix, filler)
Umatrix<-rbind(Umatrix, filler)
if(add.oob){
M0matrix<-rbind(M0matrix, filler)
U0matrix<-rbind(U0matrix, filler)
}
rm(ann.missing, filler, full.ann)
}
rnb.set.probe.ids<-pinfos[["ID"]]
## ---- Type II probes: M is read in green (Cy3), U in red (Cy5) ----
dII.probes <- rnb.set.probe.ids[pinfos[,"Design"] == "II"]
#dII.probes <- dII.probes[!grepl("rs", dII.probes)]
if(address.rownames){
tII<-rbind(as.data.frame(IlluminaHumanMethylation450kmanifest@data$TypeII[,c("Name", "AddressA")]),
as.data.frame(IlluminaHumanMethylation450kmanifest@data$TypeSnpII[,c("Name", "AddressA")]))
tII<-tII[match(dII.probes, tII$Name),]
}
dII.grn<-Mmatrix[pinfos[,"Design"] == "II",,drop=FALSE]
if(address.rownames) rownames(dII.grn)<-tII$AddressA
dII.red<-Umatrix[pinfos[,"Design"] == "II",,drop=FALSE]
if(address.rownames) rownames(dII.red)<-tII$AddressA
## ---- Type I probes: both M and U are read in the probe's design channel;
## the opposite channel provides the "out-of-band" signal ----
dI.red.probes <- rnb.set.probe.ids[pinfos[, "Color"] == "Red"]
#dI.red.probes <- dI.red.probes[!grepl("rs", dI.red.probes)]
dI.green.probes <- rnb.set.probe.ids[pinfos[, "Color"] == "Grn"]
#dI.green.probes <- dI.green.probes[!grepl("rs", dI.green.probes)]
if(address.rownames){
tI<-rbind(as.data.frame(IlluminaHumanMethylation450kmanifest@data$TypeI[,c("Name","Color", "AddressA", "AddressB")]),
as.data.frame(IlluminaHumanMethylation450kmanifest@data$TypeSnpI[,c("Name","Color", "AddressA", "AddressB")]))
tI.red<-tI[tI$Color=="Red",]
tI.red<-tI.red[match(dI.red.probes, tI.red$Name),]
tI.grn<-tI[tI$Color=="Grn",]
tI.grn<-tI.grn[match(dI.green.probes, tI.grn$Name),]
}
## AddressB carries the methylated signal, AddressA the unmethylated one
dI.red.meth<-Mmatrix[pinfos[, "Color"] == "Red",,drop=FALSE]
if(address.rownames) rownames(dI.red.meth)<-tI.red[,"AddressB"]
dI.red.umeth<-Umatrix[pinfos[, "Color"] == "Red",,drop=FALSE]
if(address.rownames) rownames(dI.red.umeth)<-tI.red[,"AddressA"]
if(add.oob){
dI.red.meth.oob<-M0matrix[pinfos[, "Color"] == "Red",,drop=FALSE]
if(address.rownames) rownames(dI.red.meth.oob)<-tI.red[,"AddressB"]
dI.red.umeth.oob<-U0matrix[pinfos[, "Color"] == "Red",,drop=FALSE]
if(address.rownames) rownames(dI.red.umeth.oob)<-tI.red[,"AddressA"]
}
dI.grn.meth<-Mmatrix[pinfos[, "Color"] == "Grn",,drop=FALSE]
if(address.rownames) rownames(dI.grn.meth)<-tI.grn[,"AddressB"]
dI.grn.umeth<-Umatrix[pinfos[, "Color"] == "Grn",,drop=FALSE]
if(address.rownames) rownames(dI.grn.umeth)<-tI.grn[,"AddressA"]
if(add.oob){
dI.grn.meth.oob<-M0matrix[pinfos[, "Color"] == "Grn",,drop=FALSE]
if(address.rownames) rownames(dI.grn.meth.oob)<-tI.grn[,"AddressB"]
dI.grn.umeth.oob<-U0matrix[pinfos[, "Color"] == "Grn",,drop=FALSE]
if(address.rownames) rownames(dI.grn.umeth.oob)<-tI.grn[,"AddressA"]
}
## Assemble the per-channel matrices: out-of-band red readings belong to the
## green channel output and vice versa
intensities.by.channel <- list(
Cy3=rbind(dII.grn, dI.grn.meth,dI.grn.umeth,
if(add.oob) dI.red.meth.oob else NULL, if(add.oob) dI.red.umeth.oob else NULL),
Cy5=rbind(dII.red, dI.red.meth, dI.red.umeth,
if(add.oob) dI.grn.meth.oob else NULL, if(add.oob) dI.grn.umeth.oob else NULL))
rm(dII.grn, dI.grn.meth, dI.grn.umeth, dI.red.meth.oob, dI.red.umeth.oob,
dII.red, dI.red.meth, dI.red.umeth, dI.grn.meth.oob, dI.grn.umeth.oob)
gc()
## Align the red channel rows with the green channel row order
if(address.rownames) intensities.by.channel$Cy5<-intensities.by.channel$Cy5[rownames(intensities.by.channel$Cy3),,drop=FALSE]
## Append the control-probe intensities from the QC slot
if(add.controls){
ncd<-rnb.get.annotation("controls450")
#ncd<-ncd[ncd[["Target"]] == "NEGATIVE", ]
ncd$Target<-tolower(ncd$Target)
controls.by.channel<-qc(raw.set)
controls.by.channel$Cy3<-controls.by.channel$Cy3[as.character(ncd$ID),,drop=FALSE]
controls.by.channel$Cy5<-controls.by.channel$Cy5[as.character(ncd$ID),,drop=FALSE]
intensities.by.channel$Cy3<-rbind(intensities.by.channel$Cy3, controls.by.channel$Cy3)
intensities.by.channel$Cy5<-rbind(intensities.by.channel$Cy5, controls.by.channel$Cy5)
}
return(intensities.by.channel)
}
########################################################################################################################
| /R/RnBeadRawSet-class.R | no_license | cirruswolke/RnBeads | R | false | false | 34,872 | r | ########################################################################################################################
## RnBeadRawSet-class.R
## created: 2013-xx-xx
## creator: Pavlo Lutsik
## ---------------------------------------------------------------------------------------------------------------------
## RnBeadRawSet class definition.
########################################################################################################################
## ---------------------------------------------------------------------------------------------------------------------
## CLASS DEFINITIONS
## ---------------------------------------------------------------------------------------------------------------------
#' RnBeadRawSet-class
#'
#' Main class for storing HumanMethylation microarray data which includes intensity information
#'
#' @section Slots:
#' \describe{
#' \item{\code{pheno}}{Phenotypic data.}
#' \item{\code{M}}{\code{matrix} of intensities for the probes measuring the abundance of methylated molecules.}
#' \item{\code{U}}{\code{matrix} of intensities for the probes measuring the abundance of unmethylated molecules.}
#' \item{\code{M0}}{\code{matrix} of "out-of-band" intensities for the probes measuring the abundance of methylated molecules.}
#' \item{\code{U0}}{\code{matrix} of "out-of-band" intensities for the probes measuring the abundance of unmethylated molecules.}
#' \item{\code{bead.counts.M}}{\code{matrix} of bead counts per probe.}
#' \item{\code{bead.counts.U}}{\code{matrix} of bead counts per probe.}
#' }
#'
#' @section Methods and Functions:
#' \describe{
#' \item{\code{samples}}{Gets the identifiers of all samples in the dataset.}
#' \item{\code{\link[=M,RnBeadRawSet-method]{M}}}{Get the matrix of intensities for the probes measuring the abundance of methylated molecules.}
#' \item{\code{\link[=U,RnBeadRawSet-method]{U}}}{Get the matrix of intensities for the probes measuring the abundance of unmethylated molecules.}
#' \item{\code{\link{intensities.by.color}}}{Get probe intensities in each color channel.}
#' }
#'
#' @name RnBeadRawSet-class
#' @rdname RnBeadRawSet-class
#' @author Pavlo Lutsik
#' @exportClass RnBeadRawSet
#' @include RnBeadSet-class.R
setClass("RnBeadRawSet",
representation(M="matrixOrffOrNULL",
U="matrixOrffOrNULL",
M0="matrixOrffOrNULL",
U0="matrixOrffOrNULL",
bead.counts.M="matrixOrffOrNULL",
bead.counts.U="matrixOrffOrNULL"
),
contains="RnBeadSet",
prototype(#pheno=data.frame(),
#betas=matrix(),
#meth.sites=matrix(),
#pval.sites=NULL,
#pval.regions=NULL,
#qc=NULL,
#status=NULL,
M=matrix(),
U=matrix(),
M0=NULL,
U0=NULL,
bead.counts.M=NULL,
bead.counts.U=NULL
),
package = "RnBeads"
)
## Names of the raw-data slots specific to RnBeadRawSet; iterated over by the
## subsetting, (de)serialization and destruction methods below.
RNBRAWSET.SLOTNAMES <- c("M", "U", "M0", "U0", "bead.counts.M", "bead.counts.U")
########################################################################################################################
## initialize.RnBeadRawSet
##
## Direct slot filling.
##
## #docType methods
## #rdname RnBeadRawSet-class
## Fills the six raw-intensity slots directly, then delegates all inherited
## slots (pheno, sites, meth.sites, p-values, coverage, qc, status) to the
## RnBeadSet initializer via callNextMethod().
setMethod("initialize", "RnBeadRawSet",
function(.Object,
pheno = data.frame(),
sites = matrix(ncol=0, nrow=0),
meth.sites = matrix(ncol=0, nrow=0),
M = matrix(ncol=0, nrow=0),
U = matrix(ncol=0, nrow=0),
M0 = NULL,
U0 = NULL,
bead.counts.M = NULL,
bead.counts.U = NULL,
covg.sites = NULL,
pval.sites = NULL,
qc = NULL,
target="probes450",
status=list(normalized=FALSE, background=FALSE, disk.dump=FALSE)
) {
.Object@target<-target
#.Object@pheno<-pheno
#.Object@sites<-sites
## slots specific to RnBeadRawSet are assigned here directly
.Object@M<-M
.Object@U<-U
.Object@M0<-M0
.Object@U0<-U0
.Object@bead.counts.M<-bead.counts.M
.Object@bead.counts.U<-bead.counts.U
## legacy direct slot filling kept for reference; the parent-class
## initializer below is responsible for these slots now
#.Object@meth.sites<-betas
#.Object@pval.sites<-p.values
#.Object@covg.sites<-bead.counts
#.Object@regions<-list()
#.Object@status<-list()
#.Object@status[["normalized"]]<-"none"
#.Object@status[["background"]]<-"none"
#.Object@status[["disk.dump"]]<-useff
#.Object@inferred.covariates <- list()
#.Object@qc<-qc
#.Object
## delegate all inherited slots to the RnBeadSet initializer
callNextMethod(.Object,
pheno=pheno,
sites=sites,
meth.sites=meth.sites,
pval.sites=pval.sites,
covg.sites=covg.sites,
target=target,
status=status,
qc=qc)
})
########################################################################################################################
#' Wrapper function RnBeadRawSet
#'
#' Validates the supplied raw intensity data, matches probe identifiers
#' against the platform annotation, computes beta-values and constructs an
#' \code{RnBeadRawSet} object.
#'
#' @param pheno Phenotypic data.
#' @param probes \code{character} vector of Infinium(R) probe identifiers
#' @param M Matrix of intensities for the probes measuring the abundance of methylated molecules
#' @param U Matrix of intensities for the probes measuring the abundance of unmethylated molecules
#' @param M0 Matrix of "out-of-band" intensities for the probes measuring the abundance of methylated molecules
#' @param U0 Matrix of "out-of-band" intensities for the probes measuring the abundance of unmethylated molecules
#' @param bead.counts.M Matrix of bead counts per probe.
#' @param bead.counts.U Matrix of bead counts per probe.
#' @param p.values Matrix of detection p-values.
#' @param qc List with quality control information; expected to contain control probe intensity matrices \code{Cy3} and \code{Cy5}
#' @param platform \code{character} singleton specifying the microarray platform: \code{"EPIC"} corresponds to MethylationEPIC, \code{"450k"} corresponds to HumanMethylation450 microarray, and \code{"27k"} stands for HumanMethylation27.
#' @param region.types A \code{character} vector specifying the region types, for which the methylation information will be summarized.
#' @param beta.offset A regularization constant which is added to the denominator at beta-value calculation
#' @param summarize.bead.counts If \code{TRUE} the coverage slot is filled by summarizing the \code{bead.counts.M} and \code{bead.counts.U} matrices. For type I probes the summarization is done using \code{min} operation, while for type II probes the bead counts should be identical in both supplied matrices
#' @param summarize.regions If \code{TRUE} the methylation information is summarized for each of the region types given in \code{region.types}
#' @param useff If \code{TRUE} the data matrices will be stored as \code{ff} objects
#' @param ffcleanup If \code{TRUE} and disk dumping has been enabled the data of the input \code{ff} objects will be deleted
#'
#' @return an object of class RnBeadRawSet
#'
#' @name RnBeadRawSet
#' @rdname RnBeadRawSet-class
#' @aliases initialize,RnBeadRawSet-method
#' @export
RnBeadRawSet<-function(
		pheno,
		probes,
		M,
		U,
		M0=NULL,
		U0=NULL,
		bead.counts.M = NULL,
		bead.counts.U = NULL,
		p.values=NULL,
		qc = NULL,
		platform = "450k",
		beta.offset=100,
		summarize.bead.counts=TRUE,
		summarize.regions=TRUE,
		region.types = rnb.region.types.for.analysis("hg19"),
		useff=rnb.getOption("disk.dump.big.matrices"),
		ffcleanup=FALSE){
	## ----- argument validation --------------------------------------------
	if(missing(pheno)){
		stop("argument pheno should be supplied")
	}
	if(missing(probes)){
		## fall back to the row names of the intensity matrices
		if(!is.null(rownames(M))){
			probes<-rownames(M)
		}else if(!is.null(rownames(U))){
			probes<-rownames(U)
		}else{
			## BUGFIX: message previously referred to a nonexistent "betas" argument
			stop("If probes are not supplied, M or U should have probe identifiers as row names")
		}
	}
	if(!is.data.frame(pheno)){
		stop("invalid value for pheno: should be a data frame")
	}
	## inherits() tests the S3 class vector directly; idiomatic replacement
	## for the any(c(...) %in% class(x)) pattern used previously
	if(!inherits(M, c("matrix", "ff_matrix"))){
		stop("invalid value for M: should be a matrix or an ff_matrix")
	}
	if(!inherits(U, c("matrix", "ff_matrix"))){
		stop("invalid value for U: should be a matrix or an ff_matrix")
	}
	if(!is.null(p.values) && !inherits(p.values, c("matrix", "ff_matrix"))){
		stop("invalid value for p.values: should be a matrix or an ff_matrix")
	}
	if(!is.null(bead.counts.M) && !inherits(bead.counts.M, c("matrix", "ff_matrix"))){
		stop("invalid value for bead.counts.M: should be a matrix or an ff_matrix")
	}
	if(!is.null(bead.counts.U) && !inherits(bead.counts.U, c("matrix", "ff_matrix"))){
		stop("invalid value for bead.counts.U: should be a matrix or an ff_matrix")
	}
	if(!is.null(qc)){
		if(!is.list(qc)){
			stop("invalid value for qc: should be a list")
		}
	}
	if(!is.character(platform) || length(platform)!=1L){
		stop("invalid value for platform: should be a character of length one")
	}
	if(!is.character(region.types)){
		stop("invalid value for region types: should be a character vector")
	}
	if(!is.numeric(beta.offset) || length(beta.offset)!=1L || beta.offset<0){
		stop("invalid value for beta.offset: should be a positive numeric of length one")
	}
	if(!is.logical(summarize.regions) || length(summarize.regions)!=1L){
		stop("invalid value for summarize.regions: should be a logical of length one")
	}
	if(!is.logical(summarize.bead.counts) || length(summarize.bead.counts)!=1L){
		stop("invalid value for summarize.bead.counts: should be a logical of length one")
	}
	if(!is.logical(useff) || length(useff)!=1L){
		stop("invalid value for useff: should be a logical of length one")
	}
	## ----- platform dispatch ----------------------------------------------
	if (platform =="EPIC") {
		target <- "probesEPIC"
		assembly <- "hg19"
	}else if (platform =="450k") {
		target <- "probes450"
		assembly <- "hg19"
	} else if(platform == "27k"){
		target <- "probes27"
		assembly <- "hg19"
	}else{
		rnb.error("Invalid value for platform")
	}
	## ----- match supplied probes to the platform annotation ---------------
	res<-match.probes2annotation(probes, target, assembly)
	sites<-res[[1]]
	site.ids<-res[[2]]
	fullmatch<-res[[3]]
	## subset/convert each supplied matrix to the annotated probe set
	M<-prepare.slot.matrix(M, useff=useff, full.match=fullmatch, subset=site.ids, cleanup=ffcleanup)
	U<-prepare.slot.matrix(U, useff=useff, full.match=fullmatch, subset=site.ids, cleanup=ffcleanup)
	if(!is.null(M0)){
		M0<-prepare.slot.matrix(M0, useff=useff, full.match=fullmatch, subset=site.ids, cleanup=ffcleanup)
	}
	if(!is.null(U0)){
		U0<-prepare.slot.matrix(U0, useff=useff, full.match=fullmatch, subset=site.ids, cleanup=ffcleanup)
	}
	if(!is.null(bead.counts.M)){
		bead.counts.M<-prepare.slot.matrix(bead.counts.M, useff=useff, full.match=fullmatch, subset=site.ids, cleanup=ffcleanup)
	}
	if(!is.null(bead.counts.U)){
		bead.counts.U<-prepare.slot.matrix(bead.counts.U, useff=useff, full.match=fullmatch, subset=site.ids, cleanup=ffcleanup)
	}
	if(!is.null(p.values)){
		p.values<-prepare.slot.matrix(p.values, useff=useff, full.match=fullmatch, subset=site.ids, cleanup=ffcleanup)
	}
	## row names are redundant after matching; drop them to save memory
	rownames(M)<-NULL
	rownames(U)<-NULL
	if(!is.null(M0)){
		rownames(M0)<-NULL
	}
	if(!is.null(U0)){
		rownames(U0)<-NULL
	}
	if(!is.null(bead.counts.M)){
		rownames(bead.counts.M)<-NULL
	}
	if(!is.null(bead.counts.U)){
		rownames(bead.counts.U)<-NULL
	}
	## summarize per-probe coverage; note that summarize.bead.counts() the
	## *function* is resolved here despite the logical argument of the same
	## name, because R searches for a function when a name appears in call
	## position
	if(!is.null(bead.counts.M) && !is.null(bead.counts.U) && summarize.bead.counts){
		bead.counts<-summarize.bead.counts(bead.counts.M[,,drop=FALSE],bead.counts.U[,,drop=FALSE])
	}else{
		bead.counts<-NULL
	}
	betas<-beta.value(M[,,drop=FALSE],U[,,drop=FALSE], beta.offset)
	## merged two consecutive if(useff) blocks into one (behavior unchanged)
	if(useff){
		if(!is.null(bead.counts)){
			bead.counts<-convert.to.ff.matrix.tmp(bead.counts)
		}
		betas<-convert.to.ff.matrix.tmp(betas)
	}
	status<-list()
	status[["normalized"]]<-"none"
	status[["background"]]<-"none"
	status[["disk.dump"]]<-useff
	object<-new("RnBeadRawSet",
			pheno=pheno,
			sites=sites,
			meth.sites=betas,
			M=M,
			U=U,
			M0=M0,
			U0=U0,
			bead.counts.M=bead.counts.M,
			bead.counts.U=bead.counts.U,
			covg.sites=bead.counts,
			pval.sites=p.values,
			qc=qc,
			target=target,
			status=status
	)
	## optional region-level summarization (summarize.regions() the function,
	## resolved as above despite the logical argument of the same name)
	if(summarize.regions){
		for (region.type in region.types) {
			if (region.type %in% rnb.region.types("hg19")) {
				object <- summarize.regions(object, region.type)
			}
		}
	}
	return(object)
}
########################################################################################################################
## FIXME: dummy validity method -- accepts every object unconditionally
validRnBeadRawSetObject <- function(object) {
	TRUE
}
setValidity("RnBeadRawSet", method = validRnBeadRawSetObject)
########################################################################################################################
setMethod("show", "RnBeadRawSet", rnb.show.rnbeadset)
########################################################################################################################
#' as("MethyLumiSet", "RnBeadRawSet")
#'
#' Convert a \code{\linkS4class{MethyLumiSet}} object to \code{\linkS4class{RnBeadRawSet}}
#'
#' @name as.RnBeadRawSet
setAs("MethyLumiSet", "RnBeadRawSet",
	function(from, to){
		if(!inherits(from,"MethyLumiSet")){
			## BUGFIX: previously deparsed the nonexistent symbol `methylumi.set`
			stop("not a MethyLumiSet object:", deparse(substitute(from)))
		}
		## extract pheno, probes, p-values (and possibly qc) from the source object
		m.data <- MethyLumiSet2RnBeadSet(from)
		## locate the bead-count assayData elements; naming differs between
		## methylumi versions / import routes
		if("methylated.N" %in% ls(from@assayData) && "unmethylated.N" %in% ls(from@assayData) ){
			meth.N.element<-"methylated.N"
			umeth.N.element<-"unmethylated.N"
		}else if("Avg_NBEADS_A" %in% ls(from@assayData) && "Avg_NBEADS_B" %in% ls(from@assayData)){
			meth.N.element<-"Avg_NBEADS_B"
			umeth.N.element<-"Avg_NBEADS_A"
		}else{
			meth.N.element<-NULL
			umeth.N.element<-NULL
		}
		## out-of-band intensities are optional
		if("methylated.OOB" %in% ls(from@assayData) && "unmethylated.OOB" %in% ls(from@assayData)){
			meth.oob.element<-"methylated.OOB"
			umeth.oob.element<-"unmethylated.OOB"
		}else{
			meth.oob.element<-NULL
			umeth.oob.element<-NULL
		}
		## map the annotation string to the RnBeads platform identifier
		if(annotation(from)=="IlluminaMethylationEPIC"){
			platform="EPIC"
		}else if(annotation(from)=="IlluminaHumanMethylation450k"){
			platform="450k"
		}else if(annotation(from)=="IlluminaHumanMethylation27k"){
			platform="27k"
		}else{
			## BUGFIX: an unrecognized annotation previously left `platform`
			## undefined, producing an obscure "object 'platform' not found"
			## error when RnBeadRawSet() later evaluated the argument
			stop("unsupported platform annotation: ", annotation(from))
		}
		object<-RnBeadRawSet(
				pheno=m.data$pheno,
				probes=m.data$probes,
				M=methylated(from),
				U=unmethylated(from),
				p.values=m.data$p.values,
				M0=if(!is.null(meth.oob.element)) get(meth.oob.element,from@assayData) else NULL,
				U0=if(!is.null(umeth.oob.element)) get(umeth.oob.element,from@assayData) else NULL,
				bead.counts.M=if(!is.null(meth.N.element)) get(meth.N.element,from@assayData) else NULL,
				bead.counts.U=if(!is.null(umeth.N.element)) get(umeth.N.element,from@assayData) else NULL,
				platform=platform
		)
		if ("qc" %in% names(m.data)) {
			qc(object)<-m.data[["qc"]]
		}
		object
	})
########################################################################################################################
#' as("RnBeadRawSet", "MethyLumiSet")
#'
#' Convert a \code{\linkS4class{RnBeadRawSet}} object to \code{\linkS4class{MethyLumiSet}}
#'
#' @name as.RnBeadRawSet
setAs("RnBeadRawSet","MethyLumiSet",
	function(from, to){
		if(!inherits(from,"RnBeadRawSet")){
			## BUGFIX: previously deparsed the nonexistent symbol `methylumi.set`
			stop("not a RnBeadRawSet object:", deparse(substitute(from)))
		}
		## assemble the assayData environment expected by MethyLumiSet;
		## optional slots are only added when present
		assd<-new.env()
		assign("betas", meth(from), envir=assd)
		assign("pvals", dpval(from), envir=assd)
		assign("methylated", M(from), envir=assd)
		assign("unmethylated", U(from), envir=assd)
		if(!is.null(M0(from))){
			assign("methylated.OOB", M0(from), envir=assd)
		}
		if(!is.null(U0(from))){
			assign("unmethylated.OOB", U0(from), envir=assd)
		}
		if(!is.null(bead.counts.M(from))){
			assign("methylated.N", bead.counts.M(from), envir=assd)
		}
		if(!is.null(bead.counts.U(from))){
			assign("unmethylated.N", bead.counts.U(from), envir=assd)
		}
		pd<-pheno(from)
		rownames(pd)<-colnames(meth(from))
		mset<-new(to, assd, as(pd, "AnnotatedDataFrame"))
		rm(assd)
		## attach the probe annotation (color channel, probe identifiers)
		ann<-annotation(from, add.names=TRUE)
		featureData(mset)<-as(data.frame(COLOR_CHANNEL=ann$Color, rownames=rownames(ann)), "AnnotatedDataFrame")
		featureNames(mset)<-ann[["ID"]]
		if (!is.null(qc(from))){
			## transfer the control probe intensities into a MethyLumiQC object
			assd<-new.env()
			assign("methylated", qc(from)$Cy3, envir=assd)
			assign("unmethylated", qc(from)$Cy5, envir=assd)
			mset@QC<-new("MethyLumiQC", assd)
			if(from@target == "probesEPIC"){
				probeIDs<-rnb.get.annotation("controlsEPIC")[,"Target"]
				## TODO remove this after annotation has been fixed
				index<-rnb.update.controlsEPIC.enrich(rnb.get.annotation("controlsEPIC"))[,"Index"]
				probeIDs<-paste(probeIDs, index, sep=".")
			}else if(from@target == "probes450"){
				probeIDs<-rnb.get.annotation("controls450")[,"Target"]
				probeIDs<-paste(probeIDs, unlist(sapply(table(probeIDs)[unique(probeIDs)], seq, from=1 )), sep=".")
			}else if(from@target == "probes27"){
				probeIDs<-rnb.get.annotation("controls27")[,"Name"]
			}
			featureData(mset@QC)<-as(data.frame(Address=rownames(qc(from)$Cy3), rownames=probeIDs), "AnnotatedDataFrame")
			featureNames(mset@QC)<-probeIDs
			if(from@target == "probesEPIC"){
				annotation(mset@QC) <- "IlluminaMethylationEPIC"
			}else if(from@target == "probes450"){
				annotation(mset@QC) <- "IlluminaHumanMethylation450k"
			}else if(from@target == "probes27"){
				## BUGFIX: previously assigned annotation(mset) here, leaving
				## the QC object's annotation unset for the 27k platform
				annotation(mset@QC) <- "IlluminaHumanMethylation27k"
			}
		}
		## annotation of the full object, for all platforms
		if(from@target == "probesEPIC"){
			annotation(mset) <- "IlluminaMethylationEPIC"
		}else if(from@target == "probes450"){
			annotation(mset) <- "IlluminaHumanMethylation450k"
		}else if(from@target == "probes27"){
			annotation(mset) <- "IlluminaHumanMethylation27k"
		}
		mset
	})
########################################################################################################################
## ---------------------------------------------------------------------------------------------------------------------
## ACCESSORS
## ---------------------------------------------------------------------------------------------------------------------
## Define the generic only if no other attached package has created one already.
if(!isGeneric("M")) setGeneric('M',
function(object, ...) standardGeneric('M'))
#' M-methods
#'
#' Extract raw methylated probe intensity from an object of \code{RnBeadRawSet} class.
#'
#' @param object Dataset of interest.
#' @param row.names Flag indicating whether the resulting matrix will be assigned row names
#'
#' @return \code{matrix} of the methylated probe intensities
#'
#' @rdname M-methods
#' @docType methods
#' @export
#' @aliases M
#' @aliases M,RnBeadRawSet-method
#' @examples
#' \donttest{
#' library(RnBeads.hg19)
#' data(small.example.object)
#' M.intensity<-M(rnb.set.example)
#' head(M.intensity)
#' }
#'
setMethod("M", signature(object="RnBeadRawSet"),
function(object, row.names=FALSE){
## Delegates to get.dataset.matrix, which handles ff-backed storage and
## optional row naming. NOTE(review): object@meth.regions is passed as the
## region data -- its exact use depends on get.dataset.matrix; confirm there.
get.dataset.matrix(object, "sites", row.names, object@M, object@meth.regions)
})
## Define the generic only if no other attached package has created one already.
if(!isGeneric("U")) setGeneric('U',
function(object, ...) standardGeneric('U'))
########################################################################################################################
#' U-methods
#'
#' Extract raw unmethylated probe intensity from an object of \code{RnBeadRawSet} class.
#'
#' @param object Dataset of interest.
#' @param row.names Flag indicating whether the resulting matrix will be assigned row names
#'
#' @return \code{matrix} of the unmethylated probe intensities
#'
#' @rdname U-methods
#' @docType methods
#' @export
#' @aliases U
#' @aliases U,RnBeadRawSet-method
#' @examples
#' \donttest{
#' library(RnBeads.hg19)
#' data(small.example.object)
#' U.intensity<-U(rnb.set.example)
#' head(U.intensity)
#' }
setMethod("U", signature(object="RnBeadRawSet"),
function(object, row.names=FALSE){
## Same delegation pattern as the M accessor, reading the U slot instead.
get.dataset.matrix(object, "sites", row.names, object@U, object@meth.regions)
})
########################################################################################################################
## Accessor for the "out-of-band" methylated probe intensities (M0 slot).
## NOTE(review): the slot may be NULL when no out-of-band data was imported;
## the returned value then depends on get.dataset.matrix -- confirm there.
setGeneric('M0',
function(object, ...) standardGeneric('M0'))
setMethod("M0", signature(object="RnBeadRawSet"),
function(object, row.names=FALSE){
get.dataset.matrix(object, "sites", row.names, object@M0, object@meth.regions)
})
########################################################################################################################
## Accessor for the "out-of-band" unmethylated probe intensities (U0 slot);
## mirrors the M0 accessor.
setGeneric('U0',
function(object, ...) standardGeneric('U0'))
setMethod("U0", signature(object="RnBeadRawSet"),
function(object, row.names=FALSE){
get.dataset.matrix(object, "sites", row.names, object@U0, object@meth.regions)
})
########################################################################################################################
## Accessor for the per-probe bead counts of the methylated measurement
## (bead.counts.M slot); may be NULL when bead counts were not imported.
setGeneric('bead.counts.M',
function(object, ...) standardGeneric('bead.counts.M'))
setMethod("bead.counts.M", signature(object="RnBeadRawSet"),
function(object, row.names=FALSE){
get.dataset.matrix(object, "sites", row.names, object@bead.counts.M, object@meth.regions)
})
########################################################################################################################
## Accessor for the per-probe bead counts of the unmethylated measurement
## (bead.counts.U slot); mirrors the bead.counts.M accessor.
setGeneric('bead.counts.U',
function(object, ...) standardGeneric('bead.counts.U'))
setMethod("bead.counts.U", signature(object="RnBeadRawSet"),
function(object, row.names=FALSE){
get.dataset.matrix(object, "sites", row.names, object@bead.counts.U, object@meth.regions)
})
## ---------------------------------------------------------------------------------------------------------------------
## MODIFIERS
## ---------------------------------------------------------------------------------------------------------------------
setGeneric('M<-',
		function(object, value) standardGeneric('M<-'))
## Replacement method for the M (methylated intensity) slot. When disk
## dumping is enabled, the supplied matrix is converted to an ff object.
setMethod("M<-", signature(object="RnBeadRawSet", value="matrixOrffOrNULL"),
		function(object, value){
			if(object@status$disk.dump){
				# delete(object@M)
				object@M<-convert.to.ff.matrix.tmp(value)
			}else{
				object@M<-value
			}
			## BUGFIX: a replacement method must return the modified object.
			## Previously the value of the last assignment (the bare matrix)
			## was returned, so `M(x) <- v` replaced x with the matrix itself.
			object
		})
########################################################################################################################
setGeneric('U<-',
		function(object, value) standardGeneric('U<-'))
## Replacement method for the U (unmethylated intensity) slot; converts the
## value to an ff object when disk dumping is enabled.
setMethod("U<-", signature(object="RnBeadRawSet", value="matrixOrffOrNULL"),
		function(object, value){
			if(object@status$disk.dump){
				# delete(object@U)
				object@U<-convert.to.ff.matrix.tmp(value)
			}else{
				object@U<-value
			}
			## BUGFIX: return the modified object (replacement-method contract);
			## previously the bare matrix was returned.
			object
		})
########################################################################################################################
setGeneric('M0<-',
		function(object, value) standardGeneric('M0<-'))
## Replacement method for the M0 (out-of-band methylated) slot.
setMethod("M0<-", signature(object="RnBeadRawSet", value="matrixOrffOrNULL"),
		function(object, value){
			if(object@status$disk.dump){
				# delete(object@M0)
				object@M0<-convert.to.ff.matrix.tmp(value)
			}else{
				object@M0<-value
			}
			## BUGFIX: return the modified object (replacement-method contract);
			## previously the bare matrix was returned.
			object
		})
########################################################################################################################
setGeneric('U0<-',
		function(object, value) standardGeneric('U0<-'))
## Replacement method for the U0 (out-of-band unmethylated) slot.
setMethod("U0<-", signature(object="RnBeadRawSet", value="matrixOrffOrNULL"),
		function(object, value){
			if(object@status$disk.dump){
				# delete(object@U0)
				object@U0<-convert.to.ff.matrix.tmp(value)
			}else{
				object@U0<-value
			}
			## BUGFIX: return the modified object (replacement-method contract);
			## previously the bare matrix was returned.
			object
		})
########################################################################################################################
setGeneric('bead.counts.M<-',
		function(object, value) standardGeneric('bead.counts.M<-'))
## Replacement method for the methylated bead-count slot.
setMethod("bead.counts.M<-", signature(object="RnBeadRawSet", value="matrixOrffOrNULL"),
		function(object, value){
			if(object@status$disk.dump){
				# delete(object@bead.counts.M)
				object@bead.counts.M<-convert.to.ff.matrix.tmp(value)
			}else{
				object@bead.counts.M<-value
			}
			## BUGFIX: return the modified object (replacement-method contract);
			## previously the bare matrix was returned.
			object
		})
########################################################################################################################
setGeneric('bead.counts.U<-',
		function(object, value) standardGeneric('bead.counts.U<-'))
## Replacement method for the unmethylated bead-count slot.
setMethod("bead.counts.U<-", signature(object="RnBeadRawSet", value="matrixOrffOrNULL"),
		function(object, value){
			if(object@status$disk.dump){
				# delete(object@bead.counts.U)
				object@bead.counts.U<-convert.to.ff.matrix.tmp(value)
			}else{
				object@bead.counts.U<-value
			}
			## BUGFIX: return the modified object (replacement-method contract);
			## previously the bare matrix was returned.
			object
		})
########################################################################################################################
if (!isGeneric("remove.sites")) {
setGeneric("remove.sites", function(object, probelist, verbose = TRUE) standardGeneric("remove.sites"))
}
#' @rdname remove.sites-methods
#' @aliases remove.sites,RnBeadRawSet-method
#' @docType methods
#' @export
## Removes the given probes (matrix rows) from every non-NULL raw-data slot,
## then lets the parent-class method (via callNextMethod) update the
## inherited slots.
setMethod("remove.sites", signature(object = "RnBeadRawSet"),
function(object, probelist, verbose = TRUE) {
## map probe identifiers to row indices of the site matrices
inds <- get.i.vector(probelist, rownames(object@meth.sites))
if (length(inds) != 0) {
for(sl in RNBRAWSET.SLOTNAMES){
if(!is.null(slot(object,sl))){
if(!is.null(object@status) && object@status$disk.dump){
## ff-backed slot: subset into memory, optionally delete the old
## on-disk copy, then dump the reduced matrix back to disk
new.matrix<-slot(object,sl)[-inds,,drop=FALSE]
if(isTRUE(object@status$discard.ff.matrices)){
delete(slot(object,sl))
}
slot(object,sl)<-convert.to.ff.matrix.tmp(new.matrix)
rm(new.matrix); rnb.cleanMem()
}else{
slot(object,sl)<-slot(object,sl)[-inds,,drop=FALSE]
}
}
}
}
callNextMethod()
}
)
########################################################################################################################
if (!isGeneric("remove.samples")) {
setGeneric("remove.samples", function(object, samplelist) standardGeneric("remove.samples"))
}
#' @rdname remove.samples-methods
#' @aliases remove.samples,RnBeadRawSet-method
#' @docType methods
#' @export
## Removes the given samples (matrix columns) from every non-NULL raw-data
## slot, then delegates to the parent-class method for the inherited slots.
setMethod("remove.samples", signature(object = "RnBeadRawSet"),
function(object, samplelist) {
## map sample identifiers to column indices
inds <- get.i.vector(samplelist, samples(object))
if (length(inds) != 0) {
for(sl in RNBRAWSET.SLOTNAMES){
if(!is.null(slot(object,sl))){
if(!is.null(object@status) && object@status$disk.dump){
## ff-backed slot: subset into memory, optionally delete the old
## on-disk copy, then dump the reduced matrix back to disk
new.matrix<-slot(object,sl)[,-inds, drop=FALSE]
if(isTRUE(object@status$discard.ff.matrices)){
delete(slot(object,sl))
}
slot(object,sl)<-convert.to.ff.matrix.tmp(new.matrix)
rm(new.matrix); rnb.cleanMem()
}else{
slot(object,sl)<-slot(object,sl)[,-inds, drop=FALSE]
}
}
}
}
callNextMethod()
}
)
#######################################################################################################################
#if (!isGeneric("update.meth")) {
setGeneric("update.meth", function(object) standardGeneric("update.meth"))
#}
##
## update.meth
##
## Recompute the beta-value methylation matrix from the current M and U
## intensity slots (used after the intensities have been changed).
##
## param object RnBeadRawSet object
##
## return RnBeadRawSet object with an updated meth.sites slot
##
setMethod("update.meth", signature(object="RnBeadRawSet"),
		function(object) {
			if (!object@status$disk.dump) {
				object@meth.sites <- beta.value(object@M, object@U)
			} else {
				## materialize the ff matrices, recompute, then dump back to disk
				object@meth.sites <- convert.to.ff.matrix.tmp(
						beta.value(object@M[,], object@U[,]))
			}
			object
		})
#######################################################################################################################
## save, load and destroy
## Dump every ff-backed raw-data slot to disk via ffsave, then delegate the
## inherited matrices to the parent-class method.
setMethod("save.matrices", signature(object="RnBeadRawSet", path="character"),
		function(object, path){
			if(!is.null(object@status) && object@status$disk.dump){
				for(sl in RNBRAWSET.SLOTNAMES){
					if(!is.null(slot(object,sl))){
						## inherits() is the idiomatic test against the S3 class
						## vector (replaces the previous "ff" %in% class(...))
						if(inherits(slot(object,sl), "ff")){
							ffmatrix<-slot(object,sl)
							ffsave(ffmatrix, file=file.path(path, paste("rnb", sl, sep=".")),
									rootpath=getOption('fftempdir'))
							rm(ffmatrix)
						}
					}
				}
			}
			callNextMethod(object, path)
		})
#######################################################################################################################
## Restore the ff-backed raw-data slots written by save.matrices, then
## delegate to the parent-class method for the inherited matrices.
setMethod("load.matrices", signature(object="RnBeadRawSet", path="character"),
		function(object, path, temp.dir=tempdir()){
			## snapshot the directory listing once; it was previously queried
			## twice per slot inside the loop
			path.files <- list.files(path)
			for(sl in RNBRAWSET.SLOTNAMES){
				if(!is.null(slot(object, sl))){
					## both the .RData and .ffData files written by ffsave must exist
					if(paste("rnb",sl,"RData", sep=".") %in% path.files &&
							paste("rnb",sl,"ffData", sep=".") %in% path.files){
						load_env<-new.env()
						suppressMessages(ffload(file=file.path(path, paste("rnb", sl, sep=".")),
										envir=load_env,rootpath=getOption("fftempdir")))
						slot(object, sl)<-get("ffmatrix", envir=load_env)
						rm(load_env)
					}
				}
			}
			callNextMethod(object=object, path=path, temp.dir=temp.dir)
		})
#######################################################################################################################
#' @rdname destroy-methods
#' @aliases destroy,RnBeadRawSet-method
#' @docType methods
#' @export
setMethod("destroy", signature(object="RnBeadRawSet"),
		function(object) {
			## ff-backed slots hold on-disk files that must be removed explicitly
			if (object@status$disk.dump) {
				for (slot.name in RNBRAWSET.SLOTNAMES) {
					current <- slot(object, slot.name)
					if (!is.null(current)) {
						delete(current)
					}
				}
			}
			callNextMethod()
		}
)
## ---------------------------------------------------------------------------------------------------------------------
## HELPER ROUTINES
## ---------------------------------------------------------------------------------------------------------------------
## Beta-value: methylated intensity as a fraction of the total intensity,
## regularized by an additive offset in the denominator.
beta.value <- function(M, U, offset = 100) {
	total <- M + U + offset
	M / total
}
#######################################################################################################################
## M-value: log2 ratio of the offset-regularized methylated to unmethylated
## intensities.
m.value <- function(M, U, offset = 100) {
	ratio <- (M + offset) / (U + offset)
	log2(ratio)
}
#######################################################################################################################
#' intensities.by.color
#'
#' Rearranges information from "M" and "U" slots of a RnBeadsRawSet object by color channel.
#'
#' @param raw.set RnBeadRawSet object
#' @param address.rownames if \code{TRUE} the rows of the returned matrices are named with the corresponding Illumina probe addresses
#' @param add.oob if \code{TRUE} the "out-of-band" intensities are included
#' @param add.controls if \code{TRUE} the control probe intensities are included
#' @param add.missing if \code{TRUE} the rows for the probes missing in \code{raw.set} is imputed with \code{NA} values
#'
#' @return a \code{list} with elements \code{Cy3} and \code{Cy5} containing average bead intensities
#' measured for each probe in the green and red channels, respectively
#'
#' @author Pavlo Lutsik
intensities.by.color<-function(raw.set,
address.rownames=TRUE,
add.oob=TRUE,
add.controls=TRUE,
add.missing=TRUE
){
## NOTE(review): the 450k manifest is required unconditionally, even when
## raw.set@target is "probesEPIC" or "probes27" -- confirm intended scope.
if(!require("IlluminaHumanMethylation450kmanifest")){
rnb.error("IlluminaHumanMethylation450kmanifest should be installed")
}
Mmatrix<-M(raw.set, row.names=TRUE)
Umatrix<-U(raw.set, row.names=TRUE)
if(add.oob){
M0matrix<-M0(raw.set, row.names=TRUE)
U0matrix<-U0(raw.set, row.names=TRUE)
}
## probe annotation for all probes present in the dataset
pinfos <- annotation(raw.set, add.names=TRUE)
if(add.missing){
## pad with NA-filled rows so that every annotated probe is represented
full.ann<-rnb.annotation2data.frame(rnb.get.annotation(raw.set@target))
ann.missing<-full.ann[!rownames(full.ann)%in%rownames(pinfos),]
pinfos<-rbind(pinfos, ann.missing[,colnames(full.ann)])
filler<-matrix(NA_real_, nrow=nrow(ann.missing), ncol=length(samples(raw.set)))
rownames(filler)<-rownames(ann.missing)
Mmatrix<-rbind(Mmatrix, filler)
Umatrix<-rbind(Umatrix, filler)
if(add.oob){
M0matrix<-rbind(M0matrix, filler)
U0matrix<-rbind(U0matrix, filler)
}
rm(ann.missing, filler, full.ann)
}
rnb.set.probe.ids<-pinfos[["ID"]]
## type II probes: M is measured in green, U in red, at a single address
dII.probes <- rnb.set.probe.ids[pinfos[,"Design"] == "II"]
#dII.probes <- dII.probes[!grepl("rs", dII.probes)]
if(address.rownames){
## map type II probe names to their bead addresses via the manifest
tII<-rbind(as.data.frame(IlluminaHumanMethylation450kmanifest@data$TypeII[,c("Name", "AddressA")]),
as.data.frame(IlluminaHumanMethylation450kmanifest@data$TypeSnpII[,c("Name", "AddressA")]))
tII<-tII[match(dII.probes, tII$Name),]
}
dII.grn<-Mmatrix[pinfos[,"Design"] == "II",,drop=FALSE]
if(address.rownames) rownames(dII.grn)<-tII$AddressA
dII.red<-Umatrix[pinfos[,"Design"] == "II",,drop=FALSE]
if(address.rownames) rownames(dII.red)<-tII$AddressA
## type I probes: both M and U measured in one channel, at two addresses
## (AddressA = unmethylated, AddressB = methylated)
dI.red.probes <- rnb.set.probe.ids[pinfos[, "Color"] == "Red"]
#dI.red.probes <- dI.red.probes[!grepl("rs", dI.red.probes)]
dI.green.probes <- rnb.set.probe.ids[pinfos[, "Color"] == "Grn"]
#dI.green.probes <- dI.green.probes[!grepl("rs", dI.green.probes)]
if(address.rownames){
tI<-rbind(as.data.frame(IlluminaHumanMethylation450kmanifest@data$TypeI[,c("Name","Color", "AddressA", "AddressB")]),
as.data.frame(IlluminaHumanMethylation450kmanifest@data$TypeSnpI[,c("Name","Color", "AddressA", "AddressB")]))
tI.red<-tI[tI$Color=="Red",]
tI.red<-tI.red[match(dI.red.probes, tI.red$Name),]
tI.grn<-tI[tI$Color=="Grn",]
tI.grn<-tI.grn[match(dI.green.probes, tI.grn$Name),]
}
dI.red.meth<-Mmatrix[pinfos[, "Color"] == "Red",,drop=FALSE]
if(address.rownames) rownames(dI.red.meth)<-tI.red[,"AddressB"]
dI.red.umeth<-Umatrix[pinfos[, "Color"] == "Red",,drop=FALSE]
if(address.rownames) rownames(dI.red.umeth)<-tI.red[,"AddressA"]
if(add.oob){
## out-of-band: intensities of red-channel probes read in the green
## channel and vice versa
dI.red.meth.oob<-M0matrix[pinfos[, "Color"] == "Red",,drop=FALSE]
if(address.rownames) rownames(dI.red.meth.oob)<-tI.red[,"AddressB"]
dI.red.umeth.oob<-U0matrix[pinfos[, "Color"] == "Red",,drop=FALSE]
if(address.rownames) rownames(dI.red.umeth.oob)<-tI.red[,"AddressA"]
}
dI.grn.meth<-Mmatrix[pinfos[, "Color"] == "Grn",,drop=FALSE]
if(address.rownames) rownames(dI.grn.meth)<-tI.grn[,"AddressB"]
dI.grn.umeth<-Umatrix[pinfos[, "Color"] == "Grn",,drop=FALSE]
if(address.rownames) rownames(dI.grn.umeth)<-tI.grn[,"AddressA"]
if(add.oob){
dI.grn.meth.oob<-M0matrix[pinfos[, "Color"] == "Grn",,drop=FALSE]
if(address.rownames) rownames(dI.grn.meth.oob)<-tI.grn[,"AddressB"]
dI.grn.umeth.oob<-U0matrix[pinfos[, "Color"] == "Grn",,drop=FALSE]
if(address.rownames) rownames(dI.grn.umeth.oob)<-tI.grn[,"AddressA"]
}
## per-channel assembly: in-band intensities plus (optionally) the
## out-of-band readings from probes of the opposite channel
intensities.by.channel <- list(
Cy3=rbind(dII.grn, dI.grn.meth,dI.grn.umeth,
if(add.oob) dI.red.meth.oob else NULL, if(add.oob) dI.red.umeth.oob else NULL),
Cy5=rbind(dII.red, dI.red.meth, dI.red.umeth,
if(add.oob) dI.grn.meth.oob else NULL, if(add.oob) dI.grn.umeth.oob else NULL))
## NOTE(review): when add.oob is FALSE the *.oob objects below do not exist
## and this rm() call emits warnings -- harmless but noisy; confirm.
rm(dII.grn, dI.grn.meth, dI.grn.umeth, dI.red.meth.oob, dI.red.umeth.oob,
dII.red, dI.red.meth, dI.red.umeth, dI.grn.meth.oob, dI.grn.umeth.oob)
gc()
## align the Cy5 rows with the Cy3 row order
if(address.rownames) intensities.by.channel$Cy5<-intensities.by.channel$Cy5[rownames(intensities.by.channel$Cy3),,drop=FALSE]
if(add.controls){
## append control probe intensities, matched by control probe ID
ncd<-rnb.get.annotation("controls450")
#ncd<-ncd[ncd[["Target"]] == "NEGATIVE", ]
ncd$Target<-tolower(ncd$Target)
controls.by.channel<-qc(raw.set)
controls.by.channel$Cy3<-controls.by.channel$Cy3[as.character(ncd$ID),,drop=FALSE]
controls.by.channel$Cy5<-controls.by.channel$Cy5[as.character(ncd$ID),,drop=FALSE]
intensities.by.channel$Cy3<-rbind(intensities.by.channel$Cy3, controls.by.channel$Cy3)
intensities.by.channel$Cy5<-rbind(intensities.by.channel$Cy5, controls.by.channel$Cy5)
}
return(intensities.by.channel)
}
########################################################################################################################
|
# Read the hospital charge CSV and return a tidy tibble of charge data keyed
# by DRG code and provider, with zero-padded 5-character zip codes.
# Depends on readr (read_csv), dplyr (%>%, mutate, select) and stringr.
loadHospitalChargeData_CSV <- function(){
  charges <- read_csv("../data/hospital_charge_data.csv",
                      col_types = cols(`Average Covered Charges` = col_number(),
                                       `Average Medicare Payments` = col_number(),
                                       `Average Total Payments` = col_number(),
                                       `Provider Id` = col_character(),
                                       `Provider Zip Code` = col_character()))
  # Drop a column that is never used downstream
  charges$`Hospital Referral Region Description` <- NULL
  # Normalise column names: lower case, spaces and dots become underscores
  colnames(charges) <- tolower(gsub("[ .]", "_", colnames(charges), perl = FALSE))
  # The numeric DRG code is the leading run of digits in the definition string
  drg_codes <- str_extract(charges$drg_definition, "[0-9]+")
  hcd <- charges %>%
    mutate(provider_city = tolower(provider_city),
           drg_code = drg_codes) %>%
    select(drg_code,
           drg_definition,
           provider_id,
           provider_name,
           total_discharges,
           average_covered_charges,
           average_total_payments,
           average_medicare_payments,
           provider_street_address,
           provider_city,
           provider_state,
           provider_zip_code)
  # Zip codes can lose leading zeros upstream; restore a fixed 5-char width
  hcd$provider_zip_code <- str_pad(hcd$provider_zip_code, 5, side = "left", pad = "0")
  return(hcd)
}
# Read the readmission CSV and return a tibble of readmission measures,
# restricted to rows with a denominator and with normalised provider fields
# (lower-case city, zero-padded zip) for joining against the charge data.
loadHospitalReadmissionData_CSV <- function(){
  readm <- read_csv("../data/readmission_data.csv",
                    col_types = cols(Denominator = col_number(),
                                     `Higher Estimate` = col_number(),
                                     `Lower Estimate` = col_number(),
                                     `Measure End Date` = col_date(format = "%m/%d/%Y"),
                                     `Measure Start Date` = col_date(format = "%m/%d/%Y"),
                                     `Phone Number` = col_skip(),
                                     Score = col_number(),
                                     `ZIP Code` = col_character()),
                    na = "NA")
  # Columns that are never used downstream
  readm$Footnote <- NULL
  readm$Location <- NULL
  # Normalise column names: lower case, spaces and dots become underscores
  colnames(readm) <- tolower(gsub("[ .]", "_", colnames(readm), perl = FALSE))
  hrd <- readm %>%
    filter(!is.na(denominator)) %>%
    mutate(city_name = tolower(city), provider_state = state, provider_city = tolower(city),
           provider_zip_code = str_pad(zip_code, 5, side = "left", pad = "0")) %>%
    select(provider_id,
           hospital_name,
           measure_id,
           measure_name,
           compared_to_national,
           denominator,
           score,
           lower_estimate,
           higher_estimate,
           measure_start_date,
           measure_end_date,
           address,
           provider_city,
           provider_state,
           provider_zip_code)
  return(hrd)
}
# Read the US region lookup table and lower-case the state names so they can
# be joined against other sources that use lower-case names.
loadRegionInfo_CSV <- function(){
  regions <- read_csv("../data/us_regions.csv")
  regions$state_name <- tolower(regions$state_name)
  return(regions)
}
# Read the FRED income table (one column per state series, observations in
# rows) and return a two-column tibble: state_name and median_income taken
# from the last row (the most recent observation).
# Assumes characters 9-10 of each non-date column name hold the state code
# (that is what the original substring(., 9, 10) extracts) -- confirm against
# the input file's header.
loadIncomeData_TSV <- function(){
  fred <- read_delim("../data/income_data_fred.txt",
                     "\t", escape_double = FALSE, trim_ws = TRUE)
  # State identifiers are embedded at positions 9-10 of every column name
  # except the first (date) column.
  state_codes <- substring(colnames(fred)[-1], 9, 10)
  # The final row holds the latest value for every state.
  latest <- fred[nrow(fred), -1]
  colnames(latest) <- state_codes
  out <- tibble(state_codes, as.vector(t(latest)))
  colnames(out) <- c("state_name", "median_income")
  return(out)
}
# Parse the state scorecard XML into a data frame and lower-case its column
# names. Depends on the XML package (xmlParse, xmlToDataFrame).
loadStateScorecard_XML <- function(){
  doc <- xmlParse("../data/state_scorecard.xml")
  scorecard <- xmlToDataFrame(doc)
  colnames(scorecard) <- tolower(colnames(scorecard))
  return(scorecard)
}
# Look up county name and numeric FIPS id for the given zip codes using the
# zip.regions table (loaded into the calling environment via data(); the
# table presumably comes from choroplethrZip -- confirm).
# Returns a data frame with columns zip_code, county_name, county_id.
getRegionInfoByZip <- function(zip_list){
  data("zip.regions")
  matched <- zip.regions %>%
    filter(region %in% zip_list) %>%
    select(region, county.name, county.fips.numeric)
  colnames(matched) <- c("zip_code", "county_name", "county_id")
  return(matched)
}
# Look up full state name and numeric FIPS id for the given two-letter state
# abbreviations using the state.regions table (loaded via data()).
# Input is upper-cased first so matching is case-insensitive.
# Returns a data frame with columns state_name, state_short, state_id.
getStateInfoByAbbrev <- function(state_abbrev_list){
  data("state.regions")
  abbrevs <- toupper(state_abbrev_list)
  matched <- state.regions %>%
    filter(abb %in% abbrevs) %>%
    select(region, abb, fips.numeric)
  colnames(matched) <- c("state_name", "state_short", "state_id")
  return(matched)
}
| /lib/hospital_data_utils.R | no_license | TZstatsADS/Spr2017-proj2-grp6 | R | false | false | 4,571 | r | loadHospitalChargeData_CSV <- function(){
hospital_charge_data <- read_csv("../data/hospital_charge_data.csv",
col_types = cols(`Average Covered Charges` = col_number(),
`Average Medicare Payments` = col_number(),
`Average Total Payments` = col_number(),
`Provider Id` = col_character(),
`Provider Zip Code` = col_character()))
# remove useless data
NULL->hospital_charge_data$`Hospital Referral Region Description`
cnames <- colnames(hospital_charge_data)
new_cnames <- tolower(gsub("[ .]","_",cnames,perl=FALSE))
new_cnames -> colnames(hospital_charge_data)
drg <- str_extract(hospital_charge_data$drg_definition,"[0-9]+")
mdf <- hospital_charge_data %>% mutate(provider_city = tolower(provider_city),
drg_code = drg)
hcd <- mdf %>%
select(drg_code,
drg_definition,
provider_id,
provider_name,
total_discharges,
average_covered_charges,
average_total_payments,
average_medicare_payments,
provider_street_address,
provider_city,
provider_state,
provider_zip_code)
hcd$provider_zip_code = str_pad(hcd$provider_zip_code,5,side=c("left"),pad="0")
return(hcd)
}
loadHospitalReadmissionData_CSV <- function(){
readmission_data <- read_csv("../data/readmission_data.csv",
col_types = cols(Denominator = col_number(),
`Higher Estimate` = col_number(),
`Lower Estimate` = col_number(),
`Measure End Date` = col_date(format = "%m/%d/%Y"),
`Measure Start Date` = col_date(format = "%m/%d/%Y"),
`Phone Number` = col_skip(),
Score = col_number(),
`ZIP Code` = col_character()),
na = "NA")
# remove useless data
NULL->readmission_data$Footnote
NULL->readmission_data$Location
cnames <- colnames(readmission_data)
new_cnames <- tolower(gsub("[ .]","_",cnames,perl=FALSE))
new_cnames -> colnames(readmission_data)
hrd <- readmission_data %>%
filter(!is.na(denominator)) %>%
mutate(city_name = tolower(city), provider_state = state, provider_city = tolower(city),
provider_zip_code = str_pad(zip_code,5,side=c("left"),pad="0")) %>%
select(provider_id,
hospital_name,
measure_id,
measure_name,
compared_to_national,
denominator,
score,
lower_estimate,
higher_estimate,
measure_start_date,
measure_end_date,
address,
provider_city,
provider_state,
provider_zip_code
)
return(hrd)
}
loadRegionInfo_CSV <- function(){
reg <- read_csv("../data/us_regions.csv")
reg <- mutate(reg,state_name = tolower(state_name))
return(reg)
}
loadIncomeData_TSV <- function(){
data <- read_delim("../data/income_data_fred.txt",
"\t", escape_double = FALSE, trim_ws = TRUE)
colnames(data) -> cnames
labels<-substring(cnames[-1],9,10)
median_income <- data[nrow(data),-1]
colnames(median_income) <- labels
df <- tibble(labels, as.vector(t(median_income)))
colnames(df) <- c("state_name","median_income")
return(df)
}
loadStateScorecard_XML <- function(){
data <- xmlParse("../data/state_scorecard.xml")
df <- xmlToDataFrame(data)
x <- colnames(df)
colnames(df)<-tolower(x)
return(df)
}
getRegionInfoByZip <- function(zip_list){
data("zip.regions")
out <- zip.regions %>%
filter(region %in% zip_list) %>%
select(region, county.name, county.fips.numeric)
colnames(out) <- c("zip_code","county_name","county_id")
return(out)
}
getStateInfoByAbbrev <- function(state_abbrev_list){
data("state.regions")
state_abbrev_list <- toupper(state_abbrev_list)
out <- state.regions %>%
filter(abb %in% state_abbrev_list) %>%
select(region, abb, fips.numeric)
colnames(out) <- c("state_name","state_short","state_id")
return(out)
}
|
## getSigEdge: S3 generic -- list all edges that can be added with a test
## statistic, and find the most significant edge.
## Author: Niharika
## Input
##   object  : imod-object
##   edgeSet : a list of edges; each edge is a vector (default NULL)
## Output
##   A dataframe with test statistics (p-value or change in AIC), the edges,
##   and a logical telling whether each edge can be added.
## NOTE(review): a second bare `getSigEdge <- function(...){}` defined right
## after this generic shadows it, so UseMethod dispatch is never reached --
## confirm whether that stub was meant to be `getSigEdge.default`.
getSigEdge <- function(object, edgeSet=NULL,...){
UseMethod("getSigEdge")
}
getSigEdge <- function(object, edgeSet=NULL,...){
} | /R/modelList.R | no_license | niharikag/gMCI | R | false | false | 469 | r | ## testEdges: List all edges that can be added with test statistic, Find most significant edge
## Author: Niharika
## Input
## object : imod-object
## edgeList : A list of edges; each edge is a vector
## Output
## A dataframe with test statistics (p-value or change in AIC), edges and logical
## telling if the edge can be added
getSigEdge <- function(object, edgeSet=NULL,...){
UseMethod("getSigEdge")
}
getSigEdge <- function(object, edgeSet=NULL,...){
} |
# Download the UCI household power consumption archive, subset the two days
# 2007-02-01 and 2007-02-02, and write a line plot of Global Active Power
# over time to plot2.png.
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", "Electric power consumption.zip")
# Fix: unzip the file actually downloaded above. The original call passed
# "Electric power consumption" (no .zip extension), a file that does not
# exist, so extraction silently failed and read.table below could not find
# household_power_consumption.txt.
unzip("Electric power consumption.zip")
data <- read.table("household_power_consumption.txt", header = TRUE, na.strings = "?", sep = ";")
# Keep only the two analysis days; dates are d/m/Y strings in the raw file.
dat <- data[data$Date == "1/2/2007" | data$Date == "2/2/2007", ]
GAP <- as.numeric(dat$Global_active_power)
datetime <- strptime(paste(dat$Date, dat$Time), format = "%d/%m/%Y %T")
png("plot2.png", 480, 480)
plot(datetime, GAP, xlab = "", ylab = "Global Active Power (kilowatts)", main = NULL, type = "l")
dev.off()
| /Assignment/plot2.R | no_license | jkelvis/ExData_Plotting1 | R | false | false | 556 | r | download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", "Electric power consumption.zip")
unzip("Electric power consumption")
data<-read.table("household_power_consumption.txt", header=T, na.strings="?", sep=";")
dat<-data[data$Date=="1/2/2007"|data$Date=="2/2/2007",]
GAP<-as.numeric(dat$Global_active_power)
datetime<-strptime(paste(dat$Date, dat$Time), format="%d/%m/%Y %T")
png("plot2.png", 480, 480)
plot(datetime, GAP, xlab="", ylab="Global Active Power (kilowatts)", main=NULL, type="l")
dev.off()
|
# Hermitian (conjugate) transpose: plain transpose for real input, conjugate
# transpose for complex input.
ht <- function(x) {
  if (is.complex(x)) t(Conj(x)) else t(x)
}
# Complex-aware crossprod: ht(x) %*% y, or ht(x) %*% x when y is NULL.
# Falls straight through to crossprod() for purely real input.
"cprod" <- function(x, y = NULL) {
  if (!is.complex(x) && !is.complex(y)) {
    return(crossprod(x, y))
  }
  if (is.null(y)) {
    crossprod(Conj(x), x)
  } else {
    crossprod(Conj(x), y)
  }
}
# Complex-aware tcrossprod: x %*% ht(y), or x %*% ht(x) when y is NULL.
# Falls straight through to tcrossprod() for purely real input.
"tcprod" <- function(x, y = NULL) {
  if (!is.complex(x) && !is.complex(y)) {
    return(tcrossprod(x, y))
  }
  if (is.null(y)) {
    tcrossprod(x, Conj(x))
  } else {
    tcrossprod(x, Conj(y))
  }
}
# Evaluate x' M x (conjugate transpose of x for complex input).
# When chol = TRUE, M is treated as a factor: the result is computed as
# (M'x)'(M'x), i.e. the quadratic form of M M' -- see cprod().
"quad.form" <- function(M, x, chol = FALSE) {
  if (chol) {
    Mx <- cprod(M, x)
    drop(cprod(Mx, Mx))
  } else {
    drop(crossprod(crossprod(M, Conj(x)), x))
  }
}
# Evaluate x' M^-1 x without forming the inverse: solve M z = x, then x' z.
"quad.form.inv" <- function(M, x) {
  z <- solve(M, x)
  drop(cprod(x, z))
}
# Evaluate left' M right (conjugate transpose of left for complex input).
"quad.3form" <- function(M, left, right) {
  Ml <- crossprod(M, Conj(left))
  crossprod(Ml, right)
}
# Evaluate left' M^-1 right via a linear solve (no explicit inverse).
`quad.3form.inv` <- function(M, left, right) {
  solved <- solve(M, right)
  drop(cprod(left, solved))
}
# Evaluate left %*% M %*% right' (conjugate transpose of right when complex).
"quad.3tform" <- function(M, left, right) {
  rM <- tcrossprod(Conj(right), M)
  tcrossprod(left, rM)
}
# Evaluate x %*% M %*% x' (conjugate transpose of x when complex).
"quad.tform" <- function(M, x) {
  xM <- tcrossprod(Conj(x), M)
  tcrossprod(x, xM)
}
# Evaluate x M^-1 x' by delegating to quad.form.inv on the transposed input.
"quad.tform.inv" <- function(M, x) {
  xh <- ht(x)
  drop(quad.form.inv(M, xh))
}
# Column-wise diagonal of x' M x, i.e. diag(quad.form(M, x)), computed
# without forming the full matrix product.
"quad.diag" <- function(M, x) {
  Mx <- crossprod(M, Conj(x))
  colSums(Mx * x)
}
# Row-wise diagonal of x M x', i.e. diag(quad.tform(M, x)), computed
# without forming the full matrix product.
"quad.tdiag" <- function(M, x) {
  xM <- tcrossprod(Conj(x), M)
  rowSums(xM * x)
}
# Column-wise diagonal of left' M right (see quad.3form).
"quad.3diag" <- function(M, left, right) {
  Ml <- crossprod(M, Conj(left))
  colSums(Ml * right)
}
# Column-wise diagonal of left M right' (see quad.3tform); uses tcprod()
# so complex right is conjugate-transposed.
"quad.3tdiag" <- function(M, left, right) {
  Mrt <- tcprod(M, right)
  colSums(Mrt * t(left))
}
#"cmahal" <-
# function (z, center, cov, inverted = FALSE, ...)
#{
# if(is.vector(z)){
# z <- matrix(z, ncol = length(z))
# } else {
# z <- as.matrix(x)
# }
#
# if (!inverted) { cov <- solve(cov, ...)}
# quad.diag(cov,sweep(z, 2, center))
#}
| /R/aaa_cprod.R | no_license | RobinHankin/emulator | R | false | false | 2,033 | r | ht <- function(x){ #Hermitian transpose
if(is.complex(x)){
return(t(Conj(x)))
} else {
return(t(x))
}
}
"cprod" <- function(x,y=NULL){
if(is.complex(x) | is.complex(y)){
if(is.null(y)){
return(crossprod(Conj(x),x))
} else {
return(crossprod(Conj(x),y))
}
} else {
return(crossprod(x,y))
}
}
"tcprod" <- function(x,y=NULL){
if(is.complex(x) | is.complex(y)){
if(is.null(y)){
return(tcrossprod(x,Conj(x)))
} else {
return(tcrossprod(x,Conj(y)))
}
} else {
return(tcrossprod(x,y))
}
}
"quad.form" <- # x' M x
function (M, x, chol = FALSE)
{
if (chol == FALSE) {
return(drop(crossprod(crossprod(M,Conj(x)),x)))
}
else {
jj <- cprod(M, x)
return(drop(cprod(jj, jj)))
}
}
"quad.form.inv" <- # x' M^-1 x
function (M, x)
{
drop(cprod(x, solve(M, x)))
}
"quad.3form" <- # left' M right
function(M,left,right)
{
crossprod(crossprod(M,Conj(left)),right)
}
`quad.3form.inv` <- # left' solve(M) right
function(M,left,right)
{
drop(cprod(left, solve(M, right)))
}
"quad.3tform" <- function(M,left,right) # left M right'
{
tcrossprod(left,tcrossprod(Conj(right),M))
}
"quad.tform" <- # x M x'
function(M,x)
{
tcrossprod(x,tcrossprod(Conj(x),M))
}
"quad.tform.inv" <- # x M^-1 x'
function(M,x){
drop(quad.form.inv(M,ht(x)))
}
"quad.diag" <- # diag(quad.form(M,x)) == diag(x' M x)
function(M,x){
colSums(crossprod(M,Conj(x)) * x)
}
"quad.tdiag" <- # diag(quad.tform(M,x)) == diag(x M x')
function(M,x){
rowSums(tcrossprod(Conj(x), M) * x)
}
"quad.3diag" <- function(M,left,right){
colSums(crossprod(M, Conj(left)) * right)
}
"quad.3tdiag" <- function(M,left,right){
colSums(t(left) * tcprod(M, right))
}
#"cmahal" <-
# function (z, center, cov, inverted = FALSE, ...)
#{
# if(is.vector(z)){
# z <- matrix(z, ncol = length(z))
# } else {
# z <- as.matrix(x)
# }
#
# if (!inverted) { cov <- solve(cov, ...)}
# quad.diag(cov,sweep(z, 2, center))
#}
|
# Plot directionality-index (DI) deltas per element and classify dynamic
# boundaries. Requires reshape2 (melt) and ggplot2; reads the per-element
# DI-delta table and writes two heatmap PDFs plus two boundary lists.
a=read.csv("herv.rnaseq_sorted.DI_delta.csv")
a$X=NULL
# Freeze the row order of `name` as factor levels so the heatmap keeps the
# input (sorted) ordering rather than alphabetical.
a$name = factor(a$name,levels=a$name)
pdf("figures/herv.rnaseq_sorted.DI_delta.pdf")
melted = melt(a)
# Signed log2 colour scale: log2(value) for positive deltas, -log2(-value)
# for negative ones.
ggplot(melted) + geom_tile(aes(x=variable,y=name,
fill=ifelse(value>0, log2(value),-log2(-value)) )) +
scale_fill_gradient2(high="red",low="blue") +
theme(axis.text.y=element_blank(),
axis.text.x=element_text(angle=90)
)
dev.off()
# NOTE(review): `deltas` is computed but unused now that CUTOFF is fixed at
# 50 (the quantile-based cutoff below is commented out).
deltas = as.numeric(unlist(a[,2:13]))
#CUTOFF = quantile(deltas,0.85)
CUTOFF = 50
b=a
# Binarise the 12 sample columns against the cutoff.
b[,2:13] = b[,2:13]>CUTOFF
#b$ES = rowSums(b[,2:3])==2 & rowSums(b[,4:13])<=2
# "ES" flag: boundary present in at least one of columns 2-3 AND the median
# of columns 4-7 is less than a quarter of the summed D00 HiC replicates.
b$ES = rowSums(b[,2:3])>=1 & apply(a[,4:7],1,median)/(a$D00_HiC_Rep1+a$D00_HiC_Rep2) < 1/4
melted = melt(b,id.vars="name")
pdf("figures/herv.rnaseq_sorted.boundaries.pdf")
ggplot(melted) + geom_tile(aes(x=variable,y=name,fill=value)) +
theme(axis.text.y=element_blank(),
axis.text.x=element_text(angle=90)
)
dev.off()
# Dynamic boundaries: flagged rows restricted to the first 99 rows
# (dIndex < 100) -- the reason for this cap is not evident here; confirm.
dIndex = which(b$ES==TRUE)
dIndex = dIndex[which(dIndex<100)]
write.table(a[dIndex,],"hervh.dynamicBoundaries.txt",row.names=F,col.names=F,quote=F,sep='\t')
write.table(a[-dIndex,],"hervh.nonDynamicBoundaries.txt",row.names=F,col.names=F,quote=F,sep='\t')
| /hervh/plotDI_delta.r | no_license | bioinfx/cvdc_scripts | R | false | false | 1,210 | r | a=read.csv("herv.rnaseq_sorted.DI_delta.csv")
a$X=NULL
a$name = factor(a$name,levels=a$name)
pdf("figures/herv.rnaseq_sorted.DI_delta.pdf")
melted = melt(a)
ggplot(melted) + geom_tile(aes(x=variable,y=name,
fill=ifelse(value>0, log2(value),-log2(-value)) )) +
scale_fill_gradient2(high="red",low="blue") +
theme(axis.text.y=element_blank(),
axis.text.x=element_text(angle=90)
)
dev.off()
deltas = as.numeric(unlist(a[,2:13]))
#CUTOFF = quantile(deltas,0.85)
CUTOFF = 50
b=a
b[,2:13] = b[,2:13]>CUTOFF
#b$ES = rowSums(b[,2:3])==2 & rowSums(b[,4:13])<=2
b$ES = rowSums(b[,2:3])>=1 & apply(a[,4:7],1,median)/(a$D00_HiC_Rep1+a$D00_HiC_Rep2) < 1/4
melted = melt(b,id.vars="name")
pdf("figures/herv.rnaseq_sorted.boundaries.pdf")
ggplot(melted) + geom_tile(aes(x=variable,y=name,fill=value)) +
theme(axis.text.y=element_blank(),
axis.text.x=element_text(angle=90)
)
dev.off()
dIndex = which(b$ES==TRUE)
dIndex = dIndex[which(dIndex<100)]
write.table(a[dIndex,],"hervh.dynamicBoundaries.txt",row.names=F,col.names=F,quote=F,sep='\t')
write.table(a[-dIndex,],"hervh.nonDynamicBoundaries.txt",row.names=F,col.names=F,quote=F,sep='\t')
|
# ---------------------------------------------------------------------------
# SEND conformance checker -- setup section.
# Loads packages and helper functions, reads the CDISC SEND conformance
# rules, and builds the lookup tables used to translate rule text into R
# expressions. NOTE(review): rm(list=ls()) wipes the caller's workspace.
# ---------------------------------------------------------------------------
rm(list=ls())
library(Hmisc)
library(tools)
library(stringr)
library(lattice)
library(devtools)
library(httr)
# Negated %in%, used when classifying rule results at the end of the script.
`%ni%` <- Negate('%in%')
# Necessary Changes to the Conformance Rules File:
# Problem #1: In excel file or conformance rules in cell [559,F] a non-ASCI character is used for " that needs to be fixed.
# Solution #1: I fixed this by replacing its contents with cell [555,F]
# Problem #2: In Rule 58, Class was specified as "ALL" but SPC and INT domains are not applicable and break rule
# Solution #2: Class changed from "ALL" to "TDM, FND, EVT"
# Source Functions.R from PHUSE GitHub (provides load.GitHub.xpt.files used below)
source_url('https://raw.githubusercontent.com/phuse-org/phuse-scripts/master/contributed/Nonclinical/R/Functions/Functions.R')
# Set working directory to location of script.
# NOTE(review): sys.calls()[[1]][[2]] only resolves a path when the script is
# run via source()/Rscript in a particular way -- fragile; confirm.
PATH <- dirname(sys.calls()[[1]][[2]])
setwd(PATH)
# Select dataset from PHUSE GitHub to evaluate (a single path here, but the
# main loop iterates, so more paths can be added).
Data_paths <- 'data/send/FFU-Contribution-to-FDA/'
# List which domains are in which SEND general observation classes
Classes <- list(TDM = c('TE', 'TA', 'TX', 'TS'), SPC = c('DM', 'CO', 'SE'),
FND = c('BW', 'BG', 'CL', 'DD', 'FW', 'LB', 'MA', 'MI', 'OM', 'PM', 'PC', 'PP', 'SC', 'TF', 'VS', 'EG', 'CV', 'RE'),
EVT = c('DS'), INT = c('EX'))
# Supplemental qualifier datasets (SUPPxx) plus relationship datasets.
SUPP <- paste0('SUPP',unlist(Classes))
Classes$REL <- c('RELREC',SUPP,'POOLDEF')
# Read the conformance rules .csv file (one rule per row)
Rules <- read.csv('SEND_Conformance_Rules_v2.0.csv')
# Regular-expression variants for conjunctions (AND, OR); each row holds the
# form used by the corresponding function (grep / strsplit / collapse).
Conjunctions <- data.frame('AND' = c(' and ', '(?i) and ', '&'),
'OR' = c(' or ', '(?i) or ', '|'))
row.names(Conjunctions) <- c('grep','strsplit','collapse')
# Regular-expression variants for equation signs (!=, ==), per function.
Signs <- data.frame('not.equals' = c(' ^= ', ' \\^= ', ' != '),
'equals' = c(' = ', ' = ', ' == '))
row.names(Signs) <- c('grep', 'strsplit', 'paste')
# Bundle both splitter tables for convertRule()
mySplitterTables <- list('Conjunctions' = Conjunctions,'Signs' = Signs)
# Source function for converting rule text into logical R expressions
source('convertRule.R')
# Initialize result accumulators (grown row-by-row in the main loop)
RuleNum <- NULL
Condition <- NULL
ConformanceRule <- NULL
Result <- NULL
DOMAIN <- NULL
DataSet <- NULL
DOMAINrow <- NULL
IGversion <- NULL
# Loop through files to evaluate
for (Data_path in Data_paths) {
# Get name of dataset
Dataset_name <- basename(Data_path)
# Load dataset
Data <- load.GitHub.xpt.files(studyDir = Data_path)
# Loop through rules to evaluate dataset against
for (row in seq(nrow(Rules))) {
# Select Rule
Rule <- Rules[row,]
# Exclude rules/conditions with uninterpretable content
# Rules
if (length(grep('Define-XML', Rule$Rule, ignore.case = T)) > 0) {
next
}
if (length(grep('Domain Name', Rule$Rule, ignore.case = T)) > 0) {
next
}
if (length(grep('for a given [A-Z][A-Z] record', Rule$Rule, ignore.case = T)) > 0) {
next
}
if (length(grep('valid domain abbreviation', Rule$Rule, ignore.case = T)) > 0) {
next
}
if (length(grep('Study day variable', Rule$Rule, ignore.case = T)) > 0) {
next
}
if (length(grep('value length', Rule$Rule, ignore.case = T)) > 0) {
next
}
if (length(grep('either a record with', Rule$Rule, ignore.case = T)) > 0) {
next
}
if (length(grep('AND/OR', Rule$Rule, fixed = T)) > 0) {
next
}
if (length(grep('Each Trial Set must have a single', Rule$Rule, ignore.case = T)) > 0) {
next
}
if (length(grep('treatment name only', Rule$Rule, ignore.case = T)) > 0) {
next
}
if (length(grep('Each trial set must have', Rule$Rule, ignore.case = T)) > 0) {
next
}
if (length(grep('ISO 8601 format in SEND', Rule$Rule, ignore.case = T)) > 0) {
next
}
if (length(grep('Only one record with', Rule$Rule, ignore.case = T)) > 0) {
next
}
if (length(grep('Unit for', Rule$Rule, ignore.case = T)) > 0) {
next
}
if (length(grep('When', Rule$Rule, ignore.case = T)) > 0) {
next
}
if (length(grep('for derived data', Rule$Rule, ignore.case = T)) > 0) {
next
}
if (length(grep('precision of data collection', Rule$Rule, ignore.case = T)) > 0) {
next
}
if (length(grep('Variable label length', Rule$Rule, ignore.case = T)) > 0) {
next
}
if (length(grep('Variable name length', Rule$Rule, ignore.case = T)) > 0) {
next
}
if (length(grep('absolute latest value of test', Rule$Rule, ignore.case = T)) > 0) {
next
}
if (length(grep('value for that subject', Rule$Rule, ignore.case = T)) > 0) {
next
}
# Conditions
if (length(grep('record refers to', Rule$Condition, ignore.case = T)) > 0) {
next
}
if (length(grep('for one of the two', Rule$Condition, ignore.case = T)) > 0) {
next
}
if (length(grep('numeric value', Rule$Condition, ignore.case = T)) > 0) {
next
}
if (length(grep('Variable Core Status', Rule$Condition, ignore.case = T)) > 0) {
next
}
if (length(grep('permissible and codelist', Rule$Condition, ignore.case = T)) > 0) {
next
}
if (length(grep('measurement is made over', Rule$Condition, ignore.case = T)) > 0) {
next
}
# Get Applicable Domains from Class and Domain
Domains <- NULL
if (!is.na(Rule$Class)) {
if (Rule$Class != 'ALL') {
if (length(grep(',', Rule$Class)) == 0) {
Domains <- Classes[[Rule$Class]]
} else {
rowClasses <- unlist(strsplit(Rule$Class, ', ', fixed = T))
for (rowClass in rowClasses) {
Domains <- c(Domains,Classes[[rowClass]])
}
}
} else {
Domains <- unlist(Classes)
}
}
if (!is.na(Rule$Domain)) {
if (Rule$Domain != 'ALL') {
rowDomains <- unlist(strsplit(Rule$Domain, ', ', fixed = T))
Domains <- Domains[which(Domains %in% rowDomains)]
}
}
if (('SUPP' %in% Domains)|('SUPP--' %in% Domains)) {
if ('SUPP' %in% Domains) {
removeIndex <- which(Domains == 'SUPP')
} else if ('SUPP--' %in% Domains) {
removeIndex <- which(Domains == 'SUPP--')
}
Domains <- Domains[-removeIndex]
Domains <- c(Domains,SUPP)
}
# Evaluate whether an interpretable condition exists
if (Rule$Condition != '') {
conditionExists <- T
if (length(grep('=',Rule$Condition,fixed=T))>0) {
conditionInterpretable <- T
} else if (length(grep('<',Rule$Condition,fixed=T))>0) {
conditionInterpretable <- T
} else if (length(grep('>',Rule$Condition,fixed=T))>0) {
conditionInterpretable <- T
} else {
conditionInterpretable <- F
}
} else {
conditionExists <- F
}
# Loop through domains applicable to rule
for (Domain in Domains) {
# Skip domain if not present in dataset
if (tolower(Domain) %in% names(Data)) {
domainData <- Data[[tolower(Domain)]]
} else {
next
}
# Check if rule is interpretable and only move forward if it is
if (length(grep('=', Rule$Rule, fixed = T)) > 0) {
# Store verbatim condition text
origCondition <- Rule$Condition
# Convert condition into logical operation
newCondition <- convertRule(origCondition)
# Store verbatim rule text
origRule <- Rule$Rule
# Convert rule into logical operation
newRule <- convertRule(origRule)
# Store CDISC Rule ID
ruleNum <- Rule$CDISC.SEND.Rule.ID
# Loop through each record in domain
for (i in seq(length(domainData))) {
# Store information about row
Condition <- c(Condition, Rule$Condition)
RuleNum <- c(RuleNum, ruleNum)
ConformanceRule <- c(ConformanceRule,Rule$Rule)
DOMAIN <- c(DOMAIN,Domain)
DataSet <- c(DataSet,Dataset_name)
DOMAINrow <- c(DOMAINrow,i)
IGversion <- c(IGversion,Rule$Cited.Document)
# Check if there is a condition to evaluate
if (conditionExists == T) {
# Check if the condition is interpretable
if (conditionInterpretable == T) {
# Check that evaluation of condition produces an answer
if (eval(parse(text = paste0('length(',newCondition,')>0')))) {
# Check that evaluation of condition is not NA
if (eval(parse(text = paste0('!is.na(',newCondition,')')))) {
# Check if evaluation of condition == TRUE
if (eval(parse(text = newCondition))) {
# Check if evaluation of rule produces an answer
if (eval(parse(text = paste0('length(',newRule,')>0')))) {
# Check if evaluation of rule is TRUE or FALSE
if (eval(parse(text= newRule))==F) {
# Record rule FAILED
Result <- c(Result,'FAIL')
} else {
# Record rule PASSED
Result <- c(Result,'PASS')
}
} else {
# Record that evaluation of rule did not produce an answer
Result <- c(Result,'NA')
}
} else {
# Record that Condition was not met so rule should not be evaluated
Result <- c(Result,'Condition Not Met')
}
} else {
# Record that condition was skipped to due to being NA
Result <- c(Result,'Skipped Condition')
}
} else {
# Record that condition was skipped to due to not producing an answer
Result <- c(Result,'Skipped Condition')
}
} else {
# Record that condition was skipped to due not being interpretable
Result <- c(Result,'Condition Not Interpretable')
}
# No condition, proceed with rule evaluation
} else {
# Check if evaluation of rule produces an answer
if (eval(parse(text = paste0('length(',newRule,')>0')))) {
# Check if evaluation of rule is TRUE or FALSE
if (eval(parse(text= newRule))==F) {
# Record rule FAILED
Result <- c(Result,'FAIL')
} else {
# Record rule PASSED
Result <- c(Result,'PASS')
}
} else {
# Record that evaluation of rule did not produce an answer
Result <- c(Result, 'NA')
}
}
}
}
}
}
}
# Convert accumulated result vectors to a data frame.
# NOTE(review): cbind() of vectors coerces every column to character, so
# DOMAINrow is stored as character here.
Results <- as.data.frame(cbind(DataSet,DOMAIN,DOMAINrow,RuleNum,Condition,ConformanceRule,Result,IGversion))
# Print records that failed a rule
print(Results[which(Results$Result=="FAIL"),])
print("Passed the following Rules:")
# Build the list of rules for which every evaluated record either passed,
# had its condition unmet, or produced no answer ('NA').
passedRules <- NULL
for (rule in unique(Rules$CDISC.SEND.Rule.ID)) {
passRule <- T
if (rule %in% Results$RuleNum) {
index <- which(Results$RuleNum == rule)
for (result in Results$Result[index]) {
# Any outcome other than PASS / Condition Not Met / 'NA' fails the rule
if (result %ni% c('PASS','Condition Not Met','NA')) {
passRule <- F
}
}
if (passRule == T) {
passedRules <- c(passedRules,rule)
}
}
}
print(passedRules)
| /contributed/Nonclinical/R/SEND_conformance/SEND_conformance.R | permissive | ShuguangSun/phuse-scripts | R | false | false | 11,615 | r | rm(list=ls())
library(Hmisc)
library(tools)
library(stringr)
library(lattice)
library(devtools)
library(httr)
`%ni%` <- Negate('%in%')
# Necessary Changes to the Conformance RUles File:
# Problem #1: In excel file or conformance rules in cell [559,F] a non-ASCI character is used for " that needs to be fixed.
# Solution #1: I fixed this by replacing its contents with cell [555,F]
# Problem #2: In Rule 58, Class was specified as "ALL" but SPC and INT domains are not applicable and break rule
# Solution #2: Class changed from "ALL" to "TDM, FND, EVT"
# Source Functions.R from PHUSE GitHub
source_url('https://raw.githubusercontent.com/phuse-org/phuse-scripts/master/contributed/Nonclinical/R/Functions/Functions.R')
# Set working directory to location of script
PATH <- dirname(sys.calls()[[1]][[2]])
setwd(PATH)
# Select dataset from PHUSE GitHub to evaluate
Data_paths <- 'data/send/FFU-Contribution-to-FDA/'
# List which domains are in which classes
Classes <- list(TDM = c('TE', 'TA', 'TX', 'TS'), SPC = c('DM', 'CO', 'SE'),
FND = c('BW', 'BG', 'CL', 'DD', 'FW', 'LB', 'MA', 'MI', 'OM', 'PM', 'PC', 'PP', 'SC', 'TF', 'VS', 'EG', 'CV', 'RE'),
EVT = c('DS'), INT = c('EX'))
SUPP <- paste0('SUPP',unlist(Classes))
Classes$REL <- c('RELREC',SUPP,'POOLDEF')
# Set path of conformance rules .csv file
Rules <- read.csv('SEND_Conformance_Rules_v2.0.csv')
# Provide regular expression for conjunctions, i.e. AND, OR
Conjunctions <- data.frame('AND' = c(' and ', '(?i) and ', '&'),
'OR' = c(' or ', '(?i) or ', '|'))
row.names(Conjunctions) <- c('grep','strsplit','collapse')
# Provide regular expression for equation signs, i.e. ==, !=
Signs <- data.frame('not.equals' = c(' ^= ', ' \\^= ', ' != '),
'equals' = c(' = ', ' = ', ' == '))
row.names(Signs) <- c('grep', 'strsplit', 'paste')
# Store conjunctions and signs
mySplitterTables <- list('Conjunctions' = Conjunctions,'Signs' = Signs)
# Source function for converting rules into logical statements
source('convertRule.R')
# Initialize variables
RuleNum <- NULL
Condition <- NULL
ConformanceRule <- NULL
Result <- NULL
DOMAIN <- NULL
DataSet <- NULL
DOMAINrow <- NULL
IGversion <- NULL
# Loop through files to evaluate
for (Data_path in Data_paths) {
# Get name of dataset
Dataset_name <- basename(Data_path)
# Load dataset
Data <- load.GitHub.xpt.files(studyDir = Data_path)
# Loop through rules to evaluate dataset against
for (row in seq(nrow(Rules))) {
# Select Rule
Rule <- Rules[row,]
# Exclude rules/conditions with uninterpreaible content
# Rules
if (length(grep('Define-XML', Rule$Rule, ignore.case = T)) > 0) {
next
}
if (length(grep('Domain Name', Rule$Rule, ignore.case = T)) > 0) {
next
}
if (length(grep('for a given [A-Z][A-Z] record', Rule$Rule, ignore.case = T)) > 0) {
next
}
if (length(grep('valid domain abbreviation', Rule$Rule, ignore.case = T)) > 0) {
next
}
if (length(grep('Study day variable', Rule$Rule, ignore.case = T)) > 0) {
next
}
if (length(grep('value length', Rule$Rule, ignore.case = T)) > 0) {
next
}
if (length(grep('either a record with', Rule$Rule, ignore.case = T)) > 0) {
next
}
if (length(grep('AND/OR', Rule$Rule, fixed = T)) > 0) {
next
}
if (length(grep('Each Trial Set must have a single', Rule$Rule, ignore.case = T)) > 0) {
next
}
if (length(grep('treatment name only', Rule$Rule, ignore.case = T)) > 0) {
next
}
if (length(grep('Each trial set must have', Rule$Rule, ignore.case = T)) > 0) {
next
}
if (length(grep('ISO 8601 format in SEND', Rule$Rule, ignore.case = T)) > 0) {
next
}
if (length(grep('Only one record with', Rule$Rule, ignore.case = T)) > 0) {
next
}
if (length(grep('Unit for', Rule$Rule, ignore.case = T)) > 0) {
next
}
if (length(grep('When', Rule$Rule, ignore.case = T)) > 0) {
next
}
if (length(grep('for derived data', Rule$Rule, ignore.case = T)) > 0) {
next
}
if (length(grep('precision of data collection', Rule$Rule, ignore.case = T)) > 0) {
next
}
if (length(grep('Variable label length', Rule$Rule, ignore.case = T)) > 0) {
next
}
if (length(grep('Variable name length', Rule$Rule, ignore.case = T)) > 0) {
next
}
if (length(grep('absolute latest value of test', Rule$Rule, ignore.case = T)) > 0) {
next
}
if (length(grep('value for that subject', Rule$Rule, ignore.case = T)) > 0) {
next
}
# Conditions
if (length(grep('record refers to', Rule$Condition, ignore.case = T)) > 0) {
next
}
if (length(grep('for one of the two', Rule$Condition, ignore.case = T)) > 0) {
next
}
if (length(grep('numeric value', Rule$Condition, ignore.case = T)) > 0) {
next
}
if (length(grep('Variable Core Status', Rule$Condition, ignore.case = T)) > 0) {
next
}
if (length(grep('permissible and codelist', Rule$Condition, ignore.case = T)) > 0) {
next
}
if (length(grep('measurement is made over', Rule$Condition, ignore.case = T)) > 0) {
next
}
# Get Applicable Domains from Class and Domain
Domains <- NULL
if (!is.na(Rule$Class)) {
if (Rule$Class != 'ALL') {
if (length(grep(',', Rule$Class)) == 0) {
Domains <- Classes[[Rule$Class]]
} else {
rowClasses <- unlist(strsplit(Rule$Class, ', ', fixed = T))
for (rowClass in rowClasses) {
Domains <- c(Domains,Classes[[rowClass]])
}
}
} else {
Domains <- unlist(Classes)
}
}
if (!is.na(Rule$Domain)) {
if (Rule$Domain != 'ALL') {
rowDomains <- unlist(strsplit(Rule$Domain, ', ', fixed = T))
Domains <- Domains[which(Domains %in% rowDomains)]
}
}
if (('SUPP' %in% Domains)|('SUPP--' %in% Domains)) {
if ('SUPP' %in% Domains) {
removeIndex <- which(Domains == 'SUPP')
} else if ('SUPP--' %in% Domains) {
removeIndex <- which(Domains == 'SUPP--')
}
Domains <- Domains[-removeIndex]
Domains <- c(Domains,SUPP)
}
# Evaluate whether an interpretable condition exists
if (Rule$Condition != '') {
conditionExists <- T
if (length(grep('=',Rule$Condition,fixed=T))>0) {
conditionInterpretable <- T
} else if (length(grep('<',Rule$Condition,fixed=T))>0) {
conditionInterpretable <- T
} else if (length(grep('>',Rule$Condition,fixed=T))>0) {
conditionInterpretable <- T
} else {
conditionInterpretable <- F
}
} else {
conditionExists <- F
}
# Loop through domains applicable to rule
for (Domain in Domains) {
# Skip domain if not present in dataset
if (tolower(Domain) %in% names(Data)) {
domainData <- Data[[tolower(Domain)]]
} else {
next
}
# Check if rule is interpretable and only move forward if it is
if (length(grep('=', Rule$Rule, fixed = T)) > 0) {
# Store verbatim condition text
origCondition <- Rule$Condition
# Convert condition into logical operation
newCondition <- convertRule(origCondition)
# Store verbatim rule text
origRule <- Rule$Rule
# Convert rule into logical operation
newRule <- convertRule(origRule)
# Store CDISC Rule ID
ruleNum <- Rule$CDISC.SEND.Rule.ID
# Loop through each record in domain
for (i in seq(length(domainData))) {
# Store information about row
Condition <- c(Condition, Rule$Condition)
RuleNum <- c(RuleNum, ruleNum)
ConformanceRule <- c(ConformanceRule,Rule$Rule)
DOMAIN <- c(DOMAIN,Domain)
DataSet <- c(DataSet,Dataset_name)
DOMAINrow <- c(DOMAINrow,i)
IGversion <- c(IGversion,Rule$Cited.Document)
# Check if there is a condition to evaluate
if (conditionExists == T) {
# Check if the condition is interpretable
if (conditionInterpretable == T) {
# Check that evaluation of condition produces an answer
if (eval(parse(text = paste0('length(',newCondition,')>0')))) {
# Check that evaluation of condition is not NA
if (eval(parse(text = paste0('!is.na(',newCondition,')')))) {
# Check if evaluation of condition == TRUE
if (eval(parse(text = newCondition))) {
# Check if evaluation of rule produces an answer
if (eval(parse(text = paste0('length(',newRule,')>0')))) {
# Check if evaluation of rule is TRUE or FALSE
if (eval(parse(text= newRule))==F) {
# Record rule FAILED
Result <- c(Result,'FAIL')
} else {
# Record rule PASSED
Result <- c(Result,'PASS')
}
} else {
# Record that evaulation of rule did not produce an answer
Result <- c(Result,'NA')
}
} else {
# Record that Condition was not met so rule should not be evaluated
Result <- c(Result,'Condition Not Met')
}
} else {
# Record that condition was skipped to due to being NA
Result <- c(Result,'Skipped Condition')
}
} else {
# Record that condition was skipped to due to not producing an answer
Result <- c(Result,'Skipped Condition')
}
} else {
# Record that condition was skipped to due not being interpretable
Result <- c(Result,'Condition Not Interpretable')
}
# No condition, proceed with rule evaluation
} else {
# Check if evaluation of rule produces an answer
if (eval(parse(text = paste0('length(',newRule,')>0')))) {
# Check if evaluation of rule is TRUE or FALSE
if (eval(parse(text= newRule))==F) {
# Record rule FAILED
Result <- c(Result,'FAIL')
} else {
# Record rule PASSED
Result <- c(Result,'PASS')
}
} else {
# Record that evaulation of rule did not produce an answer
Result <- c(Result, 'NA')
}
}
}
}
}
}
}
# Convert results to data frame
Results <- as.data.frame(cbind(DataSet,DOMAIN,DOMAINrow,RuleNum,Condition,ConformanceRule,Result,IGversion))
# print records that failed a rule
print(Results[which(Results$Result=="FAIL"),])
print("Passed the following Rules:")
# Get list of rules that passed
passedRules <- NULL
for (rule in unique(Rules$CDISC.SEND.Rule.ID)) {
passRule <- T
if (rule %in% Results$RuleNum) {
index <- which(Results$RuleNum == rule)
for (result in Results$Result[index]) {
if (result %ni% c('PASS','Condition Not Met','NA')) {
passRule <- F
}
}
if (passRule == T) {
passedRules <- c(passedRules,rule)
}
}
}
print(passedRules)
|
#market drop
#this function analyses given a market drop how much a stock recovers to
# Tickers to analyse; the wider candidate list is kept commented for reference.
#tickers <- c("GS", "UBS","TSLA","AAPL","CS","F","ITUB")
tickers <- c("GS")
# Start date for the historical data collection.
collect_since <- "2017-01-01"
# NOTE(review): assembleData() is defined elsewhere in this project;
# presumably it downloads/assembles price series per ticker -- confirm.
marketData <- assembleData(tickers,collect_since = collect_since)
# Drop threshold as a fraction -- presumably a >= 25% decline counts as a
# "market drop" in the downstream analysis (TODO confirm).
marketDrop_threshold <- 0.25
# Window lengths over which drops/recoveries are evaluated.
marketDrop_window <- c(7,30,250) #marketData indexes are in days
# Exploratory plotting kept for reference (requires zoo::rollapply).
#par(new = TRUE)
#rolling_mean <- (rollapply(marketData$price, marketDrop_window[1], mean))
#plot(marketData$price)
#lines(rolling_mean)
| /R_analysis/marketDrop_recovery.R | permissive | vacaciones/vacaciones | R | false | false | 505 | r | #market drop
#this function analyses given a market drop how much a stock recovers to
#tickers <- c("GS", "UBS","TSLA","AAPL","CS","F","ITUB")
tickers <- c("GS")
collect_since <- "2017-01-01"
marketData <- assembleData(tickers,collect_since = collect_since)
marketDrop_threshold <- 0.25
marketDrop_window <- c(7,30,250) #marketData indexes are in days
#par(new = TRUE)
#rolling_mean <- (rollapply(marketData$price, marketDrop_window[1], mean))
#plot(marketData$price)
#lines(rolling_mean)
|
\name{covest.SGB}
\alias{covest.SGB}
\title{
Classical and robust asymptotic covariance matrix
}
\description{
Computation of two covariance matrices of the estimators of parameters in a SGB regression. The first is based on the Hessian and the second is the sandwich estimator.
}
\usage{
covest.SGB(x, d, u, V, weight=rep(1,dim(d)[1]), x0 = NULL, hessian = NULL, ind = NULL,
shape1 = NULL)
}
\arguments{
\item{x}{
vector of parameters (shape1,coefi,shape2) where
shape1 is the overall shape, coefi is the vector of regression coefficients (see \code{\link{initpar.SGB}}) and shape2 the vector of the \eqn{D} Dirichlet shape parameters; \eqn{D}: number of parts.
shape1 and shape2 must be positive.
}
\item{d}{
data matrix of explanatory variables (with constant vector if required in the model) \eqn{(n \times m)}; \eqn{n}: sample size, \eqn{m}: number of auxiliary variables.
}
\item{u}{
data matrix of compositions (variables to be explained) \eqn{n \times D}.
}
\item{V}{
full rank transformation of log(parts) into log-ratios, matrix \eqn{D \times (D-1)}.
}
\item{weight}{
vector of length \eqn{n}; positive observation weights, default \code{rep(1,n)}. Should be scaled to sum to \eqn{n}.
}
\item{x0}{
specification of the initial parameter vector of length \eqn{npar} (optional), default: NULL, no specification.
}
\item{hessian}{
Hessian matrix (optional), see \code{\link{regSGB}}, default: NULL, no specification. In this case the Hessian is computed numerically.
}
\item{ind}{
vector of length equal to the number of fixed parameters; specifies the indices of the fixed components in the vector of parameters \eqn{x} (possible for \code{shape1} and \code{coefi} (regression coefficients) only).
}
\item{shape1}{
fixed value of the overall shape parameter, if \code{heq = heqa.SGB} or
\code{heq = heqab.SGB}. Default is 1.
}
}
\details{
This function is internally called by regSGB. In this case the Hessian is the output of \code{\link[alabama]{auglag}} and is numerically computed. \cr
A design based covariance matrix of the parameters can be obtained by linearization as the covariance matrix of the \code{scores}.
}
\value{
a list with
\item{summary }{Data frame with \cr
\code{Initial = x0} (if specified), \cr
\code{Estimate = x}, \cr
\code{StdErr1} = ordinary asymptotic standard error of parameters, \cr
\code{StdErr} = robust asymptotic standard error, \cr
\code{p.value} = asymptotic normal p-value based on \code{StdErr}. For \code{shape1}, \eqn{H_0} is "shape1=shape1", or "shape1=1" if \code{shape1=NULL}. The other parameters are tested against 0. \cr
\code{signif} = significance code based on p.value.
}
\item{scores }{matrix \eqn{n \times npar}. Each row contains the (unweighted) derivatives of the log-density at a data point w.r.t the parameters.}
\item{vcov1}{ordinary asymptotic covariance matrix, inverse of minus the Hessian.}
\item{StdErr1}{vector of ordinary asymptotic standard error of parameters.}
\item{varest2}{robust asymptotic covariance matrix.}
\item{StdErr}{vector of robust asymptotic standard error of parameters.}
}
\references{
Huber, P. J. (1967). The behavior of maximum likelihood estimates under nonstandard conditions. In \emph{Proceedings of the Fifth Berkeley Symposium on Mathematical Statistics and Probability}, Volume 1, pp. 221-233.
}
\seealso{
\code{\link{regSGB}} for creating \code{oilr}.
}
\examples{
data(arc)
data(oilr)
## compositions
da <- as.matrix(log(arc[["depth"]]),ncol=1)
ua <- as.matrix(arc[,1:3])
## ilr transforms
c1 <- 1/sqrt(2)
c2 <- 1/sqrt(6)
Vilr <- matrix(c(-c1,c1,0,-c2,-c2,2*c2),nrow=3)
colnames(Vilr) <- c("ilr1","ilr2")
Vilr
covs <- covest.SGB(oilr[["par"]], da, ua, Vilr)
## Compare the ordinary and robust correlation matrices of parameters estimates.
## (Ordinary) covariance based on inverse Hessian
vcov1 <- covs[["vcov1"]]
StdErr1 <- covs[["StdErr1"]]
## Estimated correlation matrix
vcor1 <- diag(1/StdErr1) \%*\% vcov1 \%*\% diag(1/StdErr1)
round(vcor1,2)
## Robust (Huber's sandwich estimator):
StdErr <- covs[["StdErr"]]
vcov <- covs[["varest2"]]
## Estimated correlation matrix
round(diag(1/StdErr) \%*\% vcov \%*\% diag(1/StdErr),2)
}
\keyword{Utilities}
| /man/covest.SGB.Rd | no_license | cran/SGB | R | false | false | 4,201 | rd | \name{covest.SGB}
\alias{covest.SGB}
\title{
Classical and robust asymptotic covariance matrix
}
\description{
Computation of two covariance matrices of the estimators of parameters in a SGB regression. The first is based on the Hessian and the second is the sandwich estimator.
}
\usage{
covest.SGB(x, d, u, V, weight=rep(1,dim(d)[1]), x0 = NULL, hessian = NULL, ind = NULL,
shape1 = NULL)
}
\arguments{
\item{x}{
vector of parameters (shape1,coefi,shape2) where
shape1 is the overall shape, coefi is the vector of regression coefficients (see \code{\link{initpar.SGB}}) and shape2 the vector of the \eqn{D} Dirichlet shape parameters; \eqn{D}: number of parts.
shape1 and shape2 must be positive.
}
\item{d}{
data matrix of explanatory variables (with constant vector if required in the model) \eqn{(n \times m)}; \eqn{n}: sample size, \eqn{m}: number of auxiliary variables.
}
\item{u}{
data matrix of compositions (variables to be explained) \eqn{n \times D}.
}
\item{V}{
full rank transformation of log(parts) into log-ratios, matrix \eqn{D \times (D-1)}.
}
\item{weight}{
vector of length \eqn{n}; positive observation weights, default \code{rep(1,n)}. Should be scaled to sum to \eqn{n}.
}
\item{x0}{
specification of the initial parameter vector of length \eqn{npar} (optional), default: NULL, no specification.
}
\item{hessian}{
Hessian matrix (optional), see \code{\link{regSGB}}, default: NULL, no specification. In this case the Hessian is computed numerically.
}
\item{ind}{
vector of length equal to the number of fixed parameters; specifies the indices of the fixed components in the vector of parameters \eqn{x} (possible for \code{shape1} and \code{coefi} (regression coefficients) only).
}
\item{shape1}{
fixed value of the overall shape parameter, if \code{heq = heqa.SGB} or
\code{heq = heqab.SGB}. Default is 1.
}
}
\details{
This function is internally called by regSGB. In this case the Hessian is the output of \code{\link[alabama]{auglag}} and is numerically computed. \cr
A design based covariance matrix of the parameters can be obtained by linearization as the covariance matrix of the \code{scores}.
}
\value{
a list with
\item{summary }{Data frame with \cr
\code{Initial = x0} (if specified), \cr
\code{Estimate = x}, \cr
\code{StdErr1} = ordinary asymptotic standard error of parameters, \cr
\code{StdErr} = robust asymptotic standard error, \cr
\code{p.value} = asymptotic normal p-value based on \code{StdErr}. For \code{shape1}, \eqn{H_0} is "shape1=shape1", or "shape1=1" if \code{shape1=NULL}. The other parameters are tested against 0. \cr
\code{signif} = significance code based on p.value.
}
\item{scores }{matrix \eqn{n \times npar}. Each row contains the (unweighted) derivatives of the log-density at a data point w.r.t the parameters.}
\item{vcov1}{ordinary asymptotic covariance matrix, inverse of minus the Hessian.}
\item{StdErr1}{vector of ordinary asymptotic standard error of parameters.}
\item{varest2}{robust asymptotic covariance matrix.}
\item{StdErr}{vector of robust asymptotic standard error of parameters.}
}
\references{
Huber, P. J. (1967). The behavior of maximum likelihood estimates under nonstandard conditions. In \emph{Proceedings of the Fifth Berkeley Symposium on Mathematical Statistics and Probability}, Volume 1, pp. 221-233.
}
\seealso{
\code{\link{regSGB}} for creating \code{oilr}.
}
\examples{
data(arc)
data(oilr)
## compositions
da <- as.matrix(log(arc[["depth"]]),ncol=1)
ua <- as.matrix(arc[,1:3])
## ilr transforms
c1 <- 1/sqrt(2)
c2 <- 1/sqrt(6)
Vilr <- matrix(c(-c1,c1,0,-c2,-c2,2*c2),nrow=3)
colnames(Vilr) <- c("ilr1","ilr2")
Vilr
covs <- covest.SGB(oilr[["par"]], da, ua, Vilr)
## Compare the ordinary and robust correlation matrices of parameters estimates.
## (Ordinary) covariance based on inverse Hessian
vcov1 <- covs[["vcov1"]]
StdErr1 <- covs[["StdErr1"]]
## Estimated correlation matrix
vcor1 <- diag(1/StdErr1) \%*\% vcov1 \%*\% diag(1/StdErr1)
round(vcor1,2)
## Robust (Huber's sandwich estimator):
StdErr <- covs[["StdErr"]]
vcov <- covs[["vcov"]]
## Estimated correlation matrix
round(diag(1/StdErr) \%*\% vcov \%*\% diag(1/StdErr),2)
}
\keyword{Utilities}
|
## PUT YOUR library(..) calls here e.g.
packages <- c("lubridate", "tidyr", "dplyr", "ggplot2")
# Attach every package in `packages`, installing any that are missing first.
invisible(lapply(packages, function(package) {
  # Attach quietly; logical.return = TRUE makes library() return FALSE
  # instead of erroring when the package is not installed.
  quiet_library <- function(p) {
    suppressWarnings(suppressMessages(
      library(package = p, character.only = TRUE, quietly = TRUE, logical.return = TRUE)
    ))
  }
  if (!quiet_library(package)) {
    cat("Package", package, "was not found, installing it...", "\n")
    install.packages(package)
    # BUG FIX: this previously called quiet_library(p); `p` is not defined
    # in this scope, so the retry after installation always errored.
    quiet_library(package)
  }
}))
packages <- c("lubridate", "tidyr", "dplyr", "ggplot2")
lapply(packages, function(package) {
quiet_library <- function(p) suppressWarnings(suppressMessages(library(package=p, character.only = T, quietly=TRUE, logical.return=TRUE)))
if(!quiet_library(package)){
cat("Package",package,"was not found, installing it...", "\n")
install.packages(package)
quiet_library(p)
}
}) |
context("test-plot_anomaly_decomposition.R")

# A bare numeric is not a decomposed/anomalized tibble, so the plotting
# function must reject it.
test_that("errors on incorrect input", {
  expect_error(plot_anomaly_decomposition(3))
})

# Running the full decompose -> anomalize pipeline on the bundled download
# data for a single package should yield a ggplot object.
test_that("returns a ggplot", {
  decomposition_plot <- tidyverse_cran_downloads %>%
    filter(package == "tidyquant") %>%
    ungroup() %>%
    time_decompose(count, method = "stl") %>%
    anomalize(remainder, method = "iqr") %>%
    plot_anomaly_decomposition()
  expect_s3_class(decomposition_plot, "ggplot")
})
| /revdep/checks.noindex/anomalize/old/anomalize.Rcheck/tests/testthat/test-plot_anomaly_decomposition.R | no_license | sstoeckl/tibbletime | R | false | false | 467 | r | context("test-plot_anomaly_decomposition.R")
test_that("errors on incorrect input", {
expect_error(plot_anomaly_decomposition(3))
})
test_that("returns a ggplot", {
g <- tidyverse_cran_downloads %>%
filter(package == "tidyquant") %>%
ungroup() %>%
time_decompose(count, method = "stl") %>%
anomalize(remainder, method = "iqr") %>%
plot_anomaly_decomposition()
expect_s3_class(g, "ggplot")
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/4_metaClustering.R
\name{ReassignMetaclusters}
\alias{ReassignMetaclusters}
\title{ReassignMetaclusters}
\usage{
ReassignMetaclusters(fsom, metaclustering)
}
\arguments{
\item{fsom}{Result of calling the FlowSOM function}
\item{metaclustering}{Vector with the metacluster names for all clusters}
}
\value{
Updated FlowSOM object
}
\description{
Adapt the metaclustering. Can be used to either split up metaclusters,
or potentially merge some metaclusters.
}
\examples{
fileName <- system.file("extdata", "68983.fcs", package = "FlowSOM")
ff <- flowCore::read.FCS(fileName)
ff <- flowCore::compensate(ff, flowCore::keyword(ff)[["SPILL"]])
ff <- flowCore::transform(ff,
flowCore::transformList(colnames(flowCore::keyword(ff)[["SPILL"]]),
flowCore::logicleTransform()))
flowSOM.res <- FlowSOM(ff,
scale = TRUE,
colsToUse = c(9, 12, 14:18),
nClus = 5,
seed = 1)
PlotStars(flowSOM.res, backgroundValues = flowSOM.res$metaclustering)
# Split up metacluster 5 by relabeling part of its clusters as "5b"
MC_or <- flowSOM.res$metaclustering
MC_new <- c(MC_or)
MC_new[c(81:86, 91:96)] <- "5b"
flowSOM.res <- ReassignMetaclusters(flowSOM.res, MC_new)
PlotStars(flowSOM.res, backgroundValues = flowSOM.res$metaclustering)
PlotNumbers(flowSOM.res, level = "metaclusters")
GetCounts(flowSOM.res)
}
| /man/ReassignMetaclusters.Rd | no_license | ameranismail/FlowSOM | R | false | true | 1,481 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/4_metaClustering.R
\name{ReassignMetaclusters}
\alias{ReassignMetaclusters}
\title{ReassignMetaclusters}
\usage{
ReassignMetaclusters(fsom, metaclustering)
}
\arguments{
\item{fsom}{Result of calling the FlowSOM function}
\item{metaclustering}{Vector with the metacluster names for all clusters}
}
\value{
Updated FlowSOM object
}
\description{
Adapt the metaclustering. Can be used to either split up metaclusters,
or potentially merge some metaclusters.
}
\examples{
fileName <- system.file("extdata", "68983.fcs", package = "FlowSOM")
ff <- flowCore::read.FCS(fileName)
ff <- flowCore::compensate(ff, flowCore::keyword(ff)[["SPILL"]])
ff <- flowCore::transform(ff,
flowCore::transformList(colnames(flowCore::keyword(ff)[["SPILL"]]),
flowCore::logicleTransform()))
flowSOM.res <- FlowSOM(ff,
scale = TRUE,
colsToUse = c(9, 12, 14:18),
nClus = 5,
seed = 1)
PlotStars(flowSOM.res, backgroundValues = flowSOM.res$metaclustering)
# Split up metacluster 3
MC_or <- flowSOM.res$metaclustering
MC_new <- c(MC_or)
MC_new[c(81:86, 91:96)] <- "5b"
flowSOM.res <- ReassignMetaclusters(flowSOM.res, MC_new)
PlotStars(flowSOM.res, backgroundValues = flowSOM.res$metaclustering)
PlotNumbers(flowSOM.res, level = "metaclusters")
GetCounts(flowSOM.res)
}
|
# Load the measured campaign inter-arrival times (expects columns x and type).
data <- read.csv('campaign_interarrival.csv')
# Recode `type` so the legend reads "Measurement" vs "Fitting".
data$type <- factor(data$type, labels = c('Measurement','Fitting'))
# ECDF of inter-arrival times; a 5% subsample keeps rendering fast.
# NOTE(review): label.* and color.palette are assumed to be provided by the
# surrounding figure framework -- confirm before sourcing this standalone.
p <- ggplot(data %>% sample_frac(0.05), aes(x, color=type)) +
  stat_ecdf() +
  scale_x_continuous(limits = c(0,2)) +
  scale_y_continuous() +
  scale_color_manual(values = color.palette) +
  labs(x = label.campaign.interarrival,
       y = label.cdf.campaign.interarrival,
       color = label.type) +
  coord_cartesian(xlim = c(0, 1.26))
save.full.row.plot(p) | /figures/cloud/crowdsourcing/measurements/campaign_interarrival.R | no_license | cschwartz/dissertation | R | false | false | 478 | r | data <- read.csv('campaign_interarrival.csv')
data$type <- factor(data$type, labels = c('Measurement','Fitting'))
p <- ggplot(data %>% sample_frac(0.05), aes(x, color=type)) +
stat_ecdf() +
scale_x_continuous(limits = c(0,2)) +
scale_y_continuous() +
scale_color_manual(values = color.palette) +
labs(x = label.campaign.interarrival,
y = label.cdf.campaign.interarrival,
color = label.type) +
coord_cartesian(xlim = c(0, 1.26))
save.full.row.plot(p) |
library(purrr)
#### This function will return a new function, which use the arguments provided
#### from the former one.
####
# Function factory: adder_maker(n) builds and returns a closure that adds
# the captured value n to its single argument.
adder_maker <- function(n) {
  function(x) x + n
}
# Demonstration: the factory returns a function; calling that applies the add.
adder_maker(4)
adder_maker(4)(5)
#### map function
# map() always returns a list; element i of the output is f(input[i]).
map(c(5, 4, 3, 2, 1), function(x){
  c("one", "two", "three", "four", "five")[x]
})
# map_chr() is the type-stable variant returning a character vector.
map_chr(c(5, 4, 3, 2, 1), function(x){
  c("one", "two", "three", "four", "five")[x]
})
# map_dbl() returns a double vector.
map_dbl(1:5,function(x){
  6-x
})
## map_if and map_at
# map_if(): apply the second function only where the predicate is TRUE;
# the remaining elements pass through unchanged.
map_if(1:5,function(x){
  x%%2 == 0
},function(y){
  y^2
}) %>% unlist
# map_at(): apply the function only at the given positions (1, 3 and 5).
map_at(seq(100, 500, 100), c(1, 3, 5), function(x){
  x - 10
}) %>% unlist()
## multivector
# map2() iterates over two lists in parallel, pasting pairs of words here.
map2(map(c(5, 4, 3, 2, 1), function(x){
  c("one", "two", "three", "four", "five")[x]
}),map(c(5, 4, 3, 2, 1), function(x){
  c("one", "two", "three", "four", "five")[x]
}),paste
)
# pmap() generalises map2() to any number of parallel inputs (four here).
pmap(list(map(c(5, 4, 3, 2, 1), function(x){
  c("one", "two", "three", "four", "five")[x]
}),map(c(5, 4, 3, 2, 1), function(x){
  c("one", "two", "three", "four", "five")[x]
}),map(c(5, 4, 3, 2, 1), function(x){
  c("one", "two", "three", "four", "five")[x]
}),map(c(5, 4, 3, 2, 1), function(x){
  c("one", "two", "three", "four", "five")[x]
})),function(a,b,c,d){
  paste(a,b,c,d,sep = ",")
})
#### reduce
# reduce() folds the vector with a binary function: (((1+2)+3)+4)+5 = 15.
reduce(1:5,function(x1,x2){
  x1+x2
})
# Same fold, with messages showing the accumulator (x1) and next element (x2).
reduce(1:5,function(x1,x2){
  message("x1 is",x1)
  message("x2 is",x2)
  x1+x2
})
#### Search: contains, detect, detect_index
x <- list(1:10, 5, 9.9)
# contains(): does the list hold this exact element? (TRUE for 1:10 here)
# NOTE(review): recent purrr versions renamed this to has_element() --
# confirm the installed purrr version still exports contains().
x %>% contains(1:10)
x %>% contains(3)
x
# detect(): the first element satisfying the predicate.
detect(20:40, function(x){
  x > 22 && x %% 2 == 0
})
# detect_index(): the position of that first match instead of its value.
detect_index(20:40, function(x){
  x > 22 && x %% 2 == 0
})
#### Filter: keep, discard, every, some
# keep(): retain elements where the predicate is TRUE (even numbers here).
keep(1:20, function(x){
  x %% 2 == 0
})
# discard(): the complement of keep().
discard(1:20, function(x){
  x %% 2 == 0
})
# every(): TRUE only if the predicate holds for all elements.
every(1:20, function(x){
  x %% 2 == 0
})
# some(): TRUE if the predicate holds for at least one element.
some(1:20, function(x){
  x %% 2 == 0
})
#### Compose
# compose() applies right-to-left: n_unique(x) is length(unique(x)).
n_unique <- compose(length, unique)
# The composition above is the same as:
# n_unique <- function(x){
#   length(unique(x))
# }
rep(1:5, 1:5)
n_unique(rep(1:5, 1:5))
#### partial application
mult_three_n <- function(x, y, z){
  x * y * z
}
# partial() pre-fills x and y, leaving a one-argument function of z.
mult_by_15 <- partial(mult_three_n, x = 3, y = 5)
mult_by_15(4)
#### Side effect: walk
# walk() is map() for side effects: it calls message() on each element and
# returns its input invisibly.
walk(c("Friends, Romans, countrymen,",
       "lend me your ears;",
       "I come to bury Caesar,",
       "not to praise him."), message)
#### recursion
# Classic (unmemoised) recursive Fibonacci.
# Indexing convention: Fibonacci_rec(1) == 0, Fibonacci_rec(2) == 1.
Fibonacci_rec <- function(x) {
  if (x == 1) return(0)
  if (x == 2) return(1)
  Fibonacci_rec(x - 1) + Fibonacci_rec(x - 2)
}
map_dbl(1:12,Fibonacci_rec)
# Memoisation cache for Fibonacci_rec_mem: seeded with the two base cases
# and filled in lazily via `<<-` as new values are computed.
fib_num <- c(0, 1, rep(NA, 23))

# Memoised recursive Fibonacci; same indexing as Fibonacci_rec.
# Reads and writes the global `fib_num` cache, so repeated calls are fast.
Fibonacci_rec_mem <- function(x) {
  stopifnot(x > 0)
  # Cache hit: this value has already been computed.
  if (!is.na(fib_num[x])) {
    return(fib_num[x])
  }
  # Cache miss: compute and store both predecessors, then combine them.
  fib_num[x - 1] <<- Fibonacci_rec_mem(x - 1)
  fib_num[x - 2] <<- Fibonacci_rec_mem(x - 2)
  fib_num[x - 1] + fib_num[x - 2]
}
####which one is faster?
t <- Sys.time()
Fibonacci_rec(30)
Sys.time() - t
t <- Sys.time()
Fibonacci_rec_mem(30)
Sys.time() - t
library(microbenchmark)
library(magrittr)
library(tidyr)
library(dplyr)
fib_data <- map(1:10, function(x){microbenchmark(Fibonacci_rec(x), times = 100)$time})
names(fib_data) <- paste0(letters[1:10], 1:10)
fib_data <- as.data.frame(fib_data)
fib_data %<>%
gather(num, time) %>%
group_by(num) %>%
summarise(med_time = median(time))
memo_data <- map(1:10, function(x){microbenchmark(Fibonacci_rec_mem(x))$time})
names(memo_data) <- paste0(letters[1:10], 1:10)
memo_data <- as.data.frame(memo_data)
memo_data %<>%
gather(num, time) %>%
group_by(num) %>%
summarise(med_time = median(time))
plot(1:10, fib_data$med_time, xlab = "Fibonacci Number", ylab = "Median Time (Nanoseconds)",
pch = 18, bty = "n", xaxt = "n", yaxt = "n")
axis(1, at = 1:10)
axis(2, at = seq(0, 350000, by = 50000))
points(1:10 + .1, memo_data$med_time, col = "blue", pch = 18)
legend(1, 100000, c("Not Memoized", "Memoized"), pch = 18,
col = c("black", "blue"), bty = "n", cex = 1, y.intersp = 1.5)
| /03_functional_programming.R | no_license | bsmg1h/Advanced_R_Programming | R | false | false | 4,010 | r | library(purrr)
#### This function will return a new function, which use the arguments provided
#### from the former one.
####
adder_maker <- function(n){
function(x){
n + x
}
}
adder_maker(4)
adder_maker(4)(5)
#### map function
map(c(5, 4, 3, 2, 1), function(x){
c("one", "two", "three", "four", "five")[x]
})
map_chr(c(5, 4, 3, 2, 1), function(x){
c("one", "two", "three", "four", "five")[x]
})
map_dbl(1:5,function(x){
6-x
})
## map_if and map_at
map_if(1:5,function(x){
x%%2 == 0
},function(y){
y^2
}) %>% unlist
map_at(seq(100, 500, 100), c(1, 3, 5), function(x){
x - 10
}) %>% unlist()
## multivector
map2(map(c(5, 4, 3, 2, 1), function(x){
c("one", "two", "three", "four", "five")[x]
}),map(c(5, 4, 3, 2, 1), function(x){
c("one", "two", "three", "four", "five")[x]
}),paste
)
pmap(list(map(c(5, 4, 3, 2, 1), function(x){
c("one", "two", "three", "four", "five")[x]
}),map(c(5, 4, 3, 2, 1), function(x){
c("one", "two", "three", "four", "five")[x]
}),map(c(5, 4, 3, 2, 1), function(x){
c("one", "two", "three", "four", "five")[x]
}),map(c(5, 4, 3, 2, 1), function(x){
c("one", "two", "three", "four", "five")[x]
})),function(a,b,c,d){
paste(a,b,c,d,sep = ",")
})
#### reduce
reduce(1:5,function(x1,x2){
x1+x2
})
reduce(1:5,function(x1,x2){
message("x1 is",x1)
message("x2 is",x2)
x1+x2
})
#### Search: contains, detect, detect_index
x <- list(1:10, 5, 9.9)
x %>% contains(1:10)
x %>% contains(3)
x
detect(20:40, function(x){
x > 22 && x %% 2 == 0
})
detect_index(20:40, function(x){
x > 22 && x %% 2 == 0
})
#### Filter: keep, discard, every, some
keep(1:20, function(x){
x %% 2 == 0
})
discard(1:20, function(x){
x %% 2 == 0
})
every(1:20, function(x){
x %% 2 == 0
})
some(1:20, function(x){
x %% 2 == 0
})
#### Compose
n_unique <- compose(length, unique)
# The composition above is the same as:
# n_unique <- function(x){
# length(unique(x))
# }
rep(1:5, 1:5)
n_unique(rep(1:5, 1:5))
#### partial application
mult_three_n <- function(x, y, z){
x * y * z
}
mult_by_15 <- partial(mult_three_n, x = 3, y = 5)
mult_by_15(4)
#### Side effect: walk
walk(c("Friends, Romans, countrymen,",
"lend me your ears;",
"I come to bury Caesar,",
"not to praise him."), message)
#### recursion
Fibonacci_rec <- function(x){
if(x == 1){
0
}else if(x == 2){
1
}else{
Fibonacci_rec(x-1) + Fibonacci_rec(x-2)
}
}
map_dbl(1:12,Fibonacci_rec)
fib_num <- c(0,1,rep(NA,23))
Fibonacci_rec_mem <- function(x){
stopifnot(x >0)
if(!is.na(fib_num[x])){
fib_num[x]
} else{
fib_num[x-1] <<- Fibonacci_rec_mem(x-1)
fib_num[x-2] <<- Fibonacci_rec_mem(x-2)
fib_num[x-1] + fib_num[x-2]
}
}
map_dbl(1:12,Fibonacci_rec_mem)
####which one is faster?
t <- Sys.time()
Fibonacci_rec(30)
Sys.time() - t
t <- Sys.time()
Fibonacci_rec_mem(30)
Sys.time() - t
library(microbenchmark)
library(magrittr)
library(tidyr)
library(dplyr)
fib_data <- map(1:10, function(x){microbenchmark(Fibonacci_rec(x), times = 100)$time})
names(fib_data) <- paste0(letters[1:10], 1:10)
fib_data <- as.data.frame(fib_data)
fib_data %<>%
gather(num, time) %>%
group_by(num) %>%
summarise(med_time = median(time))
memo_data <- map(1:10, function(x){microbenchmark(Fibonacci_rec_mem(x))$time})
names(memo_data) <- paste0(letters[1:10], 1:10)
memo_data <- as.data.frame(memo_data)
memo_data %<>%
gather(num, time) %>%
group_by(num) %>%
summarise(med_time = median(time))
plot(1:10, fib_data$med_time, xlab = "Fibonacci Number", ylab = "Median Time (Nanoseconds)",
pch = 18, bty = "n", xaxt = "n", yaxt = "n")
axis(1, at = 1:10)
axis(2, at = seq(0, 350000, by = 50000))
points(1:10 + .1, memo_data$med_time, col = "blue", pch = 18)
legend(1, 100000, c("Not Memoized", "Memoized"), pch = 18,
col = c("black", "blue"), bty = "n", cex = 1, y.intersp = 1.5)
|
#Bulk best binomial bandit script
# NOTE(review): this file is a PHP template rendered into an R script --
# the <? ... ?> tags below are substituted by PHP before R ever runs.
library(bandit)
# Collect one (trials, successes) pair per subtest into a list.
experiment = list()
<? foreach ($this->subtests as $subtest) : ?>
trials <- c(<?= implode(',', $subtest['trials']) ?>)
successes <- c(<?= implode(',', $subtest['successes']) ?>)
subtest <- list(trials = trials, successes = successes)
experiment <- c(experiment, list(subtest))
<? endforeach; ?>
# Best-binomial-bandit weights for one subtest, with Beta(alpha, beta)
# priors, via bandit::best_binomial_bandit_sim.
bbb <- function(successes, trials, alpha = 1, beta = 1) {
weights <- best_binomial_bandit_sim(successes, trials, alpha, beta)
return(weights)
}
# Score every subtest, scale the weights by 10000, and print them one
# subtest per row (no headers) for the consuming process.
weights = sapply(experiment, function(subtest) bbb(subtest$successes, subtest$trials, alpha = <?= $this->alpha ?>, beta = <?= $this->beta ?>)*10000)
write.table(t(weights), col.names = FALSE, row.names = FALSE)
| /strategies/bulk-bandit.r | no_license | jgivoni/strategy-test | R | false | false | 714 | r |
#Bulk best binomial bandit script
library(bandit)
experiment = list()
<? foreach ($this->subtests as $subtest) : ?>
trials <- c(<?= implode(',', $subtest['trials']) ?>)
successes <- c(<?= implode(',', $subtest['successes']) ?>)
subtest <- list(trials = trials, successes = successes)
experiment <- c(experiment, list(subtest))
<? endforeach; ?>
bbb <- function(successes, trials, alpha = 1, beta = 1) {
weights <- best_binomial_bandit_sim(successes, trials, alpha, beta)
return(weights)
}
weights = sapply(experiment, function(subtest) bbb(subtest$successes, subtest$trials, alpha = <?= $this->alpha ?>, beta = <?= $this->beta ?>)*10000)
write.table(t(weights), col.names = FALSE, row.names = FALSE)
|
#' Score monthly table
#'
#' Scores one month of the master table with a previously trained XGBoost
#' model. Predictions (and, when a target exists, performance reports) are
#' written under the results folder.
#'
#' @param model_alias_score : Model previously made to use to score (character)
#' @param date_to_score : Month to score, format "YYYYMM" (character)
#' @param model_type_score : product of model chosen to score
#' @param performance_calculation : use if exist a target to compare (logical)
#'
#' @return Invisibly \code{NULL}; called for its side effects (csv files
#'   written under the Performance/Prediction results folders).
#' @export
#'
#' @examples
score_mensual <- function(model_alias_score,
                          date_to_score,
                          model_type_score,
                          performance_calculation ) {
  # BUG FIX: the body previously referenced the globals `model_alias_scoring`
  # and `model_type_modeling` instead of the `model_alias_score` and
  # `model_type_score` parameters; the parameters are now used throughout.
  print(paste("Scoring", model_type_score, "of", model_alias_score,
              "month", date_to_score))
  # Performance runs (target available) and plain predictions go to separate
  # folders. A scalar if/else replaces the previous ifelse() call, which is
  # meant for vectors, not for choosing between two assignments.
  if (performance_calculation) {
    results_path <- os.path.join(results_path, "Performance")
  } else {
    results_path <- os.path.join(results_path, "Prediction")
  }
  dir.create(os.path.join(results_path, date_to_score))
  results_alias_path <-
    os.path.join(results_path, date_to_score, model_alias_score)
  dir.create(results_alias_path)
  print("Upload master table")
  master <- get.path(master_path, "master") %>% readRDS()
  # Keep only the lag / last-owned features of the product being scored.
  all_variables <- names(master)
  lags <- names(master)[grepl("month_ago", names(master))]
  lags_product <- lags[grepl(model_type_score, lags)]
  last_owned <- names(master)[grepl("last.owned", names(master))]
  last_owned_product <- last_owned[grepl(model_type_score, last_owned)]
  selected_var <- all_variables[all_variables %!in% c(lags, last_owned)]
  selected_var <- c(selected_var, lags_product, last_owned_product)
  master <- master[, mget(selected_var)]
  # Keep the scoring month plus the month two periods ahead (target source).
  test_cut <-
    as.Date(paste0(as.character(date_to_score), '01'), format = '%Y%m%d')
  months_cut <- c(test_cut, floor_date(as.Date(test_cut) + months(2), "month"))
  master <- master[periodo %in% months_cut]
  print("Creating target variable")
  # Target: the client acquires the product within the next two months.
  var_target <- paste0("pr_", model_type_score)
  target <-
    master[, .(llave,
               month.id = month.id - 2,
               var_target_2monthsFurther = get(var_target))]
  master <-
    merge(master,
          target,
          by = c("llave", "month.id"),
          all.x = TRUE)
  master[, target := ifelse(var_target_2monthsFurther - get(var_target) > 0, 1, 0)]
  master[, var_target_2monthsFurther := NULL]
  rm(target)
  gc()
  # add in purchase frequency feature for each product
  print("Load purchase frequencies")
  purchase.frequencies <-
    readRDS(get.path(feature_path, get_month(1)))
  purchase.frequencies <- data.table(purchase.frequencies)
  purchase_frequencies_products <-
    names(purchase.frequencies)[grepl(model_type_score, names(purchase.frequencies))]
  purchase.frequencies <-
    purchase.frequencies[, mget(c(
      "llave",
      "month.id",
      purchase_frequencies_products,
      "num.transactions"
    ))]
  master <-
    merge(master,
          purchase.frequencies,
          by = c("month.id", "llave"),
          all.x = TRUE)
  master[is.na(master)] <- 0
  rm(purchase.frequencies)
  gc()
  # create the scoring table: keep only the month to score
  print("Creating score tables")
  test <- master[periodo == test_cut]
  test[is.na(test)] <- 0
  rm(master)
  gc()
  # Classifying variables into categories: product-related features share a
  # common naming pattern, so separate them from id and CRM variables.
  id_variables <-
    c("llave", "periodo", "month.id", "month", "year", "target")
  products_variables <- names(test)[grepl("pr_", names(test))]
  products_variables <-
    c(products_variables, "total_products", "num.transactions")
  crm_vars <-
    names(test)[names(test) %!in% c(id_variables, products_variables)]
  categorical_cols <-
    c(crm_vars[sapply(test[, mget(crm_vars)], is.factor)],
      "month", "year")
  # These two columns are kept as numeric codes rather than one-hot encoded.
  categorical_cols <-
    categorical_cols[categorical_cols %!in% c("bb_seg_comercial", "aa_cod_ocupacion")]
  numeric_cols <-
    c(crm_vars[!(sapply(test[, mget(crm_vars)], is.factor))],
      products_variables,
      c("bb_seg_comercial", "aa_cod_ocupacion"))
  # one-hot encode the categorical features
  ohe_test <- dummyVars( ~ ., data = test[, mget(categorical_cols)])
  ohe_test <- predict(ohe_test, test[, mget(categorical_cols)])
  ohe_cols <- colnames(ohe_test)
  ohe_test <- as(data.matrix(ohe_test), "dgCMatrix")
  # model matrix used for prediction
  test_dmatrix <-
    cbind(ohe_test, data.matrix(test[, mget(numeric_cols)]))
  rm(ohe_test)
  gc()
  # load the previously trained model from its binary file
  print("load model")
  model_alias_path <-
    os.path.join(models_path, model_alias_score)
  model <- xgb.load(os.path.join(model_alias_path, paste0(model_alias_score, ".model")))
  test[, pred := predict(model, test_dmatrix)]
  if (performance_calculation) {
    # Deciles of predicted probability (bucket 1 = highest scores).
    test[, bucket := ntile(-pred, 10)]
    setkey(test, bucket)
    fwrite(test[, .(llave, periodo, target, pred, bucket)],
           os.path.join(results_alias_path, "pred_score.csv"))
    # performance metrics and variable importance reports
    print("Making metrics model")
    cols <- c(ohe_cols, numeric_cols)
    performanceReport(test,
                      path = os.path.join(results_path, date_to_score),
                      modelFolder = model_alias_score,
                      alias = "score")
    importance_matrix <-
      xgb.importance(feature_names = cols, model = model)
    no_quantil <-
      c("llave", "month.id", "aa_cod_ciiu", "departamento")
    quantil <- names(test)[names(test) %!in% no_quantil]
    exportQuantile(
      dt = test[, mget(quantil)],
      mostImp = importance_matrix,
      outputPath = os.path.join(results_path, date_to_score,
                                model_alias_score, "quantile_score.csv")
    )
  } else {
    # No target available: write the predictions only.
    test[, bucket := ntile(-pred, 10)]
    setkey(test, bucket)
    fwrite(test[, .(llave, periodo, pred, bucket)],
           os.path.join(results_alias_path, "pred_score.csv"))
  }
  invisible(NULL)
}
| /Scripts/scoring/score_mensual.R | no_license | DanielRZapataS/Recommendation_System_Retail_Banking | R | false | false | 6,023 | r |
#' Score monthly table
#'
#' @param model_alias_score : Model previously made to use to score (character)
#' @param date_to_score : Month to score (character)
#' @param model_type_score : product of model choseen to score
#' @param performance_calculation : use if exist a target to compare (logical)
#'
#' @return
#' @export
#'
#' @examples
score_mensual <- function(model_alias_score,
date_to_score,
model_type_score,
performance_calculation ) {
print(paste("Scoring", model_type_score, "of", model_alias_scoring,
"month", date_to_score))
ifelse(performance_calculation,
results_path <- os.path.join(results_path, "Performance"),
results_path <- os.path.join(results_path, "Prediction"))
dir.create(os.path.join(results_path, date_to_score))
results_alias_path <-
os.path.join(results_path, date_to_score ,model_alias_scoring)
dir.create(results_alias_path)
print("Upload master table")
master <- get.path(master_path, "master") %>% readRDS()
all_variables <- names(master)
lags <- names(master)[grepl("month_ago", names(master))]
lags_product <- lags[grepl(model_type_score, lags)]
last_owned <- names(master)[grepl("last.owned", names(master))]
last_owned_product <- last_owned[grepl(model_type_score, last_owned)]
selected_var <- all_variables[all_variables %!in% c(lags, last_owned)]
selected_var <- c(selected_var, lags_product, last_owned_product)
master <- master[, mget(selected_var)]
test_cut <-
as.Date(paste0(as.character(date_to_score), '01'), format = '%Y%m%d')
months_cut <- c(test_cut, floor_date(as.Date(test_cut) + months(2), "month"))
master <- master[periodo %in% months_cut]
print("Creating target variable")
var_target <- paste0("pr_", model_type_score)
target <-
master[, .(llave,
month.id = month.id - 2,
var_target_2monthsFurther = get(var_target))]
master <-
merge(master,
target,
by = c("llave", "month.id"),
all.x = TRUE)
master[, target := ifelse(var_target_2monthsFurther - get(var_target) > 0, 1, 0)]
master[, var_target_2monthsFurther := NULL]
rm(target)
gc()
# add in purchase frequency feature for each product
print("Load purchase frequencies")
purchase.frequencies <-
readRDS(get.path(feature_path, get_month(1)))
purchase.frequencies <- data.table(purchase.frequencies)
purchase_frequencies_products <-
names(purchase.frequencies)[grepl(model_type_modeling, names(purchase.frequencies))]
purchase.frequencies <-
purchase.frequencies[, mget(c(
"llave",
"month.id",
purchase_frequencies_products,
"num.transactions"
))]
master <-
merge(master,
purchase.frequencies,
by = c("month.id", "llave"),
all.x = TRUE)
master[is.na(master)] <- 0
rm(purchase.frequencies)
gc()
# create train and test tables
print("Creating score tables")
# converting cutting months
test_cut <-
as.Date(paste0(as.character(date_to_score), '01'), format = '%Y%m%d')
# divinding master table
test <- master[periodo == test_cut]
test[is.na(test)] <- 0
rm(master)
gc()
# Classifing variables into categories
# there's a bunch of features related to the products, and thus they have similar
# names. Separate them out to keep things straight
id_variables <-
c("llave", "periodo", "month.id", "month", "year", "target")
products_variables <- names(test)[grepl("pr_", names(test))]
products_variables <-
c(products_variables, "total_products", "num.transactions")
crm_vars <-
names(test)[names(test) %!in% c(id_variables, products_variables)]
categorical_cols <-
c(crm_vars[sapply(test[, mget(crm_vars)], is.factor)],
"month", "year")
categorical_cols <-
categorical_cols[categorical_cols %!in% c("bb_seg_comercial", "aa_cod_ocupacion")]
numeric_cols <-
c(crm_vars[!(sapply(test[, mget(crm_vars)], is.factor))],
products_variables,
c("bb_seg_comercial", "aa_cod_ocupacion"))
# one-hot encode the categorical features
ohe_test <- dummyVars( ~ ., data = test[, mget(categorical_cols)])
ohe_test <- predict(ohe_test, test[, mget(categorical_cols)])
ohe_cols <- colnames(ohe_test)
ohe_test <- as(data.matrix(ohe_test), "dgCMatrix")
# data to train and predict
test_dmatrix <-
cbind(ohe_test, data.matrix(test[, mget(numeric_cols)]))
rm( ohe_test)
gc()
# save model to binary local file
print("load model")
model_alias_path <-
os.path.join(models_path, model_alias_scoring)
model <- xgb.load(os.path.join(model_alias_path, paste0(model_alias_scoring, ".model")))
test[, pred := predict(model, test_dmatrix)]
if(performance_calculation){
test[,bucket := ntile(-pred, 10)]
setkey(test, bucket)
fwrite(test[, .(llave, periodo, target, pred, bucket)],
os.path.join(results_alias_path, "pred_score.csv"))
# metrics model
print("Making metrics model")
cols <- c(ohe_cols, numeric_cols)
performanceReport(test,
path = os.path.join(results_path, date_to_score),
modelFolder = model_alias_scoring,
alias = "score")
importance_matrix <-
xgb.importance(feature_names = cols, model = model)
no_quantil <-
c("llave", "month.id", "aa_cod_ciiu", "departamento")
quantil <- names(test)[names(test) %!in% no_quantil]
exportQuantile(
dt = test[, mget(quantil)],
mostImp = importance_matrix ,
outputPath = os.path.join(results_path, date_to_score,
model_alias_scoring, "quantile_score.csv")
)
}else{
test[,bucket := ntile(-pred, 10)]
setkey(test, bucket)
fwrite(test[, .(llave, periodo, pred, bucket)],
os.path.join(results_alias_path, "pred_score.csv"))
}
}
|
# Title     : Vectors in R (course notes, aula 2.1)
# Objective : Create, name and subset numeric/character vectors
# Created by: krlse
# Created on: 26/04/2021

# c() concatenates values into a vector; the resulting class (numeric or
# character) follows from the elements supplied.
codes <- c(380, 124, 818)
country <- c("italy", "canada", "egypt")

# Element names can be given directly inside c(); quoting the names is
# optional, so the two assignments below produce identical vectors.
codes <- c(italy = 380, canada = 124, egypt = 818)
codes <- c("italy" = 380, "canada" = 124, "egypt" = 818)

# Alternatively, attach names after the fact with names().
codes <- c(380, 124, 818)
country <- c("italy", "canada", "egypt")
names(codes) <- country

# Square brackets subset by position.
codes[2]
codes[c(1, 3)]
codes[1:2]

# Named vectors can also be subset by name; an unknown name yields NA.
codes["canada"]
codes[c("egypt", "italy")]
codes[c("egypt", "brazil")]
codes

# setNames() builds a named vector in a single step.
abb <- setNames(c("IT", "CN", "EG"), country)
str(codes)

# Mixing types forces coercion to character; converting back to numeric
# turns non-numbers into NA (with a coercion warning).
x <- c(1, "canada", 3)
x
as.numeric(x)
| /aula_2_1.R | no_license | krlsedu/Data-Science-R-Basics | R | false | false | 1,018 | r | # Title : TODO
# Objective : Course notes on creating, naming and subsetting vectors in R
# Created by: krlse
# Created on: 26/04/2021
# We may create vectors of class numeric or character with the concatenate function
codes <- c(380, 124, 818)
country <- c("italy", "canada", "egypt")
# We can also name the elements of a numeric vector directly inside c().
# Note that the two lines of code below have the same result (quoting is optional)
codes <- c(italy = 380, canada = 124, egypt = 818)
codes <- c("italy" = 380, "canada" = 124, "egypt" = 818)
# We can also name the elements of a numeric vector using the names() function
codes <- c(380, 124, 818)
country <- c("italy","canada","egypt")
names(codes) <- country
# Using square brackets is useful for subsetting to access specific elements of a vector
codes[2]
codes[c(1,3)]
codes[1:2]
# If the entries of a vector are named, they may be accessed by referring to their name;
# a name that does not exist (e.g. "brazil") returns NA
codes["canada"]
codes[c("egypt","italy")]
codes[c("egypt","brazil")]
codes
# A character vector can be named the same way
abb <- c("IT","CN","EG")
names(abb) <- country
# str() shows the structure: a named numeric vector
str(codes)
# Mixing types coerces everything to character; as.numeric() then yields NA
# (with a warning) for elements that are not numbers
x <- c(1, "canada", 3)
x
as.numeric(x)
|
#' Printing outputs of a CopulaCenR object
#' @name print.CopulaCenR
#' @aliases print.CopulaCenR
#' @param x a CopulaCenR object
#' @param ... further arguments (currently unused)
#' @return \code{x}, invisibly (standard behavior for print methods).
#' @importFrom stats printCoefmat
#' @export
print.CopulaCenR <- function(x, ...) {
  cat("Copula: ", x$copula, "\n")
  # A parametric fit stores the marginal distribution in m.dist; the
  # sieve/semiparametric fit leaves it NULL.
  if (!is.null(x$m.dist)) {
    cat("Margin: ", x$m.dist, "\n")
  } else {
    cat("Margin: semiparametric","\n")
  }
  cat("\n")
  # A multi-column summary table carries Wald statistics and p-values;
  # a single column holds point estimates only.
  if (dim(x$summary)[2] > 1) {
    printCoefmat(x$summary, P.values = TRUE, has.Pvalue = TRUE)
    cat("(The Wald tests are testing whether each coefficient is 0)", "\n")
  } else {
    printCoefmat(x$summary, P.values = FALSE, has.Pvalue = FALSE)
  }
  cat("\n")
  cat("Final llk: ", x$llk, "\n")
  if (x$code == 0) {
    cat("Convergence is completed successfully", "\n")
  }
  invisible(x)
}
#' Summarizing outputs of a CopulaCenR object
#' @name summary.CopulaCenR
#' @aliases summary.CopulaCenR
#' @param object a CopulaCenR object
#' @param ... further arguments (currently unused)
#' @export
summary.CopulaCenR <- function(object, ...) {
  # Collect only the components needed by print.summary.CopulaCenR.
  out <- list(copula  = object$copula,
              m.dist  = object$m.dist,
              summary = object$summary,
              llk     = object$llk,
              AIC     = object$AIC,
              code    = object$code)
  structure(out, class = "summary.CopulaCenR")
}
#' Print the summary of a CopulaCenR object
#' @name print.summary.CopulaCenR
#' @aliases print.summary.CopulaCenR
#' @param x a summary.CopulaCenR object
#' @param ... further arguments (currently unused)
#' @return \code{x}, invisibly (standard behavior for print methods).
#' @importFrom stats printCoefmat
#' @export
print.summary.CopulaCenR <- function(x, ...) {
  cat("Copula: ", x$copula, "\n")
  # A parametric fit stores the marginal distribution in m.dist; the
  # sieve/semiparametric fit leaves it NULL.
  if (!is.null(x$m.dist)) {
    cat("Margin: ", x$m.dist, "\n")
  } else {
    cat("Margin: semiparametric","\n")
  }
  cat("\n")
  # A multi-column summary table carries Wald statistics and p-values;
  # a single column holds point estimates only.
  if (dim(x$summary)[2] > 1) {
    printCoefmat(x$summary, P.values = TRUE, has.Pvalue = TRUE)
    cat("(The Wald tests are testing whether each coefficient is 0)", "\n")
  } else {
    printCoefmat(x$summary, P.values = FALSE, has.Pvalue = FALSE)
  }
  cat("\n")
  cat("Final llk: ", x$llk, "\n")
  if (x$code == 0) {
    cat("Convergence is completed successfully", "\n")
  }
  invisible(x)
}
#' the coefficient estimates of a CopulaCenR object
#' @name coef.CopulaCenR
#' @aliases coef.CopulaCenR
#' @param object a CopulaCenR object
#' @param ... further arguments (currently unused)
#' @export
coef.CopulaCenR <- function(object, ...) {
  # The first column of the summary table holds the point estimates.
  object$summary[, 1]
}
#' the log-likelihood of a CopulaCenR object
#' @name logLik.CopulaCenR
#' @aliases logLik.CopulaCenR
#' @param object a CopulaCenR object
#' @param ... further arguments (currently unused)
#' @importFrom stats logLik
#' @export
logLik.CopulaCenR <- function(object, ...) {
  object$llk
}
#' the AIC of a CopulaCenR object
#' @name AIC.CopulaCenR
#' @aliases AIC.CopulaCenR
#' @param object a CopulaCenR object
#' @param ... further arguments (currently unused)
#' @param k numeric, with k = 2 for AIC (kept for generic compatibility; the
#'   stored AIC is returned as-is)
#' @importFrom stats AIC
#' @export
AIC.CopulaCenR <- function(object, ..., k = 2) {
  object$AIC
}
#' the BIC of a CopulaCenR object
#' @name BIC.CopulaCenR
#' @aliases BIC.CopulaCenR
#' @param object a CopulaCenR object
#' @param ... further arguments (currently unused)
#' @importFrom stats BIC
#' @export
BIC.CopulaCenR <- function(object, ...) {
  # BIC = log(n) * k - 2 * llk, with n subjects and k estimated parameters.
  n.obs <- nrow(object$indata1)
  n.par <- length(object$estimates)
  log(n.obs) * n.par - 2 * object$llk
}
| /R/fun_S3.R | no_license | cran/CopulaCenR | R | false | false | 4,103 | r | #' Printing outputs of a CopulaCenR object
#' @name print.CopulaCenR
#' @aliases print.CopulaCenR
#' @param x a CopulaCenR object
#' @param ... further arguments (currently unused)
#' @return \code{x}, invisibly (standard behavior for print methods).
#' @importFrom stats printCoefmat
#' @export
print.CopulaCenR <- function(x, ...) {
  cat("Copula: ", x$copula, "\n")
  # A parametric fit stores the marginal distribution in m.dist; the
  # sieve/semiparametric fit leaves it NULL.
  if (!is.null(x$m.dist)) {
    cat("Margin: ", x$m.dist, "\n")
  } else {
    cat("Margin: semiparametric","\n")
  }
  cat("\n")
  # A multi-column summary table carries Wald statistics and p-values;
  # a single column holds point estimates only.
  if (dim(x$summary)[2] > 1) {
    printCoefmat(x$summary, P.values = TRUE, has.Pvalue = TRUE)
    cat("(The Wald tests are testing whether each coefficient is 0)", "\n")
  } else {
    printCoefmat(x$summary, P.values = FALSE, has.Pvalue = FALSE)
  }
  cat("\n")
  cat("Final llk: ", x$llk, "\n")
  if (x$code == 0) {
    cat("Convergence is completed successfully", "\n")
  }
  invisible(x)
}
#' Summarizing outputs of a CopulaCenR object
#' @name summary.CopulaCenR
#' @aliases summary.CopulaCenR
#' @param object a CopulaCenR object
#' @param ... further arguments (currently unused)
#' @export
summary.CopulaCenR <- function(object, ...) {
  # Collect only the components needed by print.summary.CopulaCenR.
  out <- list(copula  = object$copula,
              m.dist  = object$m.dist,
              summary = object$summary,
              llk     = object$llk,
              AIC     = object$AIC,
              code    = object$code)
  structure(out, class = "summary.CopulaCenR")
}
#' Print the summary of a CopulaCenR object
#' @name print.summary.CopulaCenR
#' @aliases print.summary.CopulaCenR
#' @param x a summary.CopulaCenR object
#' @param ... further arguments (currently unused)
#' @return \code{x}, invisibly (standard behavior for print methods).
#' @importFrom stats printCoefmat
#' @export
print.summary.CopulaCenR <- function(x, ...) {
  cat("Copula: ", x$copula, "\n")
  # A parametric fit stores the marginal distribution in m.dist; the
  # sieve/semiparametric fit leaves it NULL.
  if (!is.null(x$m.dist)) {
    cat("Margin: ", x$m.dist, "\n")
  } else {
    cat("Margin: semiparametric","\n")
  }
  cat("\n")
  # A multi-column summary table carries Wald statistics and p-values;
  # a single column holds point estimates only.
  if (dim(x$summary)[2] > 1) {
    printCoefmat(x$summary, P.values = TRUE, has.Pvalue = TRUE)
    cat("(The Wald tests are testing whether each coefficient is 0)", "\n")
  } else {
    printCoefmat(x$summary, P.values = FALSE, has.Pvalue = FALSE)
  }
  cat("\n")
  cat("Final llk: ", x$llk, "\n")
  if (x$code == 0) {
    cat("Convergence is completed successfully", "\n")
  }
  invisible(x)
}
#' the coefficient estimates of a CopulaCenR object
#' @name coef.CopulaCenR
#' @aliases coef.CopulaCenR
#' @param object a CopulaCenR object
#' @param ... further arguments (currently unused)
#' @export
coef.CopulaCenR <- function(object, ...) {
  # The first column of the summary table holds the point estimates.
  object$summary[, 1]
}
#' the log-likelihood of a CopulaCenR object
#' @name logLik.CopulaCenR
#' @aliases logLik.CopulaCenR
#' @param object a CopulaCenR object
#' @param ... further arguments (currently unused)
#' @importFrom stats logLik
#' @export
logLik.CopulaCenR <- function(object, ...) {
  object$llk
}
#' the AIC of a CopulaCenR object
#' @name AIC.CopulaCenR
#' @aliases AIC.CopulaCenR
#' @param object a CopulaCenR object
#' @param ... further arguments (currently unused)
#' @param k numeric, with k = 2 for AIC (kept for generic compatibility; the
#'   stored AIC is returned as-is)
#' @importFrom stats AIC
#' @export
AIC.CopulaCenR <- function(object, ..., k = 2) {
  object$AIC
}
#' the BIC of a CopulaCenR object
#' @name BIC.CopulaCenR
#' @aliases BIC.CopulaCenR
#' @param object a CopulaCenR object
#' @param ... further arguments (currently unused)
#' @importFrom stats BIC
#' @export
BIC.CopulaCenR <- function(object, ...) {
  # BIC = log(n) * k - 2 * llk, with n subjects and k estimated parameters.
  n.obs <- nrow(object$indata1)
  n.par <- length(object$estimates)
  log(n.obs) * n.par - 2 * object$llk
}
|
#!/usr/bin/Rscript
library(fpc)
library(data.table)
library(plyr)
# Run k-means for k = 2..6, write a parallel-coordinates PDF of the cluster
# centroids for each k, and print each cluster's share of the observations.
#
# Args:
#   data:   numeric matrix/data frame of per-user activity counts
#   out:    output path prefix; files are written as <out>-<k>.pdf
#   keycol: row identifiers (only used by the commented-out CSV dump below)
#
# Called for its side effects (PDFs + printed summaries); no useful return value.
# NOTE(review): parcoord() comes from MASS, which is never attached here --
# confirm it is loaded by one of the libraries above.
analyse <- function(data, out, keycol) {
  # Try numerous K's to discover natural groupings
  for (k in c(2,3,4,5,6)) {
    fit <- kmeans(data, k)
    # Plot parallel coordinates of the centroids
    pdf(paste0(out,"-",k,".pdf"), width=10, height=6)
    parcoord(fit$centers, var.label=TRUE, lty=seq(1,k,1))
    legend("right",bg="white", legend = seq(1,k,1), lty=seq(1,k,1))
    dev.off()
    # Append the group to the data for future reference
    groups <- data.frame(fit$cluster)
    colnames(groups)<-c('group')
    # Commented out due to GitHub not liking large files
    # foo <- cbind(data, groups, keycol)
    # filename <- paste0(out,"-",k,".csv")
    # write.csv(foo, file=filename)
    # Print summaries of the groups: size and fraction of each cluster
    print(paste0("for K=", k))
    for (j in seq(1,k,1)) {
      print(paste0(" group ", j, " = ", length(groups[groups$group==j,]),"/", length(groups$group)," :=> ", (length(groups[groups$group==j,])/length(groups$group))))
    }
  }
}
# ID of experiment
exp_id <- "gte1000"  # renamed from `exp` to avoid masking base::exp()
# Create a folder for our results
out <- paste0("../results/", exp_id, "/")
dir.create(out)
out <- paste0(out, exp_id)
# Redirect printed summaries to a log file; restored with sink() after analyse().
sink(paste0("../results/", exp_id, "/output.txt"))
# Read data and analyse
data <- read.csv("../data/cleaneddata.csv")
data <- data[!data$user == "", ]    # drop rows with an empty user id
data <- data[data$total >= 1000, ]  # activity threshold; change at will
activity <- cbind(data$commits,
                  data$commit_comments,
                  data$issues,
                  data$issue_comments,
                  data$pull_requests,
                  data$pull_requests_comments)
colnames(activity) <- c('commits',
                        'commit_comments',
                        'issues',
                        'issue_comments',
                        'pull_requests',
                        'pull_requests_comments')
analyse(activity, out, data$key)
# Close the sink so console output is restored (it was previously left open).
sink()
| /scripts/clusterAnalysis.R | no_license | ossmeter/msr14-challenge | R | false | false | 1,730 | r | #!/usr/bin/Rscript
library(fpc)
library(data.table)
library(plyr)
# Run k-means for k = 2..6, write a parallel-coordinates PDF of the cluster
# centroids for each k, and print each cluster's share of the observations.
#
# Args:
#   data:   numeric matrix/data frame of per-user activity counts
#   out:    output path prefix; files are written as <out>-<k>.pdf
#   keycol: row identifiers (only used by the commented-out CSV dump below)
#
# Called for its side effects (PDFs + printed summaries); no useful return value.
# NOTE(review): parcoord() comes from MASS, which is never attached here --
# confirm it is loaded by one of the libraries above.
analyse <- function(data, out, keycol) {
  # Try numerous K's to discover natural groupings
  for (k in c(2,3,4,5,6)) {
    fit <- kmeans(data, k)
    # Plot parallel coordinates of the centroids
    pdf(paste0(out,"-",k,".pdf"), width=10, height=6)
    parcoord(fit$centers, var.label=TRUE, lty=seq(1,k,1))
    legend("right",bg="white", legend = seq(1,k,1), lty=seq(1,k,1))
    dev.off()
    # Append the group to the data for future reference
    groups <- data.frame(fit$cluster)
    colnames(groups)<-c('group')
    # Commented out due to GitHub not liking large files
    # foo <- cbind(data, groups, keycol)
    # filename <- paste0(out,"-",k,".csv")
    # write.csv(foo, file=filename)
    # Print summaries of the groups: size and fraction of each cluster
    print(paste0("for K=", k))
    for (j in seq(1,k,1)) {
      print(paste0(" group ", j, " = ", length(groups[groups$group==j,]),"/", length(groups$group)," :=> ", (length(groups[groups$group==j,])/length(groups$group))))
    }
  }
}
# ID of experiment
exp_id <- "gte1000"  # renamed from `exp` to avoid masking base::exp()
# Create a folder for our results
out <- paste0("../results/", exp_id, "/")
dir.create(out)
out <- paste0(out, exp_id)
# Redirect printed summaries to a log file; restored with sink() after analyse().
sink(paste0("../results/", exp_id, "/output.txt"))
# Read data and analyse
data <- read.csv("../data/cleaneddata.csv")
data <- data[!data$user == "", ]    # drop rows with an empty user id
data <- data[data$total >= 1000, ]  # activity threshold; change at will
activity <- cbind(data$commits,
                  data$commit_comments,
                  data$issues,
                  data$issue_comments,
                  data$pull_requests,
                  data$pull_requests_comments)
colnames(activity) <- c('commits',
                        'commit_comments',
                        'issues',
                        'issue_comments',
                        'pull_requests',
                        'pull_requests_comments')
analyse(activity, out, data$key)
# Close the sink so console output is restored (it was previously left open).
sink()
|
library(kriens)

context("recursively composing functions")

# path() must reject degenerate inputs.
test_that("if an empty list is passed the function errors", {
  expect_error(path())
})

test_that("if NULL is passed the function errors", {
  expect_error(path(NULL))
})

# path(list(...)) should be equivalent to manual composition with %.%.
test_that("path(list(h, g, f)) = h %.% g %.% f", {
  f <- function(x, ret) {
    ret(x + 1)
  }
  g <- function(x, ret) {
    ret(x * 2)
  }
  h <- function(x, ret) {
    ret(x) * ret(x)
  }
  composed_manually <- h %.% g %.% f
  composed_by_path <- path(list(h, g, f))
  # Compare both compositions over a range of inputs.
  for (n in seq_len(100)) {
    expect_equal(composed_manually(n, identity), composed_by_path(n, identity))
  }
})
| /data/genthat_extracted_code/kriens/tests/test.path.R | no_license | surayaaramli/typeRrh | R | false | false | 548 | r | library(kriens)
context("recursively composing functions")
# path() must reject degenerate inputs.
test_that("if an empty list is passed the function errors", {
  expect_error(path())
})
test_that("if NULL is passed the function errors", {
  expect_error(path(NULL))
})
# path(list(...)) should be equivalent to manual composition with %.%.
test_that("path(list(h, g, f)) = h %.% g %.% f", {
  f <- function(x, ret) {
    ret(x+1)
  }
  g <- function(x, ret) {
    ret(x*2)
  }
  h <- function(x, ret){
    ret(x) * ret(x)
  }
  r1 <- h %.% g %.% f
  r2 <- path(list(h, g, f))
  # Compare both compositions over a range of inputs.
  for(i in 1:100) {
    expect_equal(r1(i, identity), r2(i, identity))
  }
})
|
#'-------------------------------------------------------------------------
#'-------------------------------------------------------------------------
#' 2018.06.11.
#'
#' Generate Tables 1-4 and Figure 1 for GOBACK manuscript.
#'-------------------------------------------------------------------------
#'-------------------------------------------------------------------------
# Table 1 -----------------------------------------------------------------
require(gmodels)
load('W:/Old_genepi2/Jeremy/GOBACK/Datasets/Old Datasets/goback.v20180611.rdata')
# Cohort composition: registry state by birth year (NA column flags missing years).
table(goback$state, goback$birth.yr, useNA = 'ifany')
# Marginal and two-way frequencies: state, birth defect status, cancer status.
CrossTable(goback$state, prop.chisq = FALSE)
CrossTable(goback$state, goback$any.birthdefect, prop.chisq = FALSE)
CrossTable(goback$state, goback$cancer, prop.chisq = FALSE)
CrossTable(goback$any.birthdefect, goback$cancer, prop.chisq = FALSE)
# Cancer-by-defect cross-tabulation within each state.
# NOTE(review): filter() is presumably dplyr's, but dplyr is not attached in
# this section -- confirm it is loaded before running.
for (i in unique(goback$state)){
  tmp <- filter(goback, state == i)
  print(i)
  CrossTable(tmp$cancer, tmp$any.birthdefect, prop.chisq = FALSE)
  rm(i, tmp)
}
# Table 2 -----------------------------------------------------------------
setwd('Z:/Jeremy/GOBACK/Datasets/')
load('goback.v20180611.rdata')
#' Pare down the dataset to help with performance.
goback <- goback[, c(1:16,107)]
#' Collapse plurality to singleton vs multiple.
goback$plu.cat <- factor(ifelse(goback$plu > 1, 1, 0),
                         levels = c(0,1),
                         labels = c('singleton','multiple'))
#' The simplified version of the table will compare children only by birth defects status.
# Categorical covariates (columns 3, 4, 17, 18): cross-tabulate against defect status.
for (i in c(3,4,17,18)){
  print(names(goback[i]))
  print(gmodels::CrossTable(goback[,i], goback$any.birthdefect, prop.t = FALSE, prop.chisq = FALSE, prop.r = FALSE, chisq = TRUE))
}
# Continuous covariates (columns 7, 9, 6): mean, SD and t-test by defect status.
for (i in c(7,9,6)){
  print(names(goback[i]))
  print(aggregate(goback[,i] ~ goback$any.birthdefect, data = goback, mean))
  print(aggregate(goback[,i] ~ goback$any.birthdefect, data = goback, sd))
  print(t.test(goback[,i] ~ goback$any.birthdefect, data = goback, na.rm = TRUE))
}
rm(list = ls()); gc()
# Table 4: chromosomal and genetic conditions -----------------------------
require(survival)
setwd('Z:/Jeremy/GOBACK/Datasets/')
load('goback.chrom.v20180611.rdata')
# Keep demographics (1:21) plus the defect/cancer indicator columns (95:156).
goback.chrom <- goback.chrom[, c(1:21,95:156)]
#' Any anomaly, any cancer.
goback.surv <- data.frame(time = goback.chrom$person.yrs,
                          cancer = goback.chrom$cancer,
                          defect = goback.chrom$any.chromosomal.anomaly,
                          sex = factor(goback.chrom$sex,
                                       levels = c(1,2),
                                       labels = c('Male','Female')),
                          m.age = goback.chrom$m.age,
                          state = goback.chrom$state.num)
cox <- coxph(Surv(time, cancer) ~ defect + m.age + sex + state, data = goback.surv)
cox.coef <- summary(cox)$coefficients
# Proportional-hazards diagnostic for the defect term. Recorded so this row has
# the same eight columns as the loop-generated rows appended to the same CSV
# below (previously this row lacked p.value.zph, misaligning the output file).
test.ph <- cox.zph(cox)
test.ph <- test.ph$table['defect','p']
rm(cox, goback.surv); gc()
estimates <- data.frame(defect = 'any.chromosomal.anomaly',
                        cancer = 'any.cancer',
                        HR = exp(cox.coef[1,1]),
                        ci.lower = exp(cox.coef[1,1]-(1.96*cox.coef[1,3])),
                        ci.upper = exp(cox.coef[1,1]+(1.96*cox.coef[1,3])),
                        p.value.coef = cox.coef[1,5],
                        p.value.zph = test.ph,
                        num.comorbid = table(goback.chrom$any.chromosomal.anomaly, goback.chrom$cancer)[2,2])
write.table(estimates, file = 'Z:/Jeremy/GOBACK/R Outputs/goback.specific.cancer.by.any.chromosomal.defect.v20180611.csv', sep=',', append = TRUE,
            row.names = FALSE, col.names = FALSE)
#' Models for cancers except ALL, Wilms, hepatoblastoma in children with
#' chromosomal anomalies or single-gene syndromes.
# Outer loop: cancer columns (40:78 minus ALL/Wilms/hepatoblastoma at 39, 53, 58).
# Inner loop: the ten chromosomal/single-gene defect columns (22:31).
# Models are fit only when at least 5 children have both the defect and the cancer.
for (j in c(40:52,54:57,59:78)){
  for (i in 22:31){
    tab <- table(goback.chrom[,i], goback.chrom[,j])[2,2]
    if (tab >= 5){
      goback.surv <- data.frame(time = goback.chrom$person.yrs,
                                cancer = goback.chrom[,j],
                                defect = goback.chrom[,i],
                                sex = factor(goback.chrom$sex,
                                             levels = c(1,2),
                                             labels = c('Male','Female')),
                                m.age = goback.chrom$m.age,
                                state = goback.chrom$state.num)
      cox <- coxph(Surv(time, cancer) ~ defect + m.age + sex + state, data = goback.surv)
      cox.coef <- summary(cox)$coefficients
      # p-value of the proportional-hazards test for the defect term.
      test.ph <- cox.zph(cox)
      test.ph <- test.ph$table['defect','p']
      rm(cox, goback.surv); gc()
      estimates <- data.frame(defect = names(goback.chrom[i]),
                              cancer = names(goback.chrom[j]),
                              HR = exp(cox.coef[1,1]),
                              ci.lower = exp(cox.coef[1,1]-(1.96*cox.coef[1,3])),
                              ci.upper = exp(cox.coef[1,1]+(1.96*cox.coef[1,3])),
                              p.value.coef = cox.coef[1,5],
                              p.value.zph = test.ph,
                              num.comorbid = tab)
      write.table(estimates, file = 'Z:/Jeremy/GOBACK/R Outputs/goback.specific.cancer.by.any.chromosomal.defect.v20180611.csv', sep=',', append = TRUE,
                  row.names = FALSE, col.names = FALSE)
    }
    else{
      next
    }
  }
}
rm(cox.coef, estimates, test.ph, tab, i, j); gc()
#' Models for ALL, Wilms, hepatoblastoma, in children with
#' chromosomal anomalies or single gene syndromes.
# Same as above, but these three cancers are additionally adjusted for birth weight.
for (j in c(39,53,58)){
  for (i in 22:31){
    tab <- table(goback.chrom[,i], goback.chrom[,j])[2,2]
    if (tab >= 5){
      goback.surv <- data.frame(time = goback.chrom$person.yrs,
                                cancer = goback.chrom[,j],
                                defect = goback.chrom[,i],
                                sex = factor(goback.chrom$sex,
                                             levels = c(1,2),
                                             labels = c('Male','Female')),
                                m.age = goback.chrom$m.age,
                                state = goback.chrom$state.num,
                                birth.wt = goback.chrom$birth.wt)
      cox <- coxph(Surv(time, cancer) ~ defect + m.age + sex + state + birth.wt, data = goback.surv)
      cox.coef <- summary(cox)$coefficients
      test.ph <- cox.zph(cox)
      test.ph <- test.ph$table['defect','p']
      rm(cox, goback.surv); gc()
      estimates <- data.frame(defect = names(goback.chrom[i]),
                              cancer = names(goback.chrom[j]),
                              HR = exp(cox.coef[1,1]),
                              ci.lower = exp(cox.coef[1,1]-(1.96*cox.coef[1,3])),
                              ci.upper = exp(cox.coef[1,1]+(1.96*cox.coef[1,3])),
                              p.value.coef = cox.coef[1,5],
                              p.value.zph = test.ph,
                              num.comorbid = tab)
      write.table(estimates, file = 'Z:/Jeremy/GOBACK/R Outputs/goback.specific.cancer.by.any.chromosomal.defect.v20180611.csv', sep=',', append = TRUE,
                  row.names = FALSE, col.names = FALSE)
    }
    else{
      next
    }
  }
}
rm(list = ls()); gc()
# Table 4: Non-chromosomal defects ----------------------------------------
require(survival)
setwd('Z:/Jeremy/GOBACK/Datasets/')
load('goback.nochrom.v20180611.rdata')
#' Any anomaly, any cancer.
goback.surv <- data.frame(time = goback.nochrom$person.yrs,
                          cancer = goback.nochrom$cancer,
                          defect = goback.nochrom$any.birthdefect,
                          sex = factor(goback.nochrom$sex,
                                       levels = c(1,2),
                                       labels = c('Male','Female')),
                          m.age = goback.nochrom$m.age,
                          state = goback.nochrom$state.num)
cox <- coxph(Surv(time, cancer) ~ defect + m.age + sex + state, data = goback.surv)
cox.coef <- summary(cox)$coefficients
# Proportional-hazards diagnostic for the defect term.
test.ph <- cox.zph(cox)
test.ph <- test.ph$table['defect','p']
rm(cox, goback.surv); gc()
estimates <- data.frame(defect = 'any.nonchromosomal.anomaly',
                        cancer = 'any.cancer',
                        HR = exp(cox.coef[1,1]),
                        ci.lower = exp(cox.coef[1,1]-(1.96*cox.coef[1,3])),
                        ci.upper = exp(cox.coef[1,1]+(1.96*cox.coef[1,3])),
                        p.value.coef = cox.coef[1,5],
                        p.value.zph = test.ph,
                        num.comorbid = table(goback.nochrom$any.birthdefect, goback.nochrom$cancer)[2,2])
write.table(estimates, file = 'Z:/Jeremy/GOBACK/R Outputs/goback.specific.cancer.by.any.nonchromosomal.defect.v20180611.csv', sep=',', append = TRUE,
            row.names = FALSE, col.names = FALSE)
# Keep demographics plus the cancer-site indicator columns (112:151); under this
# subset, columns 9:48 are the specific cancers iterated below.
goback.nochrom <- goback.nochrom[,c(1,156,3,6,7,9,15,16,112:151)]; gc()
#' Models for cancers except ALL, Wilms, hepatoblastoma in children with non-chromosomal structural birth defects.
# Fit only when at least 5 children have both any birth defect and the cancer.
for (j in c(10:22,24:27,29:48)){
  tab <- table(goback.nochrom[,'any.birthdefect'], goback.nochrom[,j])[2,2]
  if (tab >= 5){
    goback.surv <- data.frame(time = goback.nochrom$person.yrs,
                              cancer = goback.nochrom[,j],
                              defect = goback.nochrom$any.birthdefect,
                              sex = factor(goback.nochrom$sex,
                                           levels = c(1,2),
                                           labels = c('Male','Female')),
                              m.age = goback.nochrom$m.age,
                              state = goback.nochrom$state.num)
    cox <- coxph(Surv(time, cancer) ~ defect + m.age + sex + state, data = goback.surv)
    cox.coef <- summary(cox)$coefficients
    test.ph <- cox.zph(cox)
    test.ph <- test.ph$table['defect','p']
    rm(cox, goback.surv); gc()
    estimates <- data.frame(defect = 'any.nonchromosomal.defect',
                            cancer = names(goback.nochrom[j]),
                            HR = exp(cox.coef[1,1]),
                            ci.lower = exp(cox.coef[1,1]-(1.96*cox.coef[1,3])),
                            ci.upper = exp(cox.coef[1,1]+(1.96*cox.coef[1,3])),
                            p.value.coef = cox.coef[1,5],
                            p.value.zph = test.ph,
                            num.comorbid = tab)
    write.table(estimates, file = 'Z:/Jeremy/GOBACK/R Outputs/goback.specific.cancer.by.any.nonchromosomal.defect.v20180611.csv', sep=',', append = TRUE,
                row.names = FALSE, col.names = FALSE)
  }
  else{
    next
  }
}
rm(cox.coef, estimates, tab, test.ph, j); gc()
#' Models for ALL, Wilms and hepatoblastoma in children with non-chromosomal structural birth defects.
# These three cancers (columns 9, 23, 28) are additionally adjusted for birth weight.
for (j in c(9,23,28)){
  tab <- table(goback.nochrom[,'any.birthdefect'], goback.nochrom[,j])[2,2]
  if (tab >= 5){
    goback.surv <- data.frame(time = goback.nochrom$person.yrs,
                              cancer = goback.nochrom[,j],
                              defect = goback.nochrom$any.birthdefect,
                              sex = factor(goback.nochrom$sex,
                                           levels = c(1,2),
                                           labels = c('Male','Female')),
                              m.age = goback.nochrom$m.age,
                              state = goback.nochrom$state.num,
                              birth.wt = goback.nochrom$birth.wt)
    cox <- coxph(Surv(time, cancer) ~ defect + m.age + sex + state + birth.wt, data = goback.surv)
    cox.coef <- summary(cox)$coefficients
    test.ph <- cox.zph(cox)
    test.ph <- test.ph$table['defect','p']
    rm(cox, goback.surv); gc()
    estimates <- data.frame(defect = 'any.nonchromosomal.defect',
                            cancer = names(goback.nochrom[j]),
                            HR = exp(cox.coef[1,1]),
                            ci.lower = exp(cox.coef[1,1]-(1.96*cox.coef[1,3])),
                            ci.upper = exp(cox.coef[1,1]+(1.96*cox.coef[1,3])),
                            p.value.coef = cox.coef[1,5],
                            p.value.zph = test.ph,
                            num.comorbid = tab)
    write.table(estimates, file = 'Z:/Jeremy/GOBACK/R Outputs/goback.specific.cancer.by.any.nonchromosomal.defect.v20180611.csv', sep=',', append = TRUE,
                row.names = FALSE, col.names = FALSE)
  }
  else{
    next
  }
}
rm(list = ls()); gc()
# Table 3: Chromosomal defects --------------------------------------------
require(survival)
setwd('Z:/Jeremy/GOBACK/Datasets/')
load('goback.chrom.v20180611.rdata')
# One model per chromosomal defect (columns 95:104) against any cancer,
# fit only when at least 5 children have both the defect and any cancer.
for (i in 95:104){
  tab <- table(goback.chrom[,i], goback.chrom$cancer)[2,2]
  if (tab >= 5){
    goback.surv <- data.frame(time = goback.chrom$person.yrs,
                              cancer = goback.chrom$cancer,
                              defect = goback.chrom[,i],
                              sex = factor(goback.chrom$sex,
                                           levels = c(1,2),
                                           labels = c('Male','Female')),
                              m.age = goback.chrom$m.age,
                              state = goback.chrom$state.num)
    cox <- coxph(Surv(time, cancer) ~ defect + m.age + sex + state, data = goback.surv)
    cox.coef <- summary(cox)$coefficients
    # p-value of the proportional-hazards test for the defect term.
    test.ph <- cox.zph(cox)
    test.ph <- test.ph$table['defect','p']
    rm(cox, goback.surv); gc()
    estimates <- data.frame(defect = names(goback.chrom[i]),
                            cancer = 'any.cancer',
                            HR = exp(cox.coef[1,1]),
                            ci.lower = exp(cox.coef[1,1]-(1.96*cox.coef[1,3])),
                            ci.upper = exp(cox.coef[1,1]+(1.96*cox.coef[1,3])),
                            p.value.coef = cox.coef[1,5],
                            p.value.zph = test.ph,
                            num.comorbid = tab)
    write.table(estimates, file = 'Z:/Jeremy/GOBACK/R Outputs/goback.any.cancer.by.chromosomal.defect.v20180611.csv', sep=',', append = TRUE,
                row.names = FALSE, col.names = FALSE)
  }
  else{
    next
  }
}
rm(list = ls()); gc()
# Table 3: Non-chromosomal defects ----------------------------------------
require(survival)
setwd('Z:/Jeremy/GOBACK/Datasets/')
load('goback.nochrom.v20180611.rdata')
# Keep demographics plus the specific-defect columns (22:94) and the any-cancer
# indicator (107); under this subset, columns 8:80 are the defect variables.
goback.nochrom <- goback.nochrom[,c(1,156,3,6,7,9,15,22:94,107)]; gc()
# One model per non-chromosomal defect against any cancer, fit only when at
# least 5 children have both the defect and any cancer.
for (i in 8:80){
  tab <- table(goback.nochrom[,i], goback.nochrom$cancer)[2,2]
  if (tab >= 5){
    goback.surv <- data.frame(time = goback.nochrom$person.yrs,
                              cancer = goback.nochrom$cancer,
                              defect = goback.nochrom[,i],
                              sex = factor(goback.nochrom$sex,
                                           levels = c(1,2),
                                           labels = c('Male','Female')),
                              m.age = goback.nochrom$m.age,
                              state = goback.nochrom$state.num)
    cox <- coxph(Surv(time, cancer) ~ defect + m.age + sex + state, data = goback.surv)
    cox.coef <- summary(cox)$coefficients
    # p-value of the proportional-hazards test for the defect term.
    test.ph <- cox.zph(cox)
    test.ph <- test.ph$table['defect','p']
    rm(cox, goback.surv); gc()
    estimates <- data.frame(defect = names(goback.nochrom[i]),
                            cancer = 'any.cancer',
                            HR = exp(cox.coef[1,1]),
                            ci.lower = exp(cox.coef[1,1]-(1.96*cox.coef[1,3])),
                            ci.upper = exp(cox.coef[1,1]+(1.96*cox.coef[1,3])),
                            p.value.coef = cox.coef[1,5],
                            p.value.zph = test.ph,
                            num.comorbid = tab)
    write.table(estimates, file = 'Z:/Jeremy/GOBACK/R Outputs/goback.any.cancer.by.nonchromosomal.defect.v20180611.csv', sep=',', append = TRUE,
                row.names = FALSE, col.names = FALSE)
  }
  else{
    next
  }
}
rm(list = ls()); gc()
# Reformat output for convenience -----------------------------------------
#' Reformat tables 3, 4 and 5 to allow easy copy-paste of HRs and CIs into a Word document.
# Column names for the headerless CSVs written by the model loops above.
tablenames <- c('defect','cancer','hr','ci.lower','ci.upper','p.val.coef','p.val.zph','n.comorbid')
table3part1 <- read.csv(file='Z:/Jeremy/GOBACK/R outputs/goback.any.cancer.by.chromosomal.defect.v20180611.csv',
                        header = FALSE, stringsAsFactors = FALSE)
table3part2 <- read.csv(file='Z:/Jeremy/GOBACK/R outputs/goback.any.cancer.by.nonchromosomal.defect.v20180611.csv',
                        header = FALSE, stringsAsFactors = FALSE)
table3 <- rbind(table3part1, table3part2)
names(table3) <- tablenames
# Round HR and CI bounds (columns 3:5) to one decimal place.
for (i in 3:5){
  table3[,i] <- round(table3[,i], 1)
}
# Collapse to a single "HR (lower-upper)" display string and keep
# defect, n.comorbid and the combined string.
table3$hr.ci <- paste0(table3$hr, ' (',table3$ci.lower,'-',table3$ci.upper,')')
table3 <- table3[,c(1,8,9)]
write.csv(table3, file = 'Z:/Jeremy/GOBACK/Tables/table3.raw.v20180611.csv', row.names = FALSE)
table4part1 <- read.csv(file='Z:/Jeremy/GOBACK/R outputs/goback.specific.cancer.by.any.chromosomal.defect.v20180611.csv',
                        header = FALSE, stringsAsFactors = FALSE)
table4part2 <- read.csv(file='Z:/Jeremy/GOBACK/R outputs/goback.specific.cancer.by.any.nonchromosomal.defect.v20180611.csv',
                        header = FALSE, stringsAsFactors = FALSE)
table4 <- rbind(table4part1, table4part2)
names(table4) <- tablenames
for (i in 3:5){
  table4[,i] <- round(table4[,i], 1)
}
table4$hr.ci <- paste0(table4$hr, ' (',table4$ci.lower,'-',table4$ci.upper,')')
table4 <- table4[,c(1,2,8,9)]
write.csv(table4, file = 'Z:/Jeremy/GOBACK/Tables/table4.raw.v20180611.csv', row.names = FALSE)
# Table 5 comes from the pre-computed "top hits" Cox results.
load(file = 'Z:/Jeremy/GOBACK/Datasets/Expanded datasets/goback.coxph.top.hits.v20180612.rdata')
table5 <- data.frame(defect = top.hits$defect, cancer = top.hits$cancer, n.comorbid = top.hits$n.comorbid,
                     hr.ci = paste0(round(top.hits$hr, 1),' (',round(top.hits$ci.lower, 1),'-',round(top.hits$ci.upper, 1),')'))
write.csv(table5, file = 'Z:/Jeremy/GOBACK/Tables/table5.raw.v20180611.csv', row.names = FALSE)
rm(list = ls()); gc()
# Figure 1: Body system defects and body system cancers -------------------
#'-------------------------------------------------------------------------
#'-------------------------------------------------------------------------
#' Philip wants the heatmap to be at the level of the body system defects
#' variables (e.g., 'any CNS anomaly') and body system level cancer (e.g.,
#' 'any CNS cancer'). This is a subset of the results already generated
#' by the Cox models (see the script 'Cox regression models - GOBACK data)
#' and can be abstracted from them.
#'-------------------------------------------------------------------------
#'-------------------------------------------------------------------------
load("Z:/Jeremy/GOBACK/Datasets/Expanded datasets/goback.coxph.results.v20180612.rdata")
#' TODO: patterns may need to change; or alternatively, rename variables to comply with patterns.
# NOTE(review): patterns are used as regexes, so '.' matches any character --
# confirm this cannot over-match, or pass fixed = TRUE to grepl().
pat1 <- 'conganomalies.'; pat2 <- 'chromosomalanomalies'
pat3 <- '.any'; pat4 <- '.other'
# Keep body-system defect rows paired with body-system (".any") cancers,
# dropping the residual ".other" defect categories.
tmp <- goback.coxmodels[grepl(pat1, goback.coxmodels$defect) & grepl(pat3, goback.coxmodels$cancer), ]
tmp <- tmp[!grepl(pat4, tmp$defect), ]
tmp2 <- goback.coxmodels[grepl(pat2, goback.coxmodels$defect) & grepl(pat3, goback.coxmodels$cancer), ]
tmp2 <- tmp2[!grepl(pat4, tmp2$defect), ]
tmp <- rbind(tmp, tmp2); rm(tmp2,pat1, pat2, pat3, pat4); gc()
heatmap <- tmp
save(heatmap, file = 'Z:/Jeremy/GOBACK/Datasets/figure1.v20180126.1.rdata')
# Etable 5 ----------------------------------------------------------------
# eTable 5, non-chromosomal defects: sensitivity analysis of any-cancer risk by
# specific birth defect, restricted to Texas and excluding cancers diagnosed in
# the first year of life (mirrors the Table 3 non-chromosomal analysis).
require(survival); require(dplyr)
#' Risk of any cancer among children with specific BDs.
#' Only Texas. Only cases DX'd 1 year of age or greater.
load('Z:/Jeremy/GOBACK/Datasets/goback.nochrom.v20180611.rdata')
# Flag cancer cases diagnosed before age 1, then keep TX births without that flag.
goback.nochrom$exclude <- ifelse(goback.nochrom$cancer == 1 & goback.nochrom$person.yrs < 1, 1, 0)
goback.nochrom <- filter(filter(goback.nochrom, state == 'TX'),
                         exclude == 0)
# BUGFIX: the original kept the Table 4 column set c(1,156,3,6,7,9,15,16,112:151)
# (48 columns, with cancer outcomes rather than defect indicators after column 8)
# while the loop below indexes columns 8:80 -- running past the last column and
# targeting the wrong variables.  This section parallels Table 3, so use that
# selection: columns 22:94 are the specific defect indicators, which occupy
# positions 8:80 after subsetting.
goback.nochrom <- goback.nochrom[,c(1,156,3,6,7,9,15,22:94)]; gc()
for (i in 8:80){
  # Children with both the defect and a cancer; model only if at least one exists.
  tab <- table(goback.nochrom[,i], goback.nochrom$cancer)[2,2]
  if (tab >= 1){
    goback.surv <- data.frame(time = goback.nochrom$person.yrs,
                              cancer = goback.nochrom$cancer,
                              defect = goback.nochrom[,i],
                              sex = factor(goback.nochrom$sex,
                                           levels = c(1,2),
                                           labels = c('Male','Female')),
                              m.age = goback.nochrom$m.age)
    # Texas only, so no 'state' term in this model (unlike the main analyses).
    cox <- coxph(Surv(time, cancer) ~ defect + m.age + sex, data = goback.surv)
    cox.coef <- summary(cox)$coefficients
    rm(cox, goback.surv); gc()
    # Wald 95% CI on the log-hazard scale, exponentiated.
    estimates <- data.frame(defect = names(goback.nochrom[i]),
                            cancer = 'any.cancer',
                            HR = exp(cox.coef[1,1]),
                            ci.lower = exp(cox.coef[1,1]-(1.96*cox.coef[1,3])),
                            ci.upper = exp(cox.coef[1,1]+(1.96*cox.coef[1,3])),
                            p.value.coef = cox.coef[1,5],
                            num.comorbid = tab)
    # BUGFIX: append to the same file as the chromosomal loop ('etable5.csv').
    # The original wrote 'etable5.v20180611.csv', which the later read-back step
    # (read.csv('./eTable5.csv')) never picked up, silently dropping every
    # non-chromosomal sensitivity result from the side-by-side table.
    write.table(estimates, file = 'Z:/Jeremy/GOBACK/R Outputs/etable5.csv', sep=',', append = TRUE,
                row.names = FALSE, col.names = FALSE)
  }
  else{
    next
  }
}
rm(list = ls()); gc()
# eTable 5, chromosomal defects: same TX-only, age>=1 sensitivity analysis.
# NOTE(review): relative path -- assumes the working directory is still
# 'Z:/Jeremy/GOBACK/Datasets/' from an earlier setwd(); confirm before running
# this section standalone.  dplyr's filter() also survives the preceding
# rm(list = ls()) only because rm() does not detach packages.
load('goback.chrom.v20180611.rdata')
goback.chrom$exclude <- ifelse(goback.chrom$cancer == 1 & goback.chrom$person.yrs < 1, 1, 0)
goback.chrom <- filter(filter(goback.chrom, state == 'TX'),
exclude == 0)
# Columns 95:101: chromosomal/genetic syndrome indicator variables.
for (i in 95:101){
# Children with both the syndrome and a cancer; model only if at least one exists.
tab <- table(goback.chrom[,i], goback.chrom$cancer)[2,2]
if (tab >= 1){
goback.surv <- data.frame(time = goback.chrom$person.yrs,
cancer = goback.chrom$cancer,
defect = goback.chrom[,i],
sex = factor(goback.chrom$sex,
levels = c(1,2),
labels = c('Male','Female')),
m.age = goback.chrom$m.age)
# Texas only, so no 'state' term here.
cox <- coxph(Surv(time, cancer) ~ defect + m.age + sex, data = goback.surv)
cox.coef <- summary(cox)$coefficients
rm(cox, goback.surv); gc()
# Wald 95% CI on the log-hazard scale, exponentiated.
estimates <- data.frame(defect = names(goback.chrom[i]),
cancer = 'any.cancer',
HR = exp(cox.coef[1,1]),
ci.lower = exp(cox.coef[1,1]-(1.96*cox.coef[1,3])),
ci.upper = exp(cox.coef[1,1]+(1.96*cox.coef[1,3])),
p.value.coef = cox.coef[1,5],
num.comorbid = tab)
write.table(estimates, file = 'Z:/Jeremy/GOBACK/R Outputs/etable5.csv', sep=',', append = TRUE,
row.names = FALSE, col.names = FALSE)
}
else{
next
}
}
rm(list = ls()); gc()
#' Let's load the output back in and cbind it to the original table 4.
setwd('Z:/Jeremy/GOBACK/R outputs/')
#' Have rounded HR and CI columns to 2 decimal places in Excel for convenience.
# NOTE(review): './eTable5.csv' matches the loop output 'etable5.csv' only on a
# case-insensitive filesystem (Windows); also confirm BOTH sensitivity loops wrote
# to this file -- the non-chromosomal loop historically wrote etable5.v20180611.csv.
tab.sens <- read.csv(file = './eTable5.csv', header = TRUE, stringsAsFactors = FALSE)
tab.og.nochrom <- read.csv(file = './table 4 nochrom.csv', header = TRUE, stringsAsFactors = FALSE)
tab.og.chrom <- read.csv(file = './table 4 chrom.csv', header = TRUE, stringsAsFactors = FALSE)
tab.og <- select(rbind(tab.og.chrom, tab.og.nochrom), -p.val.zph); rm(tab.og.chrom, tab.og.nochrom)
# Join original and sensitivity estimates on defect x cancer; duplicated column
# names get dplyr's .x (original) / .y (sensitivity) suffixes.
etable5 <- rename(left_join(tab.og, tab.sens, by = c('defect','cancer')),
num.comorbid.sensitivity = n.comorbid)
etable5$hr <- with(etable5, paste0(hr.x,' (',ci.lower.x,'-',ci.upper.x,')'))
etable5$hr.sensitivity <- with(etable5, paste0(hr.y,' (',ci.lower.y,'-',ci.upper.y,')'))
# Flag rows where the HR moved by >=50% in either direction, or where the original
# CI excluded 1 but the sensitivity CI does not.
etable5$change.flag <- ifelse((etable5$hr.x/etable5$hr.y <= 0.5 | etable5$hr.x/etable5$hr.y >= 1.5) |
(etable5$ci.lower.x > 1 & etable5$ci.lower.y < 1), 1, 0)
tmp <- select(etable5, defect, cancer, hr, hr.sensitivity, change.flag, num.comorbid, num.comorbid.sensitivity)
write.csv(tmp, file = 'Z:/Jeremy/GOBACK/R outputs/eTable5.sidebyside.csv', row.names = FALSE)
| /Tables - manuscript.R | permissive | schrawj/GOBACK | R | false | false | 24,968 | r | #'-------------------------------------------------------------------------
#'-------------------------------------------------------------------------
#' 2018.06.11.
#'
#' Generate Tables 1-4 and Figure 1 for GOBACK manuscript.
#'-------------------------------------------------------------------------
#'-------------------------------------------------------------------------
# Table 1 -----------------------------------------------------------------
# Table 1: cohort description -- births, birth defects, and cancers by state.
require(gmodels)
load('W:/Old_genepi2/Jeremy/GOBACK/Datasets/Old Datasets/goback.v20180611.rdata')
# Births per state and year, counting missing values explicitly.
table(goback$state, goback$birth.yr, useNA = 'ifany')
CrossTable(goback$state, prop.chisq = FALSE)
CrossTable(goback$state, goback$any.birthdefect, prop.chisq = FALSE)
CrossTable(goback$state, goback$cancer, prop.chisq = FALSE)
CrossTable(goback$any.birthdefect, goback$cancer, prop.chisq = FALSE)
# Cancer-by-defect cross-tabulation within each state.
for (i in unique(goback$state)){
  # BUGFIX: the original called filter(), a dplyr verb, but only gmodels is loaded
  # in this section, so the call would hit stats::filter() and fail.  Base
  # subsetting is behavior-identical and drops the hidden dependency.
  tmp <- goback[goback$state == i, ]
  print(i)
  CrossTable(tmp$cancer, tmp$any.birthdefect, prop.chisq = FALSE)
  rm(i, tmp)
}
# Table 2 -----------------------------------------------------------------
setwd('Z:/Jeremy/GOBACK/Datasets/')
load('goback.v20180611.rdata')
#' Pare down the dataset to help with performance.
goback <- goback[, c(1:16,107)]
#' Collapse plurality to singleton vs multiple.
goback$plu.cat <- factor(ifelse(goback$plu > 1, 1, 0),
levels = c(0,1),
labels = c('singleton','multiple'))
#' The simplified version of the table will compare children only by birth defects status.
# Categorical demographics (columns 3, 4, 17, 18) by birth-defect status,
# with a chi-square test.
for (i in c(3,4,17,18)){
print(names(goback[i]))
print(gmodels::CrossTable(goback[,i], goback$any.birthdefect, prop.t = FALSE, prop.chisq = FALSE, prop.r = FALSE, chisq = TRUE))
}
# Continuous demographics (columns 7, 9, 6): group means, SDs, and t-tests by
# birth-defect status.  NOTE(review): the formula already references full vectors,
# so the data = goback argument is redundant here.
for (i in c(7,9,6)){
print(names(goback[i]))
print(aggregate(goback[,i] ~ goback$any.birthdefect, data = goback, mean))
print(aggregate(goback[,i] ~ goback$any.birthdefect, data = goback, sd))
print(t.test(goback[,i] ~ goback$any.birthdefect, data = goback, na.rm = TRUE))
}
rm(list = ls()); gc()
rm(list = ls()); gc()
# Table 4: chromosomal and genetic conditions -----------------------------
# Table 4, chromosomal: any chromosomal anomaly vs. any cancer, Cox model
# adjusted for maternal age, sex, and state.
require(survival)
setwd('Z:/Jeremy/GOBACK/Datasets/')
load('goback.chrom.v20180611.rdata')
# Keep demographics (1:21) plus the defect/cancer indicator columns (95:156).
goback.chrom <- goback.chrom[, c(1:21,95:156)]
#' Any anomaly, any cancer.
goback.surv <- data.frame(time = goback.chrom$person.yrs,
                          cancer = goback.chrom$cancer,
                          defect = goback.chrom$any.chromosomal.anomaly,
                          sex = factor(goback.chrom$sex,
                                       levels = c(1,2),
                                       labels = c('Male','Female')),
                          m.age = goback.chrom$m.age,
                          state = goback.chrom$state.num)
cox <- coxph(Surv(time, cancer) ~ defect + m.age + sex + state, data = goback.surv)
cox.coef <- summary(cox)$coefficients
# BUGFIX: also run the proportional-hazards diagnostic here.  The original omitted
# it, so this first appended row had 7 fields while every later row in the same
# CSV has 8; on read-back, read.csv() padded the short row with a trailing NA,
# shifting num.comorbid into the p.val.zph column for this row.
test.ph <- cox.zph(cox)
test.ph <- test.ph$table['defect','p']
rm(cox, goback.surv); gc()
# Wald 95% CI on the log-hazard scale, exponentiated.
estimates <- data.frame(defect = 'any.chromosomal.anomaly',
                        cancer = 'any.cancer',
                        HR = exp(cox.coef[1,1]),
                        ci.lower = exp(cox.coef[1,1]-(1.96*cox.coef[1,3])),
                        ci.upper = exp(cox.coef[1,1]+(1.96*cox.coef[1,3])),
                        p.value.coef = cox.coef[1,5],
                        p.value.zph = test.ph,
                        num.comorbid = table(goback.chrom$any.chromosomal.anomaly, goback.chrom$cancer)[2,2])
write.table(estimates, file = 'Z:/Jeremy/GOBACK/R Outputs/goback.specific.cancer.by.any.chromosomal.defect.v20180611.csv', sep=',', append = TRUE,
            row.names = FALSE, col.names = FALSE)
#' Models for cancers except ALL, Wilms, hepatoblastoma in children with
#' chromosomal anomalies or single-gene syndromes.
# Outer loop j: specific cancer outcome columns (skipping 39, 53, 58, which are
# modeled separately below with a birth-weight adjustment).
# Inner loop i (22:31): chromosomal/single-gene syndrome indicator columns.
for (j in c(40:52,54:57,59:78)){
for (i in 22:31){
# Require at least 5 comorbid cases before fitting a model.
tab <- table(goback.chrom[,i], goback.chrom[,j])[2,2]
if (tab >= 5){
goback.surv <- data.frame(time = goback.chrom$person.yrs,
cancer = goback.chrom[,j],
defect = goback.chrom[,i],
sex = factor(goback.chrom$sex,
levels = c(1,2),
labels = c('Male','Female')),
m.age = goback.chrom$m.age,
state = goback.chrom$state.num)
cox <- coxph(Surv(time, cancer) ~ defect + m.age + sex + state, data = goback.surv)
cox.coef <- summary(cox)$coefficients
# Proportional-hazards diagnostic p-value for the defect term.
test.ph <- cox.zph(cox)
test.ph <- test.ph$table['defect','p']
rm(cox, goback.surv); gc()
estimates <- data.frame(defect = names(goback.chrom[i]),
cancer = names(goback.chrom[j]),
HR = exp(cox.coef[1,1]),
ci.lower = exp(cox.coef[1,1]-(1.96*cox.coef[1,3])),
ci.upper = exp(cox.coef[1,1]+(1.96*cox.coef[1,3])),
p.value.coef = cox.coef[1,5],
p.value.zph = test.ph,
num.comorbid = tab)
write.table(estimates, file = 'Z:/Jeremy/GOBACK/R Outputs/goback.specific.cancer.by.any.chromosomal.defect.v20180611.csv', sep=',', append = TRUE,
row.names = FALSE, col.names = FALSE)
}
else{
next
}
}
}
rm(cox.coef, estimates, test.ph, tab, i, j); gc()
#' Models for ALL, Wilms, hepatoblastoma, in children with
#' chromosomal anomalies or single gene syndromes.
# Same models as above, with birth weight as an additional covariate.
for (j in c(39,53,58)){
for (i in 22:31){
tab <- table(goback.chrom[,i], goback.chrom[,j])[2,2]
if (tab >= 5){
goback.surv <- data.frame(time = goback.chrom$person.yrs,
cancer = goback.chrom[,j],
defect = goback.chrom[,i],
sex = factor(goback.chrom$sex,
levels = c(1,2),
labels = c('Male','Female')),
m.age = goback.chrom$m.age,
state = goback.chrom$state.num,
birth.wt = goback.chrom$birth.wt)
cox <- coxph(Surv(time, cancer) ~ defect + m.age + sex + state + birth.wt, data = goback.surv)
cox.coef <- summary(cox)$coefficients
test.ph <- cox.zph(cox)
test.ph <- test.ph$table['defect','p']
rm(cox, goback.surv); gc()
estimates <- data.frame(defect = names(goback.chrom[i]),
cancer = names(goback.chrom[j]),
HR = exp(cox.coef[1,1]),
ci.lower = exp(cox.coef[1,1]-(1.96*cox.coef[1,3])),
ci.upper = exp(cox.coef[1,1]+(1.96*cox.coef[1,3])),
p.value.coef = cox.coef[1,5],
p.value.zph = test.ph,
num.comorbid = tab)
write.table(estimates, file = 'Z:/Jeremy/GOBACK/R Outputs/goback.specific.cancer.by.any.chromosomal.defect.v20180611.csv', sep=',', append = TRUE,
row.names = FALSE, col.names = FALSE)
}
else{
next
}
}
}
rm(list = ls()); gc()
# Table 4: Non-chromosomal defects ----------------------------------------
# Table 4, non-chromosomal: any birth defect vs. any cancer, Cox model adjusted
# for maternal age, sex, and state.
require(survival)
setwd('Z:/Jeremy/GOBACK/Datasets/')
load('goback.nochrom.v20180611.rdata')
#' Any anomaly, any cancer.
goback.surv <- data.frame(time = goback.nochrom$person.yrs,
cancer = goback.nochrom$cancer,
defect = goback.nochrom$any.birthdefect,
sex = factor(goback.nochrom$sex,
levels = c(1,2),
labels = c('Male','Female')),
m.age = goback.nochrom$m.age,
state = goback.nochrom$state.num)
cox <- coxph(Surv(time, cancer) ~ defect + m.age + sex + state, data = goback.surv)
cox.coef <- summary(cox)$coefficients
# Proportional-hazards diagnostic p-value for the defect term.
test.ph <- cox.zph(cox)
test.ph <- test.ph$table['defect','p']
rm(cox, goback.surv); gc()
# Wald 95% CI on the log-hazard scale, exponentiated.
estimates <- data.frame(defect = 'any.nonchromosomal.anomaly',
cancer = 'any.cancer',
HR = exp(cox.coef[1,1]),
ci.lower = exp(cox.coef[1,1]-(1.96*cox.coef[1,3])),
ci.upper = exp(cox.coef[1,1]+(1.96*cox.coef[1,3])),
p.value.coef = cox.coef[1,5],
p.value.zph = test.ph,
num.comorbid = table(goback.nochrom$any.birthdefect, goback.nochrom$cancer)[2,2])
write.table(estimates, file = 'Z:/Jeremy/GOBACK/R Outputs/goback.specific.cancer.by.any.nonchromosomal.defect.v20180611.csv', sep=',', append = TRUE,
row.names = FALSE, col.names = FALSE)
# Keep identifiers/demographics plus the specific-cancer outcome columns
# (112:151), which become columns 9:48 after subsetting.
goback.nochrom <- goback.nochrom[,c(1,156,3,6,7,9,15,16,112:151)]; gc()
#' Models for cancers except ALL, Wilms, hepatoblastoma in children with non-chromosomal structural birth defects.
# Loop j over cancer columns, skipping 9, 23, 28 (modeled below with birth weight).
for (j in c(10:22,24:27,29:48)){
# Require at least 5 comorbid cases before fitting a model.
tab <- table(goback.nochrom[,'any.birthdefect'], goback.nochrom[,j])[2,2]
if (tab >= 5){
goback.surv <- data.frame(time = goback.nochrom$person.yrs,
cancer = goback.nochrom[,j],
defect = goback.nochrom$any.birthdefect,
sex = factor(goback.nochrom$sex,
levels = c(1,2),
labels = c('Male','Female')),
m.age = goback.nochrom$m.age,
state = goback.nochrom$state.num)
cox <- coxph(Surv(time, cancer) ~ defect + m.age + sex + state, data = goback.surv)
cox.coef <- summary(cox)$coefficients
# Proportional-hazards diagnostic p-value for the defect term.
test.ph <- cox.zph(cox)
test.ph <- test.ph$table['defect','p']
rm(cox, goback.surv); gc()
estimates <- data.frame(defect = 'any.nonchromosomal.defect',
cancer = names(goback.nochrom[j]),
HR = exp(cox.coef[1,1]),
ci.lower = exp(cox.coef[1,1]-(1.96*cox.coef[1,3])),
ci.upper = exp(cox.coef[1,1]+(1.96*cox.coef[1,3])),
p.value.coef = cox.coef[1,5],
p.value.zph = test.ph,
num.comorbid = tab)
write.table(estimates, file = 'Z:/Jeremy/GOBACK/R Outputs/goback.specific.cancer.by.any.nonchromosomal.defect.v20180611.csv', sep=',', append = TRUE,
row.names = FALSE, col.names = FALSE)
}
else{
next
}
}
rm(cox.coef, estimates, tab, test.ph, j); gc()
#' Models for ALL, Wilms and hepatoblastoma in children with non-chromosomal structural birth defects.
# Same models as above, with birth weight as an additional covariate.
for (j in c(9,23,28)){
tab <- table(goback.nochrom[,'any.birthdefect'], goback.nochrom[,j])[2,2]
if (tab >= 5){
goback.surv <- data.frame(time = goback.nochrom$person.yrs,
cancer = goback.nochrom[,j],
defect = goback.nochrom$any.birthdefect,
sex = factor(goback.nochrom$sex,
levels = c(1,2),
labels = c('Male','Female')),
m.age = goback.nochrom$m.age,
state = goback.nochrom$state.num,
birth.wt = goback.nochrom$birth.wt)
cox <- coxph(Surv(time, cancer) ~ defect + m.age + sex + state + birth.wt, data = goback.surv)
cox.coef <- summary(cox)$coefficients
test.ph <- cox.zph(cox)
test.ph <- test.ph$table['defect','p']
rm(cox, goback.surv); gc()
estimates <- data.frame(defect = 'any.nonchromosomal.defect',
cancer = names(goback.nochrom[j]),
HR = exp(cox.coef[1,1]),
ci.lower = exp(cox.coef[1,1]-(1.96*cox.coef[1,3])),
ci.upper = exp(cox.coef[1,1]+(1.96*cox.coef[1,3])),
p.value.coef = cox.coef[1,5],
p.value.zph = test.ph,
num.comorbid = tab)
write.table(estimates, file = 'Z:/Jeremy/GOBACK/R Outputs/goback.specific.cancer.by.any.nonchromosomal.defect.v20180611.csv', sep=',', append = TRUE,
row.names = FALSE, col.names = FALSE)
}
else{
next
}
}
rm(list = ls()); gc()
# Table 3: Chromosomal defects --------------------------------------------
# Table 3, chromosomal: any-cancer risk for each chromosomal/genetic syndrome
# (columns 95:104), adjusted for maternal age, sex, and state.
require(survival)
setwd('Z:/Jeremy/GOBACK/Datasets/')
load('goback.chrom.v20180611.rdata')
for (i in 95:104){
# Require at least 5 comorbid cases before fitting a model.
tab <- table(goback.chrom[,i], goback.chrom$cancer)[2,2]
if (tab >= 5){
goback.surv <- data.frame(time = goback.chrom$person.yrs,
cancer = goback.chrom$cancer,
defect = goback.chrom[,i],
sex = factor(goback.chrom$sex,
levels = c(1,2),
labels = c('Male','Female')),
m.age = goback.chrom$m.age,
state = goback.chrom$state.num)
cox <- coxph(Surv(time, cancer) ~ defect + m.age + sex + state, data = goback.surv)
cox.coef <- summary(cox)$coefficients
# Proportional-hazards diagnostic p-value for the defect term.
test.ph <- cox.zph(cox)
test.ph <- test.ph$table['defect','p']
rm(cox, goback.surv); gc()
estimates <- data.frame(defect = names(goback.chrom[i]),
cancer = 'any.cancer',
HR = exp(cox.coef[1,1]),
ci.lower = exp(cox.coef[1,1]-(1.96*cox.coef[1,3])),
ci.upper = exp(cox.coef[1,1]+(1.96*cox.coef[1,3])),
p.value.coef = cox.coef[1,5],
p.value.zph = test.ph,
num.comorbid = tab)
write.table(estimates, file = 'Z:/Jeremy/GOBACK/R Outputs/goback.any.cancer.by.chromosomal.defect.v20180611.csv', sep=',', append = TRUE,
row.names = FALSE, col.names = FALSE)
}
else{
next
}
}
rm(list = ls()); gc()
# Table 3: Non-chromosomal defects ----------------------------------------
# Table 3, non-chromosomal: any-cancer risk for each specific structural defect,
# adjusted for maternal age, sex, and state.
require(survival)
setwd('Z:/Jeremy/GOBACK/Datasets/')
load('goback.nochrom.v20180611.rdata')
# Keep identifiers/demographics plus the specific defect indicators (22:94),
# which become columns 8:80 after subsetting; 107 is state.num.
goback.nochrom <- goback.nochrom[,c(1,156,3,6,7,9,15,22:94,107)]; gc()
for (i in 8:80){
# Require at least 5 comorbid cases before fitting a model.
tab <- table(goback.nochrom[,i], goback.nochrom$cancer)[2,2]
if (tab >= 5){
goback.surv <- data.frame(time = goback.nochrom$person.yrs,
cancer = goback.nochrom$cancer,
defect = goback.nochrom[,i],
sex = factor(goback.nochrom$sex,
levels = c(1,2),
labels = c('Male','Female')),
m.age = goback.nochrom$m.age,
state = goback.nochrom$state.num)
cox <- coxph(Surv(time, cancer) ~ defect + m.age + sex + state, data = goback.surv)
cox.coef <- summary(cox)$coefficients
# Proportional-hazards diagnostic p-value for the defect term.
test.ph <- cox.zph(cox)
test.ph <- test.ph$table['defect','p']
rm(cox, goback.surv); gc()
estimates <- data.frame(defect = names(goback.nochrom[i]),
cancer = 'any.cancer',
HR = exp(cox.coef[1,1]),
ci.lower = exp(cox.coef[1,1]-(1.96*cox.coef[1,3])),
ci.upper = exp(cox.coef[1,1]+(1.96*cox.coef[1,3])),
p.value.coef = cox.coef[1,5],
p.value.zph = test.ph,
num.comorbid = tab)
write.table(estimates, file = 'Z:/Jeremy/GOBACK/R Outputs/goback.any.cancer.by.nonchromosomal.defect.v20180611.csv', sep=',', append = TRUE,
row.names = FALSE, col.names = FALSE)
}
else{
next
}
}
rm(list = ls()); gc()
# Reformat output for convenience -----------------------------------------
#' Reformat tables 3, 4 and 5 to allow easy copy-paste of HRs and CIs into a Word document.
# Column names matching the order written by the modeling loops above.
# NOTE(review): any rows appended WITHOUT a p.value.zph field (7 fields instead of
# 8) will be NA-padded by read.csv, shifting n.comorbid into p.val.zph for those
# rows -- verify the first row of the 'specific cancer by chromosomal defect' file.
tablenames <- c('defect','cancer','hr','ci.lower','ci.upper','p.val.coef','p.val.zph','n.comorbid')
table3part1 <- read.csv(file='Z:/Jeremy/GOBACK/R outputs/goback.any.cancer.by.chromosomal.defect.v20180611.csv',
header = FALSE, stringsAsFactors = FALSE)
table3part2 <- read.csv(file='Z:/Jeremy/GOBACK/R outputs/goback.any.cancer.by.nonchromosomal.defect.v20180611.csv',
header = FALSE, stringsAsFactors = FALSE)
table3 <- rbind(table3part1, table3part2)
names(table3) <- tablenames
# Round HR and both CI bounds to one decimal place.
for (i in 3:5){
table3[,i] <- round(table3[,i], 1)
}
table3$hr.ci <- paste0(table3$hr, ' (',table3$ci.lower,'-',table3$ci.upper,')')
# Cancer is always 'any.cancer' in Table 3, so keep only defect, n.comorbid, hr.ci.
table3 <- table3[,c(1,8,9)]
write.csv(table3, file = 'Z:/Jeremy/GOBACK/Tables/table3.raw.v20180611.csv', row.names = FALSE)
table4part1 <- read.csv(file='Z:/Jeremy/GOBACK/R outputs/goback.specific.cancer.by.any.chromosomal.defect.v20180611.csv',
header = FALSE, stringsAsFactors = FALSE)
table4part2 <- read.csv(file='Z:/Jeremy/GOBACK/R outputs/goback.specific.cancer.by.any.nonchromosomal.defect.v20180611.csv',
header = FALSE, stringsAsFactors = FALSE)
table4 <- rbind(table4part1, table4part2)
names(table4) <- tablenames
for (i in 3:5){
table4[,i] <- round(table4[,i], 1)
}
table4$hr.ci <- paste0(table4$hr, ' (',table4$ci.lower,'-',table4$ci.upper,')')
# Table 4 varies by cancer, so keep defect, cancer, n.comorbid, hr.ci.
table4 <- table4[,c(1,2,8,9)]
write.csv(table4, file = 'Z:/Jeremy/GOBACK/Tables/table4.raw.v20180611.csv', row.names = FALSE)
# Table 5: top hits, formatted to "HR (lower-upper)" strings.
load(file = 'Z:/Jeremy/GOBACK/Datasets/Expanded datasets/goback.coxph.top.hits.v20180612.rdata')
table5 <- data.frame(defect = top.hits$defect, cancer = top.hits$cancer, n.comorbid = top.hits$n.comorbid,
hr.ci = paste0(round(top.hits$hr, 1),' (',round(top.hits$ci.lower, 1),'-',round(top.hits$ci.upper, 1),')'))
write.csv(table5, file = 'Z:/Jeremy/GOBACK/Tables/table5.raw.v20180611.csv', row.names = FALSE)
rm(list = ls()); gc()
# Figure 1: Body system defects and body system cancers -------------------
#'-------------------------------------------------------------------------
#'-------------------------------------------------------------------------
#' Philip wants the heatmap to be at the level of the body system defects
#' variables (e.g., 'any CNS anomaly') and body system level cancer (e.g.,
#' 'any CNS cancer'). This is a subset of the results already generated
#' by the Cox models (see the script 'Cox regression models - GOBACK data)
#' and can be extracted from them.
#'-------------------------------------------------------------------------
#'-------------------------------------------------------------------------
load("Z:/Jeremy/GOBACK/Datasets/Expanded datasets/goback.coxph.results.v20180612.rdata")
#' TODO: patterns may need to change; or alternatively, rename variables to comply with patterns.
# Patterns used to pick body-system-level rows out of the full Cox results.
# NOTE(review): these are regular expressions, so '.' matches any character;
# presumably harmless for these names, but confirm or add fixed = TRUE to grepl().
pat1 <- 'conganomalies.'; pat2 <- 'chromosomalanomalies'
pat3 <- '.any'; pat4 <- '.other'
# Body-system defects crossed with body-system ("any") cancers, minus ".other" defects.
tmp <- goback.coxmodels[grepl(pat1, goback.coxmodels$defect) & grepl(pat3, goback.coxmodels$cancer), ]
tmp <- tmp[!grepl(pat4, tmp$defect), ]
# Same for the chromosomal-anomaly rows.
tmp2 <- goback.coxmodels[grepl(pat2, goback.coxmodels$defect) & grepl(pat3, goback.coxmodels$cancer), ]
tmp2 <- tmp2[!grepl(pat4, tmp2$defect), ]
tmp <- rbind(tmp, tmp2); rm(tmp2,pat1, pat2, pat3, pat4); gc()
heatmap <- tmp
save(heatmap, file = 'Z:/Jeremy/GOBACK/Datasets/figure1.v20180126.1.rdata')
# Etable 5 ----------------------------------------------------------------
# eTable 5, non-chromosomal defects: sensitivity analysis of any-cancer risk by
# specific birth defect, restricted to Texas and excluding cancers diagnosed in
# the first year of life (mirrors the Table 3 non-chromosomal analysis).
require(survival); require(dplyr)
#' Risk of any cancer among children with specific BDs.
#' Only Texas. Only cases DX'd 1 year of age or greater.
load('Z:/Jeremy/GOBACK/Datasets/goback.nochrom.v20180611.rdata')
# Flag cancer cases diagnosed before age 1, then keep TX births without that flag.
goback.nochrom$exclude <- ifelse(goback.nochrom$cancer == 1 & goback.nochrom$person.yrs < 1, 1, 0)
goback.nochrom <- filter(filter(goback.nochrom, state == 'TX'),
                         exclude == 0)
# BUGFIX: the original kept the Table 4 column set c(1,156,3,6,7,9,15,16,112:151)
# (48 columns, with cancer outcomes rather than defect indicators after column 8)
# while the loop below indexes columns 8:80 -- running past the last column and
# targeting the wrong variables.  This section parallels Table 3, so use that
# selection: columns 22:94 are the specific defect indicators, which occupy
# positions 8:80 after subsetting.
goback.nochrom <- goback.nochrom[,c(1,156,3,6,7,9,15,22:94)]; gc()
for (i in 8:80){
  # Children with both the defect and a cancer; model only if at least one exists.
  tab <- table(goback.nochrom[,i], goback.nochrom$cancer)[2,2]
  if (tab >= 1){
    goback.surv <- data.frame(time = goback.nochrom$person.yrs,
                              cancer = goback.nochrom$cancer,
                              defect = goback.nochrom[,i],
                              sex = factor(goback.nochrom$sex,
                                           levels = c(1,2),
                                           labels = c('Male','Female')),
                              m.age = goback.nochrom$m.age)
    # Texas only, so no 'state' term in this model (unlike the main analyses).
    cox <- coxph(Surv(time, cancer) ~ defect + m.age + sex, data = goback.surv)
    cox.coef <- summary(cox)$coefficients
    rm(cox, goback.surv); gc()
    # Wald 95% CI on the log-hazard scale, exponentiated.
    estimates <- data.frame(defect = names(goback.nochrom[i]),
                            cancer = 'any.cancer',
                            HR = exp(cox.coef[1,1]),
                            ci.lower = exp(cox.coef[1,1]-(1.96*cox.coef[1,3])),
                            ci.upper = exp(cox.coef[1,1]+(1.96*cox.coef[1,3])),
                            p.value.coef = cox.coef[1,5],
                            num.comorbid = tab)
    # BUGFIX: append to the same file as the chromosomal loop ('etable5.csv').
    # The original wrote 'etable5.v20180611.csv', which the later read-back step
    # (read.csv('./eTable5.csv')) never picked up, silently dropping every
    # non-chromosomal sensitivity result from the side-by-side table.
    write.table(estimates, file = 'Z:/Jeremy/GOBACK/R Outputs/etable5.csv', sep=',', append = TRUE,
                row.names = FALSE, col.names = FALSE)
  }
  else{
    next
  }
}
rm(list = ls()); gc()
# eTable 5, chromosomal defects: same TX-only, age>=1 sensitivity analysis.
# NOTE(review): relative path -- assumes the working directory is still
# 'Z:/Jeremy/GOBACK/Datasets/' from an earlier setwd(); confirm before running
# this section standalone.  dplyr's filter() also survives the preceding
# rm(list = ls()) only because rm() does not detach packages.
load('goback.chrom.v20180611.rdata')
goback.chrom$exclude <- ifelse(goback.chrom$cancer == 1 & goback.chrom$person.yrs < 1, 1, 0)
goback.chrom <- filter(filter(goback.chrom, state == 'TX'),
exclude == 0)
# Columns 95:101: chromosomal/genetic syndrome indicator variables.
for (i in 95:101){
# Children with both the syndrome and a cancer; model only if at least one exists.
tab <- table(goback.chrom[,i], goback.chrom$cancer)[2,2]
if (tab >= 1){
goback.surv <- data.frame(time = goback.chrom$person.yrs,
cancer = goback.chrom$cancer,
defect = goback.chrom[,i],
sex = factor(goback.chrom$sex,
levels = c(1,2),
labels = c('Male','Female')),
m.age = goback.chrom$m.age)
# Texas only, so no 'state' term here.
cox <- coxph(Surv(time, cancer) ~ defect + m.age + sex, data = goback.surv)
cox.coef <- summary(cox)$coefficients
rm(cox, goback.surv); gc()
# Wald 95% CI on the log-hazard scale, exponentiated.
estimates <- data.frame(defect = names(goback.chrom[i]),
cancer = 'any.cancer',
HR = exp(cox.coef[1,1]),
ci.lower = exp(cox.coef[1,1]-(1.96*cox.coef[1,3])),
ci.upper = exp(cox.coef[1,1]+(1.96*cox.coef[1,3])),
p.value.coef = cox.coef[1,5],
num.comorbid = tab)
write.table(estimates, file = 'Z:/Jeremy/GOBACK/R Outputs/etable5.csv', sep=',', append = TRUE,
row.names = FALSE, col.names = FALSE)
}
else{
next
}
}
rm(list = ls()); gc()
#' Let's load the output back in and cbind it to the original table 4.
setwd('Z:/Jeremy/GOBACK/R outputs/')
#' Have rounded HR and CI columns to 2 decimal places in Excel for convenience.
# NOTE(review): './eTable5.csv' matches the loop output 'etable5.csv' only on a
# case-insensitive filesystem (Windows); also confirm BOTH sensitivity loops wrote
# to this file -- the non-chromosomal loop historically wrote etable5.v20180611.csv.
tab.sens <- read.csv(file = './eTable5.csv', header = TRUE, stringsAsFactors = FALSE)
tab.og.nochrom <- read.csv(file = './table 4 nochrom.csv', header = TRUE, stringsAsFactors = FALSE)
tab.og.chrom <- read.csv(file = './table 4 chrom.csv', header = TRUE, stringsAsFactors = FALSE)
tab.og <- select(rbind(tab.og.chrom, tab.og.nochrom), -p.val.zph); rm(tab.og.chrom, tab.og.nochrom)
# Join original and sensitivity estimates on defect x cancer; duplicated column
# names get dplyr's .x (original) / .y (sensitivity) suffixes.
etable5 <- rename(left_join(tab.og, tab.sens, by = c('defect','cancer')),
num.comorbid.sensitivity = n.comorbid)
etable5$hr <- with(etable5, paste0(hr.x,' (',ci.lower.x,'-',ci.upper.x,')'))
etable5$hr.sensitivity <- with(etable5, paste0(hr.y,' (',ci.lower.y,'-',ci.upper.y,')'))
# Flag rows where the HR moved by >=50% in either direction, or where the original
# CI excluded 1 but the sensitivity CI does not.
etable5$change.flag <- ifelse((etable5$hr.x/etable5$hr.y <= 0.5 | etable5$hr.x/etable5$hr.y >= 1.5) |
(etable5$ci.lower.x > 1 & etable5$ci.lower.y < 1), 1, 0)
tmp <- select(etable5, defect, cancer, hr, hr.sensitivity, change.flag, num.comorbid, num.comorbid.sensitivity)
write.csv(tmp, file = 'Z:/Jeremy/GOBACK/R outputs/eTable5.sidebyside.csv', row.names = FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mzrtsim.R
\name{mzrtsim}
\alias{mzrtsim}
\title{Generate simulated count data with batch effects for npeaks}
\usage{
mzrtsim(npeaks = 1000, ncomp = 0.1, ncond = 2, ncpeaks = 0.1,
nbatch = 3, nbpeaks = 0.1, npercond = 10, nperbatch = c(8, 5, 7),
shape = 2, scale = 3, shapersd = 1, scalersd = 0.18,
batchtype = "mb", seed = 42)
}
\arguments{
\item{npeaks}{Number of peaks to simulate}
\item{ncomp}{percentage of compounds}
\item{ncond}{Number of conditions to simulate}
\item{ncpeaks}{percentage of peaks influenced by conditions}
\item{nbatch}{Number of batches to simulate}
\item{nbpeaks}{percentage of peaks influenced by batches}
\item{npercond}{Number of samples per condition to simulate}
\item{nperbatch}{Number of samples per batch to simulate}
\item{shape}{shape for Weibull distribution of sample mean}
\item{scale}{scale for Weibull distribution of sample mean}
\item{shapersd}{shape for Weibull distribution of sample rsd}
\item{scalersd}{scale for Weibull distribution of sample rsd}
\item{batchtype}{type of batch. 'm' means monotonic, 'b' means block, 'r' means random error, 'mb' means mixed mode of monotonic and block, default 'mb'}
\item{seed}{Random seed for reproducibility}
}
\value{
list with rtmz data matrix, row index of peaks influenced by conditions, row index of peaks influenced by batches, column index of conditions, column index of batches, raw condition matrix, raw batch matrix, peak mean across the samples, peak rsd across the samples
}
\description{
Generate simulated count data with batch effects for npeaks
}
\details{
the numbers of batch columns should be the same with the condition columns.
}
\examples{
sim <- mzrtsim()
}
\seealso{
\code{\link{simmzrt}}
}
| /man/mzrtsim.Rd | no_license | Feigeliudan01/mzrtsim | R | false | true | 1,789 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mzrtsim.R
\name{mzrtsim}
\alias{mzrtsim}
\title{Generate simulated count data with batch effects for npeaks}
\usage{
mzrtsim(npeaks = 1000, ncomp = 0.1, ncond = 2, ncpeaks = 0.1,
nbatch = 3, nbpeaks = 0.1, npercond = 10, nperbatch = c(8, 5, 7),
shape = 2, scale = 3, shapersd = 1, scalersd = 0.18,
batchtype = "mb", seed = 42)
}
\arguments{
\item{npeaks}{Number of peaks to simulate}
\item{ncomp}{percentage of compounds}
\item{ncond}{Number of conditions to simulate}
\item{ncpeaks}{percentage of peaks influenced by conditions}
\item{nbatch}{Number of batches to simulate}
\item{nbpeaks}{percentage of peaks influenced by batchs}
\item{npercond}{Number of samples per condition to simulate}
\item{nperbatch}{Number of samples per batch to simulate}
\item{shape}{shape for Weibull distribution of sample mean}
\item{scale}{scale for Weibull distribution of sample mean}
\item{shapersd}{shape for Weibull distribution of sample rsd}
\item{scalersd}{scale for Weibull distribution of sample rsd}
\item{batchtype}{type of batch. 'm' means monotonic, 'b' means block, 'r' means random error, 'mb' means mixed mode of monotonic and block, default 'mb'}
\item{seed}{Random seed for reproducibility}
}
\value{
list with rtmz data matrix, row index of peaks influenced by conditions, row index of peaks influenced by batchs, column index of conditions, column of batchs, raw condition matrix, raw batch matrix, peak mean across the samples, peak rsd across the samples
}
\description{
Generate simulated count data with batch effects for npeaks
}
\details{
the numbers of batch columns should be the same with the condition columns.
}
\examples{
sim <- mzrtsim()
}
\seealso{
\code{\link{simmzrt}}
}
|
inspect_project <- function(path = ".", write_reports = FALSE, outdir = ".") {
current_directory <- getwd()
setwd(path)
if (!file.exists("./0_metadata/project_overview.yaml")) {
setwd(current_directory)
stop("0_metadata/project_overview.yaml not found!")
}
meta <- read_yaml2("./0_metadata/project_overview.yaml")
if (is.null(meta$n_interviews_handcount)) meta$n_interviews_handcount <- NA
if (is.null(meta$interview_date_key)) meta$interview_date_key <- NA
############################
# initialize daemon report
out <- list(
daemon_report_date = Sys.time(),
project_name = meta$project_name,
principal_investigator = meta$principal_investigator,
interview_start_date = NA,
interview_end_date = NA,
n_interviews_handcount = as.numeric(meta$n_interviews_handcount),
transcribers = NA,
n_transcribers = 0,
reviewers = NA,
n_reviewers = 0,
n_commits = 0,
date_first_commit = NA,
date_last_commit = NA,
all_changes_committed = NA
)
############################
# project initialization checks
out$is_git_repo <- file.exists(".git") | file.exists("../.git")
out$has_gitignore <- file.exists(".gitignore") | file.exists("../.gitignore")
git_set_up <- out$is_git_repo & out$has_gitignore
out$has_metadata_folder <- file.exists("./0_metadata")
out$has_primary_sources_folder <- file.exists("./1_primary_sources")
has_folders <- out$has_metadata_folder & out$has_primary_sources_folder
out$project_structure_correct <- has_folders
if (out$project_structure_correct) {
out$has_template_yaml <- length(dir("./0_metadata", pattern = ".*template.*\\.yaml$")) > 0
out$has_template_pdf <- length(dir("./0_metadata", pattern = ".*template.*\\.pdf$")) > 0
############################
# save the commit history
parse_log_simple() %>% as.data.frame() -> commits
out$n_commits <- nrow(commits)
if (out$n_commits > 0) {
commits$timestamp <- as.POSIXlt(commits$date)
commits$date <- substr(commits$timestamp, 1, 10)
commits$project_name <- meta$project_name
commits$principal_investigator <- meta$principal_investigator
if (write_reports) write.csv(commits, file.path(outdir, "project_commits.csv"),
row.names = FALSE)
}
check_unstaged <- "if [[ `git status --porcelain` ]]; then echo \"TRUE\"; \
else echo \"FALSE\"; fi"
out$all_changes_committed <- !as.logical(system(check_unstaged, intern = TRUE))
############################
# catalogue all files created so far
pdfs <- list.files("./1_primary_sources/1_pdf", full.names = TRUE,
pattern = "*.pdf$", recursive = TRUE)
yamls_transcription1 <- c(
list.files("./1_primary_sources/2_transcription1/1_pdf/0_completed",
pattern = ".yaml", recursive = TRUE, full.names = TRUE),
list.files("./1_primary_sources/2_transcription1/2_yaml",
pattern = ".yaml", recursive = TRUE, full.names = TRUE)
)
yamls_transcription2 <- c(
list.files("./1_primary_sources/2_transcription2/1_pdf/0_completed",
pattern = ".yaml", recursive = TRUE, full.names = TRUE),
list.files("./1_primary_sources/2_transcription2/2_yaml",
pattern = ".yaml", recursive = TRUE, full.names = TRUE)
)
yamls_merged <- c(
list.files("./1_primary_sources/3_transcription_merged",
pattern = ".yaml", recursive = TRUE, full.names = TRUE)
)
csvs <- list.files("./3_relational_tables", pattern = "*\\.csv$", full.names = TRUE)
files <- c(pdfs, yamls_transcription1, yamls_transcription2, yamls_merged, csvs)
if (length(files) > 0) {
project_files <- file.info(files)
project_files <- project_files[order(project_files$ctime), ]
project_files$full_filename <- rownames(project_files)
project_files$filename <- basename(project_files$full_filename)
project_files$dirname <- dirname(project_files$full_filename)
project_files$extension <- tools::file_ext(project_files$filename)
project_files$n_lines <- NA
project_files$is_plaintext <- tolower(project_files$extension) %in% c("yaml", "r", "csv", "txt")
project_files$full_filename[project_files$is_plaintext] %>%
map(R.utils::countLines) %>% as.numeric() -> project_files$n_lines[project_files$is_plaintext]
project_files$osx_creation_date <- NA
for (i in 1:nrow(project_files)) {
call <- paste0("GetFileInfo ", project_files$full_filename[i], " | grep 'created'")
try(project_files$osx_creation_date[i] <- system(call, intern = TRUE))
}
project_files$is_pdf <- tolower(project_files$extension) %in% c("pdf")
project_files$osx_creation_date <- gsub("created: ", "", project_files$osx_creation_date)
project_files$osx_creation_date <- strptime(project_files$osx_creation_date, "%m/%d/%Y %H:%M:%S")
project_files$project_name <- meta$project_name
project_files$principal_investigator <- meta$principal_investigator
project_files <- select(project_files, project_name, principal_investigator, filename,
dirname, extension, n_lines, full_filename, size, ctime, osx_creation_date)
if (write_reports) write.csv(project_files,
file.path(outdir, "project_files.csv"), row.names = FALSE)
if (out$n_commits > 0) {
out$date_first_commit <- as.character(as.Date(min(commits$timestamp)))
out$date_last_commit <- as.character(as.Date(max(commits$timestamp)))
}
}
############################
# track transcription progress
pdfs <- list.files("./1_primary_sources/1_pdf", full.names = TRUE,
pattern = "*.pdf$|*.PDF$", recursive = TRUE)
pdf_hashes <- pdfs %>% basename %>% substr(1, 7)
out$n_interviews_scanned <- length(pdfs)
out$n_interviews_unscanned <- out$n_interviews_handcount - out$n_interviews_scanned
out$scanning_complete <- FALSE
if (!is.na(out$n_interviews_unscanned)) out$scanning_complete <- out$n_interviews_handcount == out$n_interviews_scanned
# inspect completed yamls in 2_transcription1
out$n_transcription1_transcribed <- 0
out$transcription1_yamls_named_correctly <- NA
out$transcription1_yamls_valid <- NA
out$transcription1_complete <- FALSE
yamls1 <- c(
list.files("./1_primary_sources/2_transcription1/1_pdf/0_completed",
pattern = ".yaml", recursive = TRUE, full.names = TRUE),
list.files("./1_primary_sources/2_transcription1/2_yaml",
pattern = ".yaml", recursive = TRUE, full.names = TRUE)
)
out$n_transcription1_transcribed <- length(unique(basename(yamls1)))
if (out$n_transcription1_transcribed > 0) {
files <- yamls1
files %>% basename() %>% substr(1, 7) -> yaml_filename_hashes # change this to be arbitrary length
loads <- rep(NA, length(files))
transcriber_ok <- rep(NA, length(files))
reviewer_ok <- rep(NA, length(files))
stamp_ok <- rep(NA, length(files))
for (i in 1:length(files)) {
loads[i] <- yaml_loads(files[i])
if (loads[i]) {
data <- read_yaml2(files[i])
transcriber_ok[i] <- !bad_transcriber(data$transcriber)
if ("stamp_num" %in% names(data)) {
stamp_ok[i] <- !bad_stamp(data$stamp_num)
}
}
}
if (any(!loads, na.rm = TRUE)) {
print(paste("invalid yamls:", files[!loads]))
}
if (any(!transcriber_ok, na.rm = TRUE)) {
print(paste("missing transcriber information:", files[!transcriber_ok]))
}
if (any(!stamp_ok, na.rm = TRUE)) {
print(paste("invalid stamp number:", files[!stamp_ok]))
}
out$transcription1_yamls_named_correctly <- all(yaml_filename_hashes %in% pdf_hashes)
out$transcription1_yamls_valid <- out$transcription1_yamls_named_correctly &
all(loads) & all(transcriber_ok)
out$transcription1_complete <- all(pdf_hashes %in% yaml_filename_hashes) &
out$transcription1_yamls_valid & out$scanning_complete
# silke wants the transcriber names for each transcription job separated here!
}
# can the yaml be transformed to a json file, and if so, DOES THAT JSON LOAD PROPERLY
# inspect completed yamls in 2_transcription2
out$n_transcription2_transcribed <- 0
out$transcription2_yamls_named_correctly <- NA
out$transcription2_yamls_valid <- NA
out$transcription2_complete <- FALSE
yamls2 <- c(
list.files("./1_primary_sources/2_transcription2/1_pdf/0_completed",
pattern = ".yaml", recursive = TRUE, full.names = TRUE),
list.files("./1_primary_sources/2_transcription2/2_yaml",
pattern = ".yaml", recursive = TRUE, full.names = TRUE)
)
out$double_transcription_started <- length(yamls2) > 0
out$n_transcription2_transcribed <- length(unique(basename(yamls2)))
if (out$double_transcription_started) {
files <- yamls2
files %>% basename() %>% substr(1, 7) -> yaml_filename_hashes
loads <- rep(NA, length(files))
transcriber_ok <- rep(NA, length(files))
reviewer_ok <- rep(NA, length(files))
stamp_ok <- rep(NA, length(files))
for (i in 1:length(files)) {
loads[i] <- yaml_loads(files[i])
if (loads[i]) {
data <- read_yaml2(files[i])
transcriber_ok[i] <- !bad_transcriber(data$transcriber)
if ("stamp_num" %in% names(data)) {
stamp_ok[i] <- !bad_stamp(data$stamp_num)
}
}
}
if (any(!loads, na.rm = TRUE)) {
print(paste("invalid yamls:", files[!loads]))
}
if (any(!transcriber_ok, na.rm = TRUE)) {
print(paste("missing transcriber information:", files[!transcriber_ok]))
}
if (any(!stamp_ok, na.rm = TRUE)) {
print(paste("invalid stamp number:", files[!stamp_ok]))
}
out$transcription2_yamls_named_correctly <- all(yaml_filename_hashes %in% pdf_hashes)
out$transcription2_yamls_valid <- out$transcription2_yamls_named_correctly &
all(loads) & all(transcriber_ok)
out$transcription2_complete <- all(pdf_hashes %in% yaml_filename_hashes) &
out$transcription2_yamls_valid & out$scanning_complete
}
# inspect completed yamls in 3_transcription_merged
out$n_transcription_merged <- 0
out$merged_yamls_named_correctly <- NA
out$merged_yamls_valid <- NA
out$transcription_merged_complete <- FALSE
yamls_merged <- list.files("./1_primary_sources/3_transcription_merged/2_yaml",
pattern = ".yaml", recursive = TRUE, full.names = TRUE)
out$n_transcription_merged <- length(unique(basename(yamls_merged)))
if (out$n_transcription_merged > 0) {
files <- yamls_merged
files %>% basename() %>% substr(1, 7) -> yaml_filename_hashes
loads <- rep(NA, length(files))
transcriber_ok <- rep(NA, length(files))
reviewer_ok <- rep(NA, length(files))
stamp_ok <- rep(NA, length(files))
for (i in 1:length(files)) {
loads[i] <- yaml_loads(files[i])
if (loads[i]) {
data <- read_yaml2(files[i])
transcriber_ok[i] <- !bad_transcriber(data$transcriber)
if ("stamp_num" %in% names(data)) {
stamp_ok[i] <- !bad_stamp(data$stamp_num)
}
# test that the reviewer info is present
}
}
if (any(!loads, na.rm = TRUE)) {
print(paste("invalid yamls:", files[!loads]))
}
if (any(!transcriber_ok, na.rm = TRUE)) {
print(paste("missing transcriber information:", files[!transcriber_ok]))
}
if (any(!stamp_ok, na.rm = TRUE)) {
print(paste("invalid stamp number:", files[!stamp_ok]))
}
out$merged_yamls_named_correctly <- all(yaml_filename_hashes %in% pdf_hashes)
out$merged_yamls_valid <- out$merged_yamls_named_correctly & all(loads) & all(transcriber_ok)
out$transcription_merged_complete <- all(pdf_hashes %in% yaml_filename_hashes) &
out$merged_yamls_valid
}
# make it switch if EITHER yamls in transcription2 OR transcription_merged
# is_double_transcription
if (out$double_transcription_started) {
out$transcription_complete <- out$transcription_merged_complete
} else {
out$transcription_complete <- out$transcription1_complete
}
############################
# extract interviews.csv data
out$has_scrape_yamls <- "scrape_yamls.r" %in% dir("1_primary_sources/2_transcription1")
out$has_relational_tables <- file.exists("./3_relational_tables/interviews.csv")
if (out$has_relational_tables) {
ints <- read.csv("./3_relational_tables/interviews.csv", stringsAsFactors = FALSE)
if (is.na(meta$interview_date_key) | !meta$interview_date_key %in% colnames(ints)) {
print("interview date variable not found")
} else {
out$interview_start_date = sort(as.character(ints[[meta$interview_date_key]]))[1]
out$interview_end_date = rev(sort(as.character(ints[[meta$interview_date_key]])))[1]
}
transcribers <- sort(unique(unlist(strsplit(ints$transcriber, ", "))))
out$transcribers <- paste(transcribers, collapse = ", ")
out$n_transcribers = length(transcribers)
if (length(ints$reviewer) > 0) {
ints$reviewer <- as.character(ints$reviewer)
reviewers <- sort(unique(unlist(strsplit(ints$reviewer, ", "))))
out$reviewers <- paste(reviewers, collapse = ", ")
out$n_reviewers = length(reviewers)
}
}
}
############################
# check relational integrity of tables
# this has to be customized to the project I suppose...
# out$has_relational_integrity <- FALSE
############################
# report findings
# some kind of global completion check?
if (write_reports) write_json(out, file.path(outdir, "project_report.json"), pretty = TRUE)
setwd(current_directory)
return(out)
print(paste(meta$project_name, "inspected!"))
} | /R/inspect_project.r | no_license | babeheim/ecodata | R | false | false | 13,985 | r |
# Private helper: validate a batch of transcription yaml files.
# For each file records whether it parses (`loads`), whether it names a
# transcriber (`transcriber_ok`) and -- when a `stamp_num` field exists --
# whether the stamp number is valid (`stamp_ok`, NA otherwise).
# Problems are printed as they are found.
check_transcription_yamls <- function(files) {
  loads <- rep(NA, length(files))
  transcriber_ok <- rep(NA, length(files))
  stamp_ok <- rep(NA, length(files))
  for (i in seq_along(files)) {
    loads[i] <- yaml_loads(files[i])
    if (loads[i]) {
      data <- read_yaml2(files[i])
      transcriber_ok[i] <- !bad_transcriber(data$transcriber)
      if ("stamp_num" %in% names(data)) {
        stamp_ok[i] <- !bad_stamp(data$stamp_num)
      }
      # TODO: also test that the reviewer info is present (merged yamls)
    }
  }
  # which() drops NAs so we never print "NA" as a filename
  if (any(!loads, na.rm = TRUE)) {
    print(paste("invalid yamls:", files[which(!loads)]))
  }
  if (any(!transcriber_ok, na.rm = TRUE)) {
    print(paste("missing transcriber information:", files[which(!transcriber_ok)]))
  }
  if (any(!stamp_ok, na.rm = TRUE)) {
    print(paste("invalid stamp number:", files[which(!stamp_ok)]))
  }
  list(loads = loads, transcriber_ok = transcriber_ok, stamp_ok = stamp_ok)
}

# Inspect a project directory and summarize its state.
#
# Audits the folder layout (0_metadata, 1_primary_sources,
# 3_relational_tables), records git status and commit history, catalogues
# project files, tracks progress through the single/double transcription
# pipeline and extracts summary information from interviews.csv.
#
# Arguments:
#   path          path to the project root (default: current directory)
#   write_reports if TRUE, write project_commits.csv, project_files.csv and
#                 project_report.json into `outdir`
#   outdir        directory the report files are written to
# Returns: a list of project status flags and counts.
inspect_project <- function(path = ".", write_reports = FALSE, outdir = ".") {
  # Remember the caller's working directory so it can be restored on exit.
  current_directory <- getwd()
  setwd(path)
  if (!file.exists("./0_metadata/project_overview.yaml")) {
    setwd(current_directory)
    stop("0_metadata/project_overview.yaml not found!")
  }
  meta <- read_yaml2("./0_metadata/project_overview.yaml")
  # optional metadata keys default to NA so later checks can proceed
  if (is.null(meta$n_interviews_handcount)) meta$n_interviews_handcount <- NA
  if (is.null(meta$interview_date_key)) meta$interview_date_key <- NA
  ############################
  # initialize daemon report
  out <- list(
    daemon_report_date = Sys.time(),
    project_name = meta$project_name,
    principal_investigator = meta$principal_investigator,
    interview_start_date = NA,
    interview_end_date = NA,
    n_interviews_handcount = as.numeric(meta$n_interviews_handcount),
    transcribers = NA,
    n_transcribers = 0,
    reviewers = NA,
    n_reviewers = 0,
    n_commits = 0,
    date_first_commit = NA,
    date_last_commit = NA,
    all_changes_committed = NA
  )
  ############################
  # project initialization checks
  # the repo root may be this folder or its parent
  out$is_git_repo <- file.exists(".git") || file.exists("../.git")
  out$has_gitignore <- file.exists(".gitignore") || file.exists("../.gitignore")
  out$has_metadata_folder <- file.exists("./0_metadata")
  out$has_primary_sources_folder <- file.exists("./1_primary_sources")
  has_folders <- out$has_metadata_folder && out$has_primary_sources_folder
  out$project_structure_correct <- has_folders
  if (out$project_structure_correct) {
    out$has_template_yaml <- length(dir("./0_metadata", pattern = ".*template.*\\.yaml$")) > 0
    out$has_template_pdf <- length(dir("./0_metadata", pattern = ".*template.*\\.pdf$")) > 0
    ############################
    # save the commit history
    commits <- parse_log_simple() %>% as.data.frame()
    out$n_commits <- nrow(commits)
    if (out$n_commits > 0) {
      commits$timestamp <- as.POSIXlt(commits$date)
      commits$date <- substr(commits$timestamp, 1, 10)
      commits$project_name <- meta$project_name
      commits$principal_investigator <- meta$principal_investigator
      if (write_reports) write.csv(commits, file.path(outdir, "project_commits.csv"),
        row.names = FALSE)
    }
    # shell test: any output from `git status --porcelain` means uncommitted changes
    check_unstaged <- "if [[ `git status --porcelain` ]]; then echo \"TRUE\"; \
  else echo \"FALSE\"; fi"
    out$all_changes_committed <- !as.logical(system(check_unstaged, intern = TRUE))
    ############################
    # catalogue all files created so far
    pdfs <- list.files("./1_primary_sources/1_pdf", full.names = TRUE,
      pattern = "*.pdf$", recursive = TRUE)
    yamls_transcription1 <- c(
      list.files("./1_primary_sources/2_transcription1/1_pdf/0_completed",
        pattern = ".yaml", recursive = TRUE, full.names = TRUE),
      list.files("./1_primary_sources/2_transcription1/2_yaml",
        pattern = ".yaml", recursive = TRUE, full.names = TRUE)
    )
    yamls_transcription2 <- c(
      list.files("./1_primary_sources/2_transcription2/1_pdf/0_completed",
        pattern = ".yaml", recursive = TRUE, full.names = TRUE),
      list.files("./1_primary_sources/2_transcription2/2_yaml",
        pattern = ".yaml", recursive = TRUE, full.names = TRUE)
    )
    yamls_merged <- list.files("./1_primary_sources/3_transcription_merged",
      pattern = ".yaml", recursive = TRUE, full.names = TRUE)
    csvs <- list.files("./3_relational_tables", pattern = "*\\.csv$", full.names = TRUE)
    files <- c(pdfs, yamls_transcription1, yamls_transcription2, yamls_merged, csvs)
    if (length(files) > 0) {
      project_files <- file.info(files)
      project_files <- project_files[order(project_files$ctime), ]
      project_files$full_filename <- rownames(project_files)
      project_files$filename <- basename(project_files$full_filename)
      project_files$dirname <- dirname(project_files$full_filename)
      project_files$extension <- tools::file_ext(project_files$filename)
      project_files$n_lines <- NA
      project_files$is_plaintext <- tolower(project_files$extension) %in% c("yaml", "r", "csv", "txt")
      # line counts only make sense for plain-text files
      project_files$n_lines[project_files$is_plaintext] <-
        project_files$full_filename[project_files$is_plaintext] %>%
        map(R.utils::countLines) %>% as.numeric()
      # macOS-only: GetFileInfo reports the true creation date; wrapped in
      # try() so the loop survives on other platforms
      project_files$osx_creation_date <- NA
      for (i in seq_len(nrow(project_files))) {
        call <- paste0("GetFileInfo ", project_files$full_filename[i], " | grep 'created'")
        try(project_files$osx_creation_date[i] <- system(call, intern = TRUE))
      }
      project_files$is_pdf <- tolower(project_files$extension) %in% c("pdf")
      project_files$osx_creation_date <- gsub("created: ", "", project_files$osx_creation_date)
      project_files$osx_creation_date <- strptime(project_files$osx_creation_date, "%m/%d/%Y %H:%M:%S")
      project_files$project_name <- meta$project_name
      project_files$principal_investigator <- meta$principal_investigator
      project_files <- select(project_files, project_name, principal_investigator, filename,
        dirname, extension, n_lines, full_filename, size, ctime, osx_creation_date)
      if (write_reports) write.csv(project_files,
        file.path(outdir, "project_files.csv"), row.names = FALSE)
      if (out$n_commits > 0) {
        out$date_first_commit <- as.character(as.Date(min(commits$timestamp)))
        out$date_last_commit <- as.character(as.Date(max(commits$timestamp)))
      }
    }
    ############################
    # track transcription progress
    pdfs <- list.files("./1_primary_sources/1_pdf", full.names = TRUE,
      pattern = "*.pdf$|*.PDF$", recursive = TRUE)
    # first 7 characters of a pdf filename are its hash; yamls must share them
    pdf_hashes <- pdfs %>% basename %>% substr(1, 7)
    out$n_interviews_scanned <- length(pdfs)
    out$n_interviews_unscanned <- out$n_interviews_handcount - out$n_interviews_scanned
    out$scanning_complete <- FALSE
    if (!is.na(out$n_interviews_unscanned)) out$scanning_complete <- out$n_interviews_handcount == out$n_interviews_scanned
    # inspect completed yamls in 2_transcription1
    out$n_transcription1_transcribed <- 0
    out$transcription1_yamls_named_correctly <- NA
    out$transcription1_yamls_valid <- NA
    out$transcription1_complete <- FALSE
    yamls1 <- c(
      list.files("./1_primary_sources/2_transcription1/1_pdf/0_completed",
        pattern = ".yaml", recursive = TRUE, full.names = TRUE),
      list.files("./1_primary_sources/2_transcription1/2_yaml",
        pattern = ".yaml", recursive = TRUE, full.names = TRUE)
    )
    out$n_transcription1_transcribed <- length(unique(basename(yamls1)))
    if (out$n_transcription1_transcribed > 0) {
      yaml_filename_hashes <- yamls1 %>% basename() %>% substr(1, 7) # change this to be arbitrary length
      chk <- check_transcription_yamls(yamls1)
      out$transcription1_yamls_named_correctly <- all(yaml_filename_hashes %in% pdf_hashes)
      out$transcription1_yamls_valid <- out$transcription1_yamls_named_correctly &
        all(chk$loads) & all(chk$transcriber_ok)
      out$transcription1_complete <- all(pdf_hashes %in% yaml_filename_hashes) &
        out$transcription1_yamls_valid & out$scanning_complete
      # silke wants the transcriber names for each transcription job separated here!
    }
    # can the yaml be transformed to a json file, and if so, DOES THAT JSON LOAD PROPERLY
    # inspect completed yamls in 2_transcription2
    out$n_transcription2_transcribed <- 0
    out$transcription2_yamls_named_correctly <- NA
    out$transcription2_yamls_valid <- NA
    out$transcription2_complete <- FALSE
    yamls2 <- c(
      list.files("./1_primary_sources/2_transcription2/1_pdf/0_completed",
        pattern = ".yaml", recursive = TRUE, full.names = TRUE),
      list.files("./1_primary_sources/2_transcription2/2_yaml",
        pattern = ".yaml", recursive = TRUE, full.names = TRUE)
    )
    # any yaml in transcription2 means a double-transcription workflow
    out$double_transcription_started <- length(yamls2) > 0
    out$n_transcription2_transcribed <- length(unique(basename(yamls2)))
    if (out$double_transcription_started) {
      yaml_filename_hashes <- yamls2 %>% basename() %>% substr(1, 7)
      chk <- check_transcription_yamls(yamls2)
      out$transcription2_yamls_named_correctly <- all(yaml_filename_hashes %in% pdf_hashes)
      out$transcription2_yamls_valid <- out$transcription2_yamls_named_correctly &
        all(chk$loads) & all(chk$transcriber_ok)
      out$transcription2_complete <- all(pdf_hashes %in% yaml_filename_hashes) &
        out$transcription2_yamls_valid & out$scanning_complete
    }
    # inspect completed yamls in 3_transcription_merged
    out$n_transcription_merged <- 0
    out$merged_yamls_named_correctly <- NA
    out$merged_yamls_valid <- NA
    out$transcription_merged_complete <- FALSE
    yamls_merged <- list.files("./1_primary_sources/3_transcription_merged/2_yaml",
      pattern = ".yaml", recursive = TRUE, full.names = TRUE)
    out$n_transcription_merged <- length(unique(basename(yamls_merged)))
    if (out$n_transcription_merged > 0) {
      yaml_filename_hashes <- yamls_merged %>% basename() %>% substr(1, 7)
      chk <- check_transcription_yamls(yamls_merged)
      out$merged_yamls_named_correctly <- all(yaml_filename_hashes %in% pdf_hashes)
      out$merged_yamls_valid <- out$merged_yamls_named_correctly & all(chk$loads) & all(chk$transcriber_ok)
      out$transcription_merged_complete <- all(pdf_hashes %in% yaml_filename_hashes) &
        out$merged_yamls_valid
    }
    # make it switch if EITHER yamls in transcription2 OR transcription_merged
    # is_double_transcription
    if (out$double_transcription_started) {
      out$transcription_complete <- out$transcription_merged_complete
    } else {
      out$transcription_complete <- out$transcription1_complete
    }
    ############################
    # extract interviews.csv data
    out$has_scrape_yamls <- "scrape_yamls.r" %in% dir("1_primary_sources/2_transcription1")
    out$has_relational_tables <- file.exists("./3_relational_tables/interviews.csv")
    if (out$has_relational_tables) {
      ints <- read.csv("./3_relational_tables/interviews.csv", stringsAsFactors = FALSE)
      if (is.na(meta$interview_date_key) || !meta$interview_date_key %in% colnames(ints)) {
        print("interview date variable not found")
      } else {
        # earliest and latest interview dates (string sort, as before)
        dates <- sort(as.character(ints[[meta$interview_date_key]]))
        out$interview_start_date <- dates[1]
        out$interview_end_date <- dates[length(dates)]
      }
      transcribers <- sort(unique(unlist(strsplit(ints$transcriber, ", "))))
      out$transcribers <- paste(transcribers, collapse = ", ")
      out$n_transcribers <- length(transcribers)
      if (length(ints$reviewer) > 0) {
        ints$reviewer <- as.character(ints$reviewer)
        reviewers <- sort(unique(unlist(strsplit(ints$reviewer, ", "))))
        out$reviewers <- paste(reviewers, collapse = ", ")
        out$n_reviewers <- length(reviewers)
      }
    }
  }
  ############################
  # check relational integrity of tables
  # this has to be customized to the project I suppose...
  # out$has_relational_integrity <- FALSE
  ############################
  # report findings
  # some kind of global completion check?
  if (write_reports) write_json(out, file.path(outdir, "project_report.json"), pretty = TRUE)
  # status message; previously placed after return() and never executed
  print(paste(meta$project_name, "inspected!"))
  setwd(current_directory)
  return(out)
}
# Twitter sentiment analysis for tweets mentioning "liberals":
# 1. fetch tweets, 2. clean the text, 3. draw a word cloud,
# 4. score and plot the NRC emotions.
library(rtweet)
library(twitteR)
library(streamR)
library(tidytext)
library(RSentiment)  # fixed case: the package is "RSentiment", not "Rsentiment"
library(syuzhet)
library(SnowballC)
library(tm)
library(RColorBrewer)
library(plyr)
library(dplyr)
library(tmap)
library(wordcloud)
# Credentials come from environment variables -- never hard-code API
# secrets in source control. (The keys previously committed here should
# be revoked and regenerated.)
consumer_key <- Sys.getenv("TWITTER_CONSUMER_KEY")
consumer_secret <- Sys.getenv("TWITTER_CONSUMER_SECRET")
access_token <- Sys.getenv("TWITTER_ACCESS_TOKEN")
access_secret <- Sys.getenv("TWITTER_ACCESS_SECRET")
setup_twitter_oauth(consumer_key, consumer_secret, access_token, access_secret)
# Pull up to 1500 English tweets and keep a raw copy on disk
tweetsretrived <- searchTwitter("liberals", lang = "en", n = 1500)
tweetsdf <- twListToDF(tweetsretrived)
write.csv(tweetsdf, file = "Tweets.csv")
tweets_text <- vapply(tweetsretrived, function(x) x$getText(), character(1))
# View(tweets_text)
# cleaning text: retweet markers/handles, URLs, mentions, punctuation,
# then anything that is not alphanumeric
clean_text1 <- gsub("(RT|via)((?:\\b\\w*@\\w+)+)", "", tweets_text)  # added missing "(": the original pattern was an invalid regex
clean_text2 <- gsub("http[^[:blank:]]+", "", clean_text1)
clean_text3 <- gsub("@\\w+", "", clean_text2)
clean_text4 <- gsub("[[:punct:]]", " ", clean_text3)
clean_text5 <- gsub("[^[:alnum:]]", " ", clean_text4)
write.csv(clean_text5, "Tweets1.csv")
# creating word corpus for the word cloud
clean_text7 <- tm::Corpus(tm::VectorSource(clean_text5))
clean_text7 <- tm::tm_map(clean_text7, tm::removePunctuation)
clean_text7 <- tm::tm_map(clean_text7, tm::content_transformer(tolower))
clean_text7 <- tm::tm_map(clean_text7, tm::removeWords, tm::stopwords("english"))
clean_text7 <- tm::tm_map(clean_text7, tm::stripWhitespace)
pal <- RColorBrewer::brewer.pal(8, "Dark2")
# wordcloud() has no width/height arguments (they were silently ignored);
# the spelled-out argument name is "colors"
wordcloud::wordcloud(clean_text7, min.freq = 7, max.words = Inf, random.order = FALSE, colors = pal)
# wordcloud(clean_text7, random.order=F,max.words=80, col=rainbow(50), scale=c(4,0.5))
# getting the eight NRC emotions (+ pos/neg) from the cleaned tweets
mysentiment <- syuzhet::get_nrc_sentiment(clean_text5)
sentimentscores <- data.frame(colSums(mysentiment[,]))
names(sentimentscores) <- "score"
sentimentscores <- cbind("sentiment" = rownames(sentimentscores), sentimentscores)
row.names(sentimentscores) <- NULL
# plotting the total score per emotion as a bar graph
ggplot2::ggplot(data = sentimentscores, ggplot2::aes(x = sentiment, y = score))+
  ggplot2::geom_bar(ggplot2::aes(fill = sentiment),stat = "identity")+
  ggplot2::theme(legend.position = "none")+
  ggplot2::xlab("sentiment") + ggplot2::ylab("score") + ggplot2::ggtitle("Total sentiment score based on tweets")
# trying to segregate positive and negative tweets and plot them;
# left disabled because it didn't work here
# sent.value <- syuzhet::get_sentiment(clean_text5)
#
# value <- RSentiment::calculate_sentiment(clean_text5)
#
# positive <- clean_text5[sent.value > 0]
# negative <- clean_text5[sent.value < 0]
# neutral <- clean_text5[sent.value == 0]
#
| /SentimentAnalysis.R | no_license | battulabharath/Sentiment-Analysis | R | false | false | 2,789 | r | library(rtweet)
# Twitter sentiment analysis for tweets mentioning "liberals":
# 1. fetch tweets, 2. clean the text, 3. draw a word cloud,
# 4. score and plot the NRC emotions.
library(rtweet)
library(twitteR)
library(streamR)
library(tidytext)
library(RSentiment)  # fixed case: the package is "RSentiment", not "Rsentiment"
library(syuzhet)
library(SnowballC)
library(tm)
library(RColorBrewer)
library(plyr)
library(dplyr)
library(tmap)
library(wordcloud)
# Credentials come from environment variables -- never hard-code API
# secrets in source control. (The keys previously committed here should
# be revoked and regenerated.)
consumer_key <- Sys.getenv("TWITTER_CONSUMER_KEY")
consumer_secret <- Sys.getenv("TWITTER_CONSUMER_SECRET")
access_token <- Sys.getenv("TWITTER_ACCESS_TOKEN")
access_secret <- Sys.getenv("TWITTER_ACCESS_SECRET")
setup_twitter_oauth(consumer_key, consumer_secret, access_token, access_secret)
# Pull up to 1500 English tweets and keep a raw copy on disk
tweetsretrived <- searchTwitter("liberals", lang = "en", n = 1500)
tweetsdf <- twListToDF(tweetsretrived)
write.csv(tweetsdf, file = "Tweets.csv")
tweets_text <- vapply(tweetsretrived, function(x) x$getText(), character(1))
# View(tweets_text)
# cleaning text: retweet markers/handles, URLs, mentions, punctuation,
# then anything that is not alphanumeric
clean_text1 <- gsub("(RT|via)((?:\\b\\w*@\\w+)+)", "", tweets_text)  # added missing "(": the original pattern was an invalid regex
clean_text2 <- gsub("http[^[:blank:]]+", "", clean_text1)
clean_text3 <- gsub("@\\w+", "", clean_text2)
clean_text4 <- gsub("[[:punct:]]", " ", clean_text3)
clean_text5 <- gsub("[^[:alnum:]]", " ", clean_text4)
write.csv(clean_text5, "Tweets1.csv")
# creating word corpus for the word cloud
clean_text7 <- tm::Corpus(tm::VectorSource(clean_text5))
clean_text7 <- tm::tm_map(clean_text7, tm::removePunctuation)
clean_text7 <- tm::tm_map(clean_text7, tm::content_transformer(tolower))
clean_text7 <- tm::tm_map(clean_text7, tm::removeWords, tm::stopwords("english"))
clean_text7 <- tm::tm_map(clean_text7, tm::stripWhitespace)
pal <- RColorBrewer::brewer.pal(8, "Dark2")
# wordcloud() has no width/height arguments (they were silently ignored);
# the spelled-out argument name is "colors"
wordcloud::wordcloud(clean_text7, min.freq = 7, max.words = Inf, random.order = FALSE, colors = pal)
# wordcloud(clean_text7, random.order=F,max.words=80, col=rainbow(50), scale=c(4,0.5))
# getting the eight NRC emotions (+ pos/neg) from the cleaned tweets
mysentiment <- syuzhet::get_nrc_sentiment(clean_text5)
sentimentscores <- data.frame(colSums(mysentiment[,]))
names(sentimentscores) <- "score"
sentimentscores <- cbind("sentiment" = rownames(sentimentscores), sentimentscores)
row.names(sentimentscores) <- NULL
# plotting the total score per emotion as a bar graph
ggplot2::ggplot(data = sentimentscores, ggplot2::aes(x = sentiment, y = score))+
  ggplot2::geom_bar(ggplot2::aes(fill = sentiment),stat = "identity")+
  ggplot2::theme(legend.position = "none")+
  ggplot2::xlab("sentiment") + ggplot2::ylab("score") + ggplot2::ggtitle("Total sentiment score based on tweets")
# trying to segregate positive and negative tweets and plot them;
# left disabled because it didn't work here
# sent.value <- syuzhet::get_sentiment(clean_text5)
#
# value <- RSentiment::calculate_sentiment(clean_text5)
#
# positive <- clean_text5[sent.value > 0]
# negative <- clean_text5[sent.value < 0]
# neutral <- clean_text5[sent.value == 0]
#
|
#' Find Modules with Network Adjacency Matrix Using Walktrap Clustering
#'
#' This function wraps a function iteratively to get modules from network adjacency
#' matrix using igraph's walktrap clusting function. The matrix is symmetrised,
#' modules are computed \code{nperm} times on randomly permuted gene orderings,
#' and the partition with the best combined rank of modularity (Q) and
#' modularity density (Qds) is returned.
#'
#' @inheritParams findModules.CFinder
#'
#' @return GeneModules = n x 3 dimensional data frame with column names as Gene.ID,
#' moduleNumber, and moduleLabel.
#'
#' @importFrom magrittr %>%
#' @export
findModules.walktrap <- function(adj, nperm = 10, path, min.module.size = 30){
  # Error functions
  # class(adj) is c("matrix", "array") in R >= 4.0, so `class(adj) != "matrix"`
  # was a length-2 condition; inherits() is the robust test
  if(!inherits(adj, "matrix"))
    stop('Adjacency matrix should be of class matrix')
  if(dim(adj)[1] != dim(adj)[2])
    stop('Adjacency matrix should be symmetric')
  if(!all(adj[lower.tri(adj)] == 0))
    stop('Adjacency matrix should be upper triangular')
  # Make adjacency matrix symmetric and remove self-loops
  adj = adj + t(adj)
  # bug fix: `adj[diag(adj)] = 0` indexed the matrix by the diagonal *values*
  # instead of zeroing the diagonal itself
  diag(adj) = 0
  # Compute modules by permuting the labels nperm times
  # (`path` is accepted for interface compatibility but unused here)
  all.modules = plyr::llply(seq_len(nperm), .fun= function(i, adj, path, min.module.size){
    # Permute gene ordering
    ind = sample(seq_len(dim(adj)[1]), dim(adj)[1], replace = FALSE)
    adj1 = adj[ind,ind]
    # Find modules
    mod = findModules.walktrap.once(adj1, min.module.size)
    # Compute local and global modularity on the upper-triangular form
    adj1[lower.tri(adj1)] = 0
    Q = compute.Modularity(adj1, mod)
    Qds = compute.ModularityDensity(adj1, mod)
    return(list(mod = mod, Q = Q, Qds = Qds))
  }, adj, path, min.module.size)
  # Find the best module based on the combined rank of Q and Qds
  tmp = plyr::ldply(all.modules, function(x){
    data.frame(Q = x$Q, Qds = x$Qds)
  }) %>%
    dplyr::mutate(r = base::rank(Q)+base::rank(Qds))
  ind = which.max(tmp$r)
  mod = all.modules[[ind]]$mod
  return(mod)
}
| /R/findModules.walktrap.R | permissive | Sage-Bionetworks/metanetwork | R | false | false | 1,723 | r | #' Find Modules with Network Adjacency Matrix Using Walktrap Clustering
#' Find Modules with Network Adjacency Matrix Using Walktrap Clustering
#'
#' This function wraps a function iteratively to get modules from network adjacency
#' matrix using igraph's walktrap clusting function. The matrix is symmetrised,
#' modules are computed \code{nperm} times on randomly permuted gene orderings,
#' and the partition with the best combined rank of modularity (Q) and
#' modularity density (Qds) is returned.
#'
#' @inheritParams findModules.CFinder
#'
#' @return GeneModules = n x 3 dimensional data frame with column names as Gene.ID,
#' moduleNumber, and moduleLabel.
#'
#' @importFrom magrittr %>%
#' @export
findModules.walktrap <- function(adj, nperm = 10, path, min.module.size = 30){
  # Error functions
  # class(adj) is c("matrix", "array") in R >= 4.0, so `class(adj) != "matrix"`
  # was a length-2 condition; inherits() is the robust test
  if(!inherits(adj, "matrix"))
    stop('Adjacency matrix should be of class matrix')
  if(dim(adj)[1] != dim(adj)[2])
    stop('Adjacency matrix should be symmetric')
  if(!all(adj[lower.tri(adj)] == 0))
    stop('Adjacency matrix should be upper triangular')
  # Make adjacency matrix symmetric and remove self-loops
  adj = adj + t(adj)
  # bug fix: `adj[diag(adj)] = 0` indexed the matrix by the diagonal *values*
  # instead of zeroing the diagonal itself
  diag(adj) = 0
  # Compute modules by permuting the labels nperm times
  # (`path` is accepted for interface compatibility but unused here)
  all.modules = plyr::llply(seq_len(nperm), .fun= function(i, adj, path, min.module.size){
    # Permute gene ordering
    ind = sample(seq_len(dim(adj)[1]), dim(adj)[1], replace = FALSE)
    adj1 = adj[ind,ind]
    # Find modules
    mod = findModules.walktrap.once(adj1, min.module.size)
    # Compute local and global modularity on the upper-triangular form
    adj1[lower.tri(adj1)] = 0
    Q = compute.Modularity(adj1, mod)
    Qds = compute.ModularityDensity(adj1, mod)
    return(list(mod = mod, Q = Q, Qds = Qds))
  }, adj, path, min.module.size)
  # Find the best module based on the combined rank of Q and Qds
  tmp = plyr::ldply(all.modules, function(x){
    data.frame(Q = x$Q, Qds = x$Qds)
  }) %>%
    dplyr::mutate(r = base::rank(Q)+base::rank(Qds))
  ind = which.max(tmp$r)
  mod = all.modules[[ind]]$mod
  return(mod)
}
library(cir)
### Name: deltaInverse
### Title: Calculate inverse (dose-finding) intervals, using local
###   inversion and the Delta method
### Aliases: deltaInverse
### ** Examples

# Run #664 from a simulated up-and-down ensemble; x is auto-generated
# as dose levels 1:5 and the official target is the 30th percentile.
dat <- doseResponse(y = c(1/7, 1/8, 1/2, 1/4, 4/17), wt = c(7, 24, 20, 12, 17))
fwd_est <- quickIsotone(dat)     # forward (isotonic) estimate with 90% CIs
local_ci <- deltaInverse(dat)    # local (delta-method) inverse intervals

### Show the data and the estimates
par(mar = c(3, 3, 4, 1), mgp = c(2, .5, 0), tcl = -0.25)
# plot.doseResponse() draws the observations
plot(dat, ylim = c(0.05, 0.55), refsize = 4, las = 1, xlim = c(-1, 6),
     main = "Inverse-Estimation CIs")

# True response curve; the true target is where it crosses the y = 0.3 line
dose_grid <- seq(0, 7, 0.1)
lines(dose_grid, pweibull(dose_grid, shape = 1.1615, scale = 8.4839), col = 4)
abline(h = 0.3, col = 2, lty = 3)  # the experiment's official target

# Forward CIs; the "global" inverse interval just draws horizontal lines
# between them. To get "global" values at specific targets, pass
# delta=FALSE to quickInverse().
lines(fwd_est$lower90conf, lty = 2, col = 3)
lines(fwd_est$upper90conf, lty = 2, col = 3)
# At y = 0.3 both global bounds are infinite (no intersection with the
# horizontal line) without extrapolating beyond the observed range.

# The default "local" inverse interval is finite over the range of
# estimated y values -- finite (albeit very wide) even at y = 0.3.
lines(local_ci[, 1], fwd_est$y, lty = 2)
lines(local_ci[, 2], fwd_est$y, lty = 2)

legend('topleft', pch = c(NA, 'X', NA, NA), lty = c(1, NA, 2, 2), col = c(4, 1, 1, 3),
       legend = c('True Curve', 'Observations', 'Local Interval (default)', 'Forward/Global Interval'), bty = 'n')
| /data/genthat_extracted_code/cir/examples/deltaInverse.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,749 | r | library(cir)
### Name: deltaInverse
### Title: Calculate inverse (dose-finding) intervals, using local
###   inversion and the Delta method
### Aliases: deltaInverse
### ** Examples

# Run #664 from a simulated up-and-down ensemble; x is auto-generated
# as dose levels 1:5 and the official target is the 30th percentile.
dat <- doseResponse(y = c(1/7, 1/8, 1/2, 1/4, 4/17), wt = c(7, 24, 20, 12, 17))
fwd_est <- quickIsotone(dat)     # forward (isotonic) estimate with 90% CIs
local_ci <- deltaInverse(dat)    # local (delta-method) inverse intervals

### Show the data and the estimates
par(mar = c(3, 3, 4, 1), mgp = c(2, .5, 0), tcl = -0.25)
# plot.doseResponse() draws the observations
plot(dat, ylim = c(0.05, 0.55), refsize = 4, las = 1, xlim = c(-1, 6),
     main = "Inverse-Estimation CIs")

# True response curve; the true target is where it crosses the y = 0.3 line
dose_grid <- seq(0, 7, 0.1)
lines(dose_grid, pweibull(dose_grid, shape = 1.1615, scale = 8.4839), col = 4)
abline(h = 0.3, col = 2, lty = 3)  # the experiment's official target

# Forward CIs; the "global" inverse interval just draws horizontal lines
# between them. To get "global" values at specific targets, pass
# delta=FALSE to quickInverse().
lines(fwd_est$lower90conf, lty = 2, col = 3)
lines(fwd_est$upper90conf, lty = 2, col = 3)
# At y = 0.3 both global bounds are infinite (no intersection with the
# horizontal line) without extrapolating beyond the observed range.

# The default "local" inverse interval is finite over the range of
# estimated y values -- finite (albeit very wide) even at y = 0.3.
lines(local_ci[, 1], fwd_est$y, lty = 2)
lines(local_ci[, 2], fwd_est$y, lty = 2)

legend('topleft', pch = c(NA, 'X', NA, NA), lty = c(1, NA, 2, 2), col = c(4, 1, 1, 3),
       legend = c('True Curve', 'Observations', 'Local Interval (default)', 'Forward/Global Interval'), bty = 'n')
|
fTotalLine <- function(df, y_lab) {
  # Yearly totals drawn as a line with points and a filled area per
  # series (`name`), faceted by `label` with free y scales.
  # Expects columns: year, value, name, label. The fill legend is
  # suppressed; the x axis is labelled "Jahr".
  base_layers <- ggplot(df, aes(x = year, y = value)) +
    geom_line() +
    geom_area(aes(fill = name), show.legend = FALSE) +
    geom_point()
  base_layers +
    theme_classic() +
    # colour-blind-safe fills for the two series
    scale_fill_manual(values = c("#009E73", "#E69F00")) +
    scale_y_continuous(
      breaks = scales::breaks_pretty(),
      # NOTE(review): label_number_si() is deprecated in newer scales
      # releases -- confirm the pinned scales version before upgrading
      labels = scales::label_number_si()
    ) +
    scale_x_continuous(breaks = scales::breaks_pretty()) +
    labs(x = "Jahr", y = y_lab) +
    facet_wrap(~label, scales = "free_y") +
    # restore horizontal grid lines on top of theme_classic()
    theme(panel.grid.major.y = element_line())
}
fTotalLine <- function(df, y_lab) {
  # Yearly totals drawn as a line with points and a filled area per
  # series (`name`), faceted by `label` with free y scales.
  # Expects columns: year, value, name, label. The fill legend is
  # suppressed; the x axis is labelled "Jahr".
  base_layers <- ggplot(df, aes(x = year, y = value)) +
    geom_line() +
    geom_area(aes(fill = name), show.legend = FALSE) +
    geom_point()
  base_layers +
    theme_classic() +
    # colour-blind-safe fills for the two series
    scale_fill_manual(values = c("#009E73", "#E69F00")) +
    scale_y_continuous(
      breaks = scales::breaks_pretty(),
      # NOTE(review): label_number_si() is deprecated in newer scales
      # releases -- confirm the pinned scales version before upgrading
      labels = scales::label_number_si()
    ) +
    scale_x_continuous(breaks = scales::breaks_pretty()) +
    labs(x = "Jahr", y = y_lab) +
    facet_wrap(~label, scales = "free_y") +
    # restore horizontal grid lines on top of theme_classic()
    theme(panel.grid.major.y = element_line())
}
|
#-------------------------------------------owner: Liran Ben-Zion-------------------------------------------
#-------------------------------------------email- bzliran@gmail.com----------------------------------------
# Predicts house sale prices for the King County (Seattle, WA) dataset.
# This section: environment reset, package loading, data download, and
# basic feature engineering.
# NOTE(review): rm(list=ls()) wipes the caller's entire workspace -- an
# anti-pattern for shared scripts; consider removing.
rm(list=ls())
library(dplyr)
library(data.table)
library(mice)     # md.pattern() missing-value summary below
library(ggplot2)
library(corrplot)
library(RCurl)    # getURL() to download the CSV
library(Hmisc)    # rcorr() correlations with p-values
#--------------------------------load data-------------------------------------------------------------
x <- getURL("https://raw.githubusercontent.com/Liranbz/SB/master/kc_house_data.csv")
data <- read.csv(text = x,header=TRUE, sep=",")
#data <- read.csv(file="C:\\Users\\benzionl\\Desktop\\SB\\kc_house_data.csv", header=TRUE, sep=",")
str(data)
names(data)
summary(data)
#-----------------------------Data preparation--------------------------------------------
data<-data[,-1] # remove the id column (first column)
data[,c(9:11,16)]<-lapply(data[,c(9:11,16)],as.factor) # convert view, condition, grade, zipcode to factors
data$date <- as.Date(as.Date(as.character(data$date),"%Y%m%d")) # parse sale date from its yyyymmdd string form
data$age <- as.numeric(format(data$date, "%Y")) - data$yr_built # house age at sale = sale year - build year
# is_basement: factor flag, 1 if the house has any basement area, else 0
data$is_basement<-0
data$is_basement[data$sqft_basement > 0]<-1
data$is_basement=factor(data$is_basement)
# rate: selling price per square foot of living area
# NOTE(review): `rate` is derived from `price`; keeping it in `data` leaks
# the target into any model fitted with `price ~ .` later on.
data$rate <- data$price/data$sqft_living
# Summarise missing-value patterns (mice::md.pattern), transposed for readability
missing_values_summary_table<-t(md.pattern(data, plot = TRUE))
#----------------------------corrleations-----------------------------------------
library(GGally)  # ggpairs() used in the plots section below
# Pairwise Pearson correlations (with p-values) over numeric columns only.
numeric_data<-select_if(data, is.numeric)
cor <- rcorr(as.matrix(numeric_data))
M <- cor$r       # correlation coefficients
p_mat <- cor$P   # p-values for each pair
# Diverging red-white-blue palette for the correlation heat map.
col <- colorRampPalette(c("#BB4444", "#EE9988", "#FFFFFF", "#77AADD", "#4477AA"))
corrplot(M, method = "color", col = col(200),
         type = "upper", order = "hclust",
         addCoef.col = "black", # Add coefficient of correlation
         tl.col = "darkblue", tl.srt = 45, #Text label color and rotation
         # Combine with significance level
         p.mat = p_mat, sig.level = 0.01,
         # hide correlation coefficient on the principal diagonal
         diag = FALSE
)
#----------------------------------------Plots---------------------------
# Exploratory plots: price distribution, pairwise relationships, and
# price vs. the various square-footage / bedroom variables.
# histogram of prices
hist(data$price)
## Checking Relationship between price, bedrooms, bathrooms, sqft_living and sqft lot
plot1<-ggpairs(data=data, columns=2:6,
               mapping = aes(color = "dark red"),
               axisLabels="show")
plot1
# Sqft vs Price- 6 figures arranged in 3 rows and 2 columns
# BUG FIX: removed the unrelated `attach(mtcars)` (mtcars is never used and
# attaching it only pollutes the search path) and corrected the panel grid
# from 4x2 to 3x2 to match the six scatterplots drawn below.
# NOTE(review): the y axis shows log(price) but is labelled "Price".
par(mfrow=c(3,2))
plot(data$sqft_living,log(data$price), main="Scatterplot of sqft_living vs. price", xlab = "sqft_living",ylab = "Price",col="blue")
plot(data$sqft_lot,log(data$price), main="Scatterplot of sqft_lot vs. price", xlab = "sqft_lot",ylab = "Price",col="red")
plot(data$sqft_living15,log(data$price),main="Scatterplot of sqft_living15 vs. price", xlab = "sqft_living15",ylab = "Price",col="green")
plot(data$sqft_lot15,log(data$price),main="Scatterplot of sqft_lot15 vs price", xlab = "sqft_lot15",ylab = "Price",col="purple")
plot(data$sqft_above,log(data$price), main="Scatterplot of sqft_above vs price", xlab = "sqft_above",ylab = "Price",col="dark red")
plot(data$sqft_basement,log(data$price), main="Scatterplot of sqft_basement vs price", xlab = "sqft_basement",ylab = "Price",col="dark blue")
#Price vs Bedrooms
# NOTE(review): ggplot() silently ignores main/xlab/ylab/col arguments passed
# this way -- use labs()/aes() if those labels are actually wanted.
p1<-ggplot(data,aes(bedrooms,price), main="Scatterplot of Bedrooms vs. price",
           xlab = "bedrooms",ylab = "Price",col="blue")+ xlim(1,10)
p1+geom_bar(stat = "identity")
plot(data$price~factor(data$bedrooms), main="plot of Bedrooms vs. price" )
#--------------------------------------Create sets for train and test--------------------------------------
#sample data
# Return a reproducible random permutation of the row indices of `df`.
# The seed is fixed so that repeated calls always yield the same shuffle.
row_sampler <- function(df) {
  set.seed(789)  # fixed seed -> deterministic, reproducible split downstream
  n <- nrow(df)
  sample(x = 1:n, size = n, replace = FALSE)
}
# Randomly partition `df` into Train/Test subsets.
# `train_fraction` is the proportion of rows assigned to Train; the row
# order is randomised via row_sampler() (seeded, so the split is stable).
# Returns list(Train = <data.frame>, Test = <data.frame>).
Train_test_division <- function(train_fraction, df) {
  shuffled <- row_sampler(df)
  n_train <- round(nrow(df) * train_fraction, digits = 0)
  train_rows <- shuffled[1:n_train]
  test_rows <- shuffled[(n_train + 1):length(shuffled)]
  list(Train = df[train_rows, ], Test = df[test_rows, ])
}
# 75/25 train/test split.
Train_test_Data <- Train_test_division(0.75, data)
Train <- Train_test_Data$Train
Test <- Train_test_Data$Test
#-----------------------------------------Linear Regression models------------------------------------------------------
# Local RMSE helper: caret (which exports RMSE()) is only loaded later in
# the script, so the original RMSE() calls here would fail with
# "could not find function".
rmse <- function(actual, predicted) sqrt(mean((actual - predicted)^2))
# BUG FIX: the original formula `Train$price ~ .` keeps the `price` column
# inside the `.` predictor expansion (response and predictor identical -- a
# perfect-fit leak); `price ~ .` correctly excludes the response.
# NOTE(review): `rate` (= price/sqft_living) is still among the predictors
# and leaks the target -- consider dropping it before fitting.
full_model <- lm(price ~ ., data = Train)
summary(full_model)
Linear_Predictions <- predict(full_model)
Test$predicted_price <- predict(full_model, Test)
rmse_full_model <- rmse(Test$price, Test$predicted_price)
#model 3
model3 <- lm(price~ sqft_living + bedrooms + bathrooms + grade + sqft_above + zipcode,data = data)
summary(model3)
#model 4-log to price
model4 <- lm(log(price)~ sqft_living + bedrooms + bathrooms + grade + sqft_above + zipcode,data = data)
summary(model4)
#model 5-log to price+vars
model5 <- lm(log(price)~ log(sqft_living) + bedrooms + bathrooms + grade + log(sqft_above) + zipcode+age+lat+long,data = data)
summary(model5)
Linear_Predictions_model5 <- predict(model5, Test)
# BUG FIX: model5 predicts log(price); back-transform with exp() before
# comparing against raw prices, otherwise the RMSE mixes scales.
rmse_model5 <- rmse(Test$price, exp(Linear_Predictions_model5))
#---------------------------------------Machine Learning models-------------------------
#----------------------try all ML regression models from caret library-------------
# Sweep: fit every listed caret regression model on a small 300-row sample
# and log timing + in-sample MSE for each to a CSV-style log file.
# NOTE(review): this assigns a plain variable, it does NOT set an R option --
# presumably options(install.packages.compile.from.source = "always") was meant.
install.packages.compile.from.source = "always"
# NOTE(review): installing packages from inside an analysis script is an
# anti-pattern; move installation outside the script.
install.packages(c("feather","tidyr"), type = "both")
library(caret)
library(foreach)
library(doParallel)
gc()
# NOTE(review): hard-coded, machine-specific working directory.
setwd("C:\\Users\\benzionl\\Desktop\\SB")
#--------train control-----
# 2-fold cross-validation repeated 5 times; parallel backends allowed.
trCtrl <- trainControl(
  method = "repeatedcv"
  , number = 2
  , repeats = 5
  , allowParallel = TRUE
)
# sample with 300 observations (keeps the all-models sweep tractable)
ttt <- sample_n(data,300)
str(ttt)
# all caret models
names(getModelInfo())
#regression ML models only:
caret_models <-c("xgbDART","xgbLinear","ppr","gamLoess","cubist","glm","lm","foba","monmlp","glmStepAIC","lmStepAIC","lars2","rqnc","lars","extraTrees","glmnet","qrf","penalized","bagEarthGCV","bagEarth","xgbTree","Rborist","glmboost","M5Rules","M5","ranger","parRF","nnls","rf","RRFglobal","earth","gcvEarth","msaenet","RRF","relaxo","bstTree","leapBackward","blackboost","gbm","nodeHarvest","treebag","kknn","evtree","rpart1SE","rpart2","icr","rpart","partDSA","leapForward","leapSeq","kernelpls","pls","simpls","widekernelpls","BstLm","pcr","knn","svmRadial","svmRadialCost","xyf","svmRadialSigma","null","neuralnet","mlpWeightDecayML","mlp","rfRules","mlpWeightDecay","gaussprRadial","dnn","mlpML","rqlasso","rvmRadial","avNNet","nnet","pcaNNet","superpc","rbfDDA","svmLinear3","svmPoly","randomGLM","svmLinear2","svmLinear")
# create a log file: one comma-separated header row, then one row per model
fname <- 'ttt.log'
cat(paste0(paste('counter','method','user','system','elapsed','mse',sep=','),'\n'), file=fname)
counter <- 0
for(current_method in caret_models) {
  counter <- counter+1
  print(paste('Trying model #',counter,'/',length(caret_models),current_method))
  tryCatch({
    registerDoSEQ() # to disable "invalid connection" error
    # Fit the current model while recording user/system/elapsed time
    profiler <- system.time(model.1 <- train(form = price~., data=ttt, trControl = trCtrl, method=current_method))
    # In-sample (training-set) MSE -- NOTE(review): optimistic estimate
    mse <- mean((predict(model.1)-ttt$price)^2)
    # write status of current method to log file
    status <- paste(counter,current_method,profiler[[1]],profiler[[2]],profiler[[3]],mse,sep=',')
    cat(paste0(status,'\n'), file=fname, append=T)
  }, error = function(err_cond) {
    # A failed model is logged but does not abort the sweep
    print(paste('An error with model',current_method))
    cat(paste('Error with model #',counter,'/',length(caret_models),current_method,'Error',err_cond,'\n'), file=fname, append=T)
  })
}
#----------------------------------------model_xgbLinear with some features-------------------------------------------
# Boosted linear model (xgbLinear) on a 7-feature subset.
model_xgbLinear1<-train(form = price~sqft_living + bedrooms + bathrooms + grade + sqft_above +lat+long, data=Train, trControl = trCtrl,method='xgbLinear')
summary(model_xgbLinear1) # summarizing the model
print(model_xgbLinear1)
plot(model_xgbLinear1)
varImp(object=model_xgbLinear1)
plot(varImp(object=model_xgbLinear1),main="model_xgbLinear - Variable Importance, 7 features")
#Predictions
# BUG FIX: `Test[,-Test$price]` used the price *values* as negative column
# indices, which is invalid. predict() only reads the predictor columns it
# needs, so pass Test directly as newdata.
predictions1<-predict.train(object=model_xgbLinear1, newdata=Test, type="raw")
RMSE_model_xgbLinear1=RMSE(predictions1,Test$price)
library(PRROC)
# NOTE(review): roc.curve() is a binary-classification metric; applying it
# to continuous price predictions is questionable -- confirm intent.
roc<-roc.curve(predictions1,Test$price,curve = TRUE)
roc
#----------------------------------------model_xgbLinear- all features-------------------------------------------
# Boosted linear model (xgbLinear) on all columns.
# NOTE(review): `price ~ .` includes `rate` (= price/sqft_living), which
# leaks the target -- consider dropping it before fitting.
model_xgbLinear<-train(form = price~., data=Train, trControl = trCtrl,method='xgbLinear')
print(model_xgbLinear) # summarizing the model
plot(model_xgbLinear)
varImp(object=model_xgbLinear)
#Plotting Varianle importance for model_xgbLinear
#plot(varImp(object=model_xgbLinear),main="model_xgbLinear - Variable Importance")
#Predictions
# BUG FIX: `Test[,-Test$price]` used the price *values* as negative column
# indices, which is invalid; pass Test directly as newdata.
predictions<-predict.train(object=model_xgbLinear, newdata=Test, type="raw")
# BUG FIX: the original stored this under RMSE_model_xgbLinear1, silently
# clobbering the 7-feature model's score; use a distinct name.
RMSE_model_xgbLinear=RMSE(predictions,Test$price)
#----------------------------------------nnet with features--------------------------------------------
# Single-hidden-layer neural network (nnet) on a 6-feature subset.
model_nnet<-train(form = price~sqft_living + bedrooms + bathrooms + grade + sqft_above + zipcode, data=Train, trControl = trCtrl,method='nnet')
summary(model_nnet) # summarizing the model
print(model_nnet)
plot(model_nnet)
varImp(object=model_nnet)
plot(varImp(object=model_nnet),main="model_nnet - Variable Importance, 6 features")
#Predictions
# BUG FIX: `Test[,-Test$price]` used the price *values* as negative column
# indices, which is invalid; pass Test directly as newdata.
predictions_nnet<-predict.train(object=model_nnet, newdata=Test, type="raw")
RMSE_model_nnet=RMSE(predictions_nnet,Test$price)
#----------------------------------------RF- with features-------------------------------------------
# Random forest on a 7-feature subset.
model_rf<-train(form = price~sqft_living + bedrooms + grade + sqft_above + zipcode+lat+long, data=Train, trControl = trCtrl,method='rf')
summary(model_rf)
print(model_rf)
plot(model_rf)
varImp(object=model_rf)
# FIX: the formula above has 7 predictors, not 6 as the old title claimed.
plot(varImp(object=model_rf),main="model_rf - Variable Importance, 7 features")
#Predictions
# BUG FIX: `Test[,-Test$price]` used the price *values* as negative column
# indices, which is invalid; pass Test directly as newdata.
predictions_rf<-predict.train(object=model_rf, newdata=Test, type="raw")
# BUG FIX: the original stored this as RMSE_model_xgbLinear1 (copy-paste),
# overwriting the xgbLinear score; use the rf-specific name.
RMSE_model_rf=RMSE(predictions_rf,Test$price)
#---------------------------------jointEntropy--------------------------------
# Shannon entropy (base 2) of a binary 0/1 label vector.
# Returns 0 for an empty vector; max(1e-10, p) guards log2(0) when one
# class is absent.
entropy <- function(y) {
  if (length(y) == 0) {
    return(0)
  }
  p_one <- mean(y)       # proportion of 1s
  p_zero <- 1 - p_one    # proportion of 0s
  -(p_zero * log2(max(1e-10, p_zero)) + p_one * log2(max(1e-10, p_one)))
}
# Conditional (joint) entropy of binary labels `y` given two binary
# features x1 and x2: the entropy of y within each of the four
# (x1, x2) cells, weighted by the cell's share of the observations.
# Relies on entropy() defined above.
jointEntropy=function(x1,x2,y)
{
  total_entropy=0
  for (i in 0:1 )
  {
    for (j in 0:1)
    {
      # Rows falling in the (x1 == i, x2 == j) cell.
      # FIX: replaces the which()/%in% intersection dance (and a stray
      # no-op `subset_y` statement) with a single vectorised condition;
      # the selected index set is identical.
      idx=which(x1==i & x2==j)
      subset_y=y[idx]
      w = (length(idx)/length(y))   # cell weight
      e = entropy(subset_y)         # within-cell label entropy
      total_entropy=total_entropy+ e*w
    }
  }
  return (total_entropy)
}
#get results from function
# Manual checks of jointEntropy() on 16-element binary vectors with a
# half-and-half label split.
#test_1: both features constant -> joint entropy equals entropy(labels) = 1
a=c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
b=c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
labels=c(1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0)
jointEntropy(a,b,labels)
#test_2: b matches the labels exactly -> joint entropy 0
a=c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
b=c(1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0)
labels=c(1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0)
jointEntropy(a,b,labels)
#test_3
a=c(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)
b=c(0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0)
labels=c(1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0)
jointEntropy(a,b,labels)
#test_4
a=c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
b=c(1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0)
labels=c(1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0)
jointEntropy(a,b,labels)
#test_5
a=c(0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0)
b=c(1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0)
labels=c(1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0)
jointEntropy(a,b,labels)
#test_6: expected value approximately 0.344
a=c(1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0)
b=c(1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0)
labels=c(1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0)
entropy1=jointEntropy(a,b,labels)
# BUG FIX: the original wrote as.logical(abs((entropy1 - 0.344) < 0.01)),
# i.e. abs() applied to an already-logical comparison -- the abs() was
# misplaced. The intended check is |entropy1 - 0.344| < 0.01.
abs(entropy1 - 0.344) < 0.01
#------------------The END---------------------------------- | /Liran Ben Zion- Predicting House Sales in King County, USA.R | no_license | Liranbz/House_Sales_Prediction | R | false | false | 12,421 | r | #-------------------------------------------owner: Liran Ben-Zion-------------------------------------------
#-------------------------------------------email- bzliran@gmail.com----------------------------------------
rm(list=ls())
library(dplyr)
library(data.table)
library(mice)
library(ggplot2)
library(corrplot)
library(RCurl)
library(Hmisc)
#--------------------------------load data-------------------------------------------------------------
x <- getURL("https://raw.githubusercontent.com/Liranbz/SB/master/kc_house_data.csv")
data <- read.csv(text = x,header=TRUE, sep=",")
#data <- read.csv(file="C:\\Users\\benzionl\\Desktop\\SB\\kc_house_data.csv", header=TRUE, sep=",")
str(data)
names(data)
summary(data)
#-----------------------------Data preparation--------------------------------------------
data<-data[,-1] # ramove id col
data[,c(9:11,16)]<-lapply(data[,c(9:11,16)],as.factor) #change variables (view, condition, grade,zipcode) to factors
data$date <- as.Date(as.Date(as.character(data$date),"%Y%m%d")) # Formatting date as date format from string
data$age <- as.numeric(format(data$date, "%Y")) - data$yr_built # Creating a variable column name 'age' of house
#Creating a variable column name 'is_basement`
data$is_basement<-0
data$is_basement[data$sqft_basement > 0]<-1
data$is_basement=factor(data$is_basement)
# Column 'rate' is created which is selling price per square feet
data$rate <- data$price/data$sqft_living
# Checking how many NA are there
missing_values_summary_table<-t(md.pattern(data, plot = TRUE))
#----------------------------corrleations-----------------------------------------
library(GGally)
numeric_data<-select_if(data, is.numeric)
cor <- rcorr(as.matrix(numeric_data))
M <- cor$r
p_mat <- cor$P
col <- colorRampPalette(c("#BB4444", "#EE9988", "#FFFFFF", "#77AADD", "#4477AA"))
corrplot(M, method = "color", col = col(200),
type = "upper", order = "hclust",
addCoef.col = "black", # Add coefficient of correlation
tl.col = "darkblue", tl.srt = 45, #Text label color and rotation
# Combine with significance level
p.mat = p_mat, sig.level = 0.01,
# hide correlation coefficient on the principal diagonal
diag = FALSE
)
#----------------------------------------Plots---------------------------
# histogram of prices
hist(data$price)
## Checking Relationship between price, bedrooms, bathrooms, sqft_living and sqft lot
plot1<-ggpairs(data=data, columns=2:6,
mapping = aes(color = "dark red"),
axisLabels="show")
plot1
# Sqft vs Price- 6 figures arranged in 3 rows and 2 columns
attach(mtcars)
par(mfrow=c(4,2))
plot(data$sqft_living,log(data$price), main="Scatterplot of sqft_living vs. price", xlab = "sqft_living",ylab = "Price",col="blue")
plot(data$sqft_lot,log(data$price), main="Scatterplot of sqft_lot vs. price", xlab = "sqft_lot",ylab = "Price",col="red")
plot(data$sqft_living15,log(data$price),main="Scatterplot of sqft_living15 vs. price", xlab = "sqft_living15",ylab = "Price",col="green")
plot(data$sqft_lot15,log(data$price),main="Scatterplot of sqft_lot15 vs price", xlab = "sqft_lot15",ylab = "Price",col="purple")
plot(data$sqft_above,log(data$price), main="Scatterplot of sqft_above vs price", xlab = "sqft_above",ylab = "Price",col="dark red")
plot(data$sqft_basement,log(data$price), main="Scatterplot of sqft_basement vs price", xlab = "sqft_basement",ylab = "Price",col="dark blue")
#Price vs Bedrooms
p1<-ggplot(data,aes(bedrooms,price), main="Scatterplot of Bedrooms vs. price",
xlab = "bedrooms",ylab = "Price",col="blue")+ xlim(1,10)
p1+geom_bar(stat = "identity")
plot(data$price~factor(data$bedrooms), main="plot of Bedrooms vs. price" )
#--------------------------------------Create sets for train and test--------------------------------------
#sample data
row_sampler=function(df){
set.seed(789)
n_rows_data=(nrow(df))
random_row_nums <-sample(x=1:n_rows_data,size=n_rows_data,replace = FALSE)
return(random_row_nums)
}
Train_test_division=function(train_fraction,df){
random_rows=row_sampler(df)
Division_point=round(nrow(df)*train_fraction,digits = 0)
Train_indices=random_rows[1:Division_point]
Test_indices=random_rows[(1+Division_point):length(random_rows)]
Train=df[Train_indices,]
Test=df[Test_indices,]
return(list(Train=Train,Test=Test))
}
Train_test_Data=Train_test_division(0.75,data)
Train=Train_test_Data$Train
Test=Train_test_Data$Test
#-----------------------------------------Linear Regression models------------------------------------------------------
full_model=lm(formula = Train$price~.,data = Train)
summary(full_model)
Linear_Predictions=predict(full_model)
Test$predicted_price=predict(full_model,Test)
RMSE=RMSE(Test$price,Test$predicted_price)
#model 3
model3 <- lm(price~ sqft_living + bedrooms + bathrooms + grade + sqft_above + zipcode,data = data)
summary(model3)
#model 4-log to price
model4 <- lm(log(price)~ sqft_living + bedrooms + bathrooms + grade + sqft_above + zipcode,data = data)
summary(model4)
#model 5-log to price+vars
model5 <- lm(log(price)~ log(sqft_living) + bedrooms + bathrooms + grade + log(sqft_above) + zipcode+age+lat+long,data = data)
summary(model5)
Linear_Predictions_model5=predict(model5,Test)
RMSE=RMSE(Test$price,Linear_Predictions_model5)
#---------------------------------------Machine Learning models-------------------------
#----------------------try all ML regression models from caret library-------------
install.packages.compile.from.source = "always"
install.packages(c("feather","tidyr"), type = "both")
library(caret)
library(foreach)
library(doParallel)
gc()
setwd("C:\\Users\\benzionl\\Desktop\\SB")
#--------train control-----
trCtrl <- trainControl(
method = "repeatedcv"
, number = 2
, repeats = 5
, allowParallel = TRUE
)
# sample with 300 observations
ttt <- sample_n(data,300)
str(ttt)
# all caret models
names(getModelInfo())
#regression ML models only:
caret_models <-c("xgbDART","xgbLinear","ppr","gamLoess","cubist","glm","lm","foba","monmlp","glmStepAIC","lmStepAIC","lars2","rqnc","lars","extraTrees","glmnet","qrf","penalized","bagEarthGCV","bagEarth","xgbTree","Rborist","glmboost","M5Rules","M5","ranger","parRF","nnls","rf","RRFglobal","earth","gcvEarth","msaenet","RRF","relaxo","bstTree","leapBackward","blackboost","gbm","nodeHarvest","treebag","kknn","evtree","rpart1SE","rpart2","icr","rpart","partDSA","leapForward","leapSeq","kernelpls","pls","simpls","widekernelpls","BstLm","pcr","knn","svmRadial","svmRadialCost","xyf","svmRadialSigma","null","neuralnet","mlpWeightDecayML","mlp","rfRules","mlpWeightDecay","gaussprRadial","dnn","mlpML","rqlasso","rvmRadial","avNNet","nnet","pcaNNet","superpc","rbfDDA","svmLinear3","svmPoly","randomGLM","svmLinear2","svmLinear")
# create a log file
fname <- 'ttt.log'
cat(paste0(paste('counter','method','user','system','elapsed','mse',sep=','),'\n'), file=fname)
counter <- 0
for(current_method in caret_models) {
counter <- counter+1
print(paste('Trying model #',counter,'/',length(caret_models),current_method))
tryCatch({
registerDoSEQ() # to disable "invalid connection" error
profiler <- system.time(model.1 <- train(form = price~., data=ttt, trControl = trCtrl, method=current_method))
mse <- mean((predict(model.1)-ttt$price)^2)
# write status of current method to log file
status <- paste(counter,current_method,profiler[[1]],profiler[[2]],profiler[[3]],mse,sep=',')
cat(paste0(status,'\n'), file=fname, append=T)
}, error = function(err_cond) {
print(paste('An error with model',current_method))
cat(paste('Error with model #',counter,'/',length(caret_models),current_method,'Error',err_cond,'\n'), file=fname, append=T)
})
}
#----------------------------------------model_xgbLinear with some features-------------------------------------------
model_xgbLinear1<-train(form = price~sqft_living + bedrooms + bathrooms + grade + sqft_above +lat+long, data=Train, trControl = trCtrl,method='xgbLinear')
summary(model_xgbLinear1) # summarizing the model
print(model_xgbLinear1)
plot(model_xgbLinear1)
varImp(object=model_xgbLinear1)
plot(varImp(object=model_xgbLinear1),main="model_xgbLinear - Variable Importance, 7 features")
#Predictions
predictions1<-predict.train(object=model_xgbLinear1,Test[,-Test$price],type="raw")
RMSE_model_xgbLinear1=RMSE(predictions1,Test$price)
library(PRROC)
roc<-roc.curve(predictions1,Test$price,curve = TRUE)
roc
#----------------------------------------model_xgbLinear- all features-------------------------------------------
model_xgbLinear<-train(form = price~., data=Train, trControl = trCtrl,method='xgbLinear')
print(model_xgbLinear) # summarizing the model
plot(model_xgbLinear)
varImp(object=model_xgbLinear)
#Plotting Varianle importance for model_xgbLinear
#plot(varImp(object=model_xgbLinear),main="model_xgbLinear - Variable Importance")
#Predictions
predictions<-predict.train(object=model_xgbLinear,Test[,-Test$price],type="raw")
RMSE_model_xgbLinear1=RMSE(predictions,Test$price)
#----------------------------------------nnet with features--------------------------------------------
model_nnet<-train(form = price~sqft_living + bedrooms + bathrooms + grade + sqft_above + zipcode, data=Train, trControl = trCtrl,method='nnet')
summary(model_nnet) # summarizing the model
print(model_nnet)
plot(model_nnet)
varImp(object=model_nnet)
plot(varImp(object=model_nnet),main="model_nnet - Variable Importance, 6 features")
#Predictions
predictions_nnet<-predict.train(object=model_nnet,Test[,-Test$price],type="raw")
RMSE_model_nnet=RMSE(predictions_nnet,Test$price)
#----------------------------------------RF- with features-------------------------------------------
model_rf<-train(form = price~sqft_living + bedrooms + grade + sqft_above + zipcode+lat+long, data=Train, trControl = trCtrl,method='rf')
summary(model_rf)
print(model_rf)
plot(model_rf)
varImp(object=model_rf)
plot(varImp(object=model_rf),main="model_rf - Variable Importance, 6 features")
#Predictions
predictions_rf<-predict.train(object=model_rf,Test[,-Test$price],type="raw")
RMSE_model_xgbLinear1=RMSE(predictions_rf,Test$price)
#---------------------------------jointEntropy--------------------------------
entropy=function(y)
{
if (length(y)==0){
return(0)
}
p1=sum(y)/length(y)
p0=1-p1
return(-p0*log2(max(1e-10, p0))-p1*log2(max(1e-10, p1)))
}
jointEntropy=function(x1,x2,y)
{
total_entropy=0
for (i in 0:1 )
{
for (j in 0:1)
{
x1_idx=which(x1==i)
x2_idx=which(x2==j)
idx=x1_idx[x1_idx%in%x2_idx]
subset_y=y[idx]
subset_y
w = (length(idx)/length(y))
e = entropy(subset_y)
total_entropy=total_entropy+ e*w
}
}
return (total_entropy)
}
#get results from function
#test_1
a=c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
b=c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
labels=c(1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0)
jointEntropy(a,b,labels)
#test_2
a=c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
b=c(1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0)
labels=c(1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0)
jointEntropy(a,b,labels)
#test_3
a=c(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)
b=c(0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0)
labels=c(1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0)
jointEntropy(a,b,labels)
#test_4
a=c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
b=c(1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0)
labels=c(1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0)
jointEntropy(a,b,labels)
#test_5
a=c(0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0)
b=c(1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0)
labels=c(1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0)
jointEntropy(a,b,labels)
#test_6
a=c(1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0)
b=c(1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0)
labels=c(1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0)
entropy1=jointEntropy(a,b,labels)
as.logical(abs((entropy1 - 0.344) < 0.01))
#------------------The END---------------------------------- |
#!/usr/bin/env Rscript
##The intent of this script is to 'tune' crown delineation parameters
## all parameters will be varied randomly withing a set range
## and run `max_iteration` times
## Author:Jack
## Start date: 2/21/19
#All methods are based on a seed/marker generated from lidR::tree_detection()
## All methods are from the lidR package:
# https://cran.r-project.org/web/packages/lidR/lidR.pdf
# https://github.com/Jean-Romain/lidR/wiki
## set seed so that the random parameter tuning is reproducible
# set.seed(1)
##Load in required packages
library(lidR)
library(raster)
library(rgdal)
library(sp)
library(rgeos)
library(sp)        # NOTE(review): duplicate of the library(sp) call above
library(maptools)
library(dplyr)
library(rgeos)     # NOTE(review): duplicate of the library(rgeos) call above
library(snow)
library(cleangeo)
max_iteration = 300 ## number of random-search iterations in the tuning loop
#load in CHM, LAS, and plots (canopy height model, point cloud, plot polygons)
chm <- raster("CHM_trim.tif")
##chm <- raster::focal(chm, w=matrix(1,3,3), fun=mean)
las <- readLAS("las_trim.las")
plots <- readOGR("20m_plots.shp"); names(plots) <- c("plotID")
#_____MANUAL CROWN PREP
# Load the manually delineated reference crowns and repair invalid
# geometries reported by cleangeo.
manual <- readOGR("manual_crown_delineation_fixed_geom.shp")
man_geom_report <- clgeo_CollectionReport(manual)
# BUG FIX: the old condition `length(unique(man_geom_report$valid == 2))` is
# always >= 1 (length of a non-empty unique() result), so it was always
# truthy and clgeo_Clean() ran unconditionally. Clean only when the report
# actually flags invalid features (the report's `valid` column is logical).
if (any(!man_geom_report$valid)) {
  manual <- clgeo_Clean(manual)
}
# clean it up a bit
manual@data$crownID <-NULL
manual$man.ID <- 1:length(manual) # give unique ID
manual$man.area <- area(manual) # calculate area
manual <- manual[,c(2,1,3)] # reorder my columns
# -- calculate manual centroid, and remove any polygons where the centroid falls outside plot bounds
man_cent <- gCentroid(manual, byid = T)
manIN <-over(man_cent, plots) # plot membership of each centroid; also assigns plot IDs
manIN$man.ID <-1:length(manIN$plotID) # make identifer column, with same name as manual to join on
manual@data <- dplyr::left_join(manual@data, manIN) ## joined
## drop crowns whose centroid fell outside every plot
manual <- manual[!is.na(manual@data$plotID),]
# remove temporaries -- left with just manual
rm(man_cent, manIN, man_geom_report); gc()
#
#
#
#
# Score automatically delineated crowns (`auto`) against the manual
# reference crowns (`manual`) within the survey `plots`.
#
# Args:
#   auto   - SpatialPolygonsDataFrame of automatic crowns; carries a single
#            `treeID` column on input.
#   manual - SpatialPolygonsDataFrame of reference crowns with man.ID,
#            plotID and man.area columns (prepared above).
#   plots  - SpatialPolygonsDataFrame of plot boundaries with a plotID column.
#
# Returns a data.frame with one row per plot: plotID, TP (true positives --
# intersections covering >= 50% of BOTH the manual and the auto crown),
# MC (manual crown count), AC (auto crown count), accuracy = TP/MC.
accuracy_assessment <- function(auto, manual, plots){
  ## Auto crowns will be spit out with just one column: Tree ID
  auto$aut.ID <- 1:length(auto@data$treeID) # assign polygon ID
  auto <- auto[,-1]# remove treeID -- useless?
  if(identicalCRS(auto, manual) == FALSE){ # I think CRS will always be wrong -- this checks and corrects
    auto <- spTransform(auto, crs(manual))
  }
  plot_buf <- gBuffer(plots, width = 3, byid = T)
  aut_cent <- gCentroid(auto, byid = T)# calculate centroid
  autIN <- over(aut_cent, plot_buf) #find centroids in polygons -- this also assigns plot ID
  autIN$aut.ID <- 1:length(autIN$plotID) # assign polygon ID to be joined on.
  auto@data <- dplyr::left_join(auto@data, autIN) # join centroid 'over' data with auto
  auto <- auto[!is.na(auto@data$plotID),] # remove crowns that are not in plot
  auto$aut.area <- area(auto) # calculate area of autos
  row.names(auto@data)<-NULL
  auto<- gBuffer(auto,byid = T, width = 0 )
  aut_geom_report <- clgeo_CollectionReport(auto)
  # BUG FIX: `length(unique(aut_geom_report$valid == 2))` is always >= 1 and
  # therefore always truthy, so the clean ran unconditionally. Clean only
  # when the report flags invalid features (`valid` is a logical column).
  if (any(!aut_geom_report$valid)){
    print("Fixing broken geometry")
    auto <- clgeo_Clean(auto)
  }
  #intersection between auto and manual
  inter <- raster::intersect(auto, manual)
  inter <- inter[,c(4,1,2,6,3,5)] # rearrange, and drop duplicate colums
  colnames(inter@data)[colnames(inter@data)=="plotID.1"] <- "plot.ID" # clean up colnames
  # calculate intersection area
  inter$int.area <- area(inter)
  for (i in 1:length(inter@polygons)){
    inter@polygons[[i]]@ID <- as.character(i)
  }
  inter <- gBuffer(inter, byid=TRUE, width=0)
  inter$int.man <- inter$int.area/inter$man.area # ratio of intersected area to manual area
  inter$int.aut <- inter$int.area/inter$aut.area # ratio of intersected area to auto area
  #determine number of true positives
  #currently this will just classify an intersection as a true positive (1) or a miss (0)
  inter$TP <- ifelse(inter$int.man >= 0.5 & inter$int.aut >= 0.5, 1, 0)
  inter@data$TP <- as.factor(inter@data$TP)
  #compile data frame with slots for TP count
  tpX <- as.data.frame(inter@data %>%
                         group_by(plot.ID, TP)%>%
                         tally())
  tpX$TP <- as.numeric(as.character(tpX$TP))
  #this unfortunate chunk of code is for the circumstances when a plot has no TP
  # NOTE(review): `length(tpX[tpX$TP[[i]] == 1]) == 0` indexes the whole data
  # frame with a scalar logical -- it "works" (0 columns when TP[i] != 1) but
  # is fragile and hard to read; confirm the intended behaviour.
  for(i in 1:NROW(tpX)){
    if(length(tpX[tpX$TP[[i]] == 1])== 0){
      tpX[i, c(2:3)] <- c(1,0)}
  }
  TP_agg <- as.data.frame(tpX %>%
                            group_by(plot.ID, TP, n) %>%
                            tally())
  # Keep, per plot, the row with the largest count n (TP count after sort)
  TP_agg <- TP_agg[order(TP_agg$plot.ID, TP_agg$n, decreasing = T),]
  TP_agg <- TP_agg[!duplicated(TP_agg$plot.ID),c(1:3)]
  TP_agg <- TP_agg[,c(1,3)]
  names(TP_agg) <- c("plotID", "TP")
  #compile data.frame with counts of manual crowns per plot
  man_agg <- as.data.frame(count(manual@data, plotID))
  names(man_agg) <- c("plotID", "MC")
  #compile data.frames with counts of auto crowns per plot
  aut_agg <- as.data.frame(count(auto@data, plotID))
  names(aut_agg) <- c("plotID", "AC")
  # merge to one data.frame
  CrownCount <- merge(TP_agg, man_agg, by = "plotID")
  CrownCount <- merge(CrownCount, aut_agg, by = "plotID")
  #add new column for "true positive" accuracy:
  # simply the ratio of true positives to manually delineated crowns
  CrownCount$accuracy <- CrownCount$TP/CrownCount$MC
  CrownCount$plotID <- as.integer(as.character(CrownCount$plotID))
  CrownCount <- CrownCount[order(CrownCount$plotID), ]
  #Overall_Accuracy <- (sum(CrownCount$TP))/(sum(CrownCount$CC)) # this should likely just be left as a post function analysis
  return(CrownCount) ## Can easily change this to return a table to look at individual plots
}
## delineation method: simple watershed
# Cite:
## Result holders for the tuning loop below.
## FIX: preallocated to max_iteration instead of growing NULL vectors with
## c() on every iteration (an O(n^2) anti-pattern). The loop writes by
## index, so the final contents are identical.
iterate <- integer(max_iteration)  # iteration counter
TH_Tree <- numeric(max_iteration)  # th_tree parameter drawn per iteration
EXT     <- integer(max_iteration)  # ext parameter
TOL     <- numeric(max_iteration)  # tol parameter
#-error / accuracy summaries
nMC <- integer(max_iteration)      # number of manual crowns
nAC <- integer(max_iteration)      # number of automatic crowns
Acc <- numeric(max_iteration)      # overall accuracy
#plot 1-15 accuracies
P1  <- numeric(max_iteration)
P2  <- numeric(max_iteration)
P3  <- numeric(max_iteration)
P4  <- numeric(max_iteration)
P5  <- numeric(max_iteration)
P6  <- numeric(max_iteration)
P7  <- numeric(max_iteration)
P8  <- numeric(max_iteration)
P9  <- numeric(max_iteration)
P10 <- numeric(max_iteration)
P11 <- numeric(max_iteration)
P12 <- numeric(max_iteration)
P13 <- numeric(max_iteration)
P14 <- numeric(max_iteration)
P15 <- numeric(max_iteration)
# Random-search tuning loop: each iteration draws one watershed parameter
# set, segments the point cloud, and scores the resulting crowns against
# the manual delineation via accuracy_assessment().
for (i in 1:max_iteration){
  print(i)
  ##define parameter variablity (one random draw per iteration)
  thTree <-signif(runif(1,2,10),4)
  ext <-sample(1:3,1)
  tol <-signif(runif(1,0.001,1),4)
  #________________________________________________________________
  # Watershed segmentation seeded from the CHM, then concave hulls per tree
  ws_crowns <- lastrees(las, watershed(chm = chm, th_tree = thTree, tol = tol, ext = ext))
  auto <- tree_hulls(ws_crowns, type = "concave")
  #_________________________________________________________________
  ##Error / accuracy bookkeeping
  CrownCount<- accuracy_assessment(auto, manual,plots); print (CrownCount)
  Overall_Accuracy <- (sum(CrownCount$TP))/(sum(CrownCount$MC))
  Nmc <- sum(CrownCount$MC)
  Nac <- sum(CrownCount$AC)
  rownames(CrownCount)<- paste0("P",CrownCount$plotID)
  CCtrans <- as.data.frame(t(as.matrix(CrownCount)))
  Acc[[i]] <- Overall_Accuracy
  nMC[[i]] <- Nmc
  nAC[[i]] <- Nac
  # Per-plot accuracy: after transposing CrownCount, row 5 holds the
  # 'accuracy' column (rows: plotID, TP, MC, AC, accuracy). A plot absent
  # from the results for this iteration is recorded as 0.
  if((length(CCtrans$P1) == 0)){P1[[i]]<-0}else {P1[[i]]<-CCtrans$P1[[5]]}
  if((length(CCtrans$P2) == 0)){P2[[i]]<-0}else {P2[[i]]<-CCtrans$P2[[5]]}
  if((length(CCtrans$P3) == 0)){P3[[i]]<-0}else {P3[[i]]<-CCtrans$P3[[5]]}
  if((length(CCtrans$P4) == 0)){P4[[i]]<-0}else {P4[[i]]<-CCtrans$P4[[5]]}
  if((length(CCtrans$P5) == 0)){P5[[i]]<-0}else {P5[[i]]<-CCtrans$P5[[5]]}
  if((length(CCtrans$P6) == 0)){P6[[i]]<-0}else {P6[[i]]<-CCtrans$P6[[5]]}
  if((length(CCtrans$P7) == 0)){P7[[i]]<-0}else {P7[[i]]<-CCtrans$P7[[5]]}
  if((length(CCtrans$P8) == 0)){P8[[i]]<-0}else {P8[[i]]<-CCtrans$P8[[5]]}
  if((length(CCtrans$P9) == 0)){P9[[i]]<-0}else {P9[[i]]<-CCtrans$P9[[5]]}
  if((length(CCtrans$P10) == 0)){P10[[i]]<-0}else {P10[[i]]<-CCtrans$P10[[5]]}
  if((length(CCtrans$P11) == 0)){P11[[i]]<-0}else {P11[[i]]<-CCtrans$P11[[5]]}
  if((length(CCtrans$P12) == 0)){P12[[i]]<-0}else {P12[[i]]<-CCtrans$P12[[5]]}
  if((length(CCtrans$P13) == 0)){P13[[i]]<-0}else {P13[[i]]<-CCtrans$P13[[5]]}
  if((length(CCtrans$P14) == 0)){P14[[i]]<-0}else {P14[[i]]<-CCtrans$P14[[5]]}
  if((length(CCtrans$P15) == 0)){P15[[i]]<-0}else {P15[[i]]<-CCtrans$P15[[5]]}
  ## compile parameters to predefined lists
  iterate[[i]] <- i
  TH_Tree[[i]] <- thTree
  EXT[[i]] <- ext
  TOL[[i]] <- tol
}
# Assemble one row per iteration (parameters + accuracy scores) and persist.
ws_tune <- as.data.frame(cbind(iterate, TH_Tree, EXT, TOL,Acc, nMC, nAC,
                               P1,P2,P3,P4,P5,P6,P7,P8,P9,
                               P10,P11,P12,P13,P14,P15))
write.csv(ws_tune, "simpleWS_tune_300buf.csv")
| /simplewatershed_tune.R | no_license | Jack-Hastings/Crown-Tuning-Masters-Thesis | R | false | false | 8,930 | r | #!/usr/bin/env Rscript
## The intent of this script is to 'tune' crown delineation parameters:
## all parameters are varied randomly within a set range and the
## delineation is re-run max_iteration times.
## Author: Jack
## Start date: 2/21/19
# All methods are based on a seed/marker generated from lidR::tree_detection()
## All methods are from the lidR package:
# https://cran.r-project.org/web/packages/lidR/lidR.pdf
# https://github.com/Jean-Romain/lidR/wiki
## set seed so that the random parameter tuning is reproducible
# set.seed(1)
## Load required packages.
## (Duplicate library(sp) and library(rgeos) calls removed; library() is
## idempotent, so behaviour is unchanged.)
library(lidR)
library(raster)
library(rgdal)
library(sp)
library(rgeos)
library(maptools)
library(dplyr)
library(snow)
library(cleangeo)
max_iteration <- 300 ## number of random-parameter tuning iterations
# Load canopy height model, point cloud, and plot boundaries from disk.
chm <- raster("CHM_trim.tif")
##chm <- raster::focal(chm, w=matrix(1,3,3), fun=mean)
las <- readLAS("las_trim.las")
plots <- readOGR("20m_plots.shp"); names(plots) <- c("plotID")
#_____MANUAL CROWN PREP
# Load the hand-digitised reference crowns and repair invalid geometries.
manual <- readOGR("manual_crown_delineation_fixed_geom.shp")
man_geom_report <- clgeo_CollectionReport(manual)
# BUGFIX: the original test was if(length(unique(man_geom_report$valid == 2))),
# which is always TRUE (length(unique(.)) >= 1 for any non-empty vector, and
# $valid is logical so "== 2" is always FALSE anyway), so the clean ran
# unconditionally. Clean only when at least one geometry is reported invalid.
if (any(!man_geom_report$valid)) {
    manual <- clgeo_Clean(manual)
}
# clean it up a bit
manual@data$crownID <-NULL
manual$man.ID <- 1:length(manual) # give unique ID
manual$man.area <- area(manual) # calculate area
manual <- manual[,c(2,1,3)] # reorder my columns
# -- calculate manual centroid, and remove any polygons where the centroid falls outside plot bounds
man_cent <- gCentroid(manual, byid = T)
manIN <-over(man_cent, plots) # calculate if centroids fall WITHIN plot boundaries; this will also assign plot IDs
manIN$man.ID <-1:length(manIN$plotID) # make identifier column, with same name as manual to join on
manual@data <- dplyr::left_join(manual@data, manIN) ## joined
## now we need to remove the polygons outside the plot
manual <- manual[!is.na(manual@data$plotID),]
# remove temporaries -- left with just `manual`
rm(man_cent, manIN, man_geom_report); gc()
#
#
#
#
# Accuracy assessment for automatically delineated tree crowns.
#
# Compares an automatic crown map against the manual reference, per plot:
#  * keeps only auto crowns whose centroid falls inside a 3 m-buffered plot,
#  * intersects auto and manual crowns,
#  * counts an intersection as a true positive (TP) when it covers >= 50%
#    of BOTH the manual crown area and the auto crown area.
#
# Args:
#   auto   - SpatialPolygonsDataFrame of automatic crowns (first column treeID).
#   manual - SpatialPolygonsDataFrame of manual crowns (man.ID, man.area, plotID).
#   plots  - SpatialPolygonsDataFrame of plot boundaries (column plotID).
# Returns:
#   data.frame with one row per plot: plotID, TP, MC (manual crown count),
#   AC (auto crown count), and accuracy = TP / MC.
accuracy_assessment <- function(auto, manual, plots){
    ## Auto crowns will be spit out with just one column: Tree ID
    auto$aut.ID <- 1:length(auto@data$treeID) # assign polygon ID
    auto <- auto[,-1]# remove treeID -- useless?
    if(identicalCRS(auto, manual) == FALSE){ # I think CRS will always be wrong -- this checks and corrects
        auto <- spTransform(auto, crs(manual))
    }
    # Buffer plots by 3 m so crowns straddling the boundary are retained.
    plot_buf <- gBuffer(plots, width = 3, byid = T)
    aut_cent <- gCentroid(auto, byid = T)# calculate centroid
    autIN <- over(aut_cent, plot_buf) #find centroids in polygons -- this also assigns plot ID
    autIN$aut.ID <- 1:length(autIN$plotID) # assign polygon ID to be joined on.
    auto@data <- dplyr::left_join(auto@data, autIN) # join centroid 'over' data with auto
    auto <- auto[!is.na(auto@data$plotID),] # remove crowns that are not in plot
    auto$aut.area <- area(auto) # calculate area of autos
    row.names(auto@data)<-NULL
    # Zero-width buffer is the usual trick to repair self-intersections.
    auto<- gBuffer(auto,byid = T, width = 0 )
    aut_geom_report <- clgeo_CollectionReport(auto)
    # BUGFIX: the original condition length(unique(aut_geom_report$valid == 2))
    # was always TRUE, so the (slow) geometry clean ran unconditionally.
    # Clean only when at least one polygon is reported invalid.
    if (any(!aut_geom_report$valid)) {
        print("Fixing broken geometry")
        auto <- clgeo_Clean(auto)
    }
    #intersection between auto and manual
    inter <- raster::intersect(auto, manual)
    inter <- inter[,c(4,1,2,6,3,5)] # rearrange, and drop duplicate colums
    colnames(inter@data)[colnames(inter@data)=="plotID.1"] <- "plot.ID" # clean up colnames
    # calculate intersection area
    inter$int.area <- area(inter)
    # Re-ID polygons sequentially so gBuffer(byid=TRUE) below is well defined.
    for (i in 1:length(inter@polygons)){
        inter@polygons[[i]]@ID <- as.character(i)
    }
    inter <- gBuffer(inter, byid=TRUE, width=0)
    inter$int.man <- inter$int.area/inter$man.area # ratio of intersected area to manual area
    inter$int.aut <- inter$int.area/inter$aut.area # ratio of intersected area to auto area
    #determine number of true positives
    #currently this will just classify an intersection as a true positive (1) or a miss (0)
    inter$TP <- ifelse(inter$int.man >= 0.5 & inter$int.aut >= 0.5, 1, 0)
    inter@data$TP <- as.factor(inter@data$TP)
    #compile data frame with slots for TP count
    tpX <- as.data.frame(inter@data %>%
                             group_by(plot.ID, TP)%>%
                             tally())
    tpX$TP <- as.numeric(as.character(tpX$TP))
    # Fallback for plots with no true positives: insert a (TP=1, n=0) row.
    # NOTE(review): the subset tpX[tpX$TP[[i]] == 1] selects data.frame
    # *columns* by a single logical -- it appears to work as a "no TP row"
    # probe only by accident; confirm intent before relying on it elsewhere.
    for(i in 1:NROW(tpX)){
        if(length(tpX[tpX$TP[[i]] == 1])== 0){
            tpX[i, c(2:3)] <- c(1,0)}
    }
    # Keep, per plot, the row with the largest n (sorted descending), i.e.
    # the dominant TP classification for that plot.
    TP_agg <- as.data.frame(tpX %>%
                                group_by(plot.ID, TP, n) %>%
                                tally())
    TP_agg <- TP_agg[order(TP_agg$plot.ID, TP_agg$n, decreasing = T),]
    TP_agg <- TP_agg[!duplicated(TP_agg$plot.ID),c(1:3)]
    TP_agg <- TP_agg[,c(1,3)]
    names(TP_agg) <- c("plotID", "TP")
    #compile data.frame with counts of manual crowns per plot
    man_agg <- as.data.frame(count(manual@data, plotID))
    names(man_agg) <- c("plotID", "MC")
    #compile data.frames with counts of auto crowns per plot
    aut_agg <- as.data.frame(count(auto@data, plotID))
    names(aut_agg) <- c("plotID", "AC")
    # merge to one data.frame
    CrownCount <- merge(TP_agg, man_agg, by = "plotID")
    CrownCount <- merge(CrownCount, aut_agg, by = "plotID")
    #add new column for "true positive accuracy":
    # simply the ratio of true positives to manually delineated crowns
    CrownCount$accuracy <- CrownCount$TP/CrownCount$MC
    CrownCount$plotID <- as.integer(as.character(CrownCount$plotID))
    CrownCount <- CrownCount[order(CrownCount$plotID), ]
    #Overall_Accuracy <- (sum(CrownCount$TP))/(sum(CrownCount$CC)) # this should likely just be left as a post function analysis
    return(CrownCount) ## Can easily change this to return a table to look at individual plots
}
## delineation method: simple watershed
# Cite:
## build empty lists before loop -- these will be populated with iteration, parameters, and accuracy score
iterate <- c() #necessary? counts rep
TH_Tree <- c()
EXT <- c()
TOL <- c()
#-error
nMC <-c() #number of manual crowns
nAC <-c() # number of automatic crowns
Acc <-c() #overall accuracy
#plot 1-15 accuracies
P1 <-c()
P2 <-c()
P3 <-c()
P4 <-c()
P5 <-c()
P6 <-c()
P7 <-c()
P8 <-c()
P9 <-c()
P10 <-c()
P11 <-c()
P12 <-c()
P13 <-c()
P14 <-c()
P15 <-c()
# Random-search tuning: draw watershed parameters uniformly, delineate,
# score against the manual reference, and record everything per iteration.
for (i in 1:max_iteration){
    print(i)
    ## draw random parameter values within their tuning ranges
    thTree <-signif(runif(1,2,10),4)
    ext <-sample(1:3,1)
    tol <-signif(runif(1,0.001,1),4)
    #________________________________________________________________
    # Segment the point cloud with the watershed algorithm, then build
    # concave hulls around each segmented tree.
    ws_crowns <- lastrees(las, watershed(chm = chm, th_tree = thTree, tol = tol, ext = ext))
    auto <- tree_hulls(ws_crowns, type = "concave")
    #_________________________________________________________________
    ## score this parameter combination against the manual crowns
    CrownCount<- accuracy_assessment(auto, manual,plots); print (CrownCount)
    Overall_Accuracy <- (sum(CrownCount$TP))/(sum(CrownCount$MC))
    Nmc <- sum(CrownCount$MC)
    Nac <- sum(CrownCount$AC)
    # Transpose so each plot becomes a column (P1, P2, ...); row 5 of the
    # transposed frame is the 'accuracy' column of CrownCount.
    rownames(CrownCount)<- paste0("P",CrownCount$plotID)
    CCtrans <- as.data.frame(t(as.matrix(CrownCount)))
    Acc[[i]] <- Overall_Accuracy
    nMC[[i]] <- Nmc
    nAC[[i]] <- Nac
    # Per-plot accuracy ([[5]] = 'accuracy' row); 0 when this parameter
    # setting produced no scored crowns for the plot.
    if((length(CCtrans$P1) == 0)){P1[[i]]<-0}else {P1[[i]]<-CCtrans$P1[[5]]}
    if((length(CCtrans$P2) == 0)){P2[[i]]<-0}else {P2[[i]]<-CCtrans$P2[[5]]}
    if((length(CCtrans$P3) == 0)){P3[[i]]<-0}else {P3[[i]]<-CCtrans$P3[[5]]}
    if((length(CCtrans$P4) == 0)){P4[[i]]<-0}else {P4[[i]]<-CCtrans$P4[[5]]}
    if((length(CCtrans$P5) == 0)){P5[[i]]<-0}else {P5[[i]]<-CCtrans$P5[[5]]}
    if((length(CCtrans$P6) == 0)){P6[[i]]<-0}else {P6[[i]]<-CCtrans$P6[[5]]}
    if((length(CCtrans$P7) == 0)){P7[[i]]<-0}else {P7[[i]]<-CCtrans$P7[[5]]}
    if((length(CCtrans$P8) == 0)){P8[[i]]<-0}else {P8[[i]]<-CCtrans$P8[[5]]}
    if((length(CCtrans$P9) == 0)){P9[[i]]<-0}else {P9[[i]]<-CCtrans$P9[[5]]}
    if((length(CCtrans$P10) == 0)){P10[[i]]<-0}else {P10[[i]]<-CCtrans$P10[[5]]}
    if((length(CCtrans$P11) == 0)){P11[[i]]<-0}else {P11[[i]]<-CCtrans$P11[[5]]}
    if((length(CCtrans$P12) == 0)){P12[[i]]<-0}else {P12[[i]]<-CCtrans$P12[[5]]}
    if((length(CCtrans$P13) == 0)){P13[[i]]<-0}else {P13[[i]]<-CCtrans$P13[[5]]}
    if((length(CCtrans$P14) == 0)){P14[[i]]<-0}else {P14[[i]]<-CCtrans$P14[[5]]}
    if((length(CCtrans$P15) == 0)){P15[[i]]<-0}else {P15[[i]]<-CCtrans$P15[[5]]}
    ## compile parameters to predefined lists
    iterate[[i]] <- i
    TH_Tree[[i]] <- thTree
    EXT[[i]] <- ext
    TOL[[i]] <- tol
}
# One row per iteration: parameters, overall accuracy, counts, per-plot
# accuracies; written out for post-hoc analysis.
ws_tune <- as.data.frame(cbind(iterate, TH_Tree, EXT, TOL,Acc, nMC, nAC,
                               P1,P2,P3,P4,P5,P6,P7,P8,P9,
                               P10,P11,P12,P13,P14,P15))
write.csv(ws_tune, "simpleWS_tune_300buf.csv")
|
# Process Picarro data for Peyton's DWP lab experiment
# Ben Bond-Lamberty April 2015
# Helper functions (printlog, read_csv, print_dims, save_data, outputdir,
# OUTPUT_DIR) presumably come from 0-functions.R -- not defined here.
source("0-functions.R")
SCRIPTNAME  	<- "3-fluxes.R"
summarydata   <- file.path(OUTPUT_DIR, "summarydata.csv")  # output from script 2
# ==============================================================================
# Main
sink(file.path(outputdir(), paste0(SCRIPTNAME, ".log.txt")), split=T) # open log
printlog("Welcome to", SCRIPTNAME)
printlog("Reading in summary data...")
fluxdata <- read_csv(summarydata)
print_dims(fluxdata)
printlog("Computing fluxes...")
# At this point, `fluxdata` has min and max CO2/CH4 concentrations,
# and the times those occured at. Computing a slope (ppm or ppb/s) is thus easy.
# We want to convert this to mg C/g soil/s, using
# A = dC/dt * V/M * Pa/RT (cf. Steduto et al. 2002), where
# A is the flux (µmol/g/s)
# dC/dt is raw respiration as above (mole fraction/s)
# V is total chamber volume (cm3)
# M is [dry] soil mass (g)
# Pa is atmospheric pressure (kPa)
# R is universal gas constant (8.3 x 10^3 cm3 kPa mol-1 K-1)
# T is air temperature (K)
# The instrument tubing is 455 cm long by ID 1/16"
V_tubing <- (1/16 * 2.54 / 2 ) ^ 2 * pi * 455
# Headspace on the core is 7.3 cm diameter by 4 cm height.
V_headspace <- fluxdata$HEADSPACE_VOL_CM3
# Internal volume of Picarro?
V_picarro <- 9 # Assume same as PP-Systems
# Per-row total chamber volume (tubing + core headspace + analyzer).
V_cm3 <- V_tubing + V_headspace + V_picarro
Pa <- 101 # kPa (Richland is ~120 m asl)
R <- 8.3145e+3 # cm3 kPa K−1 mol−1
Tair <- 273.1 + 20 # unknown
# Concentration slopes over the measurement window.
m_CO2 <- with(fluxdata, (max_CO2 - min_CO2) / (max_CO2_time - min_CO2_time)) # ppm/s
m_CH4 <- with(fluxdata, (max_CH4 - min_CH4) / (max_CH4_time - min_CH4_time)) # ppb/s
fluxdata$V_cm3 <- V_cm3
# Calculate mass-corrected respiration, µmol/g soil/s
fluxdata$CO2_flux_umol_g_s <- m_CO2 / 1 * # from ppm/s to µmol/s
    V_cm3 / fluxdata$DRYWT_SOIL_G * Pa / (R * Tair) # ideal gas law
fluxdata$CH4_flux_umol_g_s <- m_CH4 / 1000 * # from ppb/s to µmol/s
    V_cm3 / fluxdata$DRYWT_SOIL_G * Pa / (R * Tair) # ideal gas law
# Calculate total flux of mg C/s
fluxdata$CO2_flux_mgC_hr <- with(fluxdata, CO2_flux_umol_g_s * DRYWT_SOIL_G) / # get rid of /g soil
    1e6 * # to mol
    12 * # to g C
    1000 * # to mg C
    60 * 60 # to /hr
fluxdata$CH4_flux_mgC_hr <- with(fluxdata, CH4_flux_umol_g_s * DRYWT_SOIL_G) / # get rid of /g soil
    1e6 * # to mol
    16 * # to g C
    1000 * # to mg C
    60 * 60 # to /hr
# Compute cumulative C respired
printlog("Computing cumulative C respired...")
# Pre-incubation rows (negative elapsed time) are set aside and re-attached
# after the cumulative sums are computed.
fd_notcum <- filter(fluxdata, elapsed_minutes < 0.0)
fluxdata <- fluxdata %>%
    filter(elapsed_minutes >= 0.0) %>%
    # Interpolate missing flux values
    # (approx drops NA y-values before interpolating; rule = 2 extends the
    # end values, so the _interp columns are gap-filled)
    mutate(CO2_flux_mgC_hr_interp = approx(elapsed_minutes, CO2_flux_mgC_hr, xout = elapsed_minutes, rule = 2)$y,
           CH4_flux_mgC_hr_interp = approx(elapsed_minutes, CH4_flux_mgC_hr, xout = elapsed_minutes, rule = 2)$y) %>%
    group_by(CORE, WETTING, MOISTURE, STRUCTURE) %>%
    arrange(elapsed_minutes) %>%
    # deltahrs is NA for each group's first row (lag); the [-1] in cumsum
    # drops that NA so the cumulative series starts at 0.
    mutate(deltahrs = (elapsed_minutes - lag(elapsed_minutes)) / 60,
           CO2_flux_mgC = CO2_flux_mgC_hr_interp * deltahrs,
           cumCO2_flux_mgC = c(0, cumsum(CO2_flux_mgC[-1])),
           CH4_flux_mgC = CH4_flux_mgC_hr_interp * deltahrs,
           cumCH4_flux_mgC = c(0, cumsum(CH4_flux_mgC[-1]))
    ) %>%
    bind_rows(fd_notcum) %>%
    select(-CO2_flux_mgC, -CH4_flux_mgC, -STARTDATETIME, -deltahrs) %>%
    arrange(STARTDATE, CORE, WETTING, MOISTURE, STRUCTURE, elapsed_minutes)
#fluxdata <- fluxdata[complete.cases(fluxdata),]
save_data(fluxdata, scriptfolder=FALSE)
printlog("All done with", SCRIPTNAME)
print(sessionInfo())
sink() # close log
| /3-fluxes.R | no_license | janefudyma/dwp_peyton | R | false | false | 3,649 | r | # Process Picarro data for Peyton's DWP lab experiment
# Ben Bond-Lamberty April 2015
# NOTE(review): this is a verbatim duplicate of the 3-fluxes.R script above
# (data-dump artifact, first header comment swallowed by the separator line);
# kept as-is.
source("0-functions.R")
SCRIPTNAME  	<- "3-fluxes.R"
summarydata   <- file.path(OUTPUT_DIR, "summarydata.csv")  # output from script 2
# ==============================================================================
# Main
sink(file.path(outputdir(), paste0(SCRIPTNAME, ".log.txt")), split=T) # open log
printlog("Welcome to", SCRIPTNAME)
printlog("Reading in summary data...")
fluxdata <- read_csv(summarydata)
print_dims(fluxdata)
printlog("Computing fluxes...")
# At this point, `fluxdata` has min and max CO2/CH4 concentrations,
# and the times those occured at. Computing a slope (ppm or ppb/s) is thus easy.
# We want to convert this to mg C/g soil/s, using
# A = dC/dt * V/M * Pa/RT (cf. Steduto et al. 2002), where
# A is the flux (µmol/g/s)
# dC/dt is raw respiration as above (mole fraction/s)
# V is total chamber volume (cm3)
# M is [dry] soil mass (g)
# Pa is atmospheric pressure (kPa)
# R is universal gas constant (8.3 x 10^3 cm3 kPa mol-1 K-1)
# T is air temperature (K)
# The instrument tubing is 455 cm long by ID 1/16"
V_tubing <- (1/16 * 2.54 / 2 ) ^ 2 * pi * 455
# Headspace on the core is 7.3 cm diameter by 4 cm height.
V_headspace <- fluxdata$HEADSPACE_VOL_CM3
# Internal volume of Picarro?
V_picarro <- 9 # Assume same as PP-Systems
V_cm3 <- V_tubing + V_headspace + V_picarro
Pa <- 101 # kPa (Richland is ~120 m asl)
R <- 8.3145e+3 # cm3 kPa K−1 mol−1
Tair <- 273.1 + 20 # unknown
m_CO2 <- with(fluxdata, (max_CO2 - min_CO2) / (max_CO2_time - min_CO2_time)) # ppm/s
m_CH4 <- with(fluxdata, (max_CH4 - min_CH4) / (max_CH4_time - min_CH4_time)) # ppb/s
fluxdata$V_cm3 <- V_cm3
# Calculate mass-corrected respiration, µmol/g soil/s
fluxdata$CO2_flux_umol_g_s <- m_CO2 / 1 * # from ppm/s to µmol/s
    V_cm3 / fluxdata$DRYWT_SOIL_G * Pa / (R * Tair) # ideal gas law
fluxdata$CH4_flux_umol_g_s <- m_CH4 / 1000 * # from ppb/s to µmol/s
    V_cm3 / fluxdata$DRYWT_SOIL_G * Pa / (R * Tair) # ideal gas law
# Calculate total flux of mg C/s
fluxdata$CO2_flux_mgC_hr <- with(fluxdata, CO2_flux_umol_g_s * DRYWT_SOIL_G) / # get rid of /g soil
    1e6 * # to mol
    12 * # to g C
    1000 * # to mg C
    60 * 60 # to /hr
fluxdata$CH4_flux_mgC_hr <- with(fluxdata, CH4_flux_umol_g_s * DRYWT_SOIL_G) / # get rid of /g soil
    1e6 * # to mol
    16 * # to g C
    1000 * # to mg C
    60 * 60 # to /hr
# Compute cumulative C respired
printlog("Computing cumulative C respired...")
fd_notcum <- filter(fluxdata, elapsed_minutes < 0.0)
fluxdata <- fluxdata %>%
    filter(elapsed_minutes >= 0.0) %>%
    # Interpolate missing flux values
    mutate(CO2_flux_mgC_hr_interp = approx(elapsed_minutes, CO2_flux_mgC_hr, xout = elapsed_minutes, rule = 2)$y,
           CH4_flux_mgC_hr_interp = approx(elapsed_minutes, CH4_flux_mgC_hr, xout = elapsed_minutes, rule = 2)$y) %>%
    group_by(CORE, WETTING, MOISTURE, STRUCTURE) %>%
    arrange(elapsed_minutes) %>%
    mutate(deltahrs = (elapsed_minutes - lag(elapsed_minutes)) / 60,
           CO2_flux_mgC = CO2_flux_mgC_hr_interp * deltahrs,
           cumCO2_flux_mgC = c(0, cumsum(CO2_flux_mgC[-1])),
           CH4_flux_mgC = CH4_flux_mgC_hr_interp * deltahrs,
           cumCH4_flux_mgC = c(0, cumsum(CH4_flux_mgC[-1]))
    ) %>%
    bind_rows(fd_notcum) %>%
    select(-CO2_flux_mgC, -CH4_flux_mgC, -STARTDATETIME, -deltahrs) %>%
    arrange(STARTDATE, CORE, WETTING, MOISTURE, STRUCTURE, elapsed_minutes)
#fluxdata <- fluxdata[complete.cases(fluxdata),]
save_data(fluxdata, scriptfolder=FALSE)
printlog("All done with", SCRIPTNAME)
print(sessionInfo())
sink() # close log
|
library(lpSolveAPI)
# Linear program: 5 rows (constraints) x 2 columns (decision variables x1, x2).
questionB4 <- make.lp(5,2)
# Constraint-matrix columns: coefficients of x1 and x2 in each of the 5 rows.
set.column(questionB4,1,c(1,1,1,0,0))
set.column(questionB4,2,c(1,0,0,1,1))
# Constraint directions and right-hand sides (row-wise).
set.constr.type(questionB4,c(">=",">=","<=",">=","<="))
set.rhs(questionB4,c(20,5,12,6,10))
# Objective: minimise x1 + x2.
set.objfn(questionB4,c(1,1))
lp.control(questionB4, sense="min")
questionB4
solve(questionB4)
# Report the optimum, the optimal variable values, the feasible region plot,
# and the RHS sensitivity (dual values / ranges).
get.objective(questionB4)
get.variables(questionB4)
plot(questionB4)
get.sensitivity.rhs(questionB4)
get.sensitivity.rhs(questionB4) | /questionB4.R | no_license | wilbertnw/UPH_OR_2018 | R | false | false | 436 | r | library(lpSolveAPI)
# NOTE(review): duplicate copy of the questionB4 script above (data-dump
# artifact); its library(lpSolveAPI) line was swallowed by the dump
# separator, so this copy does not run standalone. Kept as-is.
questionB4 <- make.lp(5,2)
set.column(questionB4,1,c(1,1,1,0,0))
set.column(questionB4,2,c(1,0,0,1,1))
set.constr.type(questionB4,c(">=",">=","<=",">=","<="))
set.rhs(questionB4,c(20,5,12,6,10))
set.objfn(questionB4,c(1,1))
lp.control(questionB4, sense="min")
questionB4
solve(questionB4)
get.objective(questionB4)
get.variables(questionB4)
plot(questionB4)
get.sensitivity.rhs(questionB4)
# Ratfor wrapper for sbart().
# Unpacks the packed integer (iparms, length 2) and real (parms, length 3)
# parameter vectors into the individual scalar arguments sbart() expects,
# and carves the single caller-supplied scratch array into sbart()'s
# separate work arrays.
subroutine qsbart(penalt,dofoff,xs,ys,ws,n,knot,nk,
	coef,sz,lev,
	crit,iparms,spar,parms,
	isetup,
	scrtch,
	ld4,ldnk,ier)
implicit double precision(a-h,o-z)
integer n,nk,isetup,iparms(2),ld4,ldnk,ier
double precision penalt,dofoff,xs(n),ys(n),ws(n),
	knot(nk+4),
	coef(nk),sz(n),lev(n),
	crit,spar,parms(3),
	scrtch(1)# dimension (9+2*ld4+nk)*nk of reals
# Scratch layout: offsets 0, nk, ..., 8*nk give nine length-nk work
# vectors; the chunks starting at 9*nk and 9*nk+ld4*nk are each ld4*nk
# long (presumably banded work matrices -- confirm against sbart()).
call sbart(penalt,dofoff,xs,ys,ws,n,knot,nk,
	coef,sz,lev,
	crit,iparms(1),spar,iparms(2),parms(1),parms(2),parms(3),
	isetup,
	scrtch(1),
	scrtch(nk+1),scrtch(2*nk+1),scrtch(3*nk+1),scrtch(4*nk+1),
	scrtch(5*nk+1),scrtch(6*nk+1),scrtch(7*nk+1),scrtch(8*nk+1),
	scrtch(9*nk+1),scrtch(9*nk+ld4*nk+1),scrtch(9*nk+2*ld4*nk+1),
	ld4,ldnk,ier)
return
end
| /src/qsbart.r | no_license | cran/ppr | R | false | false | 761 | r | subroutine qsbart(penalt,dofoff,xs,ys,ws,n,knot,nk,
coef,sz,lev,
crit,iparms,spar,parms,
isetup,
scrtch,
ld4,ldnk,ier)
implicit double precision(a-h,o-z)
integer n,nk,isetup,iparms(2),ld4,ldnk,ier
double precision penalt,dofoff,xs(n),ys(n),ws(n),
knot(nk+4),
coef(nk),sz(n),lev(n),
crit,spar,parms(3),
scrtch(1)# dimension (9+2*ld4+nk)*nk of reals
call sbart(penalt,dofoff,xs,ys,ws,n,knot,nk,
coef,sz,lev,
crit,iparms(1),spar,iparms(2),parms(1),parms(2),parms(3),
isetup,
scrtch(1),
scrtch(nk+1),scrtch(2*nk+1),scrtch(3*nk+1),scrtch(4*nk+1),
scrtch(5*nk+1),scrtch(6*nk+1),scrtch(7*nk+1),scrtch(8*nk+1),
scrtch(9*nk+1),scrtch(9*nk+ld4*nk+1),scrtch(9*nk+2*ld4*nk+1),
ld4,ldnk,ier)
return
end
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/coef_diff_by_group.R
\name{coef_diff_by_group}
\alias{coef_diff_by_group}
\alias{coef_diff_by_group.breathteststangroupfit}
\title{Tabulates breath test parameter differences of groups from Stan group fit}
\usage{
coef_diff_by_group.breathteststangroupfit(fit, mcp_group = NULL,
reference_group = NULL, ...)
}
\arguments{
\item{fit}{Object of class \code{breathteststangroupfit} from \code{\link[breathteststan]{stan_fit}}}
\item{mcp_group}{Not used, always all pairs are compared}
\item{reference_group}{Not used}
\item{...}{Not used}
}
\value{
A \code{tibble} of class \code{coef_diff_by_group_stan} with columns
\describe{
\item{parameter}{Parameter of fit, e.g. \code{beta, k, m, t50}}
\item{method}{Method used to compute parameter. \code{exp_beta} refers to primary
fit parameters \code{beta, k, m}.}
\item{groups}{Which pairwise difference, e.g \code{solid - liquid}}
\item{estimate}{Point estimate (chain mean) of the difference}
\item{cred.low, cred.high}{Lower and upper 95 percent credible interval of difference.}
}
The chains of pairwise differences are returned as a attribute \code{chain}
for use in plotting. See example below how to use these to display difference histograms.
}
\description{
Given a Stan fit with grouping of 13C breath test curves, computes point
estimates and Bayesian credible intervals for all pairwise group
differences, for example of the half emptying time \code{t50}.
}
\examples{
\donttest{
library(dplyr)
library(breathtestcore)
data("usz_13c", package = "breathtestcore")
data = usz_13c \%>\%
dplyr::filter( patient_id \%in\%
c("norm_001", "norm_002", "norm_003", "norm_004", "pat_001", "pat_002","pat_003")) \%>\%
cleanup_data()
fit = stan_group_fit(data, iter = 300, chains = 1) # Use more iterations!
cf = coef_diff_by_group(fit)
cc = attr(cf, "chain") \%>\%
filter(key == "t50_maes_ghoos", abs(diff) < 200) \%>\%
mutate(
groups = paste(group2, group1, sep = " - ")
)
str(cc)
if (require(ggplot2)) {
ggplot(cc, aes(x = diff)) + geom_histogram() + facet_wrap(~groups)
}
# For comparison
fit = nlme_fit(data)
coef_diff_by_group(fit)
}
}
| /man/coef_diff_by_group.Rd | no_license | bgoodri/breathteststan | R | false | true | 2,201 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/coef_diff_by_group.R
\name{coef_diff_by_group}
\alias{coef_diff_by_group}
\alias{coef_diff_by_group.breathteststangroupfit}
\title{Tabulates breath test parameter differences of groups from Stan group fit}
\usage{
coef_diff_by_group.breathteststangroupfit(fit, mcp_group = NULL,
reference_group = NULL, ...)
}
\arguments{
\item{fit}{Object of class \code{breathteststangroupfit} from \code{\link[breathteststan]{stan_fit}}}
\item{mcp_group}{Not used, always all pairs are compared}
\item{reference_group}{Not used}
\item{...}{Not used}
}
\value{
A \code{tibble} of class \code{coef_diff_by_group_stan} with columns
\describe{
\item{parameter}{Parameter of fit, e.g. \code{beta, k, m, t50}}
\item{method}{Method used to compute parameter. \code{exp_beta} refers to primary
fit parameters \code{beta, k, m}.}
\item{groups}{Which pairwise difference, e.g \code{solid - liquid}}
\item{estimate}{Point estimate (chain mean) of the difference}
\item{cred.low, cred.high}{Lower and upper 95 percent credible interval of difference.}
}
The chains of pairwise differences are returned as a attribute \code{chain}
for use in plotting. See example below how to use these to display difference histograms.
}
\description{
Given a Stan fit with grouping of 13C breath test curves, computes point
estimates and Bayesian credible intervals for all pairwise group
differences, for example of the half emptying time \code{t50}.
}
\examples{
\donttest{
library(dplyr)
library(breathtestcore)
data("usz_13c", package = "breathtestcore")
data = usz_13c \%>\%
dplyr::filter( patient_id \%in\%
c("norm_001", "norm_002", "norm_003", "norm_004", "pat_001", "pat_002","pat_003")) \%>\%
cleanup_data()
fit = stan_group_fit(data, iter = 300, chains = 1) # Use more iterations!
cf = coef_diff_by_group(fit)
cc = attr(cf, "chain") \%>\%
filter(key == "t50_maes_ghoos", abs(diff) < 200) \%>\%
mutate(
groups = paste(group2, group1, sep = " - ")
)
str(cc)
if (require(ggplot2)) {
ggplot(cc, aes(x = diff)) + geom_histogram() + facet_wrap(~groups)
}
# For comparison
fit = nlme_fit(data)
coef_diff_by_group(fit)
}
}
|
#######################
##Risk measure - DP
##Following Jiang et. al 2015
#######################
library(FactoMineR)
library(VGAM)
# Simulated one-factor data: numObs observations of up to numVar observed
# variables, all loading on a single latent factor x with intercept beta[1]
# and slope beta[2], plus unit Gaussian noise.
numObs = 100
numVar = 1000
beta = c(0.5, 3)
x = rnorm(numObs, mean = 2, sd = 1)
y = matrix(NA, nrow = numObs, ncol = numVar)
for(i in 1:ncol(y)){
    y[, i] = beta[1] + beta[2]*x + rnorm(numObs)
}
#####
## Laplace Simulation
#####
# Input-perturbation DP: add i.i.d. Laplace noise to every data cell, then
# compare the first principal component of the noisy data (resultsE) against
# the clean data (resultsR), across dimensionalities d and privacy budgets
# epsilon, averaging loadings over numSim noise draws.
d = c(2, 5, 10, 20, 30, 40, 50, 60, 70, 80, 90,
      99, 100, 101, 200, 300, 400, 500, 600, 700, 800, 900, 1000)
numSim = 100
resultsR = vector("list", length(d))
resultsE = vector("list", length(d))
for(a in 1:length(d)){
    DF = y[ , 1:d[a]]
    DFE = DF
    epsilon = seq(0.01, 1, 0.09)
    resultsR[[a]] = matrix(NA, nrow = d[a], ncol = length(epsilon))
    resultsE[[a]] = matrix(NA, nrow = d[a], ncol = length(epsilon))
    for(b in 1:length(epsilon)){
        loadsR = rep(NA, d[a])
        loadsE = rep(NA, d[a])
        # Laplace noise with scale 3*d/epsilon -- presumably calibrated to a
        # per-record sensitivity of 3 per attribute; confirm against Jiang
        # et al. 2015.
        for(i in 1:ncol(DF)){
            DFE[, i] = DF[, i] + rlaplace(numObs, 0, (3*d[a]) / epsilon[b] )
        }
        boo = PCA(DF, graph = F)
        foo = PCA(DFE, graph = F)
        # Correlations of each variable with dimension 1.
        # NOTE(review): loadsR uses proba = 0.05, so dimdesc may return
        # fewer than d[a] rows and the matrix-column assignment below would
        # then recycle with a warning -- confirm intended.
        loadsR = dimdesc(boo, axes = 1, proba = 0.05)$Dim.1$quanti[, 1]
        loadsE = dimdesc(foo, axes = 1, proba = 1)$Dim.1$quanti[, 1]
        # Running mean of noisy loadings over the remaining noise draws.
        for(c in 2:numSim){
            for(i in 1:ncol(DF)){
                DFE[, i] = DF[, i] + rlaplace(numObs, 0, (3*d[a]) / epsilon[b] )
            }
            foo = PCA(DFE, graph = F)
            loadsE = (loadsE * (c - 1) + dimdesc(foo, axes = 1, proba = 1)$Dim.1$quanti[, 1]) / c
        }
        resultsR[[a]][, b] = loadsR
        resultsE[[a]][, b] = loadsE
        cat(b)
    }
    cat(a, "\n")
}
#boxplot(c(resultsR[[c]]))
#boxplot(c(resultsE[[c]]))
# Mean variable-factor correlation vs epsilon, clean (top) and noisy
# (bottom), for the six largest dimensionalities.
par(mfcol = c(2, 6))
for(c in 17:22){
    plot(epsilon, colMeans(resultsR[[c]]), type = "b", main = paste("dim =", d[c]),
         ylab = "mean correlation with factor")
    plot(epsilon, colMeans(resultsE[[c]]), type = "b", main = paste("dim =", d[c]),
         ylab = "mean correlation with factor")
}
#factanal(DF, 1)
#factanal(DFE, 1)
#####
## Gaussian Simulation
#####
# Same design as the Laplace simulation above, but with Gaussian input noise
# of sd (10.5 * sqrt(d)) / epsilon -- presumably the (epsilon, delta)-DP
# Gaussian-mechanism calibration; confirm against Jiang et al. 2015.
d = c(2, 5, 10, 20, 30, 40, 50, 60, 70, 80, 90,
      99, 100, 101, 200, 300, 400, 500, 600, 700, 800, 900, 1000)
numSim = 100
resultsR = vector("list", length(d))
resultsE = vector("list", length(d))
for(a in 1:length(d)){
    DF = y[ , 1:d[a]]
    DFE = DF
    epsilon = seq(0.01, 1, 0.09)
    resultsR[[a]] = matrix(NA, nrow = d[a], ncol = length(epsilon))
    resultsE[[a]] = matrix(NA, nrow = d[a], ncol = length(epsilon))
    for(b in 1:length(epsilon)){
        loadsR = rep(NA, d[a])
        loadsE = rep(NA, d[a])
        for(i in 1:ncol(DF)){
            DFE[, i] = DF[, i] + rnorm(numObs, 0, (10.5 * sqrt(d[a])) / epsilon[b] )
        }
        boo = PCA(DF, graph = F)
        foo = PCA(DFE, graph = F)
        loadsR = dimdesc(boo, axes = 1, proba = 0.05)$Dim.1$quanti[, 1]
        loadsE = dimdesc(foo, axes = 1, proba = 1)$Dim.1$quanti[, 1]
        # Running mean of noisy loadings over the remaining noise draws.
        for(c in 2:numSim){
            for(i in 1:ncol(DF)){
                DFE[, i] = DF[, i] + rnorm(numObs, 0, (10.5 * sqrt(d[a])) / epsilon[b] )
            }
            foo = PCA(DFE, graph = F)
            loadsE = (loadsE * (c - 1) + dimdesc(foo, axes = 1, proba = 1)$Dim.1$quanti[, 1]) / c
        }
        resultsR[[a]][, b] = loadsR
        resultsE[[a]][, b] = loadsE
        cat(b)
    }
    cat(a, "\n")
}
# Mean variable-factor correlation vs epsilon for the five smallest
# dimensionalities, clean (top) vs noisy (bottom).
par(mfcol = c(2, 5))
for(c in 1:5){
    plot(epsilon, colMeans(resultsR[[c]]), type = "b", main = paste("dim =", d[c]),
         ylab = "mean correlation with factor")
    plot(epsilon, colMeans(resultsE[[c]]), type = "b", main = paste("dim =", d[c]),
         ylab = "mean correlation with factor")
}
#####
## Decomp Simulation
#####
# Output-perturbation DP: add Laplace noise to the top-2 left singular
# vectors and singular values, re-orthogonalise, and reconstruct a private
# data matrix; compare its PCA loadings against the clean data.
#
# BUGFIX: mvrnorm() lives in MASS, which was never attached in this script
# (only FactoMineR and VGAM are loaded above), so this section failed with
# "could not find function 'mvrnorm'" unless MASS happened to be loaded.
library(MASS)
# Two independent latent factors; each observed variable loads on one of
# them, chosen with probability 1/2.
x = mvrnorm(numObs, mu = rep(2, 2), Sigma = matrix(c(1, 0, 0, 1), nrow = 2) )
y = matrix(NA, nrow = numObs, ncol = numVar)
for(i in 1:ncol(y)){
    if(runif(1) > 0.5)
        y[, i] = beta[1] + beta[2]*x[, 1] + rnorm(numObs)
    else
        y[, i] = beta[2] + beta[1]*x[, 2] + rnorm(numObs)
}
d = c(2, 5, 10, 20, 30, 40, 50, 60, 70, 80, 90,
      99, 100, 101, 200, 300, 400, 500, 600, 700, 800, 900, 1000)
numSim = 10
resultsR = vector("list", length(d))
resultsE = vector("list", length(d))
for(a in 1:length(d)){
    DF = y[ , 1:d[a]]
    origSVD = svd(DF)
    # Laplace scales for the singular vectors (gsU) and values (gsLam);
    # gsU depends on the spectral gap between the top two singular values.
    gsU = c(sqrt(numObs) * 3 * sqrt(d[a]) / (origSVD$d[1] - origSVD$d[2]))
    gsLam = sqrt(1) * 3 * sqrt(d[a])
    epsilon = seq(0.01, 1, 0.09)
    resultsR[[a]] = matrix(NA, nrow = d[a], ncol = length(epsilon))
    resultsE[[a]] = matrix(NA, nrow = d[a], ncol = length(epsilon))
    for(b in 1:length(epsilon)){
        loadsR = rep(NA, d[a])
        loadsE = rep(NA, d[a])
        noiseU = matrix(NA, nrow = numObs, ncol = 2)
        noiseLam = rep(NA, 2)
        for(i in 1:2){
            noiseU[, i] = origSVD$u[, i, drop = F] + rlaplace(numObs, scale = gsU / epsilon[b] )
            noiseLam[i] = origSVD$d[i] + rlaplace(1, scale = gsLam / epsilon[b] )
        }
        # QR re-orthogonalisation of the noisy singular vectors.
        tmp = qr(noiseU)
        orthU = qr.Q(tmp, complete = FALSE)
        # NOTE(review): origSVD$v[1:2, ] takes the first two *rows* of V,
        # whereas the scratch code below uses t(origSVD$v[, 1:2])-style
        # column selection; this also breaks dimensionally when
        # d[a] > numObs. Left unchanged pending confirmation of intent.
        DFE = orthU %*% diag(noiseLam, 2) %*% origSVD$v[1:2, ]
        foo = PCA(DFE, graph = F)
        boo = PCA(DF, graph = F)
        # NOTE(review): loadsR/loadsE concatenate Dim.1 and Dim.2 loadings
        # (length 2*d[a]) but the results matrices have d[a] rows, so the
        # assignments below truncate with a recycling warning.
        loadsR = c(dimdesc(boo, axes = 1, proba = 1)$Dim.1$quanti[, 1],
                   dimdesc(boo, axes = 1:2, proba = 1)$Dim.2$quanti[, 1])
        loadsE = c(dimdesc(foo, axes = 1, proba = 1)$Dim.1$quanti[, 1],
                   dimdesc(foo, axes = 1:2, proba = 1)$Dim.2$quanti[, 1])
        # Running mean over the remaining noise draws.
        for(c in 2:numSim){
            noiseU = matrix(NA, nrow = numObs, ncol = 2)
            noiseLam = rep(NA, 2)
            for(i in 1:2){
                noiseU[, i] = origSVD$u[, i, drop = F] + rlaplace(numObs, scale = gsU / epsilon[b] )
                noiseLam[i] = origSVD$d[i] + rlaplace(1, scale = gsLam / epsilon[b] )
            }
            tmp = qr(noiseU)
            orthU = qr.Q(tmp, complete = FALSE)
            DFE = orthU %*% diag(noiseLam, 2) %*% origSVD$v[1:2, ]
            foo = PCA(DFE, graph = F)
            loadsE = (loadsE * (c - 1) + dimdesc(foo, axes = 1, proba = 1)$Dim.1$quanti[, 1]) / c
        }
        resultsR[[a]][, b] = loadsR
        resultsE[[a]][, b] = loadsE
        cat(b)
    }
    cat(a, "\n")
}
# Mean correlation vs epsilon for the six smallest dimensionalities,
# clean (top) vs decomposition-perturbed (bottom).
par(mfcol = c(2, 6))
for(c in 1:6){
    plot(epsilon, colMeans(resultsR[[c]]), type = "b", main = paste("dim =", d[c]),
         ylab = "mean correlation with factor")
    plot(epsilon, colMeans(resultsE[[c]]), type = "b", main = paste("dim =", d[c]),
         ylab = "mean correlation with factor")
}
#####
##
#####
# NOTE(review): exploratory scratch code below -- it references objects not
# defined in this file (XDF, x1), uses `d` as a scalar although it is a
# vector above, and uses C (line further down) before it is defined, so it
# cannot run top-to-bottom as written.
# Covariance-perturbation sketch: add a symmetric Laplace noise matrix to
# the empirical covariance, then re-decompose.
epsilon = 0.1
noiseL = matrix(NA, nrow = d, ncol = d)
noiseL[upper.tri(noiseL, diag = T)] = rlaplace( ((d^2 + d) / 2), 0, (2 * d) / (numObs*epsilon) )
# Mirror the upper triangle to make noiseL symmetric.
for(i in 1:d){
    for(j in 1:d){
        noiseL[j, i] = noiseL[i, j]
    }
}
noiseCOV = (t(XDF) %*% XDF) / numObs + noiseL
noiseDecomp = eigen(noiseCOV)
noiseX = origSVD$u %*% diag(noiseDecomp$values) %*% t(noiseDecomp$vectors)
#eigen(cov(XDF))
#eigen(noiseCOV)
origPCA = princomp(XDF)
cor(origPCA$scores[,1], x1)
## wishart method
# Perturb the covariance with a Wishart draw (scale matrix C defined below).
noiseCOV2 = rWishart(1, d, C)[, , 1] + cov(XDF)
noisePCA = princomp(covmat = noiseCOV2)
##### make orthogonal
# Build a 2x2 scale matrix C with equal eigenvalues setEign via a random
# orthogonal basis.
setEign = 3 / (2 * numObs * epsilon)
tmp <- rnorm(2)
tmp.qr <- qr(tmp)
tmp.complete <- qr.Q(tmp.qr, complete=TRUE)
C = tmp.complete %*% diag(x = setEign, 2) %*% t(tmp.complete)
#############
# SVD-perturbation sketch on scaled data: noise components 2:3, QR
# re-orthogonalisation, then compare noisy vs clean rank-2 reconstructions.
scaleData = scale(XDF)
origSVD = svd(scaleData)
gsU = c(sqrt(numObs) / abs((origSVD$d[1] - origSVD$d[2])))
gsLam = sqrt(2*ncol(scaleData))
epsilon = 1
noiseU = origSVD$u[, 2:3, drop = F] + rlaplace(numObs, scale = gsU / epsilon )
noiseLam = origSVD$d[2:3] + rlaplace(1, scale = gsLam / epsilon )
tmp = qr(noiseU)
orthU = qr.Q(tmp, complete=TRUE)[,1:2]
summary(orthU %*% diag(noiseLam, 2) %*% t(origSVD$v[, 2:3, drop = F]))
summary(origSVD$u[, 2:3] %*% diag(origSVD$d[2:3], 2) %*% t(origSVD$v[, 2:3]))
plot(orthU %*% diag(noiseLam, 2) %*% t(origSVD$v[, 2:3, drop = F]))
plot(origSVD$u[, 2:3] %*% diag(origSVD$d[2:3], 2) %*% t(origSVD$v[, 2:3]))
cor(orthU %*% diag(noiseLam, 2) %*% t(origSVD$v[, 2:3, drop = F]))
cor(origSVD$u[, 2:3] %*% diag(origSVD$d[2:3], 2) %*% t(origSVD$v[, 2:3]))
| /dpPCA.R | no_license | jsnoke/altman_HD | R | false | false | 8,274 | r | #######################
##Risk measure - DP
##Following Jiang et. al 2015
#######################
# NOTE(review): verbatim duplicate of the dpPCA.R script above (data-dump
# artifact); the leading header line was swallowed by the dump separator and
# the Decomp section is truncated below. Kept as-is.
library(FactoMineR)
library(VGAM)
numObs = 100
numVar = 1000
beta = c(0.5, 3)
x = rnorm(numObs, mean = 2, sd = 1)
y = matrix(NA, nrow = numObs, ncol = numVar)
for(i in 1:ncol(y)){
    y[, i] = beta[1] + beta[2]*x + rnorm(numObs)
}
#####
## Laplace Simulation
#####
d = c(2, 5, 10, 20, 30, 40, 50, 60, 70, 80, 90,
      99, 100, 101, 200, 300, 400, 500, 600, 700, 800, 900, 1000)
numSim = 100
resultsR = vector("list", length(d))
resultsE = vector("list", length(d))
for(a in 1:length(d)){
    DF = y[ , 1:d[a]]
    DFE = DF
    epsilon = seq(0.01, 1, 0.09)
    resultsR[[a]] = matrix(NA, nrow = d[a], ncol = length(epsilon))
    resultsE[[a]] = matrix(NA, nrow = d[a], ncol = length(epsilon))
    for(b in 1:length(epsilon)){
        loadsR = rep(NA, d[a])
        loadsE = rep(NA, d[a])
        for(i in 1:ncol(DF)){
            DFE[, i] = DF[, i] + rlaplace(numObs, 0, (3*d[a]) / epsilon[b] )
        }
        boo = PCA(DF, graph = F)
        foo = PCA(DFE, graph = F)
        loadsR = dimdesc(boo, axes = 1, proba = 0.05)$Dim.1$quanti[, 1]
        loadsE = dimdesc(foo, axes = 1, proba = 1)$Dim.1$quanti[, 1]
        for(c in 2:numSim){
            for(i in 1:ncol(DF)){
                DFE[, i] = DF[, i] + rlaplace(numObs, 0, (3*d[a]) / epsilon[b] )
            }
            foo = PCA(DFE, graph = F)
            loadsE = (loadsE * (c - 1) + dimdesc(foo, axes = 1, proba = 1)$Dim.1$quanti[, 1]) / c
        }
        resultsR[[a]][, b] = loadsR
        resultsE[[a]][, b] = loadsE
        cat(b)
    }
    cat(a, "\n")
}
#boxplot(c(resultsR[[c]]))
#boxplot(c(resultsE[[c]]))
par(mfcol = c(2, 6))
for(c in 17:22){
    plot(epsilon, colMeans(resultsR[[c]]), type = "b", main = paste("dim =", d[c]),
         ylab = "mean correlation with factor")
    plot(epsilon, colMeans(resultsE[[c]]), type = "b", main = paste("dim =", d[c]),
         ylab = "mean correlation with factor")
}
#factanal(DF, 1)
#factanal(DFE, 1)
#####
## Gaussian Simulation
#####
d = c(2, 5, 10, 20, 30, 40, 50, 60, 70, 80, 90,
      99, 100, 101, 200, 300, 400, 500, 600, 700, 800, 900, 1000)
numSim = 100
resultsR = vector("list", length(d))
resultsE = vector("list", length(d))
for(a in 1:length(d)){
    DF = y[ , 1:d[a]]
    DFE = DF
    epsilon = seq(0.01, 1, 0.09)
    resultsR[[a]] = matrix(NA, nrow = d[a], ncol = length(epsilon))
    resultsE[[a]] = matrix(NA, nrow = d[a], ncol = length(epsilon))
    for(b in 1:length(epsilon)){
        loadsR = rep(NA, d[a])
        loadsE = rep(NA, d[a])
        for(i in 1:ncol(DF)){
            DFE[, i] = DF[, i] + rnorm(numObs, 0, (10.5 * sqrt(d[a])) / epsilon[b] )
        }
        boo = PCA(DF, graph = F)
        foo = PCA(DFE, graph = F)
        loadsR = dimdesc(boo, axes = 1, proba = 0.05)$Dim.1$quanti[, 1]
        loadsE = dimdesc(foo, axes = 1, proba = 1)$Dim.1$quanti[, 1]
        for(c in 2:numSim){
            for(i in 1:ncol(DF)){
                DFE[, i] = DF[, i] + rnorm(numObs, 0, (10.5 * sqrt(d[a])) / epsilon[b] )
            }
            foo = PCA(DFE, graph = F)
            loadsE = (loadsE * (c - 1) + dimdesc(foo, axes = 1, proba = 1)$Dim.1$quanti[, 1]) / c
        }
        resultsR[[a]][, b] = loadsR
        resultsE[[a]][, b] = loadsE
        cat(b)
    }
    cat(a, "\n")
}
par(mfcol = c(2, 5))
for(c in 1:5){
    plot(epsilon, colMeans(resultsR[[c]]), type = "b", main = paste("dim =", d[c]),
         ylab = "mean correlation with factor")
    plot(epsilon, colMeans(resultsE[[c]]), type = "b", main = paste("dim =", d[c]),
         ylab = "mean correlation with factor")
}
#####
## Decomp Simulation
#####
# NOTE(review): as in the main copy above, mvrnorm() requires MASS, which
# is not attached in this script.
x = mvrnorm(numObs, mu = rep(2, 2), Sigma = matrix(c(1, 0, 0, 1), nrow = 2) )
y = matrix(NA, nrow = numObs, ncol = numVar)
for(i in 1:ncol(y)){
    if(runif(1) > 0.5)
        y[, i] = beta[1] + beta[2]*x[, 1] + rnorm(numObs)
    else
        y[, i] = beta[2] + beta[1]*x[, 2] + rnorm(numObs)
}
d = c(2, 5, 10, 20, 30, 40, 50, 60, 70, 80, 90,
      99, 100, 101, 200, 300, 400, 500, 600, 700, 800, 900, 1000)
numSim = 10
resultsR = vector("list", length(d))
resultsE = vector("list", length(d))
for(a in 1:length(d)){
DF = y[ , 1:d[a]]
origSVD = svd(DF)
gsU = c(sqrt(numObs) * 3 * sqrt(d[a]) / (origSVD$d[1] - origSVD$d[2]))
gsLam = sqrt(1) * 3 * sqrt(d[a])
epsilon = seq(0.01, 1, 0.09)
resultsR[[a]] = matrix(NA, nrow = d[a], ncol = length(epsilon))
resultsE[[a]] = matrix(NA, nrow = d[a], ncol = length(epsilon))
for(b in 1:length(epsilon)){
loadsR = rep(NA, d[a])
loadsE = rep(NA, d[a])
noiseU = matrix(NA, nrow = numObs, ncol = 2)
noiseLam = rep(NA, 2)
for(i in 1:2){
noiseU[, i] = origSVD$u[, i, drop = F] + rlaplace(numObs, scale = gsU / epsilon[b] )
noiseLam[i] = origSVD$d[i] + rlaplace(1, scale = gsLam / epsilon[b] )
}
tmp = qr(noiseU)
orthU = qr.Q(tmp, complete = FALSE)
DFE = orthU %*% diag(noiseLam, 2) %*% origSVD$v[1:2, ]
foo = PCA(DFE, graph = F)
boo = PCA(DF, graph = F)
loadsR = c(dimdesc(boo, axes = 1, proba = 1)$Dim.1$quanti[, 1],
dimdesc(boo, axes = 1:2, proba = 1)$Dim.2$quanti[, 1])
loadsE = c(dimdesc(foo, axes = 1, proba = 1)$Dim.1$quanti[, 1],
dimdesc(foo, axes = 1:2, proba = 1)$Dim.2$quanti[, 1])
for(c in 2:numSim){
noiseU = matrix(NA, nrow = numObs, ncol = 2)
noiseLam = rep(NA, 2)
for(i in 1:2){
noiseU[, i] = origSVD$u[, i, drop = F] + rlaplace(numObs, scale = gsU / epsilon[b] )
noiseLam[i] = origSVD$d[i] + rlaplace(1, scale = gsLam / epsilon[b] )
}
tmp = qr(noiseU)
orthU = qr.Q(tmp, complete = FALSE)
DFE = orthU %*% diag(noiseLam, 2) %*% origSVD$v[1:2, ]
foo = PCA(DFE, graph = F)
loadsE = (loadsE * (c - 1) + dimdesc(foo, axes = 1, proba = 1)$Dim.1$quanti[, 1]) / c
}
resultsR[[a]][, b] = loadsR
resultsE[[a]][, b] = loadsE
cat(b)
}
cat(a, "\n")
}
par(mfcol = c(2, 6))
for(c in 1:6){
plot(epsilon, colMeans(resultsR[[c]]), type = "b", main = paste("dim =", d[c]),
ylab = "mean correlation with factor")
plot(epsilon, colMeans(resultsE[[c]]), type = "b", main = paste("dim =", d[c]),
ylab = "mean correlation with factor")
}
#####
##
#####
epsilon = 0.1
noiseL = matrix(NA, nrow = d, ncol = d)
noiseL[upper.tri(noiseL, diag = T)] = rlaplace( ((d^2 + d) / 2), 0, (2 * d) / (numObs*epsilon) )
for(i in 1:d){
for(j in 1:d){
noiseL[j, i] = noiseL[i, j]
}
}
noiseCOV = (t(XDF) %*% XDF) / numObs + noiseL
noiseDecomp = eigen(noiseCOV)
noiseX = origSVD$u %*% diag(noiseDecomp$values) %*% t(noiseDecomp$vectors)
#eigen(cov(XDF))
#eigen(noiseCOV)
origPCA = princomp(XDF)
cor(origPCA$scores[,1], x1)
## wishart method
noiseCOV2 = rWishart(1, d, C)[, , 1] + cov(XDF)
noisePCA = princomp(covmat = noiseCOV2)
##### make orthogonal
setEign = 3 / (2 * numObs * epsilon)
tmp <- rnorm(2)
tmp.qr <- qr(tmp)
tmp.complete <- qr.Q(tmp.qr, complete=TRUE)
C = tmp.complete %*% diag(x = setEign, 2) %*% t(tmp.complete)
#############
scaleData = scale(XDF)
origSVD = svd(scaleData)
gsU = c(sqrt(numObs) / abs((origSVD$d[1] - origSVD$d[2])))
gsLam = sqrt(2*ncol(scaleData))
epsilon = 1
noiseU = origSVD$u[, 2:3, drop = F] + rlaplace(numObs, scale = gsU / epsilon )
noiseLam = origSVD$d[2:3] + rlaplace(1, scale = gsLam / epsilon )
tmp = qr(noiseU)
orthU = qr.Q(tmp, complete=TRUE)[,1:2]
summary(orthU %*% diag(noiseLam, 2) %*% t(origSVD$v[, 2:3, drop = F]))
summary(origSVD$u[, 2:3] %*% diag(origSVD$d[2:3], 2) %*% t(origSVD$v[, 2:3]))
plot(orthU %*% diag(noiseLam, 2) %*% t(origSVD$v[, 2:3, drop = F]))
plot(origSVD$u[, 2:3] %*% diag(origSVD$d[2:3], 2) %*% t(origSVD$v[, 2:3]))
cor(orthU %*% diag(noiseLam, 2) %*% t(origSVD$v[, 2:3, drop = F]))
cor(origSVD$u[, 2:3] %*% diag(origSVD$d[2:3], 2) %*% t(origSVD$v[, 2:3]))
|
\name{ScoresDIA}
\alias{ScoresDIA}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Statistical analysis for a pair of peak grouped metabolites from LC-MS/MS DIA analysis.
}
\description{
Peak-to-peak Pearson correlation coefficient, peak-to-peak shape ratio and product/precursor ion intensity ratios are calculated for product and precursor metabolites from an LC-MS/MS DIA experiment.
}
\usage{
ScoresDIA(input,file,ID1,ID2,CE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{input}{
Peak grouped for a particular metabolite obtained with the \link[MetaboList]{PeakGroup}.
}
\item{file}{
LC-MS/MS DIA file processed by the \link[MetaboList]{AIF}.
}
\item{ID1}{
PeakID of the precursor ion metabolite.}
\item{ID2}{
PeakID of the product ion metabolite.}
\item{CE}{
numeric. Collision energy for the file processed.}
}
\value{
\item{Score}{Peak-to-peak Pearson correlation coefficient for a pair of EIC peaks.}
\item{IntensityRatio}{ Peak intensity ratio between product and precursor ion metabolite.}
\item{AssymetriRatio}{Score for the chromatogram peak shape based on the asymmetry factor.}
}
\author{Manuel D Peris Diaz}
\references{
1. R-MetaboList: a flexible tool for metabolite extraction from high-resolution data-independent acquisition mass spectrometry analysis. Metabolites. Soon
2. A Survey of Orbitrap All Ion Fragmentation Analysis Assessed by an R MetaboList Package to Study Small-Molecule Metabolites. Chromatographia. 2018, 81, 981-994.
}
\examples{
library(MetaboList)
#CE.isolation("AIFpos1000-AIF.mzXML","fileposB")
#Reading the database.csv file:
# database<- read.csv("C:/database.csv")
#Processing peak-picking and annotation with default parameters
#aif5<-AIF(fileMS,fileMS2CE5,database,CE=5, ion_mode = "positive")
#aif10<-AIF(fileMS,fileMS2CE10,database,CE=10, ion_mode = "positive")
#Peakgroup<-PeakGroup(aif5,aif10)
#Scores5<-ScoresDIA(Peakgroup$Glutamine,aif5,ID1=90, ID2 = 95,CE=5)
}
| /man/ScoresDIA.rd | no_license | cran/MetaboList | R | false | false | 2,076 | rd | \name{ScoresDIA}
\alias{ScoresDIA}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Statistical analysis for a pair of peak grouped metabolites from LC-MS/MS DIA analysis.
}
\description{
Peak-to-peak Pearson correlation coefficient, peak-to-peak shape ratio and product/precursor ion intensity ratios are calculated for product and precursor metabolites from an LC-MS/MS DIA experiment.
}
\usage{
ScoresDIA(input,file,ID1,ID2,CE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{input}{
Peak grouped for a particular metabolite obtained with the \link[MetaboList]{PeakGroup}.
}
\item{file}{
LC-MS/MS DIA file processed by the \link[MetaboList]{AIF}.
}
\item{ID1}{
PeakID of the precursor ion metabolite.}
\item{ID2}{
PeakID of the product ion metabolite.}
\item{CE}{
numeric. Collision energy for the file processed.}
}
\value{
\item{Score}{Peak-to-peak Pearson correlation coefficient for a pair of EIC peaks.}
\item{IntensityRatio}{ Peak intensity ratio between product and precursor ion metabolite.}
\item{AssymetriRatio}{Score for the chromatogram peak shape based on the asymmetry factor.}
}
\author{Manuel D Peris Diaz}
\references{
1. R-MetaboList: a flexible tool for metabolite extraction from high-resolution data-independent acquisition mass spectrometry analysis. Metabolites. Soon
2. A Survey of Orbitrap All Ion Fragmentation Analysis Assessed by an R MetaboList Package to Study Small-Molecule Metabolites. Chromatographia. 2018, 81, 981-994.
}
\examples{
library(MetaboList)
#CE.isolation("AIFpos1000-AIF.mzXML","fileposB")
#Reading the database.csv file:
# database<- read.csv("C:/database.csv")
#Processing peak-picking and annotation with default parameters
#aif5<-AIF(fileMS,fileMS2CE5,database,CE=5, ion_mode = "positive")
#aif10<-AIF(fileMS,fileMS2CE10,database,CE=10, ion_mode = "positive")
#Peakgroup<-PeakGroup(aif5,aif10)
#Scores5<-ScoresDIA(Peakgroup$Glutamine,aif5,ID1=90, ID2 = 95,CE=5)
}
|
############################################################################
## 1. makeCacheMatrix: This function creates a special "matrix" object ##
## that can cache its inverse. ##
## 2. cacheSolve: This function computes the inverse of the special ##
## "matrix" returned by makeCacheMatrix above. If the inverse has already ##
## been calculated (and the matrix has not changed), then the cachesolve ##
## should retrieve the inverse from the cache. ##
############################################################################
## Following two functions are used to cache the inverse of a matrix.
## Create a "cache matrix": a wrapper around a matrix that can memoise its
## inverse. Returns a list of four accessor closures which all share the
## matrix `x` and the cached inverse via their enclosing environment.
makeCacheMatrix <- function(x = matrix()) {
        ## Cached inverse; NULL means "not computed yet".
        cache <- NULL
        ## Replace the stored matrix; any cached inverse becomes stale,
        ## so flush it.
        setMatrix <- function(newvalue) {
                x <<- newvalue
                cache <<- NULL
        }
        ## Accessor for the stored matrix.
        getMatrix <- function() x
        ## Store a computed inverse in the cache.
        setInverse <- function(auxiliar) cache <<- auxiliar
        ## Accessor for the cached inverse (NULL when not yet set).
        getInverse <- function() cache
        ## Expose the four closures as a named list.
        list(setMatrix = setMatrix, getMatrix = getMatrix,
             setInverse = setInverse,
             getInverse = getInverse)
}
## Compute the inverse of the special matrix returned by "makeCacheMatrix"
## This function assumes that the matrix is always invertible.
## Return the inverse of the special "matrix" object produced by
## makeCacheMatrix(). The inverse is computed with solve() on the first
## call and served from the cache on subsequent calls. Extra arguments
## are forwarded to solve(). Assumes the stored matrix is invertible.
cacheSolve <- function(y, ...) {
        cached <- y$getInverse()
        ## Cache hit: announce it and return early.
        if (!is.null(cached)) {
                message("getting cached inverse")
                return(cached)
        }
        ## Cache miss: fetch the matrix, invert it, and remember the result.
        result <- solve(y$getMatrix(), ...)
        y$setInverse(result)
        result
}
## Sample run:
## > source("CacheMatrix.R")
## > x = rbind(c(1,2),c(3,4))
## > m = makeCacheMatrix(x)
## > m$getMatrix()
## [,1] [,2]
## [1,] 1 2
## [2,] 3 4
## > cacheSolve(m)
## [,1] [,2]
## [1,] -2.0 1.0
## [2,] 1.5 -0.5
| /cachematrix.R | no_license | M0nd4/ProgrammingAssignment2 | R | false | false | 2,685 | r | ############################################################################
## 1. makeCacheMatrix: This function creates a special "matrix" object ##
## that can cache its inverse. ##
## 2. cacheSolve: This function computes the inverse of the special ##
## "matrix" returned by makeCacheMatrix above. If the inverse has already ##
## been calculated (and the matrix has not changed), then the cachesolve ##
## should retrieve the inverse from the cache. ##
############################################################################
## Following two functions are used to cache the inverse of a matrix.
## Create a "cache matrix" object: stores a matrix plus a cached inverse,
## exposed through four closures that share state via <<-.
makeCacheMatrix <- function(x = matrix())
{
        ## Initialize the inverse matrix value (NULL = not yet computed)
        cache <- NULL
        ## Set the value of the matrix
        setMatrix <- function(newvalue)
        {
                x <<- newvalue
                ## Since the matrix is assigned a new value, flush the cache
                cache <<- NULL
        }
        ## getMatrix() returns the stored matrix encapsulated by the object
        getMatrix <- function()
        {
                ## Return the matrix
                x
        }
        ## setInverse() saves the inverse of the matrix into the cache
        setInverse <- function(auxiliar)
        {
                cache <<- auxiliar
        }
        ## getInverse() returns the cached inverse (NULL if never set)
        getInverse <- function()
        {
                cache ## return the inverse property
        }
        ## Return a list of all the above functions. Each named element of
        ## the list is a function
        list(setMatrix = setMatrix, getMatrix = getMatrix,
             setInverse = setInverse,
             getInverse = getInverse)
}
## Compute the inverse of the special matrix returned by "makeCacheMatrix"
## This function assumes that the matrix is always invertible.
## Compute the inverse of the special matrix returned by "makeCacheMatrix",
## reusing the cached result when available; "..." is passed on to solve().
## This function assumes that the matrix is always invertible.
cacheSolve <- function(y, ...)
{
        ## Return the inverse from the cache if it has been cached already
        inverse <- y$getInverse()
        ## Just return the inverse if its already set
        if(!is.null(inverse))
        {
                message("getting cached inverse")
                return(inverse)
        }
        ## Cache miss: we first get the matrix
        data <- y$getMatrix()
        ## And calculate the inverse
        inverse <- solve(data, ...)
        ## Cache the inverse of the matrix for later calls
        y$setInverse(inverse)
        ## Return the result
        inverse
}
## Sample run:
## > source("CacheMatrix.R")
## > x = rbind(c(1,2),c(3,4))
## > m = makeCacheMatrix(x)
## > m$getMatrix()
## [,1] [,2]
## [1,] 1 2
## [2,] 3 4
## > cacheSolve(m)
## [,1] [,2]
## [1,] -2.0 1.0
## [2,] 1.5 -0.5
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rcppPointer.R
\name{rcppValue}
\alias{rcppValue}
\title{rcpp Value}
\usage{
rcppValue(vectorSize)
}
\arguments{
\item{vectorSize}{integer}
}
\description{
function to update a logical vector
}
| /man/rcppValue.Rd | no_license | SymbolixAU/rcppTests | R | false | true | 272 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rcppPointer.R
\name{rcppValue}
\alias{rcppValue}
\title{rcpp Value}
\usage{
rcppValue(vectorSize)
}
\arguments{
\item{vectorSize}{integer}
}
\description{
function to update a logical vector
}
|
\name{retriever-package}
\alias{retriever-package}
\alias{retriever}
\docType{package}
\title{
\packageTitle{retriever}
}
\description{
\packageDescription{retriever}
}
\details{
The DESCRIPTION file:
\packageDESCRIPTION{retriever}
\packageIndices{retriever}
}
\author{
\packageAuthor{retriever}
Maintainer: \packageMaintainer{retriever}
}
\keyword{ package }
| /retriever/man/retriever-package.Rd | no_license | thiph169/ARP-lab-5 | R | false | false | 361 | rd | \name{retriever-package}
\alias{retriever-package}
\alias{retriever}
\docType{package}
\title{
\packageTitle{retriever}
}
\description{
\packageDescription{retriever}
}
\details{
The DESCRIPTION file:
\packageDESCRIPTION{retriever}
\packageIndices{retriever}
}
\author{
\packageAuthor{retriever}
Maintainer: \packageMaintainer{retriever}
}
\keyword{ package }
|
# One-off setup: install devtools and the ggbiplot package from GitHub.
install.packages("devtools")
library(devtools)
install_github("vqv/ggbiplot")
# Read the RPM count table: first column used as row names, samples in columns.
data <- read.table("rpm.counts.txt",header = TRUE, row.names = 1)
# Coerce the data frame to a numeric matrix for PCA.
Mymatrix <- data.matrix(data)
# Transpose so that samples become rows (observations for prcomp).
TransposedMatrix <- t(Mymatrix)
# Run PCA on the transposed matrix.
# NOTE(review): prcomp is called without scale.=TRUE here, although the
# original comment suggests scaling is usually wanted — confirm intent.
results_pca <- prcomp(TransposedMatrix)
names <- rownames(TransposedMatrix)
# Load the plotting package installed above.
library(ggbiplot)
# Open a JPEG device so the biplot is written to PCA.jpg.
jpeg("PCA.jpg",quality=100,width = 800,height = 800)
# Draw the PCA biplot, labelled and grouped by sample name, no variable arrows.
pca<-ggbiplot(results_pca,groups=names,labels = names,var.axes = FALSE)
dev.off()
ggbiplot(results_pca)
| /pca.R | no_license | roisinks/Dissertation | R | false | false | 663 | r | #install packages
install.packages("devtools")
library(devtools)
install_github("vqv/ggbiplot")
#read in data
data <- read.table("rpm.counts.txt",header = TRUE, row.names = 1)
#create a data matrix
Mymatrix <- data.matrix(data)
#transpose the data matrix
TransposedMatrix <- t(Mymatrix)
#prcomp takes data as input, usually has SCALE=TRUE
results_pca <- prcomp(TransposedMatrix)
names <- rownames(TransposedMatrix)
#load function
library(ggbiplot)
#image compression to produce a pca jpeg
jpeg("PCA.jpg",quality=100,width = 800,height = 800)
#plot the pca
pca<-ggbiplot(results_pca,groups=names,labels = names,var.axes = FALSE)
dev.off()
ggbiplot(results_pca)
|
fix_reference <- function(ref_path = "docs/reference/", is_test = FALSE) {
  # Rename generated reference pages from *.rd to *.html and rewrite the
  # links in index.html to match.
  #
  # ref_path: directory containing index.html and the generated .rd pages.
  # is_test:  when TRUE, seed the directory with a minimal fixture first.
  if(is_test) {
    writeLines("test", file.path(ref_path, "index.html"))
    writeLines("reference", file.path(ref_path, "ref.rd"))
  }
  # Anchor the pattern so only names *ending* in ".rd" are picked up;
  # the previous unanchored "rd" also matched e.g. "wizard.html".
  rds <- list.files(ref_path, pattern = "\\.rd$")
  # Replace the ".rd" extension instead of blindly chopping two characters.
  new_html <- sub("\\.rd$", ".html", rds)
  file.rename(file.path(ref_path, rds), file.path(ref_path, new_html))
  # Rewrite any ".rd" links inside index.html to point at the renamed pages.
  index_file <- file.path(ref_path, "index.html")
  index <- readLines(index_file)
  index <- gsub("\\.rd", ".html", index)
  writeLines(index, index_file)
}
# Regenerate the package's lazy-data plumbing from the YAML specs:
#  * writes `script_path` (data/data.R) with one delayedAssign() per spec,
#    so each dataset is built on first access by eval(parse())-ing a script
#    shipped under inst/scripts;
#  * rebuilds `script_target` (inst/scripts) with one <name>.txt per spec,
#    containing the translate() helper code plus a translate('<spec>') call.
# When is_test = TRUE the helper source R/translate.R is not read.
data_script <- function(script_path = "data/data.R",
                        script_target = "inst/scripts",
                        spec_path = "inst/specs",
                        is_test = FALSE) {
  specs <- list.files(spec_path)
  # Parse every spec file; each yields the dataset name ($df$name) and the
  # (possibly translated) usage name ($help$usage).
  asp <- lapply(file.path(spec_path, specs), yaml::read_yaml)
  anm <- as.character(lapply(asp, function(x) x$df$name))
  anm_tr <- as.character(lapply(asp, function(x) x$help$usage))
  # Build one delayedAssign() call per dataset. NOTE(review): this relies on
  # eval(parse()) of shipped script files at load time by design.
  code <- lapply(
    seq_along(anm),
    function(x)
      paste0(
        "delayedAssign('", anm_tr[x], "',
        eval(parse(file.path(system.file('scripts','",
        anm[x], ".txt', package = 'veriler')))))"
      ))
  code <- as.character(code)
  # Overwrite the generated data/data.R from scratch.
  if (file.exists(script_path)) unlink(script_path, force = TRUE)
  writeLines(code, script_path)
  # Recreate inst/scripts empty, then fill it below.
  unlink(script_target, recursive = TRUE)
  dir.create(script_target)
  script <- ""
  # Outside of tests, prepend the full translate() implementation so each
  # generated script is self-contained.
  script <- if(! is_test)readLines("R/translate.R")
  lapply(
    seq_along(anm),
    function(x)
      writeLines(
        c(script, paste0("translate('", specs[x], "')"), ""),
        con = file.path(script_target, paste0(anm[x], ".txt"))
      ))
}
| /R/utils.R | permissive | botan/veriler | R | false | false | 1,696 | r | fix_reference <- function(ref_path = "docs/reference/", is_test = FALSE) {
if(is_test) {
writeLines("test", file.path(ref_path, "index.html"))
writeLines("reference", file.path(ref_path, "ref.rd"))
}
rds <- list.files(ref_path, pattern = "rd")
new_html <- paste0(substr(rds, 1, nchar(rds) - 2), "html")
rds <- paste0(ref_path, "/", rds)
new_html <- paste0(ref_path, "/", new_html)
file.rename(rds, new_html)
index_file <- file.path(ref_path, "index.html")
index <- readLines(index_file)
index <- gsub("\\.rd", ".html", index)
writeLines(index, index_file)
}
data_script <- function(script_path = "data/data.R",
script_target = "inst/scripts",
spec_path = "inst/specs",
is_test = FALSE) {
specs <- list.files(spec_path)
asp <- lapply(file.path(spec_path, specs), yaml::read_yaml)
anm <- as.character(lapply(asp, function(x) x$df$name))
anm_tr <- as.character(lapply(asp, function(x) x$help$usage))
code <- lapply(
seq_along(anm),
function(x)
paste0(
"delayedAssign('", anm_tr[x], "',
eval(parse(file.path(system.file('scripts','",
anm[x], ".txt', package = 'veriler')))))"
))
code <- as.character(code)
if (file.exists(script_path)) unlink(script_path, force = TRUE)
writeLines(code, script_path)
unlink(script_target, recursive = TRUE)
dir.create(script_target)
script <- ""
script <- if(! is_test)readLines("R/translate.R")
lapply(
seq_along(anm),
function(x)
writeLines(
c(script, paste0("translate('", specs[x], "')"), ""),
con = file.path(script_target, paste0(anm[x], ".txt"))
))
}
|
## Places to eat
Shanghai
## Cities to visit
Hangzhou
| /GuiyuanLei/Christmas.Rd | no_license | githubteacher/welwyn-dec-2016 | R | false | false | 55 | rd | ## Places to eat
Shanghai
## Cities to visit
Hangzhou
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CombineCollinearRowsCols.R
\name{CombineCollinearRowsCols}
\alias{CombineCollinearRowsCols}
\title{Removes rows and columns of zeros and optionnally, row or column duplicates}
\usage{
CombineCollinearRowsCols(Y, rows = F, cols = F)
}
\arguments{
\item{Y}{A matrix or an object that can be coerced to a matrix}
\item{rows}{Logical: Will duplicate rows be removed?}
\item{cols}{Logical: Will duplicate columns be removed?}
}
\value{
A matrix with rows and columns removed as requested
}
\description{
Removes rows and columns of zeros and optionnally, row or column duplicates
}
\details{
Rows and columns of zeros will be removed.
A matrix of zeros will be returned as matrix with 0 row and 0 column.
If rows 1,2,3 are combined, the name of row 1 is kept. Similarly for columns.
}
\examples{
CombineCollinearRowsCols(matrix(1:3,nrow=3,ncol=2),cols=TRUE)
CombineCollinearRowsCols(cbind(matrix(1:3,nrow=3,ncol=2),rep(0,3)),cols=TRUE)
CombineCollinearRowsCols(cbind(matrix(1:3,nrow=3,ncol=2),rep(0,3)))
CombineCollinearRowsCols(matrix(0,nrow=3,ncol=3))
CombineCollinearRowsCols(rodent,TRUE,FALSE)
}
| /man/CombineCollinearRowsCols.Rd | no_license | cran/TaxicabCA | R | false | true | 1,182 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CombineCollinearRowsCols.R
\name{CombineCollinearRowsCols}
\alias{CombineCollinearRowsCols}
\title{Removes rows and columns of zeros and optionnally, row or column duplicates}
\usage{
CombineCollinearRowsCols(Y, rows = F, cols = F)
}
\arguments{
\item{Y}{A matrix or an object that can be coerced to a matrix}
\item{rows}{Logical: Will duplicate rows be removed?}
\item{cols}{Logical: Will duplicate columns be removed?}
}
\value{
A matrix with rows and columns removed as requested
}
\description{
Removes rows and columns of zeros and optionnally, row or column duplicates
}
\details{
Rows and columns of zeros will be removed.
A matrix of zeros will be returned as matrix with 0 row and 0 column.
If rows 1,2,3 are combined, the name of row 1 is kept. Similarly for columns.
}
\examples{
CombineCollinearRowsCols(matrix(1:3,nrow=3,ncol=2),cols=TRUE)
CombineCollinearRowsCols(cbind(matrix(1:3,nrow=3,ncol=2),rep(0,3)),cols=TRUE)
CombineCollinearRowsCols(cbind(matrix(1:3,nrow=3,ncol=2),rep(0,3)))
CombineCollinearRowsCols(matrix(0,nrow=3,ncol=3))
CombineCollinearRowsCols(rodent,TRUE,FALSE)
}
|
# Brass-band ranking: read the results list, score each band, and export
# the ranking as CSV and JSON (paths relative to the project root).
library(jsonlite)
liste <- read.csv("liste.csv", stringsAsFactors = FALSE, fileEncoding="UTF-8")
# All distinct band names (column 1), alphabetically sorted.
band <- sort(unique(liste[,1]))
ant.band <- length(band)
# Two-digit current year, used below to down-weight older results.
dato <- Sys.Date()
dato <- as.integer(substring(dato,3,4))
## Algorithm for scoring a single result row
## Siddis points count 80% of NM (national championship) points
poeng <- function(rad) {
  # Score one (or several, vectorised) result rows of `liste`.
  # Columns used: 2 = year, 3 = division, 4 = competition, 5 = placement.
  # Competition weight: Siddis counts 80% of NM; anything else scores zero.
  weight <- ifelse(liste[rad, 4] == "Siddis", 0.8,
                   ifelse(liste[rad, 4] == "NM", 1, 0))
  recency <- (liste[rad, 2]^4) / (dato^4)
  division <- 11 - (2 * liste[rad, 3])
  placement <- 22 - liste[rad, 5]
  recency * division * placement * weight
}
## Algorithm for computing the total score of one band
score <- function(navn) {
  # Total score for one band: sum poeng() over every result row whose
  # first column matches the band name. Row names of the subset are the
  # original (integer) row indices of `liste`.
  hits <- as.integer(row.names(subset(liste, liste[, 1] == navn)))
  sum(poeng(hits))
}
## Build the ranking list: one row per band with its name and total score.
rank <- data.frame()
for (i in 1:ant.band) {
  musikklag <- band[i]
  res <- score(band[i])
  korps <- t(data.frame(c(musikklag, res)))
  ## NOTE(review): rbind in a loop grows the frame quadratically — fine for
  ## a handful of bands, consider preallocating if the list grows.
  rank <- rbind(rank, korps)
}
## Tidy the ranking list: numeric rounded scores, sorted descending,
## and a 1-based rank column in front.
row.names(rank) <- NULL
rank[,2] <- round(as.numeric(as.character(rank[,2])), digits=2)
rank[,1] <- as.character(rank[,1])
rank <- rank[order(-rank[,2]),]
rank[,3] <- 1:ant.band
rank <- data.frame(rank[,3], rank[,1], rank[,2])
names(rank) <- c("plass", "korps", "poeng")
## Save the ranking list as CSV (columns: place / band / points).
write.csv(rank, "ranking/ranking.csv", row.names=FALSE, fileEncoding="UTF-8")
## Also serialise the ranking to pretty-printed JSON.
rank.js <- toJSON(rank)
rank.js <- prettify(rank.js)
write_json(rank.js, "ranking/rank.js", fileEncoding="UTF-8")
library(jsonlite)
liste <- read.csv("liste.csv", stringsAsFactors = FALSE, fileEncoding="UTF-8")
band <- sort(unique(liste[,1]))
ant.band <- length(band)
dato <- Sys.Date()
dato <- as.integer(substring(dato,3,4))
##Algoritme for en enkelt poengberegning
##Siddis-poeng er 80% av NM-poeng
poeng <- function(rad){
konk <- ifelse(liste[rad,4] == "NM", 1, ifelse(liste[rad,4] == "Siddis", 0.8, 0))
(liste[rad,2]^4) / (dato^4) * (11-(2*liste[rad,3])) * (22 - liste[rad,5]) * konk
}
##Algoritme for poengberegning av ett korps
score <- function(navn) {
rows <- subset(liste, liste[,1] == navn)
rows <- as.integer(row.names(rows))
res <- sum(poeng(rows))
return(res)
}
## Lager rankinglisten
rank <- data.frame()
for (i in 1:ant.band) {
musikklag <- band[i]
res <- score(band[i])
korps <- t(data.frame(c(musikklag, res)))
rank <- rbind(rank, korps)
}
## Fikser rankinglisten
row.names(rank) <- NULL
rank[,2] <- round(as.numeric(as.character(rank[,2])), digits=2)
rank[,1] <- as.character(rank[,1])
rank <- rank[order(-rank[,2]),]
rank[,3] <- 1:ant.band
rank <- data.frame(rank[,3], rank[,1], rank[,2])
names(rank) <- c("plass", "korps", "poeng")
##Lagrer rankinglisten
write.csv(rank, "ranking/ranking.csv", row.names=FALSE, fileEncoding="UTF-8")
## Lage JSON
rank.js <- toJSON(rank)
rank.js <- prettify(rank.js)
write_json(rank.js, "ranking/rank.js", fileEncoding="UTF-8") |
#
# Functions for analysing A. Thaliana Tiling Arrays
# last modified: 27-08-2013
# first written: 27-08-2013
# (c) 2013 GBIC Yalan Bi, Danny Arends, R.C. Jansen
#
#********************************************* this is the final version for testing interaction regulated AS at 5 site ^_^ **********************************************#
#******************************************************************** testing algorithm: Wilcox.test! ********************************************************************#
#main idea:
#minimum of 2*P probes in this exon; minimum of (2*P+1) probes in this gene
#Test how to split (using highest difference between two groups)
#Split into two groups
#test if every group has P probes
#YES -> T-Test the test group (5" start) against (the rest part of first exon + all the probes from the other expExons in the gene)
#NO -> continue
# NOTE(review): hard-coded Windows working directory; all paths below are
# relative to it.
setwd("D:/Arabidopsis Arrays")
#load environment file: one environment label per array (values 1-4 used below)
menvironment <- read.table("Data/ann_env.txt", sep="\t")[ ,2]
#load genotype file: RILs in rows, markers in columns (genotypes coded 1/2)
geno <- read.table("refined map/genotypes.txt",sep="\t", row.names=1, header=TRUE)
#load exp genes (provides `expGeneList`, one gene-name vector per chromosome)
load(file="Data/ExpGenes/expGenes_final.Rdata")
#direction selection
probesDir <- function(exp_data = rawexp) {
  # Select the probes whose measuring direction is opposite to the gene's
  # strand; those are the ones kept for the downstream tests.
  strand_type <- unique(exp_data[ , "strand"])
  if (strand_type == "sense") {
    wanted <- "reverse"
  }
  if (strand_type == "complement") {
    wanted <- "forward"
  }
  # As in the original, an unknown strand label leaves `wanted` unset and
  # raises an error here.
  which(exp_data[ , "direction"] == wanted)
}
# Find the split point inside an exon that maximises the expression gap
# between its head and tail probe groups.
#
# toGroup:  probe row indices of the exon, in positional order.
# exp_data: expression values (probes x individuals).
# P:        minimum number of probes each side of the split must keep.
#
# For every admissible split, the per-individual median of the tail group
# minus that of the head group is summed over individuals. Returns the
# index (within toGroup) of the last probe of the head group at the most
# negative difference, or NULL when no split leaves the tail lower than
# the head (which would indicate decay rather than splicing).
findSepPoint <- function(toGroup=ind, exp_data=rawexp[ ,17:164], P=2, verbose=FALSE){
  dffs <- NULL
  # n is the first probe position of the tail group; both groups keep >= P probes.
  for(n in (P+1):(length(toGroup)-P+1)){
    dff <- sum(apply(exp_data[toGroup[length(toGroup):n], ], 2, median)-apply(exp_data[toGroup[1:(n-1)], ], 2, median))
    dffs <- c(dffs, dff)
    if(verbose) cat("difference between probe(", toGroup[length(toGroup):n], ") and probe(", toGroup[1:(n-1)], ")is", dff, "\n")
  }
  if(min(dffs) < 0){
    if(verbose) cat("so dffList is:", dffs, "\n", "and edge probes are p", toGroup[which.min(dffs)+P-1], "and p", toGroup[which.min(dffs)+P], ", dff =", min(dffs), "\n")
    return(which.min(dffs)+P-1)
  }else return() #we want the testPart is lower than the other part of this exon, otherwise it is decay/decrease
}
#findSepPoint(toGroup=ind, exp_data=rawexp[ ,17:164], verbose=TRUE)
#test the difference between 2 groups
#annotation: unlist -> use all individuals to do test, better than mean/median
# One-sided Wilcoxon rank-sum test of whether the candidate (test) probes are
# expressed LOWER than the reference (rest) probes; returns -log10(p-value).
# All individuals' values are pooled per group rather than averaged first.
testDffBtwParts <- function(exp_data = rawexp[ ,ind_env+16], testProbes, restProbes, verbose = FALSE){
  candidate <- as.numeric(unlist(exp_data[testProbes, ]))
  if(verbose) cat("We are testProbes:", testProbes, "\n")
  reference <- as.numeric(unlist(exp_data[restProbes, ]))
  if(verbose) cat("We are restProbes:", restProbes, "\n")
  pval <- wilcox.test(candidate, reference, alternative = "less")$p.value
  -log10(pval)
}
#testDffBtwParts(exp_data=rawexp[ ,ind_env+16], testProbes, restProbes, verbose=TRUE)
#5'site IAS test
# Test one gene for interaction-regulated alternative splicing at the 5' site.
#
# filename: AGI gene id, e.g. "AT1G01010" (chromosome parsed from the name).
# threTest: QTL-interaction significance threshold for picking a top marker.
# P:        minimum probe count per split group (see findSepPoint()).
#
# Returns a 10-element vector: c(sepProbe, topMarker, then for each of the
# 4 environments the -log10 p-value per genotype 1/2); zeros mark
# untestable cases. Reads the gene's expression and full-model interaction
# tables from disk and uses the globals menvironment, geno, probesDir(),
# findSepPoint() and testDffBtwParts().
test5siteIAS <- function(filename, threTest=11.6, P=2, verbose=FALSE){
  chr <- as.numeric(gsub("AT", "", strsplit(filename, "G")[[1]][1]))
  rawexp <- read.table(paste0("Data/Raw/chr", chr, "_norm_hf_cor/", filename, ".txt"), row.names=1, header=TRUE)
  int <- read.table(paste0("Data/FullModel/chr", chr, "_norm_hf_cor_FM/", filename, "_FM_Int.txt"), row.names=1, header=TRUE)
  probes_dir <- probesDir(rawexp)
  #if(verbose) cat("We have rightDir probes:", probes_dir, "\n")
  # Keep only right-direction probes annotated to a transcription unit ("tu").
  exonID <- probes_dir[grepl("tu", rawexp[probes_dir,"tu"])]
  #if(verbose) cat("We have exon probes:", exonID, "\n")
  uniqueExon <- unique(grep("tu", rawexp[ ,"tu"], value=TRUE))
  #if(verbose) cat("We have exons:", uniqueExon, "\n")
  #for interaction regulated alternative splicing 5'site, at least 2 exons in a gene!!!
  if(length(uniqueExon) < 2){
    if(verbose) cat(filename, "has", length(uniqueExon), "exons, not enough T^T\n")
    return(c(0, 0, rep(0, 8)))
  }else{
    if(verbose) cat(filename, "has", length(uniqueExon), "exons!\n")
    # Probes of the first exon; it needs >= 2*P probes to be splittable.
    ind <- exonID[rawexp[exonID, "tu"] == uniqueExon[1]]
    if(length(ind) >= 2*P){
      if(verbose) cat("first exon", uniqueExon[1], "has probes", ind, ";")
      sepPoint <- findSepPoint(toGroup=ind, exp_data=rawexp[ ,17:164], P=P, verbose=FALSE)
    }else{
      if(verbose) cat("first exon", uniqueExon[1], "has", length(ind), "probes, not enough T^T\n")
      return(c(0, 0, rep(0, 8)))
    }
    if(is.null(sepPoint)){
      if(verbose) cat("but no right sep point T^T\n")
      return(c(0, 0, rep(0, 8)))
    }else{
      if(verbose) cat("and sep point is after p", ind[sepPoint], "\t")
      # Interaction scores of the candidate (tail) probes; a marker qualifies
      # as top marker when any such probe exceeds threTest for it.
      partQTL <- int[ind[-(1:sepPoint)], ]
      if(any(apply(partQTL >= threTest, 2, sum) > 0)){
        m <- which.max(apply(partQTL >= threTest, 2, sum))
        #NOTE: min(ind[sepPoint], ind[sepPoint+1]) <- the probe just before the gap, for making plot.
        #      in 5'AS, it is the last probe of higher part in the first exon; in 3'AS, it is the last probe of the lower part in the last exon
        res <- c(ind[sepPoint], m)
        if(verbose) cat("and topMarker is", m, ", continue test!\n")
        # Test each environment separately, splitting RILs by genotype at m.
        for(env in 1:4){
          ind_env <- which(as.numeric(menvironment) == env)
          envGT1 <- ind_env[ind_env %in% which(geno[ ,m] == 1)]
          envGT2 <- ind_env[ind_env %in% which(geno[ ,m] == 2)]
          if(length(envGT1) > 0 && length(envGT2) > 0){
            for(gt in 1:2){
              gtInEnv <- ind_env[ind_env %in% which(geno[ ,m] == gt)]
              #check the part before sepPoint before get background set, if the median >= 5, continue!
              if(median(unlist(rawexp[ind[1:sepPoint], gtInEnv+16])) < 5){
                if(verbose) cat("*in Env", env, "first half median =", median(unlist(rawexp[ind[1:sepPoint], gtInEnv+16])), "< 5, too low T^T\n")
                res <- c(res, 0)
              }else{
                if(verbose) cat("*in Env", env, "first half median =", median(unlist(rawexp[ind[1:sepPoint], gtInEnv+16])), ">= 5, continue to get bgSet!\n")
                #get bgSet for each genotype in each Env; bgSet---the combination of TUs, the median of which is >= 5 of this genotype and in this Env
                bg <- NULL
                for(tu_bg in uniqueExon[-1]){
                  ind_bg <- exonID[rawexp[exonID, "tu"] == tu_bg]
                  if(length(ind_bg) > 0 && median(unlist(rawexp[ind_bg, gtInEnv+16])) >= 5){
                    if(verbose) cat("\tin Env", env, ": put", tu_bg, "into bgSet: median =", median(unlist(rawexp[ind_bg, gtInEnv+16])), ">= 5\n")
                    bg <- c(bg, ind_bg)
                  }
                }
                if(length(bg) < P){
                  if(verbose) cat("*in Env", env, "last half of bgSet has", length(bg), "exonProbes, <", P, " not enough to continue with test T^T\n")
                  res <- c(res, 0)
                }else{
                  # Background = head of first exon + all expressed other exons.
                  bg <- c(ind[1:sepPoint], bg)
                  if(verbose) cat("*in Env", env, "bgSet is p", bg, ", continue with test!\n")
                  res <- c(res, testDffBtwParts(exp_data=rawexp[ ,gtInEnv+16], testProbes=ind[-(1:sepPoint)], restProbes=bg, verbose=FALSE))
                }
              }
            }
          }else{
            if(verbose) cat("in one gt, there's no RILs in Env", env, "T^T\n")
            res <- c(res, 0, 0)
          }
        }
      }else{
        res <- c(ind[sepPoint], 0, rep(0, 8))
        if(verbose) cat("but no topMarker T^T\n")
      }
      return(res)
    }
  }
}
#test5siteIAS(filename, threTest=11.6, P=2, verbose=TRUE)
#test 5'IAS for chr 1-5
#Driver: run the 5' interaction-AS test on every expressed gene of
#chromosomes 1-5 and write one result table per chromosome
#(columns: separation probe, top marker, then one value per
#environment x genotype combination).
for(chr in 1:5){
st <- proc.time()[3]
cat("chr", chr, "starts...\n")
genenames <- expGeneList[[chr]]
resmatrix <- NULL
#filename = "AT1G01010"
for(filename in genenames){
res <- test5siteIAS(filename, threTest=11.6, P=2)
#NOTE(review): growing resmatrix by rbind() is O(n^2); fine at this scale,
#but preallocating (or lapply + do.call(rbind, ...)) would be faster
resmatrix <- rbind(resmatrix, res)
cat(filename, "is tested\n")
}
rownames(resmatrix) <- genenames
colnames(resmatrix) <- c("sepProbe", "topMarker", "6H/gt1", "6H/gt2", "Dry_AR/gt1", "Dry_AR/gt2", "Dry_Fresh/gt1", "Dry_Fresh/gt2", "RP/gt1", "RP/gt2")
write.table(resmatrix, file=paste0("Data/geneticsAS/splicing5'siteByI_chr", chr, "_wt_p2.txt"), sep="\t")
et <- proc.time()[3]
cat("chr", chr, "finished in", et-st, "s\n\n")
}
| /functions/5'siteAS_HF_byI.r | no_license | YalanBi/AA | R | false | false | 8,369 | r | #
# Functions for analysing A. Thaliana Tiling Arrays
# last modified: 27-08-2013
# first written: 27-08-2013
# (c) 2013 GBIC Yalan Bi, Danny Arends, R.C. Jansen
#
#********************************************* this is the final version for testing interaction regulated AS at 5 site ^_^ **********************************************#
#******************************************************************** testing algorithm: Wilcox.test! ********************************************************************#
#main idea:
#minimum of 2*P probes in this exon; minimum of (2*P+1) probes in this gene
#Test how to split (using highest difference between two groups)
#Split into two groups
#test if every group has P probes
#YES -> T-Test the test group (5" start) against (the rest part of first exon + all the probes from the other expExons in the gene)
#NO -> continue
#Global setup: all paths below are relative to this working directory.
setwd("D:/Arabidopsis Arrays")
#load environment file
#(second column = environment label per individual; converted to 1..4 later)
menvironment <- read.table("Data/ann_env.txt", sep="\t")[ ,2]
#load genotype file
#(rows = RILs, columns = markers; genotype codes 1/2 are used below)
geno <- read.table("refined map/genotypes.txt",sep="\t", row.names=1, header=TRUE)
#load exp genes
#(provides expGeneList, the per-chromosome lists of expressed genes used by the driver loop)
load(file="Data/ExpGenes/expGenes_final.Rdata")
#direction selection
#Select probes whose hybridisation direction matches the gene's strand.
#
#On a "sense"-strand gene the informative probes are the "reverse" ones;
#on a "complement"-strand gene they are the "forward" ones.
#
#@param exp_data data.frame with character columns "strand" and "direction"
#  (one row per probe); "strand" must be constant within a gene
#@return integer vector of row indices of the matching probes
#
#Fix vs original: an unexpected strand value previously died with an opaque
#"object 'direction_id' not found", and a non-constant strand column would
#make the if() condition non-scalar (an error in R >= 4.2). Both cases now
#raise informative errors; behaviour for valid input is unchanged.
probesDir <- function(exp_data=rawexp){
  strand <- unique(exp_data[ ,"strand"])
  if(length(strand) != 1){
    stop("probesDir: expected a single strand per gene, got: ",
         paste(strand, collapse = ", "))
  }
  if(strand == "sense"){
    which(exp_data[ ,"direction"] == "reverse")
  } else if(strand == "complement"){
    which(exp_data[ ,"direction"] == "forward")
  } else {
    stop("probesDir: unknown strand value '", strand, "'")
  }
}
#to find max difference between each probe in exp, for grouping
#Scan all admissible split points of the first exon's probes (keeping at
#least P probes on each side) and score each split by the summed
#per-individual difference: median(second part) - median(first part).
#
#@param toGroup  row indices (into exp_data) of the first exon's probes
#@param exp_data probe-intensity data.frame, one column per individual
#@param P        minimum number of probes required on each side of a split
#@param verbose  print the score of every candidate split
#@return index into toGroup of the LAST probe of the first part at the best
#  split, or NULL when no split makes the second part lower than the first
#  (min(dffs) >= 0) -- a higher tail suggests decay rather than splicing
findSepPoint <- function(toGroup=ind, exp_data=rawexp[ ,17:164], P=2, verbose=FALSE){
dffs <- NULL
#n is the first probe of the second part; the range keeps >= P probes on
#both sides of the split
for(n in (P+1):(length(toGroup)-P+1)){
#second part is indexed end -> n (reversed); the order does not change the
#per-column medians, only the printing order below
dff <- sum(apply(exp_data[toGroup[length(toGroup):n], ], 2, median)-apply(exp_data[toGroup[1:(n-1)], ], 2, median))
dffs <- c(dffs, dff)
if(verbose) cat("difference between probe(", toGroup[length(toGroup):n], ") and probe(", toGroup[1:(n-1)], ")is", dff, "\n")
}
if(min(dffs) < 0){
if(verbose) cat("so dffList is:", dffs, "\n", "and edge probes are p", toGroup[which.min(dffs)+P-1], "and p", toGroup[which.min(dffs)+P], ", dff =", min(dffs), "\n")
#dffs[k] corresponds to n = k + P, so k + P - 1 is the last probe of part one
return(which.min(dffs)+P-1)
}else return() #we want the testPart is lower than the other part of this exon, otherwise it is decay/decrease
}
#findSepPoint(toGroup=ind, exp_data=rawexp[ ,17:164], verbose=TRUE)
#test the difference between 2 groups
#annotation: unlist -> use all individuals to do test, better than mean/median
#One-sided Wilcoxon rank-sum test: is the candidate probe set ("test part")
#expressed lower than the background probe set?
#
#@param exp_data   data.frame of probe intensities (rows = probes,
#  columns = individuals); all individuals are pooled into one sample per part
#@param testProbes row indices of the candidate (possibly spliced-out) probes
#@param restProbes row indices of the background probes
#@param verbose    print the probe indices entering each sample
#@return -log10 of the one-sided (alternative = "less") p-value
testDffBtwParts <- function(exp_data=rawexp[ ,ind_env+16], testProbes, restProbes, verbose=FALSE){
  lowSample <- as.numeric(unlist(exp_data[testProbes, ]))
  if(verbose) cat("We are testProbes:", testProbes, "\n")
  bgSample <- as.numeric(unlist(exp_data[restProbes, ]))
  if(verbose) cat("We are restProbes:", restProbes, "\n")
  pval <- wilcox.test(lowSample, bgSample, alternative="less")$p.value
  -log10(pval)
}
#testDffBtwParts(exp_data=rawexp[ ,ind_env+16], testProbes, restProbes, verbose=TRUE)
#5'site IAS test
#Test one gene for interaction-regulated alternative splicing at the 5' site.
#
#Reads the gene's normalised expression (rawexp) and its full-model
#interaction-QTL profile (int), splits the first exon with findSepPoint,
#picks the marker with the most probes whose interaction score >= threTest
#in the second part, then -- for each of the 4 environments and both
#genotypes (1/2) at that marker -- Wilcoxon-tests the second part against a
#background set (first part + every later exon whose median is >= 5).
#
#@param filename gene identifier, e.g. "AT1G01010" (chromosome is parsed
#  from the "AT<chr>G" prefix)
#@param threTest -log10(p) threshold for calling an interaction-QTL probe
#@param P        minimum probe counts (split parts / background set)
#@return numeric vector of length 10: separation probe, top marker, then
#  8 values (-log10 p, or 0 when a test was skipped) for env 1-4 x gt 1-2
#
#Note: expression columns of rawexp start at column 17, hence the
#recurring "+16" offsets when indexing individuals.
test5siteIAS <- function(filename, threTest=11.6, P=2, verbose=FALSE){
chr <- as.numeric(gsub("AT", "", strsplit(filename, "G")[[1]][1]))
rawexp <- read.table(paste0("Data/Raw/chr", chr, "_norm_hf_cor/", filename, ".txt"), row.names=1, header=TRUE)
int <- read.table(paste0("Data/FullModel/chr", chr, "_norm_hf_cor_FM/", filename, "_FM_Int.txt"), row.names=1, header=TRUE)
probes_dir <- probesDir(rawexp)
#if(verbose) cat("We have rightDir probes:", probes_dir, "\n")
exonID <- probes_dir[grepl("tu", rawexp[probes_dir,"tu"])]
#if(verbose) cat("We have exon probes:", exonID, "\n")
uniqueExon <- unique(grep("tu", rawexp[ ,"tu"], value=TRUE))
#if(verbose) cat("We have exons:", uniqueExon, "\n")
#for interaction regulated alternative splicing 5'site, at least 2 exons in a gene!!!
if(length(uniqueExon) < 2){
if(verbose) cat(filename, "has", length(uniqueExon), "exons, not enough T^T\n")
return(c(0, 0, rep(0, 8)))
}else{
if(verbose) cat(filename, "has", length(uniqueExon), "exons!\n")
#probes of the FIRST exon only; the split is searched within this exon
ind <- exonID[rawexp[exonID, "tu"] == uniqueExon[1]]
if(length(ind) >= 2*P){
if(verbose) cat("first exon", uniqueExon[1], "has probes", ind, ";")
sepPoint <- findSepPoint(toGroup=ind, exp_data=rawexp[ ,17:164], P=P, verbose=FALSE)
}else{
if(verbose) cat("first exon", uniqueExon[1], "has", length(ind), "probes, not enough T^T\n")
return(c(0, 0, rep(0, 8)))
}
if(is.null(sepPoint)){
if(verbose) cat("but no right sep point T^T\n")
return(c(0, 0, rep(0, 8)))
}else{
if(verbose) cat("and sep point is after p", ind[sepPoint], "\t")
#interaction scores of the probes AFTER the split point
partQTL <- int[ind[-(1:sepPoint)], ]
if(any(apply(partQTL >= threTest, 2, sum) > 0)){
#marker (column of int) with the most above-threshold probes
m <- which.max(apply(partQTL >= threTest, 2, sum))
#NOTE: min(ind[sepPoint], ind[sepPoint+1]) <- the probe just before the gap, for making plot.
# in 5'AS, it is the last probe of higher part in the first exon; in 3'AS, it is the last probe of the lower part in the last exon
res <- c(ind[sepPoint], m)
if(verbose) cat("and topMarker is", m, ", continue test!\n")
for(env in 1:4){
ind_env <- which(as.numeric(menvironment) == env)
#individuals of this environment carrying genotype 1 / 2 at marker m
envGT1 <- ind_env[ind_env %in% which(geno[ ,m] == 1)]
envGT2 <- ind_env[ind_env %in% which(geno[ ,m] == 2)]
if(length(envGT1) > 0 && length(envGT2) > 0){
for(gt in 1:2){
gtInEnv <- ind_env[ind_env %in% which(geno[ ,m] == gt)]
#check the part before sepPoint before get background set, if the median >= 5, continue!
if(median(unlist(rawexp[ind[1:sepPoint], gtInEnv+16])) < 5){
if(verbose) cat("*in Env", env, "first half median =", median(unlist(rawexp[ind[1:sepPoint], gtInEnv+16])), "< 5, too low T^T\n")
res <- c(res, 0)
}else{
if(verbose) cat("*in Env", env, "first half median =", median(unlist(rawexp[ind[1:sepPoint], gtInEnv+16])), ">= 5, continue to get bgSet!\n")
#get bgSet for each genotype in each Env; bgSet---the combination of TUs, the median of which is >= 5 of this genotype and in this Env
bg <- NULL
for(tu_bg in uniqueExon[-1]){
ind_bg <- exonID[rawexp[exonID, "tu"] == tu_bg]
if(length(ind_bg) > 0 && median(unlist(rawexp[ind_bg, gtInEnv+16])) >= 5){
if(verbose) cat("\tin Env", env, ": put", tu_bg, "into bgSet: median =", median(unlist(rawexp[ind_bg, gtInEnv+16])), ">= 5\n")
bg <- c(bg, ind_bg)
}
}
if(length(bg) < P){
if(verbose) cat("*in Env", env, "last half of bgSet has", length(bg), "exonProbes, <", P, " not enough to continue with test T^T\n")
res <- c(res, 0)
}else{
#final background = first part of exon 1 + qualifying later exons
bg <- c(ind[1:sepPoint], bg)
if(verbose) cat("*in Env", env, "bgSet is p", bg, ", continue with test!\n")
res <- c(res, testDffBtwParts(exp_data=rawexp[ ,gtInEnv+16], testProbes=ind[-(1:sepPoint)], restProbes=bg, verbose=FALSE))
}
}
}
}else{
if(verbose) cat("in one gt, there's no RILs in Env", env, "T^T\n")
res <- c(res, 0, 0)
}
}
}else{
res <- c(ind[sepPoint], 0, rep(0, 8))
if(verbose) cat("but no topMarker T^T\n")
}
return(res)
}
}
}
#test5siteIAS(filename, threTest=11.6, P=2, verbose=TRUE)
#test 5'IAS for chr 1-5
#Driver: run the 5' interaction-AS test on every expressed gene of
#chromosomes 1-5 and write one result table per chromosome
#(columns: separation probe, top marker, then one value per
#environment x genotype combination).
for(chr in 1:5){
st <- proc.time()[3]
cat("chr", chr, "starts...\n")
genenames <- expGeneList[[chr]]
resmatrix <- NULL
#filename = "AT1G01010"
for(filename in genenames){
res <- test5siteIAS(filename, threTest=11.6, P=2)
#NOTE(review): growing resmatrix by rbind() is O(n^2); fine at this scale,
#but preallocating (or lapply + do.call(rbind, ...)) would be faster
resmatrix <- rbind(resmatrix, res)
cat(filename, "is tested\n")
}
rownames(resmatrix) <- genenames
colnames(resmatrix) <- c("sepProbe", "topMarker", "6H/gt1", "6H/gt2", "Dry_AR/gt1", "Dry_AR/gt2", "Dry_Fresh/gt1", "Dry_Fresh/gt2", "RP/gt1", "RP/gt2")
write.table(resmatrix, file=paste0("Data/geneticsAS/splicing5'siteByI_chr", chr, "_wt_p2.txt"), sep="\t")
et <- proc.time()[3]
cat("chr", chr, "finished in", et-st, "s\n\n")
}
|
#' Coalesce Multiple Columns
#'
#' @param df a data frame
#' @param pattern pattern to split column names along using \code{stringr::str_split_fixed}
#' @param noisy Do you want messages about the columns being coalesced?
#' @description Coalesce columns matching the LHS when splitting by \code{pattern}. Columns
#' are coalesced from left to right as they appear in \code{df}
#' @note Columns that do not contain \code{pattern} but match another column after splitting
#' will STILL be coalesced. In the example, the columns \code{c(value, value.x, value.y)} are
#' coalesced when \code{(pattern = stringr::fixed('.')}.
#' @return a data frame with coalesced columns
#' @export
#'
#' @examples
#' # Let's say you have two two data sets about birds
#' # and you want to combine them to make a more complete version
#' # while prioritizing the woods data over the feeder data
#' woods = tibble::tibble(
#' bird = c('Northern Flicker', 'Chesnut-backed Chickadee', 'California Quail'),
#' group_size = c(NA, NA, 2L),
#' food = c('bugs', NA, 'seeds')
#' )
#'
#' feeder = tibble::tibble(
#' bird = c('Northern Flicker','Chesnut-backed Chickadee', 'Evening Grosbeak'),
#' group_size = c(1L, 8L, 13L),
#' food = c('seeds', NA, NA)
#' )
#'
#' # See what they look like when joined on "bird"
#' dplyr::full_join(
#' x = woods,
#' y = feeder,
#' by = 'bird'
#' )
#'
#' # When we coalesce multi, it first looks for non-missing values
#' # from the woods (.x) and then from the feeder (.y):
#' dplyr::full_join(
#' x = woods,
#' y = feeder,
#' by = 'bird'
#' ) |>
#' coalesce_multi()
#'
#' # Note that it can coalesce values with
#' # different separators and even no suffix:
#' dplyr::full_join(
#' x = woods,
#' y = feeder,
#' by = 'bird',
#' suffix = c('', '~feeder')
#' ) |>
#' coalesce_multi(pattern = '~')
coalesce_multi = function(
df,
pattern = stringr::fixed('.'),
noisy = TRUE
){
stopifnot(
"`df` must be a data.frame" = checkmate::test_data_frame(df),
"pattern must be a character" = checkmate::test_character(pattern)
)
# first figure out what columns should be coalesced:
# split every column name once into prefix/suffix on `pattern`
colname_df = stringr::str_split_fixed(
string = colnames(df),
pattern = pattern,
n = 2
)
colnames(colname_df) = c('prefix', 'suffix')
# keep only names whose prefix is shared by two or more columns
# (a column without the pattern keeps its full name as the prefix,
# which is how bare columns like `value` join the `value.x`/`value.y` group)
colname_df = colname_df |>
tibble::as_tibble() |>
dplyr::mutate(
colname = colnames(df)
) |>
dplyr::group_by(prefix) |>
dplyr::filter(dplyr::n() > 1) |>
dplyr::ungroup()
if(nrow(colname_df) == 0){
warning('No columns coalesced by coalesce_multi')
return(df)
} else{
for(column_prefix in unique(colname_df[['prefix']])){
sub_df = dplyr::filter(colname_df, prefix == column_prefix)
if(noisy){
message(
paste0(
'Coalescing c(',
paste(sub_df[['colname']], collapse = ', '),
') into ',
column_prefix,
'\n\n'
)
)
}
# the group's columns in their original df order, as a list for coalesce()
new_col = df |>
dplyr::select(
tidyselect::all_of(sub_df[['colname']])
) |>
as.list()
# drop every group member except a column literally named after the prefix
drop_cols = setdiff(sub_df[['colname']], column_prefix)
df[[column_prefix]] = dplyr::coalesce(!!!new_col)
df = dplyr::select(df, -tidyselect::all_of(drop_cols))
}
}
df
}
| /R/coalesce_multi.R | no_license | svenhalvorson/SvenR | R | false | false | 3,297 | r | #' Coalesce Multiple Columns
#'
#' @param df a data frame
#' @param pattern pattern to split column names along using \code{stringr::str_split_fixed}
#' @param noisy Do you want messages about the columns being coalesced?
#' @description Coalesce columns matching the LHS when splitting by \code{pattern}. Columns
#' are coalesced from left to right as they appear in \code{df}
#' @note Columns that do not contain \code{pattern} but match another column after splitting
#' will STILL be coalesced. In the example, the columns \code{c(value, value.x, value.y)} are
#' coalesced when \code{(pattern = stringr::fixed('.')}.
#' @return a data frame with coalesced columns
#' @export
#'
#' @examples
#' # Let's say you have two two data sets about birds
#' # and you want to combine them to make a more complete version
#' # while prioritizing the woods data over the feeder data
#' woods = tibble::tibble(
#' bird = c('Northern Flicker', 'Chesnut-backed Chickadee', 'California Quail'),
#' group_size = c(NA, NA, 2L),
#' food = c('bugs', NA, 'seeds')
#' )
#'
#' feeder = tibble::tibble(
#' bird = c('Northern Flicker','Chesnut-backed Chickadee', 'Evening Grosbeak'),
#' group_size = c(1L, 8L, 13L),
#' food = c('seeds', NA, NA)
#' )
#'
#' # See what they look like when joined on "bird"
#' dplyr::full_join(
#' x = woods,
#' y = feeder,
#' by = 'bird'
#' )
#'
#' # When we coalesce multi, it first looks for non-missing values
#' # from the woods (.x) and then from the feeder (.y):
#' dplyr::full_join(
#' x = woods,
#' y = feeder,
#' by = 'bird'
#' ) |>
#' coalesce_multi()
#'
#' # Note that it can coalesce values with
#' # different separators and even no suffix:
#' dplyr::full_join(
#' x = woods,
#' y = feeder,
#' by = 'bird',
#' suffix = c('', '~feeder')
#' ) |>
#' coalesce_multi(pattern = '~')
# Coalesce groups of columns that share a common prefix.
#
# Column names are split once on `pattern`; every group of two or more
# columns with the same prefix is merged (left to right, in the order the
# columns appear in `df`) into a single column named after the prefix.
# A column without the pattern keeps its full name as its prefix, which is
# how a bare `value` column joins a `value.x`/`value.y` group.
#
# @param df      a data frame
# @param pattern separator passed to stringr::str_split_fixed
# @param noisy   emit a message for each group that gets coalesced
# @return `df` with each matching group collapsed into one column
coalesce_multi = function(
  df,
  pattern = stringr::fixed('.'),
  noisy = TRUE
){
  stopifnot(
    "`df` must be a data.frame" = checkmate::test_data_frame(df),
    "pattern must be a character" = checkmate::test_character(pattern)
  )
  # Split every column name into prefix/suffix and keep only the
  # prefixes shared by more than one column:
  name_parts = stringr::str_split_fixed(
    string = colnames(df),
    pattern = pattern,
    n = 2
  )
  colnames(name_parts) = c('prefix', 'suffix')
  name_parts = tibble::as_tibble(name_parts)
  name_parts = dplyr::mutate(name_parts, colname = colnames(df))
  name_parts = dplyr::group_by(name_parts, prefix)
  name_parts = dplyr::filter(name_parts, dplyr::n() > 1)
  name_parts = dplyr::ungroup(name_parts)
  if(nrow(name_parts) == 0){
    warning('No columns coalesced by coalesce_multi')
    return(df)
  }
  for(grp in unique(name_parts[['prefix']])){
    grp_info = dplyr::filter(name_parts, prefix == grp)
    grp_cols = grp_info[['colname']]
    if(noisy){
      message(
        paste0(
          'Coalescing c(',
          paste(grp_cols, collapse = ', '),
          ') into ',
          grp,
          '\n\n'
        )
      )
    }
    # Coalesce left to right in the order the columns appear in df:
    col_values = as.list(dplyr::select(df, tidyselect::all_of(grp_cols)))
    df[[grp]] = dplyr::coalesce(!!!col_values)
    # Keep a column literally named after the prefix (if any); drop the rest:
    df = dplyr::select(df, -tidyselect::all_of(setdiff(grp_cols, grp)))
  }
  df
}
|
# Titanic survival prediction: impute/engineer features on the combined
# train+test data, explore each feature graphically, fit four classifiers
# (logistic, random forest, rpart, SVM), and write an SVM-based submission.
library('dplyr') # data manipulation
library('ggplot2') # Data Visualization
library('ggthemes') # Data Visualization
# NOTE(review): globally disabling warnings hides real problems; prefer
# handling the specific warnings instead
options(warn = -1)
# load train.csv
train <- read.csv('../input/train.csv', stringsAsFactors = F)
# load test.csv
test <- read.csv('../input/test.csv', stringsAsFactors = F)
# combine them as a whole
test$Survived <- NA
full <- rbind(train,test)
# show first several rows of the data
head(full)
# check the data
str(full)
# Process Age Column
# create a new data set age
age <- full$Age
n = length(age)
# replace missing value with a random sample from raw data
set.seed(123)
for(i in 1:n){
if(is.na(age[i])){
age[i] = sample(na.omit(full$Age),1)
}
}
# check effect
par(mfrow=c(1,2))
hist(full$Age, freq=F, main='Before Replacement',
col='lightblue', ylim=c(0,0.04),xlab = "age")
hist(age, freq=F, main='After Replacement',
col='darkblue', ylim=c(0,0.04))
# Process Cabin Column to show number of cabins passenger has
cabin <- full$Cabin
n = length(cabin)
for(i in 1:n){
if(nchar(cabin[i]) == 0){
cabin[i] = 0
} else{
s = strsplit(cabin[i]," ")
cabin[i] = length(s[[1]])
}
}
table(cabin)
# process fare column
# check missing
full$PassengerId[is.na(full$Fare)]
full[1044,]
ggplot(full[full$Pclass == '3' & full$Embarked == 'S', ],
aes(x = Fare)) +
geom_density(fill = '#99d6ff', alpha=0.4) +
geom_vline(aes(xintercept=median(Fare, na.rm=T)),
colour='red', linetype='dashed', lwd=1)
# we can see that fare is clustered around mode. we just repace the missing value with
# median fare of according Pclass and Embarked
full$Fare[1044] <- median(full[full$Pclass == '3' & full$Embarked == 'S', ]$Fare, na.rm = TRUE)
# process embarked column
embarked <- full$Embarked
n = length(embarked)
for(i in 1:n){
if(embarked[i] != "S" && embarked[i] != "C" && embarked[i] != "Q"){
embarked[i] = "S"
}
}
table(embarked)
# number of survivals and nonsurvivals across different age
d <- data.frame(Age = age[1:891], Survived = train$Survived)
ggplot(d, aes(Age,fill = factor(Survived))) +
geom_histogram()
# create bar chart to show relationship between survival rate and age intervals
cuts <- cut(d$Age,hist(d$Age,10,plot = F)$breaks)
rate <- tapply(d$Survived,cuts,mean)
d2 <- data.frame(age = names(rate),rate)
barplot(d2$rate, xlab = "age",ylab = "survival rate")
# create histgram to show effect of Sex on survival
ggplot(train, aes(Sex,fill = factor(Survived))) +
geom_histogram(stat = "count")
# calculate survival rate
tapply(train$Survived,train$Sex,mean)
# extract title from Name
# (here I process full data set but only plot title vs survival in train
# data set because there is no survival value for test data set)
# NOTE(review): ". " below is a regex (any character followed by a space),
# not a literal ". "; multi-word titles such as "the Countess" get
# truncated -- consider strsplit(..., fixed = TRUE)
n = length(full$Survived)
title = rep(NA,n)
for (i in 1:n){
lastname = strsplit(full$Name[i],", ")[[1]][2]
title[i] = strsplit(lastname,". ")[[1]][1]
}
# make a histogram of title v.s survival
d <- data.frame(title = title[1:891],Survived = train$Survived)
ggplot(d, aes(title,fill = factor(Survived))) +
geom_histogram(stat = "count")
# count of title
table(title)
# survival rate
tapply(d$Survived,d$title,mean)
# replace rare titles to 'Rare'
title[title != 'Mr' & title != 'Miss' & title != 'Mrs' & title != 'Master'] <- 'Rare'
table(title)
# make a histogram
ggplot(train, aes(Pclass,fill = factor(Survived))) +
geom_histogram(stat = "count")
# calculate survival rate
tapply(train$Survived,train$Pclass,mean)
# histogram of Parch
ggplot(train, aes(Parch,fill = factor(Survived))) +
geom_histogram(stat = "count")
# histogram of SibSp
ggplot(train, aes(SibSp,fill = factor(Survived))) +
geom_histogram(stat = "count")
# combine SibSp and Parch
family <- full$SibSp + full$Parch
d <- data.frame(family = family[1:891],Survived = train$Survived)
ggplot(d, aes(family,fill = factor(Survived))) +
geom_histogram(stat = "count")
tapply(d$Survived,d$family,mean)
# create histogram
d <- data.frame(Cabin = cabin[1:891],Survived = train$Survived)
ggplot(d, aes(Cabin,fill = factor(Survived))) +
geom_histogram(stat = "count")
# calculate survival rate
tapply(d$Survived,d$Cabin,mean)
# make a histogram
ggplot(train, aes(Fare,fill = factor(Survived))) +
geom_histogram()
# calculate
cuts <- cut(train$Fare,hist(train$Fare,10,plot = F)$breaks)
rate <- tapply(train$Survived,cuts,mean)
d <- data.frame(fare = names(rate),rate)
barplot(d$rate, xlab = "fare",ylab = "survival rate")
# make histogram
d <- data.frame(Embarked = embarked[1:891], Survived = train$Survived)
ggplot(d, aes(Embarked,fill = factor(Survived))) +
geom_histogram(stat = "count")
# make table
tapply(train$Survived,train$Embarked,mean)
# Assemble model inputs: f.* = training rows (1:891), t.* = test rows (892:1309)
# response variable
f.survived = train$Survived
# feature
# 1. age
f.age = age[1:891] # for training
t.age = age[892:1309] # for testing
# 2. fare
f.fare = full$Fare[1:891]
t.fare = full$Fare[892:1309]
# 3. cabin
f.cabin = cabin[1:891]
t.cabin = cabin[892:1309]
# 4. title
f.title = title[1:891]
t.title = title[892:1309]
# 5. family
family <- full$SibSp + full$Parch
f.family = family[1:891]
t.family = family[892:1309]
# 6. plcass
f.pclass = train$Pclass
t.pclass = test$Pclass
# 7. sex
f.sex = train$Sex
t.sex = test$Sex
# 8. embarked
f.embarked = embarked[1:891]
t.embarked = embarked[892:1309]
# construct training data frame
new_train = data.frame(survived = f.survived, age = f.age, fare = f.fare , sex = f.sex,
embarked = f.embarked ,family = f.family ,title = f.title ,cabin = f.cabin, pclass= f.pclass)
# logistic regression
fit_logit <- glm(factor(survived) ~ age + fare + sex + embarked + family
+ title + cabin + pclass,data = new_train,family = binomial)
# predicted result of regression
ans_logit = rep(NA,891)
for(i in 1:891){
ans_logit[i] = round(fit_logit$fitted.values[[i]],0)
}
# check result
mean(ans_logit == train$Survived)
table(ans_logit)
# random forest
library('randomForest')
set.seed(123)
fit_rf <- randomForest(factor(survived) ~ age + fare + sex + embarked + family
+ title + cabin + pclass,data = new_train)
# predicted result of regression
rf.fitted = predict(fit_rf)
ans_rf = rep(NA,891)
for(i in 1:891){
ans_rf[i] = as.integer(rf.fitted[[i]]) - 1
}
# check result
mean(ans_rf == train$Survived)
table(ans_rf)
# decision tree
library(rpart)
fit_dt <- rpart(factor(survived) ~ age + fare + sex + embarked + family
+ title + cabin + pclass,data = new_train)
# predicted result of regression
dt.fitted = predict(fit_dt)
ans_dt = rep(NA,891)
for(i in 1:891){
if(dt.fitted[i,1] >= dt.fitted[i,2] ){
ans_dt[i] = 0
} else{
ans_dt[i] = 1
}
}
# check result
mean(ans_dt == train$Survived)
table(ans_dt)
# svm
library(e1071)
fit_svm <- svm(factor(survived) ~ age + fare + sex + embarked + family
+ title + cabin + pclass,data = new_train)
# predicted result of regression
svm.fitted = predict(fit_svm)
ans_svm = rep(NA,891)
for(i in 1:891){
ans_svm[i] = as.integer(svm.fitted[[i]]) - 1
}
# check result
mean(ans_svm == train$Survived)
table(ans_svm)
# Training-set confusion counts for each model: a=TP, b=FP, c=FN, d=TN
# logistic
a = sum(ans_logit ==1 & f.survived == 1)
b = sum(ans_logit ==1 & f.survived == 0)
c = sum(ans_logit ==0 & f.survived == 1)
d = sum(ans_logit ==0 & f.survived == 0)
data.frame(a,b,c,d)
# Random Forest
a = sum(ans_rf ==1 & f.survived == 1)
b = sum(ans_rf ==1 & f.survived == 0)
c = sum(ans_rf ==0 & f.survived == 1)
d = sum(ans_rf ==0 & f.survived == 0)
data.frame(a,b,c,d)
# Decision Tree
a = sum(ans_dt ==1 & f.survived == 1)
b = sum(ans_dt ==1 & f.survived == 0)
c = sum(ans_dt ==0 & f.survived == 1)
d = sum(ans_dt ==0 & f.survived == 0)
data.frame(a,b,c,d)
# SVM
a = sum(ans_svm ==1 & f.survived == 1)
b = sum(ans_svm ==1 & f.survived == 0)
c = sum(ans_svm ==0 & f.survived == 1)
d = sum(ans_svm ==0 & f.survived == 0)
data.frame(a,b,c,d)
# construct testing data frame
test_data_set <- data.frame(age = t.age, fare = t.fare, sex = t.sex, embarked = t.embarked,
family = t.family, title = t.title,cabin = t.cabin, pclass = t.pclass)
# make prediction
svm_predict = predict(fit_svm,newdata = test_data_set )
ans_svm_predict = rep(NA,418)
for(i in 1:418){
ans_svm_predict[i] = as.integer(svm_predict[[i]]) - 1
}
table(ans_svm_predict)
# create a csv file for submittion
d<-data.frame(PassengerId = test$PassengerId, Survived = ans_svm_predict)
write.csv(d,file = "TitanicResult.csv",row.names = F)
| /r/kernels/vasuls-predictive-analysis-of-survival-rate-on-titanic/script/predictive-analysis-of-survival-rate-on-titanic.r | no_license | helenaK/trustworthy-titanic | R | false | false | 8,472 | r |
# Titanic survival prediction (duplicate copy of the preceding script, from
# the dataset row's text column): impute/engineer features on the combined
# train+test data, explore each feature, fit four classifiers, write an
# SVM-based submission.
library('dplyr') # data manipulation
library('ggplot2') # Data Visualization
library('ggthemes') # Data Visualization
# NOTE(review): globally disabling warnings hides real problems
options(warn = -1)
# load train.csv
train <- read.csv('../input/train.csv', stringsAsFactors = F)
# load test.csv
test <- read.csv('../input/test.csv', stringsAsFactors = F)
# combine them as a whole
test$Survived <- NA
full <- rbind(train,test)
# show first several rows of the data
head(full)
# check the data
str(full)
# Process Age Column
# create a new data set age
age <- full$Age
n = length(age)
# replace missing value with a random sample from raw data
set.seed(123)
for(i in 1:n){
if(is.na(age[i])){
age[i] = sample(na.omit(full$Age),1)
}
}
# check effect
par(mfrow=c(1,2))
hist(full$Age, freq=F, main='Before Replacement',
col='lightblue', ylim=c(0,0.04),xlab = "age")
hist(age, freq=F, main='After Replacement',
col='darkblue', ylim=c(0,0.04))
# Process Cabin Column to show number of cabins passenger has
cabin <- full$Cabin
n = length(cabin)
for(i in 1:n){
if(nchar(cabin[i]) == 0){
cabin[i] = 0
} else{
s = strsplit(cabin[i]," ")
cabin[i] = length(s[[1]])
}
}
table(cabin)
# process fare column
# check missing
full$PassengerId[is.na(full$Fare)]
full[1044,]
ggplot(full[full$Pclass == '3' & full$Embarked == 'S', ],
aes(x = Fare)) +
geom_density(fill = '#99d6ff', alpha=0.4) +
geom_vline(aes(xintercept=median(Fare, na.rm=T)),
colour='red', linetype='dashed', lwd=1)
# we can see that fare is clustered around mode. we just repace the missing value with
# median fare of according Pclass and Embarked
full$Fare[1044] <- median(full[full$Pclass == '3' & full$Embarked == 'S', ]$Fare, na.rm = TRUE)
# process embarked column
embarked <- full$Embarked
n = length(embarked)
for(i in 1:n){
if(embarked[i] != "S" && embarked[i] != "C" && embarked[i] != "Q"){
embarked[i] = "S"
}
}
table(embarked)
# number of survivals and nonsurvivals across different age
d <- data.frame(Age = age[1:891], Survived = train$Survived)
ggplot(d, aes(Age,fill = factor(Survived))) +
geom_histogram()
# create bar chart to show relationship between survival rate and age intervals
cuts <- cut(d$Age,hist(d$Age,10,plot = F)$breaks)
rate <- tapply(d$Survived,cuts,mean)
d2 <- data.frame(age = names(rate),rate)
barplot(d2$rate, xlab = "age",ylab = "survival rate")
# create histgram to show effect of Sex on survival
ggplot(train, aes(Sex,fill = factor(Survived))) +
geom_histogram(stat = "count")
# calculate survival rate
tapply(train$Survived,train$Sex,mean)
# extract title from Name
# (here I process full data set but only plot title vs survival in train
# data set because there is no survival value for test data set)
# NOTE(review): ". " below is a regex (any character + space), not a
# literal ". "; multi-word titles such as "the Countess" get truncated
n = length(full$Survived)
title = rep(NA,n)
for (i in 1:n){
lastname = strsplit(full$Name[i],", ")[[1]][2]
title[i] = strsplit(lastname,". ")[[1]][1]
}
# make a histogram of title v.s survival
d <- data.frame(title = title[1:891],Survived = train$Survived)
ggplot(d, aes(title,fill = factor(Survived))) +
geom_histogram(stat = "count")
# count of title
table(title)
# survival rate
tapply(d$Survived,d$title,mean)
# replace rare titles to 'Rare'
title[title != 'Mr' & title != 'Miss' & title != 'Mrs' & title != 'Master'] <- 'Rare'
table(title)
# make a histogram
ggplot(train, aes(Pclass,fill = factor(Survived))) +
geom_histogram(stat = "count")
# calculate survival rate
tapply(train$Survived,train$Pclass,mean)
# histogram of Parch
ggplot(train, aes(Parch,fill = factor(Survived))) +
geom_histogram(stat = "count")
# histogram of SibSp
ggplot(train, aes(SibSp,fill = factor(Survived))) +
geom_histogram(stat = "count")
# combine SibSp and Parch
family <- full$SibSp + full$Parch
d <- data.frame(family = family[1:891],Survived = train$Survived)
ggplot(d, aes(family,fill = factor(Survived))) +
geom_histogram(stat = "count")
tapply(d$Survived,d$family,mean)
# create histogram
d <- data.frame(Cabin = cabin[1:891],Survived = train$Survived)
ggplot(d, aes(Cabin,fill = factor(Survived))) +
geom_histogram(stat = "count")
# calculate survival rate
tapply(d$Survived,d$Cabin,mean)
# make a histogram
ggplot(train, aes(Fare,fill = factor(Survived))) +
geom_histogram()
# calculate
cuts <- cut(train$Fare,hist(train$Fare,10,plot = F)$breaks)
rate <- tapply(train$Survived,cuts,mean)
d <- data.frame(fare = names(rate),rate)
barplot(d$rate, xlab = "fare",ylab = "survival rate")
# make histogram
d <- data.frame(Embarked = embarked[1:891], Survived = train$Survived)
ggplot(d, aes(Embarked,fill = factor(Survived))) +
geom_histogram(stat = "count")
# make table
tapply(train$Survived,train$Embarked,mean)
# Assemble model inputs: f.* = training rows (1:891), t.* = test rows (892:1309)
# response variable
f.survived = train$Survived
# feature
# 1. age
f.age = age[1:891] # for training
t.age = age[892:1309] # for testing
# 2. fare
f.fare = full$Fare[1:891]
t.fare = full$Fare[892:1309]
# 3. cabin
f.cabin = cabin[1:891]
t.cabin = cabin[892:1309]
# 4. title
f.title = title[1:891]
t.title = title[892:1309]
# 5. family
family <- full$SibSp + full$Parch
f.family = family[1:891]
t.family = family[892:1309]
# 6. plcass
f.pclass = train$Pclass
t.pclass = test$Pclass
# 7. sex
f.sex = train$Sex
t.sex = test$Sex
# 8. embarked
f.embarked = embarked[1:891]
t.embarked = embarked[892:1309]
# construct training data frame
new_train = data.frame(survived = f.survived, age = f.age, fare = f.fare , sex = f.sex,
embarked = f.embarked ,family = f.family ,title = f.title ,cabin = f.cabin, pclass= f.pclass)
# logistic regression
fit_logit <- glm(factor(survived) ~ age + fare + sex + embarked + family
+ title + cabin + pclass,data = new_train,family = binomial)
# predicted result of regression
ans_logit = rep(NA,891)
for(i in 1:891){
ans_logit[i] = round(fit_logit$fitted.values[[i]],0)
}
# check result
mean(ans_logit == train$Survived)
table(ans_logit)
# random forest
library('randomForest')
set.seed(123)
fit_rf <- randomForest(factor(survived) ~ age + fare + sex + embarked + family
+ title + cabin + pclass,data = new_train)
# predicted result of regression
rf.fitted = predict(fit_rf)
ans_rf = rep(NA,891)
for(i in 1:891){
ans_rf[i] = as.integer(rf.fitted[[i]]) - 1
}
# check result
mean(ans_rf == train$Survived)
table(ans_rf)
# decision tree
library(rpart)
fit_dt <- rpart(factor(survived) ~ age + fare + sex + embarked + family
+ title + cabin + pclass,data = new_train)
# predicted result of regression
dt.fitted = predict(fit_dt)
ans_dt = rep(NA,891)
for(i in 1:891){
if(dt.fitted[i,1] >= dt.fitted[i,2] ){
ans_dt[i] = 0
} else{
ans_dt[i] = 1
}
}
# check result
mean(ans_dt == train$Survived)
table(ans_dt)
# svm
library(e1071)
fit_svm <- svm(factor(survived) ~ age + fare + sex + embarked + family
+ title + cabin + pclass,data = new_train)
# predicted result of regression
svm.fitted = predict(fit_svm)
ans_svm = rep(NA,891)
for(i in 1:891){
ans_svm[i] = as.integer(svm.fitted[[i]]) - 1
}
# check result
mean(ans_svm == train$Survived)
table(ans_svm)
# Training-set confusion counts for each model: a=TP, b=FP, c=FN, d=TN
# logistic
a = sum(ans_logit ==1 & f.survived == 1)
b = sum(ans_logit ==1 & f.survived == 0)
c = sum(ans_logit ==0 & f.survived == 1)
d = sum(ans_logit ==0 & f.survived == 0)
data.frame(a,b,c,d)
# Random Forest
a = sum(ans_rf ==1 & f.survived == 1)
b = sum(ans_rf ==1 & f.survived == 0)
c = sum(ans_rf ==0 & f.survived == 1)
d = sum(ans_rf ==0 & f.survived == 0)
data.frame(a,b,c,d)
# Decision Tree
a = sum(ans_dt ==1 & f.survived == 1)
b = sum(ans_dt ==1 & f.survived == 0)
c = sum(ans_dt ==0 & f.survived == 1)
d = sum(ans_dt ==0 & f.survived == 0)
data.frame(a,b,c,d)
# SVM
a = sum(ans_svm ==1 & f.survived == 1)
b = sum(ans_svm ==1 & f.survived == 0)
c = sum(ans_svm ==0 & f.survived == 1)
d = sum(ans_svm ==0 & f.survived == 0)
data.frame(a,b,c,d)
# construct testing data frame
test_data_set <- data.frame(age = t.age, fare = t.fare, sex = t.sex, embarked = t.embarked,
family = t.family, title = t.title,cabin = t.cabin, pclass = t.pclass)
# make prediction
svm_predict = predict(fit_svm,newdata = test_data_set )
ans_svm_predict = rep(NA,418)
for(i in 1:418){
ans_svm_predict[i] = as.integer(svm_predict[[i]]) - 1
}
table(ans_svm_predict)
# create a csv file for submittion
d<-data.frame(PassengerId = test$PassengerId, Survived = ans_svm_predict)
write.csv(d,file = "TitanicResult.csv",row.names = F)
|
##San Diego Bay Turtle Movement Analysis
## Original R Code, JT Froeschke, December 29, 2015
## Data modified from previous versions by SE Graham, June 2016, May 2018
## Filtered data utilize GPS, and Argos LC = 1,2,3
## Filtered data do not include points on land
## Filtered data only allow for 1 relocation every 4 horus
## Purpose of the script is to compute homerange (area) using
## least squares cross-validation including 50% and 95% contours.
## An analysis of each turtle and an aggregate pre and post will be computed
## h values are chosen based on best judgment and gst behavior
# NOTE(review): rm(list = ls()) wipes the user's whole workspace; avoid in
# shared scripts
rm(list = ls())
#getwd()
#list.files()
#Section 1: Load libraries and set wd
library(readxl)
library(dplyr)
library(adehabitatHR)
# NOTE(review): readxl is loaded twice (duplicate of the first call above)
library(readxl)
library(rgdal)
library(leaflet)
#note: development version in use
##Section 2: read in data
## Section 2.1: Pre
#Base directory for the filtered relocation CSVs; one file per tag.
d <- "data/files_Apr2018_withNewTags/"
tag37616 <-read.csv(paste0(d, "pre/37616_inside_DayNight_4hrs_2018-04-20.csv"))
tag37623 <-read.csv(paste0(d, "pre/37623_inside_DayNight_4hrs_2018-04-20.csv"))
tag44366 <-read.csv(paste0(d, "pre/44366_inside_DayNight_4hrs_2018-04-20.csv"))
tag52674 <-read.csv(paste0(d, "pre/52674_inside_DayNight_4hrs_2018-04-20.csv"))
tag52675 <-read.csv(paste0(d, "pre/52675_inside_DayNight_4hrs_2018-04-20.csv"))
tag78500 <-read.csv(paste0(d, "pre/78500_inside_DayNight_4hrs_2018-04-20.csv"))
tag79786 <-read.csv(paste0(d, "pre/79786_inside_DayNight_4hrs_2018-04-20.csv"))
##Section 2.2, r
#Stack all pre-period tags into one data set and save a combined CSV.
Pre.all <- rbind(tag79786,
tag78500,
tag52675,
tag52674,
tag37616,
tag37623,
tag44366)
write.csv(Pre.all, "outputs2/Pre_GPS_LC.all.csv", row.names=FALSE)
#View(Pre.all)
##Note I checked total of pre.all equals number of rows of individual tags
## e.g., 40+128+125+283+13+120+133+7
## Section 2.2: Post
tag12607106 <-read.csv(paste0(d, "post/12607106_inside_DayNight_4hrs_2018-04-20.csv"))
tag12607107 <-read.csv(paste0(d, "post/12607107_inside_DayNight_4hrs_2018-04-20.csv"))
tag126070 <-read.csv(paste0(d, "post/126070_inside_DayNight_4hrs_2018-04-20.csv"))
tag12606905 <-read.csv(paste0(d, "post/12606905_inside_DayNight_4hrs_2018-04-20.csv"))
tag12606907 <-read.csv(paste0(d, "post/12606907_inside_DayNight_4hrs_2018-04-20.csv"))
tag126068 <-read.csv(paste0(d, "post/126068_inside_DayNight_4hrs_2018-04-20.csv"))
tag126067 <-read.csv(paste0(d, "post/126067_inside_DayNight_4hrs_2018-04-20.csv"))
tag126066 <-read.csv(paste0(d, "post/126066_inside_DayNight_4hrs_2018-04-20.csv"))
tag126065 <-read.csv(paste0(d, "post/126065_inside_DayNight_4hrs_2018-04-20.csv"))
tag126064 <-read.csv(paste0(d, "post/126064_inside_DayNight_4hrs_2018-04-20.csv"))
tag44359 <-read.csv(paste0(d, "post/44359_inside_DayNight_4hrs_2018-04-20.csv"))
tag151375 <-read.csv(paste0(d, "new/151375_inside_DayNight_4hrs_2018-04-20.csv"))
tag151377 <-read.csv(paste0(d, "new/151377_inside_DayNight_4hrs_2018-04-20.csv"))
tag151378 <-read.csv(paste0(d, "new/151378_inside_DayNight_4hrs_2018-04-20.csv"))
tag151380 <-read.csv(paste0(d, "new/151380_inside_DayNight_4hrs_2018-04-20.csv"))
tag151381 <-read.csv(paste0(d, "new/151381_inside_DayNight_4hrs_2018-04-20.csv"))
tag151384 <-read.csv(paste0(d, "new/151384_inside_DayNight_4hrs_2018-04-20.csv"))
tag152313 <-read.csv(paste0(d, "new/152313_inside_DayNight_4hrs_2018-04-20.csv"))
tag152314 <-read.csv(paste0(d, "new/152314_inside_DayNight_4hrs_2018-04-20.csv"))
tag152315 <-read.csv(paste0(d, "new/152315_inside_DayNight_4hrs_2018-04-20.csv"))
tag152319 <-read.csv(paste0(d, "new/152319_inside_DayNight_4hrs_2018-04-20.csv"))
tag152322 <-read.csv(paste0(d, "new/152322_inside_DayNight_4hrs_2018-04-20.csv"))
tag152323 <-read.csv(paste0(d, "new/152323_inside_DayNight_4hrs_2018-04-20.csv"))
Post.all <- rbind(tag12607106,
tag12607107,
tag126070,
tag12606905,
tag12606907,
tag126068,
tag126067,
tag126066,
tag126065,
tag126064,
tag44359,
tag151375,
tag151377,
tag151378,
tag151380,
tag151381,
tag151384,
tag152313,
tag152314,
tag152315,
tag152319,
tag152322,
tag152323)
write.csv(Post.all, "outputs2/Post_GPS_LC.all.csv", row.names=FALSE)
##note: can ignore warnings
#View(Post.all)
## Section 3: Compute HR models
## Section 3.1: Pre
## get coordinates as a dataframe and make a spatial object
Pre.all.coords <- data.frame(x=Pre.all$Lon, y=Pre.all$Lat)
coordinates(Pre.all.coords) <- ~ x + y
class(Pre.all.coords)
plot(Pre.all.coords, axes=TRUE) ## sanity check
Post.all.coords <- data.frame(x=Post.all$Lon, y=Post.all$Lat)
#Post.all.coords <- subset(Post.all.coords, y < 32.66)
coordinates(Post.all.coords) <- ~ x + y
class(Post.all.coords)
plot(Post.all.coords, axes=TRUE) ## sanity check
##Section 3.1.2: Project data
## Import previous file to get projection
## Project sample data following this example
## http://www.maths.lancs.ac.uk/~rowlings/Teaching/UseR2012/cheatsheet.html
## use spTransform to convert new data to tag example projection
library(rgdal)
#tagprj <- readOGR("/Users/sgraham/R/gst_hr_analysis2018/Tag_065_UTMzone11n", "tag_065_project")
tagprj <- readOGR("Tag_065_UTMzone11n", "tag_065_project")
plot(tagprj, axes=TRUE)
saveproj <- proj4string(tagprj)
Pre.all.proj <- Pre.all.coords
latlong = "+init=epsg:4326"
proj4string(Pre.all.proj) = CRS(latlong)
Pre.all.utm <- spTransform(Pre.all.proj, saveproj)
plot(Pre.all.utm, axes=TRUE) ## sanity check
Post.all.proj <- Post.all.coords
latlong = "+init=epsg:4326"
proj4string(Post.all.proj) = CRS(latlong)
Post.all.utm <- spTransform(Post.all.proj, saveproj)
plot(Post.all.utm, axes=TRUE) ## sanity check
## Section 3.1.2
##Visually optimized hlim = c(0.565,1.5), grid=300
Pre.kd <- kernelUD(Pre.all.utm, h="LSCV", hlim = c(0.03, 1.5), grid=300)
plotLSCV(Pre.kd)
Pre.Area <- kernel.area(Pre.kd, percent = seq(20, 95, by = 5),
unin = c("m"),
unout = c("km"), standardize = FALSE)
Pre.Area
##start here:
Pre.bw <- Pre.kd@h[[3]] ##bandwidth estimate
Pre.Area.50 <- round(Pre.Area[7],2)
Pre.Area.95 <- round(Pre.Area[16],2)
##repeat for each and export data: bw, 50, 95, and plot
## Section 3.1.3: Plot and export
library(png)
Pre.ver.50 <- getverticeshr(Pre.kd, 50)
Pre.ver.95 <- getverticeshr(Pre.kd, 95)
plot(Pre.ver.50)
plot(Pre.ver.95)
png(filename="plots2/Pre.all.png")
plot(Pre.ver.95, axes=TRUE#, main=paste("Pre.all ", "Area = ", Area.95, " km2\n", "bandwidth = ", round(Pre.bw,1), sep="")
)
plot(getverticeshr(Pre.kd, 95), add=TRUE, lwd=2)
plot(Pre.all.utm, add=TRUE, col="blue")
dev.off()
##Write shapefile
#Empty shapefile folder
writeOGR(Pre.ver.50, "shapefiles2/Pre.ver.50.shp",layer="Pre.ver.50", driver="ESRI Shapefile", overwrite_layer=TRUE)
writeOGR(Pre.ver.95, "shapefiles2/Pre.ver.95.shp",layer="Pre.ver.95", driver="ESRI Shapefile", overwrite_layer=TRUE)
###get projection of Tag_78500.bw.id
##reproject points
#proj4string(Pre.all) <- proj4string(all3)
Pre.leafpoints <-spTransform(Pre.all.utm,CRS("+proj=longlat"))
##reproject vertices
proj4string(Pre.ver.50) <- proj4string(Pre.all.utm)
Pre.leafver.50 <-spTransform(Pre.ver.50,CRS("+proj=longlat"))
proj4string(Pre.ver.95) <- proj4string(Pre.all.utm)
Pre.leafver.95 <-spTransform(Pre.ver.95,CRS("+proj=longlat"))
library(leaflet)
m <- leaflet() %>% setView(lng = -117.1, lat = 32.65, zoom = 11)
m %>%
addTiles('http://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{z}/{y}/{x}',
options = providerTileOptions(noWrap = TRUE)) %>%
addTiles('http://server.arcgisonline.com/ArcGIS/rest/services/Reference/World_Boundaries_and_Places/Mapserver/tile/{z}/{y}/{x}',
options = providerTileOptions(noWrap = TRUE)) %>%
addPolygons(data=Pre.leafver.95, stroke=TRUE, color="#2ca25f", fillOpacity=0.75, group="Home range 95%")%>%
addPolygons(data=Pre.leafver.50, stroke=TRUE, color="#99d8c9", fillOpacity=0.75, group="Home range 50%")%>%
addCircles(data=Pre.leafpoints, group="Location data", color="yellow", fillOpacity=0.3, stroke=FALSE) %>%
addLayersControl(
overlayGroups = c("Home range 95%", "Home range 50%", "Location data"),
options = layersControlOptions(collapsed = FALSE)
)
Pre.outputs <- data.frame(Period="Pre", Source="All", Method="LSCV", Bandwidth=Pre.bw, A50=Pre.Area.50, A95=Pre.Area.95)
write.csv(Pre.outputs, "outputs2/PreOutputs.csv", row.names=FALSE)
############Post
## Section 3.2.2
#hlim and grid visually optimized = 0.525/375 m
Post.kd <- kernelUD(Post.all.utm, h="LSCV", hlim = c(.35, 1.5), grid=175) #set grid=100
plotLSCV(Post.kd)
Post.Area <- kernel.area(Post.kd, percent = seq(20, 95, by = 5),
unin = c("m"),
unout = c("km"), standardize = FALSE)
Post.Area
##start here:
Post.bw <- Post.kd@h[[3]] ##bandwidth estimate
Post.Area.50 <- round(Post.Area[7],2)
Post.Area.95 <- round(Post.Area[16],2)
##repeat for each and export data: bw, 50, 95, and plot
## Section 3.2.3: Plot and export
Post.ver.50 <- getverticeshr(Post.kd, 50)
Post.ver.95 <- getverticeshr(Post.kd, 95)
plot(Post.ver.50)
plot(Post.ver.95)
png(filename="plots2/Post.all.png")
plot(Post.ver.95, axes=TRUE#, main=paste("Post.all ", "Area = ", Area.95, " km2\n", "bandwidth = ", round(Post.bw,1), sep="")#
)
plot(getverticeshr(Post.kd, 95), add=TRUE, lwd=2)
plot(Post.all.utm, add=TRUE, col="blue")
dev.off()
##Write shapefile
writeOGR(Post.ver.50, "shapefiles2/Post.ver.50.shp",layer="Post.ver.50", driver="ESRI Shapefile", overwrite_layer=TRUE)
writeOGR(Post.ver.95, "shapefiles2/Post.ver.95.shp",layer="Post.ver.95", driver="ESRI Shapefile", overwrite_layer=TRUE)
Post.outputs <- data.frame(Period="Post", Source="All", Method="LSCV", Bandwidth=Post.bw, A50=Post.Area.50, A95=Post.Area.95)
write.csv(Post.outputs, "outputs2/PostOutputs.csv", row.names=FALSE)
###get projection of Tag_78500.bw.id
##reproject points
#proj4string(Post.all) <- proj4string(all3)
Post.leafpoints <-spTransform(Post.all.utm,CRS("+proj=longlat"))
##reproject vertices
proj4string(Post.ver.50) <- proj4string(Post.all.utm)
Post.leafver.50 <-spTransform(Post.ver.50,CRS("+proj=longlat"))
proj4string(Post.ver.95) <- proj4string(Post.all.utm)
Post.leafver.95 <-spTransform(Post.ver.95,CRS("+proj=longlat"))
library(leaflet)
m <- leaflet() %>% setView(lng = -117.1, lat = 32.65, zoom = 11)
m %>%
addTiles('http://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{z}/{y}/{x}',
options = providerTileOptions(noWrap = TRUE)) %>%
addTiles('http://server.arcgisonline.com/ArcGIS/rest/services/Reference/World_Boundaries_and_Places/Mapserver/tile/{z}/{y}/{x}',
options = providerTileOptions(noWrap = TRUE)) %>%
addPolygons(data=Post.leafver.95, stroke=TRUE, color="#2ca25f", fillOpacity=0.75, group="Home range 95%")%>%
addPolygons(data=Post.leafver.50, stroke=TRUE, color="#99d8c9", fillOpacity=0.75, group="Home range 50%")%>%
addCircles(data=Post.leafpoints, group="Location data", color="yellow", fillOpacity=0.3, stroke=FALSE) %>%
addLayersControl(
overlayGroups = c("Home range 95%", "Home range 50%", "Location data"),
options = layersControlOptions(collapsed = FALSE)
)
## Section 3.2.4: leaflet
###get projection of Tag_78500.bw.id
##reproject points
#proj4string(Post.all) <- proj4string(all3)
Post.leafpoints <-spTransform(Post.all.utm,CRS("+proj=longlat"))
############## end
##Section 4: Iterate through each tag in a loop
#outputs: 50 and 95 shapefiles and spreadsheet
# ##Data sources
# Pre.all.utm
# Post.all.utm
## get coordinates as a dataframe and make a spatial object
#bind a new set of pretags that does not include small relocation values for turtles ##called pre.some
Pre.some <- rbind(tag78500,
tag52675,
tag37616,
tag37623,
tag44366)
Pre.unique <- unique(Pre.some$ArgosID)
Pre.ind.outputs <- c() #to hold results
###clear all files from output folders before running new script
## Section 4.1: per-individual PRE-period home ranges.
## For each tag: project relocations to UTM (re-using `latlong` and `saveproj`
## from Section 3), fit an LSCV kernel UD, export 50%/95% contour shapefiles,
## and accumulate one summary row per tag.
## seq_along() instead of 1:length(): 1:length(x) wrongly yields c(1, 0) when
## x is empty, which would run the loop body with a bad index.
for(i in seq_along(Pre.unique)){
Pre.tmp <- subset(Pre.some, ArgosID==Pre.unique[i])
print(i)
Pre.tmp.coords <- data.frame(x=Pre.tmp$Lon, y=Pre.tmp$Lat)
coordinates(Pre.tmp.coords) <- ~ x + y
class(Pre.tmp.coords)
#plot(Pre.tmp.coords, axes=TRUE) ## sanity check
# library(rgdal)
# tagprj <- readOGR("C:/Users/Costco/Documents/Dropbox/John Business/Business/sdbay/homerange/shps/Tag_065.shp",
# layer="Tag_065")
#plot(tagprj, axes=TRUE)
#saveproj <- proj4string(tagprj)
Pre.tmp.proj <- Pre.tmp.coords
#latlong = "+init=epsg:4326"
proj4string(Pre.tmp.proj) = CRS(latlong)
Pre.tmp.utm <- spTransform(Pre.tmp.proj, saveproj)
#plot(Pre.tmp.utm, axes=TRUE) ## sanity check
## Section 4.1.2
## hlim/grid fixed to the values chosen for the aggregate Pre model above.
Pre.tmp.kd <- kernelUD(Pre.tmp.utm, h="LSCV", hlim = c(0.565, 1.5), grid=125) #set values from pre.all
#plotLSCV(Pre.kd)
Pre.tmp.Area <- kernel.area(Pre.tmp.kd, percent = seq(20, 95, by = 5),
unin = c("m"),
unout = c("km"), standardize = FALSE)
Pre.tmp.Area
##start here:
Pre.tmp.bw <- Pre.tmp.kd@h[[3]] ##bandwidth estimate
## percent = seq(20, 95, by = 5) gives 16 values; index 7 = 50%, index 16 = 95%.
Pre.tmp.Area.50 <- round(Pre.tmp.Area[7],2)
Pre.tmp.Area.95 <- round(Pre.tmp.Area[16],2)
Pre.ver.tmp.50 <- getverticeshr(Pre.tmp.kd, 50)
Pre.ver.tmp.95 <- getverticeshr(Pre.tmp.kd, 95)
filename50 <- paste("shapefiles2/", "tag",Pre.unique[i],"percent50th", ".shp", sep="")
filename95 <- paste("shapefiles2/", "tag",Pre.unique[i],"percent95th", ".shp", sep="")
##Write shapefile
writeOGR(Pre.ver.tmp.50, filename50,layer="Pre.ver.tmp.50", driver="ESRI Shapefile", overwrite_layer=TRUE)
writeOGR(Pre.ver.tmp.95, filename95,layer="Pre.ver.tmp.95", driver="ESRI Shapefile", overwrite_layer=TRUE)
pre.tmp.outputs <- data.frame(Period="Pre", Source=Pre.unique[i], Method="LSCV",
Bandwidth=Pre.tmp.bw,
A50=Pre.tmp.Area.50, A95=Pre.tmp.Area.95)
## Growing by rbind is acceptable here: at most a handful of tags per period.
Pre.ind.outputs <- rbind(Pre.ind.outputs, pre.tmp.outputs)
}
# Export per-individual pre-period summary.
# Fixed: the output file name contained an embedded space ("Pre.ind.outputs .csv").
write.csv(Pre.ind.outputs, "outputs2/Pre.ind.outputs.csv", row.names=FALSE)
###########
## Section 5: Post
## get coordinates as a dataframe and make a spatial object
Post.unique <- unique(Post.all$ArgosID)
Post.ind.outputs <- c() #to hold results
## Section 5: per-individual POST-period home ranges (mirrors the Pre loop).
## Re-uses `latlong` and `saveproj` defined in Section 3.
## seq_along() instead of 1:length(): 1:length(x) wrongly yields c(1, 0) when
## x is empty, which would run the loop body with a bad index.
for(i in seq_along(Post.unique)){
Post.tmp <- subset(Post.all, ArgosID==Post.unique[i])
print(i)
Post.tmp.coords <- data.frame(x=Post.tmp$Lon, y=Post.tmp$Lat)
coordinates(Post.tmp.coords) <- ~ x + y
class(Post.tmp.coords)
#plot(Post.tmp.coords, axes=TRUE) ## sanity check
# library(rgdal)
# tagprj <- readOGR("C:/Users/Costco/Documents/Dropbox/John Business/Business/sdbay/homerange/shps/Tag_065.shp",
# layer="Tag_065")
#plot(tagprj, axes=TRUE)
#saveproj <- proj4string(tagprj)
Post.tmp.proj <- Post.tmp.coords
#latlong = "+init=epsg:4326"
proj4string(Post.tmp.proj) = CRS(latlong)
Post.tmp.utm <- spTransform(Post.tmp.proj, saveproj)
#plot(Post.tmp.utm, axes=TRUE) ## sanity check
## Section 4.1.2
Post.tmp.kd <- kernelUD(Post.tmp.utm, h="LSCV", hlim = c(0.525, 1.5), grid=375) #set grid=300
#plotLSCV(Post.kd)
Post.tmp.Area <- kernel.area(Post.tmp.kd, percent = seq(20, 95, by = 5),
unin = c("m"),
unout = c("km"), standardize = FALSE)
Post.tmp.Area
##start here:
Post.tmp.bw <- Post.tmp.kd@h[[3]] ##bandwidth estimate
## percent = seq(20, 95, by = 5) gives 16 values; index 7 = 50%, index 16 = 95%.
Post.tmp.Area.50 <- round(Post.tmp.Area[7],2)
Post.tmp.Area.95 <- round(Post.tmp.Area[16],2)
Post.ver.tmp.50 <- getverticeshr(Post.tmp.kd, 50)
Post.ver.tmp.95 <- getverticeshr(Post.tmp.kd, 95)
filename50 <- paste("shapefiles2/", "tag",Post.unique[i],"percent50th", ".shp", sep="")
filename95 <- paste("shapefiles2/", "tag",Post.unique[i],"percent95th", ".shp", sep="")
##Write shapefile
writeOGR(Post.ver.tmp.50, filename50,layer="Post.ver.tmp.50", driver="ESRI Shapefile", overwrite_layer=TRUE)
writeOGR(Post.ver.tmp.95, filename95,layer="Post.ver.tmp.95", driver="ESRI Shapefile", overwrite_layer=TRUE)
Post.tmp.outputs <- data.frame(Period="Post", Source=Post.unique[i], Method="LSCV",
Bandwidth=Post.tmp.bw,
A50=Post.tmp.Area.50, A95=Post.tmp.Area.95)
## Growing by rbind is acceptable here: at most a couple dozen tags per period.
Post.ind.outputs <- rbind(Post.ind.outputs, Post.tmp.outputs)
}
# Export per-individual post-period summary.
# Fixed: write to "outputs2/" like every other export in this script (was "outputs/"),
# and remove the embedded space from the file name ("Post.ind.outputs .csv").
write.csv(Post.ind.outputs, "outputs2/Post.ind.outputs.csv", row.names=FALSE)
##combine data
## dt table
## leaflet
Pre.outputs2 <- Pre.outputs
Pre.outputs2$Source <- factor(Pre.outputs2$Source)
Pre.ind.outputs2 <- Pre.ind.outputs
Pre.ind.outputs2$Source <- factor(Pre.ind.outputs2$Source)
Post.outputs2 <- Post.outputs
Post.outputs2$Source <- factor(Post.outputs2$Source)
Post.ind.outputs2 <- Post.ind.outputs
Post.ind.outputs2$Source <- factor(Post.ind.outputs2$Source)
summary.all <- rbind(Pre.outputs2,Pre.ind.outputs2, Post.outputs2, Post.ind.outputs2)
write.csv(summary.all , "outputs2/summaryall.csv", row.names=FALSE)
save.image("homerangegpsLC.RData")
##Post all leaflet map
setwd("/Users/sgraham/R/gst_hr_analysis2016")
#load("homerange5.RData")
library(leaflet)
m <- leaflet() %>% setView(lng = -117.1, lat = 32.65, zoom = 11)
m %>%
addTiles('http://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{z}/{y}/{x}',
options = providerTileOptions(noWrap = TRUE)) %>%
addTiles('http://server.arcgisonline.com/ArcGIS/rest/services/Reference/World_Boundaries_and_Places/Mapserver/tile/{z}/{y}/{x}',
options = providerTileOptions(noWrap = TRUE)) %>%
addPolygons(data=Post.leafver.95, stroke=TRUE, color="#2ca25f", fillOpacity=0.75, group="Post home range 95%")%>%
addPolygons(data=Post.leafver.50, stroke=TRUE, color="#99d8c9", fillOpacity=0.75, group="Post home range 50%")%>%
addCircles(data=Post.leafpoints, group="Location data", color="yellow", fillOpacity=0.3, stroke=FALSE) %>%
addLayersControl(
overlayGroups = c("Post home range 95%", "Post home range 50%", "Location data"),
options = layersControlOptions(collapsed = FALSE)
)
| /hr_analysis_GPS_LC_newtags_24apr2018.R | no_license | mteguchi/Cm_SDB_PowerPlant | R | false | false | 18,454 | r | ##San Diego Bay Turtle Movement Analysis
## Original R Code, JT Froeschke, December 29, 2015
## Data modified from previous versions by SE Graham, June 2016, May 2018
## Filtered data utilize GPS, and Argos LC = 1,2,3
## Filtered data do not include points on land
## Filtered data only allow for 1 relocation every 4 horus
## Purpose of the script is to compute homerange (area) using
## least squares cross-validation including 50% and 95% contours.
## An analysis of each turtle and an aggregate pre and post will be computed
## h values are chosen based on best judgment and gst behavior
rm(list = ls())
#getwd()
#list.files()
#Section 1: Load libraries and set wd
library(readxl)
library(dplyr)
library(adehabitatHR)
library(readxl)
library(rgdal)
library(leaflet)
#note: development version in use
##Section 2: read in data
## Section 2.1: Pre
d <- "data/files_Apr2018_withNewTags/"
tag37616 <-read.csv(paste0(d, "pre/37616_inside_DayNight_4hrs_2018-04-20.csv"))
tag37623 <-read.csv(paste0(d, "pre/37623_inside_DayNight_4hrs_2018-04-20.csv"))
tag44366 <-read.csv(paste0(d, "pre/44366_inside_DayNight_4hrs_2018-04-20.csv"))
tag52674 <-read.csv(paste0(d, "pre/52674_inside_DayNight_4hrs_2018-04-20.csv"))
tag52675 <-read.csv(paste0(d, "pre/52675_inside_DayNight_4hrs_2018-04-20.csv"))
tag78500 <-read.csv(paste0(d, "pre/78500_inside_DayNight_4hrs_2018-04-20.csv"))
tag79786 <-read.csv(paste0(d, "pre/79786_inside_DayNight_4hrs_2018-04-20.csv"))
##Section 2.2, r
Pre.all <- rbind(tag79786,
tag78500,
tag52675,
tag52674,
tag37616,
tag37623,
tag44366)
write.csv(Pre.all, "outputs2/Pre_GPS_LC.all.csv", row.names=FALSE)
#View(Pre.all)
##Note I checked total of pre.all equals number of rows of individual tags
## e.g., 40+128+125+283+13+120+133+7
## Section 2.2: Post
tag12607106 <-read.csv(paste0(d, "post/12607106_inside_DayNight_4hrs_2018-04-20.csv"))
tag12607107 <-read.csv(paste0(d, "post/12607107_inside_DayNight_4hrs_2018-04-20.csv"))
tag126070 <-read.csv(paste0(d, "post/126070_inside_DayNight_4hrs_2018-04-20.csv"))
tag12606905 <-read.csv(paste0(d, "post/12606905_inside_DayNight_4hrs_2018-04-20.csv"))
tag12606907 <-read.csv(paste0(d, "post/12606907_inside_DayNight_4hrs_2018-04-20.csv"))
tag126068 <-read.csv(paste0(d, "post/126068_inside_DayNight_4hrs_2018-04-20.csv"))
tag126067 <-read.csv(paste0(d, "post/126067_inside_DayNight_4hrs_2018-04-20.csv"))
tag126066 <-read.csv(paste0(d, "post/126066_inside_DayNight_4hrs_2018-04-20.csv"))
tag126065 <-read.csv(paste0(d, "post/126065_inside_DayNight_4hrs_2018-04-20.csv"))
tag126064 <-read.csv(paste0(d, "post/126064_inside_DayNight_4hrs_2018-04-20.csv"))
tag44359 <-read.csv(paste0(d, "post/44359_inside_DayNight_4hrs_2018-04-20.csv"))
tag151375 <-read.csv(paste0(d, "new/151375_inside_DayNight_4hrs_2018-04-20.csv"))
tag151377 <-read.csv(paste0(d, "new/151377_inside_DayNight_4hrs_2018-04-20.csv"))
tag151378 <-read.csv(paste0(d, "new/151378_inside_DayNight_4hrs_2018-04-20.csv"))
tag151380 <-read.csv(paste0(d, "new/151380_inside_DayNight_4hrs_2018-04-20.csv"))
tag151381 <-read.csv(paste0(d, "new/151381_inside_DayNight_4hrs_2018-04-20.csv"))
tag151384 <-read.csv(paste0(d, "new/151384_inside_DayNight_4hrs_2018-04-20.csv"))
tag152313 <-read.csv(paste0(d, "new/152313_inside_DayNight_4hrs_2018-04-20.csv"))
tag152314 <-read.csv(paste0(d, "new/152314_inside_DayNight_4hrs_2018-04-20.csv"))
tag152315 <-read.csv(paste0(d, "new/152315_inside_DayNight_4hrs_2018-04-20.csv"))
tag152319 <-read.csv(paste0(d, "new/152319_inside_DayNight_4hrs_2018-04-20.csv"))
tag152322 <-read.csv(paste0(d, "new/152322_inside_DayNight_4hrs_2018-04-20.csv"))
tag152323 <-read.csv(paste0(d, "new/152323_inside_DayNight_4hrs_2018-04-20.csv"))
Post.all <- rbind(tag12607106,
tag12607107,
tag126070,
tag12606905,
tag12606907,
tag126068,
tag126067,
tag126066,
tag126065,
tag126064,
tag44359,
tag151375,
tag151377,
tag151378,
tag151380,
tag151381,
tag151384,
tag152313,
tag152314,
tag152315,
tag152319,
tag152322,
tag152323)
write.csv(Post.all, "outputs2/Post_GPS_LC.all.csv", row.names=FALSE)
##note: can ignore warnings
#View(Post.all)
## Section 3: Compute HR models
## Section 3.1: Pre
## get coordinates as a dataframe and make a spatial object
Pre.all.coords <- data.frame(x=Pre.all$Lon, y=Pre.all$Lat)
coordinates(Pre.all.coords) <- ~ x + y
class(Pre.all.coords)
plot(Pre.all.coords, axes=TRUE) ## sanity check
Post.all.coords <- data.frame(x=Post.all$Lon, y=Post.all$Lat)
#Post.all.coords <- subset(Post.all.coords, y < 32.66)
coordinates(Post.all.coords) <- ~ x + y
class(Post.all.coords)
plot(Post.all.coords, axes=TRUE) ## sanity check
##Section 3.1.2: Project data
## Import previous file to get projection
## Project sample data following this example
## http://www.maths.lancs.ac.uk/~rowlings/Teaching/UseR2012/cheatsheet.html
## use spTransform to convert new data to tag example projection
library(rgdal)
#tagprj <- readOGR("/Users/sgraham/R/gst_hr_analysis2018/Tag_065_UTMzone11n", "tag_065_project")
tagprj <- readOGR("Tag_065_UTMzone11n", "tag_065_project")
plot(tagprj, axes=TRUE)
saveproj <- proj4string(tagprj)
Pre.all.proj <- Pre.all.coords
latlong = "+init=epsg:4326"
proj4string(Pre.all.proj) = CRS(latlong)
Pre.all.utm <- spTransform(Pre.all.proj, saveproj)
plot(Pre.all.utm, axes=TRUE) ## sanity check
Post.all.proj <- Post.all.coords
latlong = "+init=epsg:4326"
proj4string(Post.all.proj) = CRS(latlong)
Post.all.utm <- spTransform(Post.all.proj, saveproj)
plot(Post.all.utm, axes=TRUE) ## sanity check
## Section 3.1.2
##Visually optimized hlim = c(0.565,1.5), grid=300
Pre.kd <- kernelUD(Pre.all.utm, h="LSCV", hlim = c(0.03, 1.5), grid=300)
plotLSCV(Pre.kd)
Pre.Area <- kernel.area(Pre.kd, percent = seq(20, 95, by = 5),
unin = c("m"),
unout = c("km"), standardize = FALSE)
Pre.Area
##start here:
Pre.bw <- Pre.kd@h[[3]] ##bandwidth estimate
Pre.Area.50 <- round(Pre.Area[7],2)
Pre.Area.95 <- round(Pre.Area[16],2)
##repeat for each and export data: bw, 50, 95, and plot
## Section 3.1.3: Plot and export
library(png)
Pre.ver.50 <- getverticeshr(Pre.kd, 50)
Pre.ver.95 <- getverticeshr(Pre.kd, 95)
plot(Pre.ver.50)
plot(Pre.ver.95)
png(filename="plots2/Pre.all.png")
plot(Pre.ver.95, axes=TRUE#, main=paste("Pre.all ", "Area = ", Area.95, " km2\n", "bandwidth = ", round(Pre.bw,1), sep="")
)
plot(getverticeshr(Pre.kd, 95), add=TRUE, lwd=2)
plot(Pre.all.utm, add=TRUE, col="blue")
dev.off()
##Write shapefile
#Empty shapefile folder
writeOGR(Pre.ver.50, "shapefiles2/Pre.ver.50.shp",layer="Pre.ver.50", driver="ESRI Shapefile", overwrite_layer=TRUE)
writeOGR(Pre.ver.95, "shapefiles2/Pre.ver.95.shp",layer="Pre.ver.95", driver="ESRI Shapefile", overwrite_layer=TRUE)
###get projection of Tag_78500.bw.id
##reproject points
#proj4string(Pre.all) <- proj4string(all3)
Pre.leafpoints <-spTransform(Pre.all.utm,CRS("+proj=longlat"))
##reproject vertices
proj4string(Pre.ver.50) <- proj4string(Pre.all.utm)
Pre.leafver.50 <-spTransform(Pre.ver.50,CRS("+proj=longlat"))
proj4string(Pre.ver.95) <- proj4string(Pre.all.utm)
Pre.leafver.95 <-spTransform(Pre.ver.95,CRS("+proj=longlat"))
library(leaflet)
m <- leaflet() %>% setView(lng = -117.1, lat = 32.65, zoom = 11)
m %>%
addTiles('http://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{z}/{y}/{x}',
options = providerTileOptions(noWrap = TRUE)) %>%
addTiles('http://server.arcgisonline.com/ArcGIS/rest/services/Reference/World_Boundaries_and_Places/Mapserver/tile/{z}/{y}/{x}',
options = providerTileOptions(noWrap = TRUE)) %>%
addPolygons(data=Pre.leafver.95, stroke=TRUE, color="#2ca25f", fillOpacity=0.75, group="Home range 95%")%>%
addPolygons(data=Pre.leafver.50, stroke=TRUE, color="#99d8c9", fillOpacity=0.75, group="Home range 50%")%>%
addCircles(data=Pre.leafpoints, group="Location data", color="yellow", fillOpacity=0.3, stroke=FALSE) %>%
addLayersControl(
overlayGroups = c("Home range 95%", "Home range 50%", "Location data"),
options = layersControlOptions(collapsed = FALSE)
)
Pre.outputs <- data.frame(Period="Pre", Source="All", Method="LSCV", Bandwidth=Pre.bw, A50=Pre.Area.50, A95=Pre.Area.95)
write.csv(Pre.outputs, "outputs2/PreOutputs.csv", row.names=FALSE)
############Post
## Section 3.2.2
#hlim and grid visually optimized = 0.525/375 m
Post.kd <- kernelUD(Post.all.utm, h="LSCV", hlim = c(.35, 1.5), grid=175) #set grid=100
plotLSCV(Post.kd)
Post.Area <- kernel.area(Post.kd, percent = seq(20, 95, by = 5),
unin = c("m"),
unout = c("km"), standardize = FALSE)
Post.Area
##start here:
Post.bw <- Post.kd@h[[3]] ##bandwidth estimate
Post.Area.50 <- round(Post.Area[7],2)
Post.Area.95 <- round(Post.Area[16],2)
##repeat for each and export data: bw, 50, 95, and plot
## Section 3.2.3: Plot and export
Post.ver.50 <- getverticeshr(Post.kd, 50)
Post.ver.95 <- getverticeshr(Post.kd, 95)
plot(Post.ver.50)
plot(Post.ver.95)
png(filename="plots2/Post.all.png")
plot(Post.ver.95, axes=TRUE#, main=paste("Post.all ", "Area = ", Area.95, " km2\n", "bandwidth = ", round(Post.bw,1), sep="")#
)
plot(getverticeshr(Post.kd, 95), add=TRUE, lwd=2)
plot(Post.all.utm, add=TRUE, col="blue")
dev.off()
##Write shapefile
writeOGR(Post.ver.50, "shapefiles2/Post.ver.50.shp",layer="Post.ver.50", driver="ESRI Shapefile", overwrite_layer=TRUE)
writeOGR(Post.ver.95, "shapefiles2/Post.ver.95.shp",layer="Post.ver.95", driver="ESRI Shapefile", overwrite_layer=TRUE)
Post.outputs <- data.frame(Period="Post", Source="All", Method="LSCV", Bandwidth=Post.bw, A50=Post.Area.50, A95=Post.Area.95)
write.csv(Post.outputs, "outputs2/PostOutputs.csv", row.names=FALSE)
###get projection of Tag_78500.bw.id
##reproject points
#proj4string(Post.all) <- proj4string(all3)
Post.leafpoints <-spTransform(Post.all.utm,CRS("+proj=longlat"))
##reproject vertices
proj4string(Post.ver.50) <- proj4string(Post.all.utm)
Post.leafver.50 <-spTransform(Post.ver.50,CRS("+proj=longlat"))
proj4string(Post.ver.95) <- proj4string(Post.all.utm)
Post.leafver.95 <-spTransform(Post.ver.95,CRS("+proj=longlat"))
library(leaflet)
m <- leaflet() %>% setView(lng = -117.1, lat = 32.65, zoom = 11)
m %>%
addTiles('http://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{z}/{y}/{x}',
options = providerTileOptions(noWrap = TRUE)) %>%
addTiles('http://server.arcgisonline.com/ArcGIS/rest/services/Reference/World_Boundaries_and_Places/Mapserver/tile/{z}/{y}/{x}',
options = providerTileOptions(noWrap = TRUE)) %>%
addPolygons(data=Post.leafver.95, stroke=TRUE, color="#2ca25f", fillOpacity=0.75, group="Home range 95%")%>%
addPolygons(data=Post.leafver.50, stroke=TRUE, color="#99d8c9", fillOpacity=0.75, group="Home range 50%")%>%
addCircles(data=Post.leafpoints, group="Location data", color="yellow", fillOpacity=0.3, stroke=FALSE) %>%
addLayersControl(
overlayGroups = c("Home range 95%", "Home range 50%", "Location data"),
options = layersControlOptions(collapsed = FALSE)
)
## Section 3.2.4: leaflet
###get projection of Tag_78500.bw.id
##reproject points
#proj4string(Post.all) <- proj4string(all3)
Post.leafpoints <-spTransform(Post.all.utm,CRS("+proj=longlat"))
############## end
##Section 4: Iterate through each tag in a loop
#outputs: 50 and 95 shapefiles and spreadsheet
# ##Data sources
# Pre.all.utm
# Post.all.utm
## get coordinates as a dataframe and make a spatial object
#bind a new set of pretags that does not include small relocation values for turtles ##called pre.some
Pre.some <- rbind(tag78500,
tag52675,
tag37616,
tag37623,
tag44366)
Pre.unique <- unique(Pre.some$ArgosID)
Pre.ind.outputs <- c() #to hold results
###clear all files from output folders before running new script
for(i in 1:length(Pre.unique)){
Pre.tmp <- subset(Pre.some, ArgosID==Pre.unique[i])
print(i)
Pre.tmp.coords <- data.frame(x=Pre.tmp$Lon, y=Pre.tmp$Lat)
coordinates(Pre.tmp.coords) <- ~ x + y
class(Pre.tmp.coords)
#plot(Pre.tmp.coords, axes=TRUE) ## sanity check
# library(rgdal)
# tagprj <- readOGR("C:/Users/Costco/Documents/Dropbox/John Business/Business/sdbay/homerange/shps/Tag_065.shp",
# layer="Tag_065")
#plot(tagprj, axes=TRUE)
#saveproj <- proj4string(tagprj)
Pre.tmp.proj <- Pre.tmp.coords
#latlong = "+init=epsg:4326"
proj4string(Pre.tmp.proj) = CRS(latlong)
Pre.tmp.utm <- spTransform(Pre.tmp.proj, saveproj)
#plot(Pre.tmp.utm, axes=TRUE) ## sanity check
## Section 4.1.2
Pre.tmp.kd <- kernelUD(Pre.tmp.utm, h="LSCV", hlim = c(0.565, 1.5), grid=125) #set values from pre.all
#plotLSCV(Pre.kd)
Pre.tmp.Area <- kernel.area(Pre.tmp.kd, percent = seq(20, 95, by = 5),
unin = c("m"),
unout = c("km"), standardize = FALSE)
Pre.tmp.Area
##start here:
Pre.tmp.bw <- Pre.tmp.kd@h[[3]] ##bandwidth estimate
Pre.tmp.Area.50 <- round(Pre.tmp.Area[7],2)
Pre.tmp.Area.95 <- round(Pre.tmp.Area[16],2)
Pre.ver.tmp.50 <- getverticeshr(Pre.tmp.kd, 50)
Pre.ver.tmp.95 <- getverticeshr(Pre.tmp.kd, 95)
filename50 <- paste("shapefiles2/", "tag",Pre.unique[i],"percent50th", ".shp", sep="")
filename95 <- paste("shapefiles2/", "tag",Pre.unique[i],"percent95th", ".shp", sep="")
##Write shapefile
writeOGR(Pre.ver.tmp.50, filename50,layer="Pre.ver.tmp.50", driver="ESRI Shapefile", overwrite_layer=TRUE)
writeOGR(Pre.ver.tmp.95, filename95,layer="Pre.ver.tmp.95", driver="ESRI Shapefile", overwrite_layer=TRUE)
pre.tmp.outputs <- data.frame(Period="Pre", Source=Pre.unique[i], Method="LSCV",
Bandwidth=Pre.tmp.bw,
A50=Pre.tmp.Area.50, A95=Pre.tmp.Area.95)
Pre.ind.outputs <- rbind(Pre.ind.outputs, pre.tmp.outputs)
}
# Export per-individual pre-period summary.
# Fixed: the output file name contained an embedded space ("Pre.ind.outputs .csv").
write.csv(Pre.ind.outputs, "outputs2/Pre.ind.outputs.csv", row.names=FALSE)
###########
## Section 5: Post
## get coordinates as a dataframe and make a spatial object
Post.unique <- unique(Post.all$ArgosID)
Post.ind.outputs <- c() #to hold results
for(i in 1:length(Post.unique)){
Post.tmp <- subset(Post.all, ArgosID==Post.unique[i])
print(i)
Post.tmp.coords <- data.frame(x=Post.tmp$Lon, y=Post.tmp$Lat)
coordinates(Post.tmp.coords) <- ~ x + y
class(Post.tmp.coords)
#plot(Post.tmp.coords, axes=TRUE) ## sanity check
# library(rgdal)
# tagprj <- readOGR("C:/Users/Costco/Documents/Dropbox/John Business/Business/sdbay/homerange/shps/Tag_065.shp",
# layer="Tag_065")
#plot(tagprj, axes=TRUE)
#saveproj <- proj4string(tagprj)
Post.tmp.proj <- Post.tmp.coords
#latlong = "+init=epsg:4326"
proj4string(Post.tmp.proj) = CRS(latlong)
Post.tmp.utm <- spTransform(Post.tmp.proj, saveproj)
#plot(Post.tmp.utm, axes=TRUE) ## sanity check
## Section 4.1.2
Post.tmp.kd <- kernelUD(Post.tmp.utm, h="LSCV", hlim = c(0.525, 1.5), grid=375) #set grid=300
#plotLSCV(Post.kd)
Post.tmp.Area <- kernel.area(Post.tmp.kd, percent = seq(20, 95, by = 5),
unin = c("m"),
unout = c("km"), standardize = FALSE)
Post.tmp.Area
##start here:
Post.tmp.bw <- Post.tmp.kd@h[[3]] ##bandwidth estimate
Post.tmp.Area.50 <- round(Post.tmp.Area[7],2)
Post.tmp.Area.95 <- round(Post.tmp.Area[16],2)
Post.ver.tmp.50 <- getverticeshr(Post.tmp.kd, 50)
Post.ver.tmp.95 <- getverticeshr(Post.tmp.kd, 95)
filename50 <- paste("shapefiles2/", "tag",Post.unique[i],"percent50th", ".shp", sep="")
filename95 <- paste("shapefiles2/", "tag",Post.unique[i],"percent95th", ".shp", sep="")
##Write shapefile
writeOGR(Post.ver.tmp.50, filename50,layer="Post.ver.tmp.50", driver="ESRI Shapefile", overwrite_layer=TRUE)
writeOGR(Post.ver.tmp.95, filename95,layer="Post.ver.tmp.95", driver="ESRI Shapefile", overwrite_layer=TRUE)
Post.tmp.outputs <- data.frame(Period="Post", Source=Post.unique[i], Method="LSCV",
Bandwidth=Post.tmp.bw,
A50=Post.tmp.Area.50, A95=Post.tmp.Area.95)
Post.ind.outputs <- rbind(Post.ind.outputs, Post.tmp.outputs)
}
# Export per-individual post-period summary.
# Fixed: write to "outputs2/" like every other export in this script (was "outputs/"),
# and remove the embedded space from the file name ("Post.ind.outputs .csv").
write.csv(Post.ind.outputs, "outputs2/Post.ind.outputs.csv", row.names=FALSE)
##combine data
## dt table
## leaflet
Pre.outputs2 <- Pre.outputs
Pre.outputs2$Source <- factor(Pre.outputs2$Source)
Pre.ind.outputs2 <- Pre.ind.outputs
Pre.ind.outputs2$Source <- factor(Pre.ind.outputs2$Source)
Post.outputs2 <- Post.outputs
Post.outputs2$Source <- factor(Post.outputs2$Source)
Post.ind.outputs2 <- Post.ind.outputs
Post.ind.outputs2$Source <- factor(Post.ind.outputs2$Source)
summary.all <- rbind(Pre.outputs2,Pre.ind.outputs2, Post.outputs2, Post.ind.outputs2)
write.csv(summary.all , "outputs2/summaryall.csv", row.names=FALSE)
save.image("homerangegpsLC.RData")
##Post all leaflet map
setwd("/Users/sgraham/R/gst_hr_analysis2016")
#load("homerange5.RData")
library(leaflet)
m <- leaflet() %>% setView(lng = -117.1, lat = 32.65, zoom = 11)
m %>%
addTiles('http://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{z}/{y}/{x}',
options = providerTileOptions(noWrap = TRUE)) %>%
addTiles('http://server.arcgisonline.com/ArcGIS/rest/services/Reference/World_Boundaries_and_Places/Mapserver/tile/{z}/{y}/{x}',
options = providerTileOptions(noWrap = TRUE)) %>%
addPolygons(data=Post.leafver.95, stroke=TRUE, color="#2ca25f", fillOpacity=0.75, group="Post home range 95%")%>%
addPolygons(data=Post.leafver.50, stroke=TRUE, color="#99d8c9", fillOpacity=0.75, group="Post home range 50%")%>%
addCircles(data=Post.leafpoints, group="Location data", color="yellow", fillOpacity=0.3, stroke=FALSE) %>%
addLayersControl(
overlayGroups = c("Post home range 95%", "Post home range 50%", "Location data"),
options = layersControlOptions(collapsed = FALSE)
)
|
library(GSminer)
library(testthat)

context("test removeDup")

## removeDup() should collapse rows that duplicate an earlier row up to
## column order, keeping the first occurrence.
test_that("remove Duplicate rows", {
  input <- data.frame(a = c("ab", "cd", "ef"),
                      b = c("cd", "ab", "ef"),
                      stringsAsFactors = FALSE)
  expected <- data.frame(a = c("ab", "ef"),
                         b = c("cd", "ef"),
                         stringsAsFactors = FALSE)
  expect_true(all.equal(expected, removeDup(input), check.attributes = FALSE))
})
| /tests/testthat/test-GSminer.R | no_license | ShadowFiendSF/GSminer | R | false | false | 381 | r | library(GSminer)
library(testthat)
context("test removeDup")
## Verifies that removeDup() drops rows duplicating an earlier row: rows 1
## and 2 hold the same pair in swapped column order, so only the first is
## kept along with the distinct third row.
## NOTE(review): removeDup() comes from GSminer, which must be attached
## before this file runs.
test_that("remove Duplicate rows", {
  testdata <- data.frame("a" = c("ab", "cd", "ef"), "b" = c("cd", "ab", "ef"), stringsAsFactors = FALSE)
  result <- data.frame("a" = c("ab", "ef"), "b" = c("cd", "ef"), stringsAsFactors = FALSE)
  expect_true(all.equal(result, removeDup(testdata), check.attributes = F))
})
|
### <======================================================================>
#' Plot ES contribution
#'
#' These functions plot the contribution of each asset to the overall portfolio expected shortfall.
#'
#'
#' @docType methods
#' @importFrom graphics plot
#' @name plot-ghyp.attribution
#' @rdname plot-ghyp.attribution
#' @aliases plot,ghyp.attribution,ANY-method
#'
#' @param x A \code{ghyp.attribution} object.
#' @param metrics either the \code{contribution} or \code{sensitivity} will be plotted.
#' @param column.index which column of the object.
#' @param percentage plot contribution or sensitivity in percent.
#' @param colorset vector of colors for the chart.
#' @param horiz plot horizontally.
#' @param unstacked unstacked plot.
#' @param pie.chart should a pie chart be plotted.
#' @param sub subtitle.
#' @param \dots arguments passed to \code{plot} function.
#'
#' @author Marc Weibel
#' @seealso \code{\link{ESghyp.attribution}}.
#' @keywords attribution
#' @examples
#' \dontrun{
#' data(smi.stocks)
#'
#' ## Fit a NIG model to Novartis, CS and Nestle log-returns
#' assets.fit <- fit.NIGmv(smi.stocks[, c("Novartis", "CS", "Nestle")], silent = TRUE)
#'
#' ## Define Weights of the Portfolio
#' weights <- c(0.2, 0.5, 0.3)
#'
#' ## Confidence level for Expected Shortfall
#' es.levels <- c(0.01)
#' portfolio.attrib <- ESghyp.attribution(alpha=es.levels, object=assets.fit, weights=weights)
#'
#' ## Plot Risk Contribution for each Asset
#' plot(portfolio.attrib, metrics='contribution')
#' }
#' @export
## Dispatch target for plot() on "ghyp.attribution" objects: draws either a
## (stacked) bar chart or a pie chart of each asset's expected-shortfall
## contribution, or a bar chart of ES sensitivities. See the roxygen block
## above for the argument documentation.
"plot.ghyp.attrib" <- function(x, metrics=c('contribution', 'sensitivity'),
                               column.index=NULL, percentage=FALSE, colorset=NULL,
                               horiz=FALSE, unstacked=TRUE,
                               pie.chart=FALSE, sub=NULL, ...)
{
  metrics <- match.arg(metrics)
  ## Percentage scaling and pie charts only make sense for the additive
  ## risk contribution, not for sensitivities.
  if(metrics!='contribution' && percentage==TRUE)
    stop('Percentage can only be chosen with contribution ! \n')
  if(metrics!='contribution' && pie.chart==TRUE)
    stop('Pie Chart can only be chosen with contribution and percentage set as TRUE ! \n')
  ## Extract the matching S4 slot (x@contribution or x@sensitivity) directly;
  ## replaces the eval(parse(text = ...)) round-trip of the original code.
  object <- methods::slot(x, metrics)
  colNames <- colnames(object)
  ## Optionally restrict the plot to a single confidence-level column.
  if(!is.null(column.index))
  {
    object <- as.matrix(object[, column.index])
    colnames(object) <- colNames[column.index]
    if(is.null(sub)) sub <- paste('Probability = ',
                                  colnames(object),
                                  sep="")
  }
  n.row <- NROW(object)
  n.col <- NCOL(object)
  ## Stacked plots make no sense for sensitivity (not additive across the
  ## portfolio) and a pie chart can only show one column.
  if(n.col>1 && metrics=='sensitivity')
    stop('Only one-dimensional objects for Sensitivity Chart ! \n')
  if(n.col>1 && pie.chart==TRUE)
    stop('Only one-dimensional objects for Pie Chart ! \n')
  ## A pie chart implies percentage scaling; switch it on with a notice.
  if(metrics=='contribution' && pie.chart==TRUE && percentage==FALSE)
  {
    cat('percentage has been set to TRUE for pie chart ! \n')
    percentage <- TRUE
  }
  ## Build the axis / title label from the chosen metric.
  if(metrics=='contribution')
  {
    metrics <- 'Contribution'
    if(percentage==TRUE) metrics <- 'Contribution (in %)'
  } else {
    metrics <- 'Sensitivity'
  }
  ## Rescale each column to percentages of its own total.
  if(percentage==TRUE) object <- t(t(object)/colSums(object)) * 100
  ## Produce a bar plot or a pie chart.
  if(pie.chart==FALSE)
  {
    plot.StackedBar(t(object), xlab='Probability',
                    ylab=metrics,
                    main=paste('Expected Shortfall ',
                               metrics, sep=""), horiz=horiz,
                    colorset=colorset,  # pass by name; positional passing was fragile
                    sub=sub, unstacked = unstacked, ...)
  } else {
    if(is.null(colorset)) colorset <- .my.pal(n.row,'topo')
    plot.PieChart(object,
                  labels=paste(rownames(object)," (",round(object,2),"%)",sep=""),
                  main='Expected Shortfall Contribution (in %)',
                  colorset=colorset,
                  sub=paste('Probability = ',
                            colnames(object), sep=""),
                  ...)
  }
}
### <---------------------------------------------------------------------->
setMethod("plot", signature(x = "ghyp.attribution"), plot.ghyp.attrib)
### <---------------------------------------------------------------------->
### <======================================================================>
##------ Color palettes ------------
## Return a vector of n colours from the requested palette.
##
## Fix: the original built a character matrix of the five grDevices palettes
## and indexed it by row name, so the advertised 'blues' choice matched no
## row and silently returned an empty result. 'blues' is now served by the
## file's seqPalette() helper; the other choices map straight to their
## grDevices generators (same parameters as before), and the
## eval(parse(text = ...)) indirection is gone.
".my.pal" <- function(n, palette=c('blues','rainbow', 'heat', 'terrain', 'topo', 'cm'))
{
  palette <- match.arg(palette)
  switch(palette,
         blues   = seqPalette(n, "Blues"),
         rainbow = rainbow(n, start = .7, end = .1),
         heat    = heat.colors(n),
         terrain = terrain.colors(n),
         topo    = topo.colors(n),
         cm      = cm.colors(n))
}
### <---------------------------------------------------------------------->
## Sequential colour palette generator (palette definitions adapted from
## RColorBrewer's 9-colour sequential ramps): returns n hex colours obtained
## by spline-interpolating the selected ramp channel-wise in RGB space.
##
## @param n    number of colours to return.
## @param name one of the brewer sequential palette names below.
## @return character vector of n "#RRGGBB" colours.
seqPalette <- function (n, name = c("Blues", "BuGn", "BuPu", "GnBu", "Greens",
    "Greys", "Oranges", "OrRd", "PuBu", "PuBuGn", "PuRd", "Purples",
    "RdPu", "Reds", "YlGn", "YlGnBu", "YlOrBr", "YlOrRd"))
{
    Blues = rgb(c(247, 222, 198, 158, 107, 66, 33, 8, 8), c(251,
        235, 219, 202, 174, 146, 113, 81, 48), c(255, 247, 239,
        225, 214, 198, 181, 156, 107), maxColorValue = 255)
    BuGn = rgb(c(247, 229, 204, 153, 102, 65, 35, 0, 0), c(252,
        245, 236, 216, 194, 174, 139, 109, 68), c(253, 249, 230,
        201, 164, 118, 69, 44, 27), maxColorValue = 255)
    BuPu = rgb(c(247, 224, 191, 158, 140, 140, 136, 129, 77),
        c(252, 236, 211, 188, 150, 107, 65, 15, 0), c(253, 244,
        230, 218, 198, 177, 157, 124, 75), maxColorValue = 255)
    GnBu = rgb(c(247, 224, 204, 168, 123, 78, 43, 8, 8), c(252,
        243, 235, 221, 204, 179, 140, 104, 64), c(240, 219, 197,
        181, 196, 211, 190, 172, 129), maxColorValue = 255)
    Greens = rgb(c(247, 229, 199, 161, 116, 65, 35, 0, 0), c(252,
        245, 233, 217, 196, 171, 139, 109, 68), c(245, 224, 192,
        155, 118, 93, 69, 44, 27), maxColorValue = 255)
    Greys = rgb(c(255, 240, 217, 189, 150, 115, 82, 37, 0), c(255,
        240, 217, 189, 150, 115, 82, 37, 0), c(255, 240, 217,
        189, 150, 115, 82, 37, 0), maxColorValue = 255)
    Oranges = rgb(c(255, 254, 253, 253, 253, 241, 217, 166, 127),
        c(245, 230, 208, 174, 141, 105, 72, 54, 39), c(235, 206,
        162, 107, 60, 19, 1, 3, 4), maxColorValue = 255)
    OrRd = rgb(c(255, 254, 253, 253, 252, 239, 215, 179, 127),
        c(247, 232, 212, 187, 141, 101, 48, 0, 0), c(236, 200,
        158, 132, 89, 72, 31, 0, 0), maxColorValue = 255)
    PuBu = rgb(c(255, 236, 208, 166, 116, 54, 5, 4, 2), c(247,
        231, 209, 189, 169, 144, 112, 90, 56), c(251, 242, 230,
        219, 207, 192, 176, 141, 88), maxColorValue = 255)
    PuBuGn = rgb(c(255, 236, 208, 166, 103, 54, 2, 1, 1), c(247,
        226, 209, 189, 169, 144, 129, 108, 70), c(251, 240, 230,
        219, 207, 192, 138, 89, 54), maxColorValue = 255)
    ## NOTE(review): PuOr is defined but absent from the 'name' choices, so
    ## it is currently unreachable; kept for fidelity with the source palette.
    PuOr = rgb(c(127, 179, 224, 253, 254, 247, 216, 178, 128,
        84, 45), c(59, 88, 130, 184, 224, 247, 218, 171, 115,
        39, 0), c(8, 6, 20, 99, 182, 247, 235, 210, 172, 136,
        75), maxColorValue = 255)
    PuRd = rgb(c(247, 231, 212, 201, 223, 231, 206, 152, 103),
        c(244, 225, 185, 148, 101, 41, 18, 0, 0), c(249, 239,
        218, 199, 176, 138, 86, 67, 31), maxColorValue = 255)
    Purples = rgb(c(252, 239, 218, 188, 158, 128, 106, 84, 63),
        c(251, 237, 218, 189, 154, 125, 81, 39, 0), c(253, 245,
        235, 220, 200, 186, 163, 143, 125), maxColorValue = 255)
    RdPu = rgb(c(255, 253, 252, 250, 247, 221, 174, 122, 73),
        c(247, 224, 197, 159, 104, 52, 1, 1, 0), c(243, 221,
        192, 181, 161, 151, 126, 119, 106), maxColorValue = 255)
    Reds = rgb(c(255, 254, 252, 252, 251, 239, 203, 165, 103),
        c(245, 224, 187, 146, 106, 59, 24, 15, 0), c(240, 210,
        161, 114, 74, 44, 29, 21, 13), maxColorValue = 255)
    YlGn = rgb(c(255, 247, 217, 173, 120, 65, 35, 0, 0), c(255,
        252, 240, 221, 198, 171, 132, 104, 69), c(229, 185, 163,
        142, 121, 93, 67, 55, 41), maxColorValue = 255)
    YlGnBu = rgb(c(255, 237, 199, 127, 65, 29, 34, 37, 8), c(255,
        248, 233, 205, 182, 145, 94, 52, 29), c(217, 177, 180,
        187, 196, 192, 168, 148, 88), maxColorValue = 255)
    YlOrBr = rgb(c(255, 255, 254, 254, 254, 236, 204, 153, 102),
        c(255, 247, 227, 196, 153, 112, 76, 52, 37), c(229, 188,
        145, 79, 41, 20, 2, 4, 6), maxColorValue = 255)
    YlOrRd = rgb(c(255, 255, 254, 254, 253, 252, 227, 189, 128),
        c(255, 237, 217, 178, 141, 78, 26, 0, 0), c(204, 160,
        118, 76, 60, 42, 28, 38, 38), maxColorValue = 255)
    name <- match.arg(name)
    ## Look the selected palette up among the local definitions above;
    ## replaces the original eval(parse(text = name)).
    orig <- get(name, inherits = FALSE)
    ## 'rgbmat' replaces a local previously named 'rgb', which shadowed
    ## grDevices::rgb for non-function lookups.
    rgbmat <- t(col2rgb(orig))
    temp <- matrix(NA, ncol = 3, nrow = n)
    x <- seq(0, 1, length.out = length(orig))
    ## Spline-interpolate each RGB channel at n evenly spaced points and
    ## clamp to the valid [0, 255] range before rounding.
    for (k in 1:3) {
        hold <- spline(x, rgbmat[, k], n = n)$y
        hold[hold < 0] <- 0
        hold[hold > 255] <- 255
        temp[, k] <- round(hold)
    }
    rgb(temp[, 1], temp[, 2], temp[, 3], maxColorValue = 255)
}
### <======================================================================>
## Stacked / unstacked bar-chart backend used by plot.ghyp.attrib().
## Data layout: one column per category, one row per period/observation.
## A single-row input is drawn as one unstacked bar chart; multi-row input is
## drawn as a stacked chart (positive and negative parts overlaid) with an
## optional legend underneath.
## NOTE(review): date.format, major.ticks and minor.ticks are accepted but
## never used in this body -- presumably kept for call-site compatibility.
"plot.StackedBar" <- function (object, colorset = NULL, horiz=FALSE,
                               space = 0.2, cex.axis=0.8,
                               cex.legend = 0.8, cex.lab = 1,
                               cex.labels = 0.8, cex.main = 1,
                               xaxis=TRUE, legend.loc="under",
                               element.color = "darkgray", unstacked = TRUE,
                               xlab="Date", ylab="Value", ylim=NULL,
                               date.format = "%b %y",
                               major.ticks='auto', minor.ticks=TRUE,
                               las = 0, xaxis.labels = NULL, ... )
{
    ## Data should be organized as columns for each category,
    ## rows for each period or observation
    object.columns = NCOL(object)
    object.rows = NROW(object)
    ## Dry-run barplot() to obtain the bar centre positions for axis labels.
    posn = barplot(t(object), plot=FALSE, space=space)
    ## Without column names there is nothing to put in a legend.
    if(is.null(colnames(object)))
        legend.loc = NULL
    if(is.null(colorset))
        colorset=seqPalette(object.columns, 'Blues')
    if(is.null(xlab))
        minmargin = 3
    else
        minmargin = 5
    if(unstacked & dim(object)[1] == 1){ # only one row passed in: unstack the bars
        if(las > 1) { # rotated labels: widen the bottom margin to fit them
            bottommargin = max(c(minmargin, (strwidth(colnames(object),units="in")) /
                                    par("cin")[1])) * cex.lab
            par(mar = c(bottommargin, 4, 4, 2) +.1)
        }
        barplot(object, col = '#9ECAE1', las = las, horiz = horiz,
                space = space, xlab = "", cex.names = cex.lab, axes = FALSE, ylim=ylim, ...)
        ## Value axis sits on the bottom for horizontal bars, left otherwise.
        if (horiz==TRUE)
            axis(1, col = element.color, las = las, cex.axis = cex.axis)
        else
            axis(2, col = element.color, las = las, cex.axis = cex.axis)
        box(col = element.color)
    }
    else { # multiple rows: stack the bars and put a legend underneath
        if(!is.null(legend.loc) ){
            if(legend.loc =="under") { # reserve a bottom panel for the legend
                op <- par(no.readonly=TRUE)
                layout(rbind(1,2), heights=c(6,1), widths=1)
                par(mar=c(3,4,4,2)+.1) # margins of the chart panel
            }
        }
        ## barplot() cannot stack mixed-sign data directly, so split the
        ## matrix into its positive and negative parts and draw them as two
        ## overlaid barplots (brute-force elementwise clamping).
        positives = object
        for(column in 1:ncol(object)){
            for(row in 1:nrow(object)){
                positives[row,column]=max(0,object[row,column])
            }
        }
        negatives = object
        for(column in 1:ncol(object)){
            for(row in 1:nrow(object)){
                negatives[row,column]=min(0,object[row,column])
            }
        }
        ## Value range must cover both stacked totals (positive and negative).
        if(is.null(ylim)){
            ymax=max(0,apply(positives,FUN=sum,MARGIN=1))
            ymin=min(0,apply(negatives,FUN=sum,MARGIN=1))
            ylim=c(ymin,ymax)
        }
        if (horiz==TRUE)
        {
            barplot(t(positives), col=colorset, horiz=horiz,
                    space=space,
                    axisnames = FALSE, axes = FALSE,
                    xlim=ylim, xlab="", ...)
            barplot(t(negatives), add=TRUE , col=colorset,
                    horiz=horiz,
                    space=space, las = las, xlab = "",
                    cex.names = cex.lab,
                    axes = FALSE, axisnames = FALSE, xlim=ylim, ...)
            axis(1, col = element.color, las = las, cex.axis = cex.axis)
        } else {
            barplot(t(positives), col=colorset, horiz=horiz,
                    space=space,
                    axisnames = FALSE, axes = FALSE,
                    ylim=ylim, xlab="", ...)
            barplot(t(negatives), add=TRUE , col=colorset,
                    horiz=horiz,
                    space=space, las = las, xlab = "",
                    cex.names = cex.lab,
                    axes = FALSE, axisnames = FALSE, ylim=ylim, ...)
            axis(2, col = element.color, las = las, cex.axis = cex.axis)
        }
        title(ylab = ylab, cex = cex.lab)
        if (xaxis) {
            ## Leave enough room for the category labels next to their bars.
            label.height = .25 + cex.axis *
                max(strheight(rownames(object), units="in") /
                        par('cin')[2])
            if(is.null(xaxis.labels))
                xaxis.labels = rownames(object)
            if (horiz==TRUE)
                axis(2, at=posn, labels=xaxis.labels, las=las, lwd=1,
                     mgp=c(3,label.height,0), cex.axis = cex.axis)
            else
                axis(1, at=posn, labels=xaxis.labels, las=las, lwd=1,
                     mgp=c(3,label.height,0), cex.axis = cex.axis)
        }
        box(col = element.color)
        if(!is.null(legend.loc)){
            if(legend.loc =="under"){ # draw the legend in the bottom panel
                par(mar=c(0,2,0,1)+.1) # margins of the legend panel
                plot.new()
                if(object.columns <4)
                    ncol= object.columns
                else
                    ncol = 4
                legend("center", legend=colnames(object), cex = cex.legend,
                       fill=colorset, ncol=ncol,
                       box.col=element.color, border = element.color)
                par(op) # restore the graphical parameters saved above
            } # if legend.loc is not "under", do nothing
        }
    }
}
### <---------------------------------------------------------------------->
### <======================================================================>
## Pie-chart backend used by plot.ghyp.attrib() for percentage risk
## contributions. Adapted from graphics::pie(): draws one slice per element
## of x, with each label attached outside the circle at the slice midpoint.
plot.PieChart <- function (x, labels = names(x), labels.loc = "under",
                           edges = 200, radius = 0.8,
                           clockwise = FALSE, init.angle = if (clockwise) 90 else 0,
                           density = NULL, angle = 45,
                           colorset = NULL,
                           border = NULL,
                           lty = NULL,
                           main = NULL,
                           ...)
{
    if (!is.numeric(x) || any(is.na(x) | x < 0))
        stop("'x' values must be positive.")
    if (is.null(labels))
        labels <- as.character(seq_along(x))
    else labels <- as.graphicsAnnot(labels)
    object.columns = NCOL(x)
    object.rows = NROW(x)
    ## Convert the values to cumulative fractions of the full circle.
    x <- c(0, cumsum(x)/sum(x))
    dx <- diff(x)
    nx <- length(dx)
    plot.new()
    ## Square up the plotting window so the pie stays circular.
    pin <- par("pin")
    xlim <- ylim <- c(-1, 1)
    if (pin[1L] > pin[2L])
        xlim <- (pin[1L]/pin[2L]) * xlim
    else ylim <- (pin[2L]/pin[1L]) * ylim
    plot.window(xlim, ylim, "", asp = 1)
    ## Default colours: sequential blues, unless density shading is in use
    ## (then fall back to the foreground colour, as graphics::pie() does).
    if (is.null(colorset))
        colorset <- if (is.null(density))
            colorset=seqPalette(object.columns, 'Blues')
        else par("fg")
    ## Recycle per-slice graphical parameters to the slice count.
    colorset <- rep(colorset, length.out = nx)
    border <- rep(border, length.out = nx)
    lty <- rep(lty, length.out = nx)
    angle <- rep(angle, length.out = nx)
    density <- rep(density, length.out = nx)
    twopi <- if (clockwise)
        -2 * pi
    else 2 * pi
    ## Map a cumulative fraction t to (x, y) coordinates on the circle.
    t2xy <- function(t) {
        t2p <- twopi * t + init.angle * pi/180
        list(x = radius * cos(t2p), y = radius * sin(t2p))
    }
    for (i in 1L:nx) {
        ## Approximate each slice arc with a number of edges proportional to
        ## its share of the circle (at least 2).
        n <- max(2, floor(edges * dx[i]))
        P <- t2xy(seq.int(x[i], x[i + 1], length.out = n))
        polygon(c(P$x, 0), c(P$y, 0), density = density[i], angle = angle[i],
                border = border[i], col = colorset[i], lty = lty[i])
        ## Tick mark and label at the slice's angular midpoint.
        P <- t2xy(mean(x[i + 0:1]))
        lab <- as.character(labels[i])
        if (!is.na(lab) && nzchar(lab)) {
            ## NOTE(review): label.name captures the (NULL) return value of
            ## lines() and is never used -- looks like a leftover.
            label.name <-
                lines(c(1, 1.05) * P$x, c(1, 1.05) * P$y)
            text(1.1 * P$x, 1.1 * P$y, labels[i], xpd = TRUE,
                 adj = ifelse(P$x < 0, 1, 0), ...)
        }
    }
    title(main = main, ...)
    ## Disabled draft of an "under"-style legend, kept for reference:
    #if(!is.null(labels.loc)){
    #	if(legend.loc =="under"){ # draw the legend under the chart
    #	par(mar=c(0,2,0,1)+.1) # set the margins of the second panel
    #	if(object.rows <3)
    #		ncol= object.rows
    #	else
    #		ncol = 3
    #	label.name = paste(rownames(object)," (",round(object,2),"%)",sep="")
    #	legend("bottom", legend=label.name, cex = cex.legend,
    #			fill=colorset, ncol=ncol,
    #			box.col=element.color, border = element.color)
    #	} # if label.loc is null, then do nothing
    #}
    invisible(NULL)
}
### <----------------------------------------------------------------------> | /R/ghypPlots.R | no_license | KKAKKI/ghyp | R | false | false | 18,976 | r | ### <======================================================================>
#' Plot ES contribution
#'
#' These functions plot the contribution of each asset to the overall portfolio expected shortfall.
#'
#'
#' @docType methods
#' @importFrom graphics plot
#' @name plot-ghyp.attribution
#' @rdname plot-ghyp.attribution
#' @aliases plot,ghyp.attribution,ANY-method
#'
#' @param x A \code{ghyp.attribution} object.
#' @param metrics either the \code{contribution} or \code{sensitivity} will be plotted.
#' @param column.index which column of the object.
#' @param percentage plot contribution or sensitivity in percent.
#' @param colorset vector of colors for the chart.
#' @param horiz plot horizontally.
#' @param unstacked unstacked plot.
#' @param pie.chart should a pie chart be plotted.
#' @param sub subtitle.
#' @param \dots arguments passed to \code{plot} function.
#'
#' @author Marc Weibel
#' @seealso \code{\link{ESghyp.attribution}}.
#' @keywords attribution
#' @examples
#' \dontrun{
#' data(smi.stocks)
#'
#' ## Fit a NIG model to Novartis, CS and Nestle log-returns
#' assets.fit <- fit.NIGmv(smi.stocks[, c("Novartis", "CS", "Nestle")], silent = TRUE)
#'
#' ## Define Weights of the Portfolio
#' weights <- c(0.2, 0.5, 0.3)
#'
#' ## Confidence level for Expected Shortfall
#' es.levels <- c(0.01)
#' portfolio.attrib <- ESghyp.attribution(alpha=es.levels, object=assets.fit, weights=weights)
#'
#' ## Plot Risk Contribution for each Asset
#' plot(portfolio.attrib, metrics='contribution')
#' }
#' @export
## Dispatch target for plot() on "ghyp.attribution" objects: draws either a
## (stacked) bar chart or a pie chart of each asset's expected-shortfall
## contribution, or a bar chart of ES sensitivities. See the roxygen block
## above for the argument documentation.
"plot.ghyp.attrib" <- function(x, metrics=c('contribution', 'sensitivity'),
                               column.index=NULL, percentage=FALSE, colorset=NULL,
                               horiz=FALSE, unstacked=TRUE,
                               pie.chart=FALSE, sub=NULL, ...)
{
  metrics <- match.arg(metrics)
  ## Percentage scaling and pie charts only make sense for the additive
  ## risk contribution, not for sensitivities.
  if(metrics!='contribution' && percentage==TRUE)
    stop('Percentage can only be chosen with contribution ! \n')
  if(metrics!='contribution' && pie.chart==TRUE)
    stop('Pie Chart can only be chosen with contribution and percentage set as TRUE ! \n')
  ## Extract the matching S4 slot (x@contribution or x@sensitivity) directly;
  ## replaces the eval(parse(text = ...)) round-trip of the original code.
  object <- methods::slot(x, metrics)
  colNames <- colnames(object)
  ## Optionally restrict the plot to a single confidence-level column.
  if(!is.null(column.index))
  {
    object <- as.matrix(object[, column.index])
    colnames(object) <- colNames[column.index]
    if(is.null(sub)) sub <- paste('Probability = ',
                                  colnames(object),
                                  sep="")
  }
  n.row <- NROW(object)
  n.col <- NCOL(object)
  ## Stacked plots make no sense for sensitivity (not additive across the
  ## portfolio) and a pie chart can only show one column.
  if(n.col>1 && metrics=='sensitivity')
    stop('Only one-dimensional objects for Sensitivity Chart ! \n')
  if(n.col>1 && pie.chart==TRUE)
    stop('Only one-dimensional objects for Pie Chart ! \n')
  ## A pie chart implies percentage scaling; switch it on with a notice.
  if(metrics=='contribution' && pie.chart==TRUE && percentage==FALSE)
  {
    cat('percentage has been set to TRUE for pie chart ! \n')
    percentage <- TRUE
  }
  ## Build the axis / title label from the chosen metric.
  if(metrics=='contribution')
  {
    metrics <- 'Contribution'
    if(percentage==TRUE) metrics <- 'Contribution (in %)'
  } else {
    metrics <- 'Sensitivity'
  }
  ## Rescale each column to percentages of its own total.
  if(percentage==TRUE) object <- t(t(object)/colSums(object)) * 100
  ## Produce a bar plot or a pie chart.
  if(pie.chart==FALSE)
  {
    plot.StackedBar(t(object), xlab='Probability',
                    ylab=metrics,
                    main=paste('Expected Shortfall ',
                               metrics, sep=""), horiz=horiz,
                    colorset=colorset,  # pass by name; positional passing was fragile
                    sub=sub, unstacked = unstacked, ...)
  } else {
    if(is.null(colorset)) colorset <- .my.pal(n.row,'topo')
    plot.PieChart(object,
                  labels=paste(rownames(object)," (",round(object,2),"%)",sep=""),
                  main='Expected Shortfall Contribution (in %)',
                  colorset=colorset,
                  sub=paste('Probability = ',
                            colnames(object), sep=""),
                  ...)
  }
}
### <---------------------------------------------------------------------->
setMethod("plot", signature(x = "ghyp.attribution"), plot.ghyp.attrib)
### <---------------------------------------------------------------------->
### <======================================================================>
##------ Color palettes ------------
## Return a vector of n colours from the requested palette.
##
## Fix: the original built a character matrix of the five grDevices palettes
## and indexed it by row name, so the advertised 'blues' choice matched no
## row and silently returned an empty result. 'blues' is now served by the
## file's seqPalette() helper; the other choices map straight to their
## grDevices generators (same parameters as before), and the
## eval(parse(text = ...)) indirection is gone.
".my.pal" <- function(n, palette=c('blues','rainbow', 'heat', 'terrain', 'topo', 'cm'))
{
  palette <- match.arg(palette)
  switch(palette,
         blues   = seqPalette(n, "Blues"),
         rainbow = rainbow(n, start = .7, end = .1),
         heat    = heat.colors(n),
         terrain = terrain.colors(n),
         topo    = topo.colors(n),
         cm      = cm.colors(n))
}
### <---------------------------------------------------------------------->
## Sequential colour palette generator (palette definitions adapted from
## RColorBrewer's 9-colour sequential ramps): returns n hex colours obtained
## by spline-interpolating the selected ramp channel-wise in RGB space.
##
## @param n    number of colours to return.
## @param name one of the brewer sequential palette names below.
## @return character vector of n "#RRGGBB" colours.
seqPalette <- function (n, name = c("Blues", "BuGn", "BuPu", "GnBu", "Greens",
    "Greys", "Oranges", "OrRd", "PuBu", "PuBuGn", "PuRd", "Purples",
    "RdPu", "Reds", "YlGn", "YlGnBu", "YlOrBr", "YlOrRd"))
{
    Blues = rgb(c(247, 222, 198, 158, 107, 66, 33, 8, 8), c(251,
        235, 219, 202, 174, 146, 113, 81, 48), c(255, 247, 239,
        225, 214, 198, 181, 156, 107), maxColorValue = 255)
    BuGn = rgb(c(247, 229, 204, 153, 102, 65, 35, 0, 0), c(252,
        245, 236, 216, 194, 174, 139, 109, 68), c(253, 249, 230,
        201, 164, 118, 69, 44, 27), maxColorValue = 255)
    BuPu = rgb(c(247, 224, 191, 158, 140, 140, 136, 129, 77),
        c(252, 236, 211, 188, 150, 107, 65, 15, 0), c(253, 244,
        230, 218, 198, 177, 157, 124, 75), maxColorValue = 255)
    GnBu = rgb(c(247, 224, 204, 168, 123, 78, 43, 8, 8), c(252,
        243, 235, 221, 204, 179, 140, 104, 64), c(240, 219, 197,
        181, 196, 211, 190, 172, 129), maxColorValue = 255)
    Greens = rgb(c(247, 229, 199, 161, 116, 65, 35, 0, 0), c(252,
        245, 233, 217, 196, 171, 139, 109, 68), c(245, 224, 192,
        155, 118, 93, 69, 44, 27), maxColorValue = 255)
    Greys = rgb(c(255, 240, 217, 189, 150, 115, 82, 37, 0), c(255,
        240, 217, 189, 150, 115, 82, 37, 0), c(255, 240, 217,
        189, 150, 115, 82, 37, 0), maxColorValue = 255)
    Oranges = rgb(c(255, 254, 253, 253, 253, 241, 217, 166, 127),
        c(245, 230, 208, 174, 141, 105, 72, 54, 39), c(235, 206,
        162, 107, 60, 19, 1, 3, 4), maxColorValue = 255)
    OrRd = rgb(c(255, 254, 253, 253, 252, 239, 215, 179, 127),
        c(247, 232, 212, 187, 141, 101, 48, 0, 0), c(236, 200,
        158, 132, 89, 72, 31, 0, 0), maxColorValue = 255)
    PuBu = rgb(c(255, 236, 208, 166, 116, 54, 5, 4, 2), c(247,
        231, 209, 189, 169, 144, 112, 90, 56), c(251, 242, 230,
        219, 207, 192, 176, 141, 88), maxColorValue = 255)
    PuBuGn = rgb(c(255, 236, 208, 166, 103, 54, 2, 1, 1), c(247,
        226, 209, 189, 169, 144, 129, 108, 70), c(251, 240, 230,
        219, 207, 192, 138, 89, 54), maxColorValue = 255)
    ## NOTE(review): PuOr is defined but absent from the 'name' choices, so
    ## it is currently unreachable; kept for fidelity with the source palette.
    PuOr = rgb(c(127, 179, 224, 253, 254, 247, 216, 178, 128,
        84, 45), c(59, 88, 130, 184, 224, 247, 218, 171, 115,
        39, 0), c(8, 6, 20, 99, 182, 247, 235, 210, 172, 136,
        75), maxColorValue = 255)
    PuRd = rgb(c(247, 231, 212, 201, 223, 231, 206, 152, 103),
        c(244, 225, 185, 148, 101, 41, 18, 0, 0), c(249, 239,
        218, 199, 176, 138, 86, 67, 31), maxColorValue = 255)
    Purples = rgb(c(252, 239, 218, 188, 158, 128, 106, 84, 63),
        c(251, 237, 218, 189, 154, 125, 81, 39, 0), c(253, 245,
        235, 220, 200, 186, 163, 143, 125), maxColorValue = 255)
    RdPu = rgb(c(255, 253, 252, 250, 247, 221, 174, 122, 73),
        c(247, 224, 197, 159, 104, 52, 1, 1, 0), c(243, 221,
        192, 181, 161, 151, 126, 119, 106), maxColorValue = 255)
    Reds = rgb(c(255, 254, 252, 252, 251, 239, 203, 165, 103),
        c(245, 224, 187, 146, 106, 59, 24, 15, 0), c(240, 210,
        161, 114, 74, 44, 29, 21, 13), maxColorValue = 255)
    YlGn = rgb(c(255, 247, 217, 173, 120, 65, 35, 0, 0), c(255,
        252, 240, 221, 198, 171, 132, 104, 69), c(229, 185, 163,
        142, 121, 93, 67, 55, 41), maxColorValue = 255)
    YlGnBu = rgb(c(255, 237, 199, 127, 65, 29, 34, 37, 8), c(255,
        248, 233, 205, 182, 145, 94, 52, 29), c(217, 177, 180,
        187, 196, 192, 168, 148, 88), maxColorValue = 255)
    YlOrBr = rgb(c(255, 255, 254, 254, 254, 236, 204, 153, 102),
        c(255, 247, 227, 196, 153, 112, 76, 52, 37), c(229, 188,
        145, 79, 41, 20, 2, 4, 6), maxColorValue = 255)
    YlOrRd = rgb(c(255, 255, 254, 254, 253, 252, 227, 189, 128),
        c(255, 237, 217, 178, 141, 78, 26, 0, 0), c(204, 160,
        118, 76, 60, 42, 28, 38, 38), maxColorValue = 255)
    name <- match.arg(name)
    ## Look the selected palette up among the local definitions above;
    ## replaces the original eval(parse(text = name)).
    orig <- get(name, inherits = FALSE)
    ## 'rgbmat' replaces a local previously named 'rgb', which shadowed
    ## grDevices::rgb for non-function lookups.
    rgbmat <- t(col2rgb(orig))
    temp <- matrix(NA, ncol = 3, nrow = n)
    x <- seq(0, 1, length.out = length(orig))
    ## Spline-interpolate each RGB channel at n evenly spaced points and
    ## clamp to the valid [0, 255] range before rounding.
    for (k in 1:3) {
        hold <- spline(x, rgbmat[, k], n = n)$y
        hold[hold < 0] <- 0
        hold[hold > 255] <- 255
        temp[, k] <- round(hold)
    }
    rgb(temp[, 1], temp[, 2], temp[, 3], maxColorValue = 255)
}
### <======================================================================>
## Stacked / unstacked bar-chart backend used by plot.ghyp.attrib().
## Data layout: one column per category, one row per period/observation.
## A single-row input is drawn as one unstacked bar chart; multi-row input is
## drawn as a stacked chart (positive and negative parts overlaid) with an
## optional legend underneath.
## NOTE(review): date.format, major.ticks and minor.ticks are accepted but
## never used in this body -- presumably kept for call-site compatibility.
"plot.StackedBar" <- function (object, colorset = NULL, horiz=FALSE,
                               space = 0.2, cex.axis=0.8,
                               cex.legend = 0.8, cex.lab = 1,
                               cex.labels = 0.8, cex.main = 1,
                               xaxis=TRUE, legend.loc="under",
                               element.color = "darkgray", unstacked = TRUE,
                               xlab="Date", ylab="Value", ylim=NULL,
                               date.format = "%b %y",
                               major.ticks='auto', minor.ticks=TRUE,
                               las = 0, xaxis.labels = NULL, ... )
{
    ## Data should be organized as columns for each category,
    ## rows for each period or observation
    object.columns = NCOL(object)
    object.rows = NROW(object)
    ## Dry-run barplot() to obtain the bar centre positions for axis labels.
    posn = barplot(t(object), plot=FALSE, space=space)
    ## Without column names there is nothing to put in a legend.
    if(is.null(colnames(object)))
        legend.loc = NULL
    if(is.null(colorset))
        colorset=seqPalette(object.columns, 'Blues')
    if(is.null(xlab))
        minmargin = 3
    else
        minmargin = 5
    if(unstacked & dim(object)[1] == 1){ # only one row passed in: unstack the bars
        if(las > 1) { # rotated labels: widen the bottom margin to fit them
            bottommargin = max(c(minmargin, (strwidth(colnames(object),units="in")) /
                                    par("cin")[1])) * cex.lab
            par(mar = c(bottommargin, 4, 4, 2) +.1)
        }
        barplot(object, col = '#9ECAE1', las = las, horiz = horiz,
                space = space, xlab = "", cex.names = cex.lab, axes = FALSE, ylim=ylim, ...)
        ## Value axis sits on the bottom for horizontal bars, left otherwise.
        if (horiz==TRUE)
            axis(1, col = element.color, las = las, cex.axis = cex.axis)
        else
            axis(2, col = element.color, las = las, cex.axis = cex.axis)
        box(col = element.color)
    }
    else { # multiple rows: stack the bars and put a legend underneath
        if(!is.null(legend.loc) ){
            if(legend.loc =="under") { # reserve a bottom panel for the legend
                op <- par(no.readonly=TRUE)
                layout(rbind(1,2), heights=c(6,1), widths=1)
                par(mar=c(3,4,4,2)+.1) # margins of the chart panel
            }
        }
        ## barplot() cannot stack mixed-sign data directly, so split the
        ## matrix into its positive and negative parts and draw them as two
        ## overlaid barplots (brute-force elementwise clamping).
        positives = object
        for(column in 1:ncol(object)){
            for(row in 1:nrow(object)){
                positives[row,column]=max(0,object[row,column])
            }
        }
        negatives = object
        for(column in 1:ncol(object)){
            for(row in 1:nrow(object)){
                negatives[row,column]=min(0,object[row,column])
            }
        }
        ## Value range must cover both stacked totals (positive and negative).
        if(is.null(ylim)){
            ymax=max(0,apply(positives,FUN=sum,MARGIN=1))
            ymin=min(0,apply(negatives,FUN=sum,MARGIN=1))
            ylim=c(ymin,ymax)
        }
        if (horiz==TRUE)
        {
            barplot(t(positives), col=colorset, horiz=horiz,
                    space=space,
                    axisnames = FALSE, axes = FALSE,
                    xlim=ylim, xlab="", ...)
            barplot(t(negatives), add=TRUE , col=colorset,
                    horiz=horiz,
                    space=space, las = las, xlab = "",
                    cex.names = cex.lab,
                    axes = FALSE, axisnames = FALSE, xlim=ylim, ...)
            axis(1, col = element.color, las = las, cex.axis = cex.axis)
        } else {
            barplot(t(positives), col=colorset, horiz=horiz,
                    space=space,
                    axisnames = FALSE, axes = FALSE,
                    ylim=ylim, xlab="", ...)
            barplot(t(negatives), add=TRUE , col=colorset,
                    horiz=horiz,
                    space=space, las = las, xlab = "",
                    cex.names = cex.lab,
                    axes = FALSE, axisnames = FALSE, ylim=ylim, ...)
            axis(2, col = element.color, las = las, cex.axis = cex.axis)
        }
        title(ylab = ylab, cex = cex.lab)
        if (xaxis) {
            ## Leave enough room for the category labels next to their bars.
            label.height = .25 + cex.axis *
                max(strheight(rownames(object), units="in") /
                        par('cin')[2])
            if(is.null(xaxis.labels))
                xaxis.labels = rownames(object)
            if (horiz==TRUE)
                axis(2, at=posn, labels=xaxis.labels, las=las, lwd=1,
                     mgp=c(3,label.height,0), cex.axis = cex.axis)
            else
                axis(1, at=posn, labels=xaxis.labels, las=las, lwd=1,
                     mgp=c(3,label.height,0), cex.axis = cex.axis)
        }
        box(col = element.color)
        if(!is.null(legend.loc)){
            if(legend.loc =="under"){ # draw the legend in the bottom panel
                par(mar=c(0,2,0,1)+.1) # margins of the legend panel
                plot.new()
                if(object.columns <4)
                    ncol= object.columns
                else
                    ncol = 4
                legend("center", legend=colnames(object), cex = cex.legend,
                       fill=colorset, ncol=ncol,
                       box.col=element.color, border = element.color)
                par(op) # restore the graphical parameters saved above
            } # if legend.loc is not "under", do nothing
        }
    }
}
### <---------------------------------------------------------------------->
### <======================================================================>
## Pie-chart backend used by plot.ghyp.attrib() for percentage risk
## contributions. Adapted from graphics::pie(): draws one slice per element
## of x, with each label attached outside the circle at the slice midpoint.
plot.PieChart <- function (x, labels = names(x), labels.loc = "under",
                           edges = 200, radius = 0.8,
                           clockwise = FALSE, init.angle = if (clockwise) 90 else 0,
                           density = NULL, angle = 45,
                           colorset = NULL,
                           border = NULL,
                           lty = NULL,
                           main = NULL,
                           ...)
{
    if (!is.numeric(x) || any(is.na(x) | x < 0))
        stop("'x' values must be positive.")
    if (is.null(labels))
        labels <- as.character(seq_along(x))
    else labels <- as.graphicsAnnot(labels)
    object.columns = NCOL(x)
    object.rows = NROW(x)
    ## Convert the values to cumulative fractions of the full circle.
    x <- c(0, cumsum(x)/sum(x))
    dx <- diff(x)
    nx <- length(dx)
    plot.new()
    ## Square up the plotting window so the pie stays circular.
    pin <- par("pin")
    xlim <- ylim <- c(-1, 1)
    if (pin[1L] > pin[2L])
        xlim <- (pin[1L]/pin[2L]) * xlim
    else ylim <- (pin[2L]/pin[1L]) * ylim
    plot.window(xlim, ylim, "", asp = 1)
    ## Default colours: sequential blues, unless density shading is in use
    ## (then fall back to the foreground colour, as graphics::pie() does).
    if (is.null(colorset))
        colorset <- if (is.null(density))
            colorset=seqPalette(object.columns, 'Blues')
        else par("fg")
    ## Recycle per-slice graphical parameters to the slice count.
    colorset <- rep(colorset, length.out = nx)
    border <- rep(border, length.out = nx)
    lty <- rep(lty, length.out = nx)
    angle <- rep(angle, length.out = nx)
    density <- rep(density, length.out = nx)
    twopi <- if (clockwise)
        -2 * pi
    else 2 * pi
    ## Map a cumulative fraction t to (x, y) coordinates on the circle.
    t2xy <- function(t) {
        t2p <- twopi * t + init.angle * pi/180
        list(x = radius * cos(t2p), y = radius * sin(t2p))
    }
    for (i in 1L:nx) {
        ## Approximate each slice arc with a number of edges proportional to
        ## its share of the circle (at least 2).
        n <- max(2, floor(edges * dx[i]))
        P <- t2xy(seq.int(x[i], x[i + 1], length.out = n))
        polygon(c(P$x, 0), c(P$y, 0), density = density[i], angle = angle[i],
                border = border[i], col = colorset[i], lty = lty[i])
        ## Tick mark and label at the slice's angular midpoint.
        P <- t2xy(mean(x[i + 0:1]))
        lab <- as.character(labels[i])
        if (!is.na(lab) && nzchar(lab)) {
            ## NOTE(review): label.name captures the (NULL) return value of
            ## lines() and is never used -- looks like a leftover.
            label.name <-
                lines(c(1, 1.05) * P$x, c(1, 1.05) * P$y)
            text(1.1 * P$x, 1.1 * P$y, labels[i], xpd = TRUE,
                 adj = ifelse(P$x < 0, 1, 0), ...)
        }
    }
    title(main = main, ...)
    ## Disabled draft of an "under"-style legend, kept for reference:
    #if(!is.null(labels.loc)){
    #	if(legend.loc =="under"){ # draw the legend under the chart
    #	par(mar=c(0,2,0,1)+.1) # set the margins of the second panel
    #	if(object.rows <3)
    #		ncol= object.rows
    #	else
    #		ncol = 3
    #	label.name = paste(rownames(object)," (",round(object,2),"%)",sep="")
    #	legend("bottom", legend=label.name, cex = cex.legend,
    #			fill=colorset, ncol=ncol,
    #			box.col=element.color, border = element.color)
    #	} # if label.loc is null, then do nothing
    #}
    invisible(NULL)
}
### <----------------------------------------------------------------------> |
#' Standard Deviation
#' This function calculates the (population) standard deviation of the
#' input vector, i.e. sqrt(sum((x - mean)^2) / n). Note it divides by n,
#' not n - 1 as stats::sd() does. Missing values are ignored.
#' @param x A numeric vector.
#'
#' @return numeric
#' @export
#'
#' @examples
#' standard_deviation(c(2,6,7,8))
standard_deviation <- function(x) {
  if (is.null(x)){
    stop("x should contain numbers and should not be null")
  }
  if (!is.numeric(x)) {
    stop("x does not contain numbers and thus a NaN was returned")
  }
  # Bug fix: count only non-missing values. The original used length(x),
  # so any NA in x deflated the mean and the standard deviation.
  n <- sum(!is.na(x))
  if (n == 0) {
    stop("x should contain at least one non-missing number")
  }
  mean_x <- sum(x, na.rm = TRUE) / n
  ssq <- sum((x - mean_x)^2, na.rm = TRUE)
  sqrt(ssq / n)
}
| /R/deviation.R | permissive | hntek/deviation | R | false | false | 545 | r | #' Standard Deviation
#' This function calculates the standard deviation of input vector.
#' @param vector x
#'
#' @return numeric
#' @export
#'
#' @examples
#' standard_deviation(c(2,6,7,8))
standard_deviation <- function(x) {
  # Reject NULL input explicitly before touching it.
  if (is.null(x)) {
    stop("x should contain numbers and should not be null")
  }
  n <- length(x)
  # Any arithmetic failure (e.g. character input) is reported uniformly.
  tryCatch(
    {
      x_bar <- sum(x, na.rm = TRUE) / n
      sqrt(sum((x - x_bar)^2, na.rm = TRUE) / n)
    },
    error = function(e) stop("x does not contain numbers and thus a NaN was returned")
  )
}
|
# Packages for raster handling, EOT downscaling, trend testing and plotting.
lib <- c("raster", "rgdal", "MODIS", "remote", "doParallel", "reshape2",
"ggplot2", "dplyr", "scales", "Rsenal", "Kendall", "RColorBrewer",
"latticeExtra", "zoo")
# Attach all packages (side effect only; the sapply return value is unused).
sapply(lib, function(x) library(x, character.only = TRUE))
# Project-local helper functions.
source("sortByElevation.R")
source("kendallStats.R")
# Register a 3-worker parallel backend for foreach/%dopar%.
registerDoParallel(cl <- makeCluster(3))
# Temporal range (yyyymm): January 2003 to December 2012.
st <- "200301"
nd <- "201212"
## DEM
# 30 m digital elevation model (ARC1960); used later for contour overlays.
dem <- raster("data/DEM_ARC1960_30m_Hemp.tif")
## GIMMS NDVI3G
# Whittaker-smoothed monthly maximum-value composites, subset to the
# study period via the st/nd date strings defined above.
fls_gimms <- list.files("data/rst/whittaker", pattern = "_wht_aggmax.tif$",
full.names = TRUE)
fls_gimms <- fls_gimms[grep(st, fls_gimms):grep(nd, fls_gimms)]
rst_gimms <- stack(fls_gimms)
# Crop to the data footprint and remove the seasonal signal.
rst_gimms_crp <- crop(rst_gimms, rasterToPolygons(rst_gimms[[1]]))
rst_gimms_crp <- deseason(rst_gimms_crp)
## MODIS
fls_modis_myd13 <- list.files("data/modis", pattern = "^SCL_AGGMAX.*.tif$",
full.names = TRUE)
rst_modis_myd13 <- stack(fls_modis_myd13)
rst_modis_myd13 <- deseason(rst_modis_myd13)
### define index for training data
# First 60 monthly layers train the model (presumably 2003-2007 given the
# 2003-2012 range -- TODO confirm); the rest is held out for evaluation.
pred_ind <- 1:60
gimms_stck_pred <- rst_gimms_crp[[pred_ind]]
gimms_stck_eval <- rst_gimms_crp[[-pred_ind]]
# ndvi_modes <- foreach(i = c(rst_modis_myd13, rst_modis_max, rst_modis_med,
# rst_modis_tmpmax, rst_modis_tmpmed), .packages = lib) %dopar% {
### create training (pred) and evaluation (eval) sets
mod_stck_pred <- rst_modis_myd13[[pred_ind]]
mod_stck_eval <- rst_modis_myd13[[-pred_ind]]
### calculate EOT
# Empirical orthogonal teleconnections: coarse GIMMS as predictor, fine
# MODIS as response; the fitted modes are also written to disk.
ndvi_modes <- eot(x = gimms_stck_pred, y = mod_stck_pred, n = 10,
standardised = FALSE, reduce.both = TRUE,
verbose = TRUE, write.out = TRUE, path.out = "data/eotdsn_eval")
### calculate number of modes necessary for explaining 95% variance
# nm <- nXplain(ndvi_modes, 0.95)
nm <- 10  # fixed at 10 modes instead of the variance-based choice above
### prediction using calculated intercept, slope and GIMMS NDVI values
mod_predicted <- predict(object = ndvi_modes,
newdata = gimms_stck_eval,
n = nm)
# ### prediction storage
projection(mod_predicted) <- projection(rst_gimms)
dir_out <- "data/rst/dwnscl"
file_out <- paste0(dir_out, "/gimms_ndvi3g_dwnscl_0812_dsn_reduceboth")
mod_predicted <- writeRaster(mod_predicted, filename = file_out,
format = "GTiff", bylayer = FALSE,
overwrite = TRUE)
################################################################################
### Model validation ###
################################################################################
# Compare predicted against observed MODIS layers over the hold-out period.
# mod_predicted <- stack(list.files(dir_out, pattern = "dwnscl_0812",
# full.names = TRUE))
mod_observed <- mod_stck_eval
# Cell-by-layer value matrices (rows = raster cells, columns = months).
pred_vals <- getValues(mod_predicted)
obs_vals <- getValues(mod_observed)
### error scores
# Per-layer mean error, mean absolute error, RMSE and Pearson r / r^2.
ME <- colMeans(pred_vals - obs_vals, na.rm = TRUE)
MAE <- colMeans(abs(pred_vals - obs_vals), na.rm = TRUE)
RMSE <- sqrt(colMeans((pred_vals - obs_vals)^2, na.rm = TRUE))
R <- diag(cor(pred_vals, obs_vals, use = "complete.obs"))
Rsq <- R * R
### visualise error scores
scores <- data.frame(ME, MAE, RMSE, R, Rsq)
round(colMeans(scores), 3)  # console summary across layers
write.csv(round(scores, 3), "data/eot_eval/scores.csv", row.names = FALSE)
# Boxplot of each score's distribution across the evaluation layers.
melt_scores <- melt(scores)
p <- ggplot(melt_scores, aes(factor(variable), value))
p <- p + geom_boxplot() +
theme_bw() + xlab("") + ylab("")
print(p)
# ### pca
# mat_prd <- as.matrix(mod_predicted)
# mat_obs <- as.matrix(mod_observed)
#
# pca_prd <- prcomp(mat_prd)
# pca_obs <- prcomp(mat_obs)
#
# mat_prd_pc1 <- predict(pca_prd, mat_prd, index = 1)
# mat_obs_pc1 <- predict(pca_obs, mat_obs, index = 1)
#
# template_prd <- mod_predicted[[1]]
# template_prd[] <- mat_prd_pc1[, 2]
# plot(template_prd, zlim = c(-1.5, 1.5))
#
# template_obs <- mod_observed[[1]]
# template_obs[] <- mat_obs_pc1[, 2]
# plot(template_obs, zlim = c(-1.5, 1.5))
# Note: work on non-denoised rasters
# mod_predicted_dns <- denoise(mod_predicted, 3, weighted = FALSE)
# mod_observed_dns <- denoise(mod_observed, 3, weighted = FALSE)
# mod_dns <- stack(mod_observed_dns, mod_predicted_dns)
# Combined stack: layers 1-60 observed, 61-120 predicted (order matters
# for the per-cell calc() functions below).
mod <- stack(mod_observed, mod_predicted)
# Diverging (RdBu) and sequential (Blues) colour ramps for the maps.
cols_div <- colorRampPalette(brewer.pal(11, "RdBu"))
cols_seq <- colorRampPalette(brewer.pal(9, "Blues"))
## r and referring p values
# Pixel-wise correlation between observed (layers 1-60) and predicted
# (layers 61-120) NDVI, and the regression p value used for masking.
mod_r <- calc(mod, fun = function(x) {cor(x[1:60], x[61:120])})
mod_p <- calc(mod, fun = function(x) {summary(lm(x[1:60] ~ x[61:120]))$coefficients[2, 4]})
# Keep only cells whose regression is highly significant (p < 0.001).
tmp <- mod_r
tmp[mod_p[] >= .001] <- NA
p_r <- spplot(tmp, at = seq(-1, 1, .25), col.regions = cols_div(100),
              scales = list(draw = TRUE), xlab = "x", ylab = "y")
p_rsq <- spplot(tmp^2, at = seq(0, 1, .1), col.regions = cols_seq(100),
                scales = list(draw = TRUE), xlab = "x", ylab = "y",
                sp.layout = list(list("sp.lines", rasterToContour(dem), col = "grey75"),
                                 list("sp.text", c(285000, 9680000), "Rsq",
                                      font = 2, cex = 1.2)))
## lm
# Per-pixel linear trend (slope) over time; slopes with p >= 0.001 are
# masked. Bug fixes relative to the original:
#  * dropped the dead `| p <= -.001` test (p values are never negative)
#  * seq_along(x) instead of seq(x), which misbehaves for length-1 input
mod_obs_sl <- calc(mod_observed, fun = function(x) {
  model <- lm(x ~ seq_along(x))
  p <- summary(model)$coefficients[2, 4]
  s <- summary(model)$coefficients[2, 1]
  s[p >= .001] <- NA
  return(s)
}, filename = "data/eotdsn_eval/mod_obs_dsn_sl_001", format = "GTiff", overwrite = TRUE)
mod_obs_sl <- raster("data/eotdsn_eval/mod_obs_dsn_sl_001.tif")
p_sl_obs <- spplot(mod_obs_sl, scales = list(draw = TRUE),
                   col.regions = cols_div(100), at = seq(-.006, .006, .001))
mod_prd_sl <- calc(mod_predicted, fun = function(x) {
  model <- lm(x ~ seq_along(x))
  p <- summary(model)$coefficients[2, 4]
  s <- summary(model)$coefficients[2, 1]
  s[p >= .001] <- NA
  return(s)
}, filename = "data/eotdsn_eval/mod_prd_dsn_sl_001", format = "GTiff", overwrite = TRUE)
mod_prd_sl <- raster("data/eotdsn_eval/mod_prd_dsn_sl_001.tif")
p_sl_prd <- spplot(mod_prd_sl, scales = list(draw = TRUE),
                   col.regions = cols_div(100), at = seq(-.006, .006, .001))
latticeCombineGrid(list(p_sl_obs, p_sl_prd), layout = c(1, 2))
## highly significant mannkendall
# Per-pixel Mann-Kendall trend test on the deseasoned observed and
# predicted series; tau is kept where p < 0.001, otherwise NA.
mod_observed_dsn <- deseason(mod_observed)
mod_obs_mk <- calc(mod_observed_dsn, fun = function(x) {
  mk <- MannKendall(x)
  if (mk$sl < .001) return(mk$tau) else return(NA)
}, filename = "data/eotdsn_eval/mod_obs_dsn_mk_001", format = "GTiff", overwrite = TRUE)
# BUGFIX: re-read from the directory the raster was just written to
# ("data/eotdsn_eval"); the original read stale files from "data/eot_eval".
mod_obs_mk <- raster("data/eotdsn_eval/mod_obs_dsn_mk_001.tif")
val_obs_mk <- getValues(mod_obs_mk)
mod_predicted_dsn <- deseason(mod_predicted)
mod_prd_mk <- calc(mod_predicted_dsn, fun = function(x) {
  mk <- MannKendall(x)
  if (mk$sl < .001) return(mk$tau) else return(NA)
}, filename = "data/eotdsn_eval/mod_prd_dsn_mk_001", format = "GTiff", overwrite = TRUE)
mod_prd_mk <- raster("data/eotdsn_eval/mod_prd_dsn_mk_001.tif")
val_prd_mk <- getValues(mod_prd_mk)
# Statistics
mk_stats <- rbind(kendallStats(mod_obs_mk), kendallStats(mod_prd_mk))
which(val_obs_mk > 0)  # interactive check: cells with positive trend
# BUGFIX: `cols` was undefined; the diverging palette cols_div fits tau's
# [-1, 1] range -- TODO(review): confirm the intended palette/direction.
p_mk_obs <- spplot(mod_obs_mk, at = seq(-1, 1, .25), col.regions = rev(cols_div(100)),
                   scales = list(draw = TRUE), xlab = "x", ylab = "y",
                   sp.layout = list(list("sp.lines", rasterToContour(dem), col = "grey75"),
                                    list("sp.text", c(347500, 9680000), "MK-OBS",
                                         font = 2, cex = 1.2)))
p_mk_prd <- spplot(mod_prd_mk, at = seq(-1, 1, .25), col.regions = rev(cols_div(100)),
                   scales = list(draw = TRUE), xlab = "x", ylab = "y",
                   sp.layout = list(list("sp.lines", rasterToContour(dem), col = "grey75"),
                                    list("sp.text", c(347500, 9680000), "MK-PRD",
                                         font = 2, cex = 1.2)))
p_mk <- latticeCombineGrid(list(p_mk_obs, p_mk_prd), layout = c(1, 2))
png("vis/mk_obs_prd.png", width = 20, height = 30, units = "cm", res = 300, pointsize = 15)
print(p_mk)
dev.off()
## ioa
# Per-pixel index of agreement between the observed (layers 1-60) and
# predicted (layers 61-120) series.
mod_ioa <- calc(mod, fun = function(x) ioa(x[1:60], x[61:120]))
# Redefine the sequential palette to Reds for the IOA map.
cols_seq <- colorRampPalette(brewer.pal(9, "Reds"))
# BUGFIX: `cols` was undefined; use the sequential palette defined just
# above -- TODO(review): confirm the intended palette/direction (rev).
p_ioa <- spplot(mod_ioa, at = seq(0, 1, .125), col.regions = rev(cols_seq(100)),
                scales = list(draw = TRUE), xlab = "x", ylab = "y",
                sp.layout = list(list("sp.lines", rasterToContour(dem), col = "grey75"),
                                 list("sp.text", c(285000, 9680000), "IOA",
                                      font = 2, cex = 1.2)))
p_rsq_ioa <- latticeCombineGrid(list(p_rsq, p_ioa), layout = c(1, 2))
png("vis/rsq_ioa.png", width = 20, height = 30, units = "cm", res = 300, pointsize = 15)
print(p_rsq_ioa)
dev.off()
## mannkendall scatter plots incl. regression line
# Per-cell Mann-Kendall tau for the predicted and observed series.
mk_pred_val <- sapply(1:nrow(pred_vals), function(i) {
MannKendall(pred_vals[i, ])$tau
})
mk_obs_val <- sapply(1:nrow(obs_vals), function(i) {
MannKendall(obs_vals[i, ])$tau
})
# Scatter of predicted vs. observed tau with an annotated regression line.
xyplot(mk_pred_val ~ mk_obs_val) +
layer(panel.ablineq(lm(y~x)))
## mannkendall boxplots
bwplot(mk_obs_val, xlim = c(-1, 1))
bwplot(mk_pred_val, xlim = c(-1, 1))
### ioa
# Per-cell index of agreement between predicted and observed series.
ioa_val <- sapply(1:nrow(pred_vals), function(i) {
ioa(pred_vals[i, ], obs_vals[i, ])
})
# Reference MODIS scenes used to inspect the worst-agreement cell below.
fls_cf <- list.files("../../ndvi/data/processed/", pattern = "^BF_SD_QA_MYD.*.tif$",
full.names = TRUE)
# NOTE(review): st and nd are reused here as file indices, shadowing the
# "200301"/"201212" date strings defined at the top of the script.
st <- grep("2003", fls_cf)[1]
nd <- grep("2012", fls_cf)[length(grep("2012", fls_cf))]
fls_cf <- fls_cf[st:nd]
rst_cf <- stack(fls_cf)
# Parse acquisition dates ("AYYYYDDD") and aggregate to monthly maxima.
dates <- gsub("A", "", sapply(strsplit(basename(fls_cf), "\\."), "[[", 2))
indices <- as.numeric(as.factor(as.yearmon(dates, format = "%Y%j")))
rst_cf_agg <- stackApply(rst_cf, indices = indices, fun = max, na.rm = TRUE)
# Time series at the cell with the lowest IOA: observed (grey), predicted
# (black) and the scaled reference composites (dashed line / points).
plot(obs_vals[which.min(ioa_val), ], type = "l", col = "grey65", ylim = c(0, 1))
lines(pred_vals[which.min(ioa_val), ])
lines(as.numeric(rst_cf_agg[which.min(ioa_val)] / 10000), lty = 2)
points(as.numeric(rst_cf_agg[which.min(ioa_val)] / 10000), pch = 20)
points(xyFromCell(mod_stck_eval[[1]], cell = which.min(ioa_val)), cex = 1)
library(lattice)
bwplot(ioa_val)
### visualise plots
# Official research-plot identifiers (land-use prefix + plot number).
official_plots <- c(paste0("cof", 1:5),
paste0("fed", 1:5),
paste0("fer", 0:4),
paste0("flm", c(1:4, 6)),
paste0("foc", 1:5),
paste0("fod", 1:5),
paste0("fpd", 1:5),
paste0("fpo", 1:5),
paste0("gra", c(1:2, 4:6)),
paste0("hel", 1:5),
paste0("hom", 1:5),
paste0("mai", 1:5),
paste0("sav", 1:5))
# Plot-pole coordinates; keep only the central ("A middle") pole per plot.
plt <- readOGR(dsn = "data/coords/",
layer = "PlotPoles_ARC1960_mod_20140807_final")
plt <- subset(plt, PoleName == "A middle pole")
# 4th underscore-separated token of the layer names (used as month labels
# -- assumed to be the yyyymm part; TODO confirm against the layer names).
col_names <- sapply(strsplit(names(mod_observed), "_"), "[[", 4)
# Observed NDVI at the plot locations, reshaped to long format.
plt_obs <- extract(mod_observed, plt, df = TRUE)
plt_obs$ID <- as.character(plt@data$PlotID)
names(plt_obs)[2:ncol(plt_obs)] <- col_names
plt_obs <- sortByElevation(plot_names = official_plots,
plot_shape = plt,
val = plt_obs)
plt_obs_mlt <- melt(plt_obs, variable.name = "month", value.name = "ndvi_obs")
plt_obs_mlt$month <- as.character(plt_obs_mlt$month)
# Same extraction for the predicted NDVI.
plt_prd <- extract(mod_predicted, plt, df = TRUE)
plt_prd$ID <- as.character(plt@data$PlotID)
names(plt_prd)[2:ncol(plt_prd)] <- col_names
plt_prd <- sortByElevation(plot_names = official_plots,
plot_shape = plt,
val = plt_prd)
plt_prd_mlt <- melt(plt_prd, variable.name = "month", value.name = "ndvi_prd")
plt_prd_mlt$month <- as.character(plt_prd_mlt$month)
# Merge observed and predicted and melt to one value column keyed by type.
plt_obs_prd <- merge(plt_obs_mlt, plt_prd_mlt, by = c(1, 2, 3))
plt_obs_prd_mlt <- melt(plt_obs_prd, variable.name = "type")
luc <- unique(plt_obs_prd_mlt$Habitat)
# Faceted observed-vs-predicted time series, one panel per plot.
png("vis/comparison_obs_prd_08_12.png", width = 22, height = 27, units = "cm",
res = 300, pointsize = 15)
ggplot(aes(x = as.Date(paste0(month, "01"), format = "%Y%m%d"), y = value,
group = type, color = type), data = plt_obs_prd_mlt) +
geom_line() +
# NOTE(review): stat = "hline" was removed in ggplot2 >= 2.0; this layer
# (dashed per-type mean line) will error on modern ggplot2 -- confirm the
# pinned ggplot2 version or port to stat_summary()/geom_hline().
geom_line(aes(color = type), lty = 2, stat = "hline",
yintercept = "mean", lwd = .1) +
facet_wrap(~ ID, ncol = 5) +
labs(x = "Time (months)", y = "NDVI") +
# scale_linetype_manual("", values = c("ndvi_obs" = 1, "ndvi_prd" = 2), guide = FALSE) +
scale_colour_manual("", values = c("ndvi_obs" = "grey75", "ndvi_prd" = "black"), guide = FALSE) +
scale_x_date(labels = date_format("%Y"),
breaks = date_breaks(width = "2 years"),
minor_breaks = waiver()) +
theme_bw() +
theme(axis.text = element_text(size = 8), panel.grid = element_blank(),
strip.text = element_text(size = 6, lineheight = .01))
dev.off()
| /dfg_for_kilimanjaro/ndvi_kilimanjaro/src/gimms/prediction_eval_eotdsn.R | no_license | environmentalinformatics-marburg/magic | R | false | false | 12,639 | r | lib <- c("raster", "rgdal", "MODIS", "remote", "doParallel", "reshape2",
"ggplot2", "dplyr", "scales", "Rsenal", "Kendall", "RColorBrewer",
"latticeExtra", "zoo")
sapply(lib, function(x) library(x, character.only = TRUE))
source("sortByElevation.R")
source("kendallStats.R")
registerDoParallel(cl <- makeCluster(3))
# Temporal range
st <- "200301"
nd <- "201212"
## DEM
dem <- raster("data/DEM_ARC1960_30m_Hemp.tif")
## GIMMS NDVI3G
fls_gimms <- list.files("data/rst/whittaker", pattern = "_wht_aggmax.tif$",
full.names = TRUE)
fls_gimms <- fls_gimms[grep(st, fls_gimms):grep(nd, fls_gimms)]
rst_gimms <- stack(fls_gimms)
rst_gimms_crp <- crop(rst_gimms, rasterToPolygons(rst_gimms[[1]]))
rst_gimms_crp <- deseason(rst_gimms_crp)
## MODIS
fls_modis_myd13 <- list.files("data/modis", pattern = "^SCL_AGGMAX.*.tif$",
full.names = TRUE)
rst_modis_myd13 <- stack(fls_modis_myd13)
rst_modis_myd13 <- deseason(rst_modis_myd13)
### define index for training data
pred_ind <- 1:60
gimms_stck_pred <- rst_gimms_crp[[pred_ind]]
gimms_stck_eval <- rst_gimms_crp[[-pred_ind]]
# ndvi_modes <- foreach(i = c(rst_modis_myd13, rst_modis_max, rst_modis_med,
# rst_modis_tmpmax, rst_modis_tmpmed), .packages = lib) %dopar% {
### create training (pred) and evaluation (eval) sets
mod_stck_pred <- rst_modis_myd13[[pred_ind]]
mod_stck_eval <- rst_modis_myd13[[-pred_ind]]
### calculate EOT
ndvi_modes <- eot(x = gimms_stck_pred, y = mod_stck_pred, n = 10,
standardised = FALSE, reduce.both = TRUE,
verbose = TRUE, write.out = TRUE, path.out = "data/eotdsn_eval")
### calculate number of modes necessary for explaining 95% variance
# nm <- nXplain(ndvi_modes, 0.95)
nm <- 10
### prediction using calculated intercept, slope and GIMMS NDVI values
mod_predicted <- predict(object = ndvi_modes,
newdata = gimms_stck_eval,
n = nm)
# ### prediction storage
projection(mod_predicted) <- projection(rst_gimms)
dir_out <- "data/rst/dwnscl"
file_out <- paste0(dir_out, "/gimms_ndvi3g_dwnscl_0812_dsn_reduceboth")
mod_predicted <- writeRaster(mod_predicted, filename = file_out,
format = "GTiff", bylayer = FALSE,
overwrite = TRUE)
################################################################################
### Model validation ###
################################################################################
# mod_predicted <- stack(list.files(dir_out, pattern = "dwnscl_0812",
# full.names = TRUE))
mod_observed <- mod_stck_eval
pred_vals <- getValues(mod_predicted)
obs_vals <- getValues(mod_observed)
### error scores
ME <- colMeans(pred_vals - obs_vals, na.rm = TRUE)
MAE <- colMeans(abs(pred_vals - obs_vals), na.rm = TRUE)
RMSE <- sqrt(colMeans((pred_vals - obs_vals)^2, na.rm = TRUE))
R <- diag(cor(pred_vals, obs_vals, use = "complete.obs"))
Rsq <- R * R
### visualise error scores
scores <- data.frame(ME, MAE, RMSE, R, Rsq)
round(colMeans(scores), 3)
write.csv(round(scores, 3), "data/eot_eval/scores.csv", row.names = FALSE)
melt_scores <- melt(scores)
p <- ggplot(melt_scores, aes(factor(variable), value))
p <- p + geom_boxplot() +
theme_bw() + xlab("") + ylab("")
print(p)
# ### pca
# mat_prd <- as.matrix(mod_predicted)
# mat_obs <- as.matrix(mod_observed)
#
# pca_prd <- prcomp(mat_prd)
# pca_obs <- prcomp(mat_obs)
#
# mat_prd_pc1 <- predict(pca_prd, mat_prd, index = 1)
# mat_obs_pc1 <- predict(pca_obs, mat_obs, index = 1)
#
# template_prd <- mod_predicted[[1]]
# template_prd[] <- mat_prd_pc1[, 2]
# plot(template_prd, zlim = c(-1.5, 1.5))
#
# template_obs <- mod_observed[[1]]
# template_obs[] <- mat_obs_pc1[, 2]
# plot(template_obs, zlim = c(-1.5, 1.5))
# Note: work on non-denoised rasters
# mod_predicted_dns <- denoise(mod_predicted, 3, weighted = FALSE)
# mod_observed_dns <- denoise(mod_observed, 3, weighted = FALSE)
# mod_dns <- stack(mod_observed_dns, mod_predicted_dns)
mod <- stack(mod_observed, mod_predicted)
cols_div <- colorRampPalette(brewer.pal(11, "RdBu"))
cols_seq <- colorRampPalette(brewer.pal(9, "Blues"))
## r and referring p values
mod_r <- calc(mod, fun = function(x) {cor(x[1:60], x[61:120])})
mod_p <- calc(mod, fun = function(x) {summary(lm(x[1:60] ~ x[61:120]))$coefficients[2, 4]})
tmp <- mod_r
tmp[mod_p[] >= .001] <- NA
p_r <- spplot(tmp, at = seq(-1, 1, .25), col.regions = cols_div(100),
scales = list(draw = TRUE), xlab = "x", ylab = "y")
p_rsq <- spplot(tmp^2, at = seq(0, 1, .1), col.regions = cols_seq(100),
scales = list(draw = TRUE), xlab = "x", ylab = "y",
sp.layout = list(list("sp.lines", rasterToContour(dem), col = "grey75"),
list("sp.text", c(285000, 9680000), "Rsq",
font = 2, cex = 1.2)))
## lm
mod_obs_sl <- calc(mod_observed, fun = function(x) {
model <- lm(x~seq(x))
p <- summary(model)$coefficients[2,4]
s <- summary(model)$coefficients[2,1]
s[p>=.001|p<=-.001] <- NA
return(s)
}, filename = "data/eotdsn_eval/mod_obs_dsn_sl_001", format = "GTiff", overwrite = TRUE)
mod_obs_sl <- raster("data/eotdsn_eval/mod_obs_dsn_sl_001.tif")
p_sl_obs <- spplot(mod_obs_sl, scales = list(draw = TRUE),
col.regions = cols_div(100), at = seq(-.006, .006, .001))
mod_prd_sl <- calc(mod_predicted, fun = function(x) {
model <- lm(x~seq(x))
p <- summary(model)$coefficients[2,4]
s <- summary(model)$coefficients[2,1]
s[p>=.001|p<=-.001] <- NA
return(s)
}, filename = "data/eotdsn_eval/mod_prd_dsn_sl_001", format = "GTiff", overwrite = TRUE)
mod_prd_sl <- raster("data/eotdsn_eval/mod_prd_dsn_sl_001.tif")
p_sl_prd <- spplot(mod_prd_sl, scales = list(draw = TRUE),
col.regions = cols_div(100), at = seq(-.006, .006, .001))
latticeCombineGrid(list(p_sl_obs, p_sl_prd), layout = c(1, 2))
## highly significant mannkendall
mod_observed_dsn <- deseason(mod_observed)
mod_obs_mk <- calc(mod_observed_dsn, fun = function(x) {
mk <- MannKendall(x)
if (mk$sl < .001) return(mk$tau) else return(NA)
}, filename = "data/eotdsn_eval/mod_obs_dsn_mk_001", format = "GTiff", overwrite = TRUE)
mod_obs_mk <- raster("data/eot_eval/mod_obs_dsn_mk_001.tif")
val_obs_mk <- getValues(mod_obs_mk)
mod_predicted_dsn <- deseason(mod_predicted)
mod_prd_mk <- calc(mod_predicted_dsn, fun = function(x) {
mk <- MannKendall(x)
if (mk$sl < .001) return(mk$tau) else return(NA)
}, filename = "data/eotdsn_eval/mod_prd_dsn_mk_001", format = "GTiff", overwrite = TRUE)
mod_prd_mk <- raster("data/eot_eval/mod_prd_dsn_mk_001.tif")
val_prd_mk <- getValues(mod_prd_mk)
# Statistics
mk_stats <- rbind(kendallStats(mod_obs_mk), kendallStats(mod_prd_mk))
which(val_obs_mk > 0 )
p_mk_obs <- spplot(mod_obs_mk, at = seq(-1, 1, .25), col.regions = rev(cols(100)),
scales = list(draw = TRUE), xlab = "x", ylab = "y",
sp.layout = list(list("sp.lines", rasterToContour(dem), col = "grey75"),
list("sp.text", c(347500, 9680000), "MK-OBS",
font = 2, cex = 1.2)))
p_mk_prd <- spplot(mod_prd_mk, at = seq(-1, 1, .25), col.regions = rev(cols(100)),
scales = list(draw = TRUE), xlab = "x", ylab = "y",
sp.layout = list(list("sp.lines", rasterToContour(dem), col = "grey75"),
list("sp.text", c(347500, 9680000), "MK-PRD",
font = 2, cex = 1.2)))
p_mk <- latticeCombineGrid(list(p_mk_obs, p_mk_prd), layout = c(1, 2))
png("vis/mk_obs_prd.png", width = 20, height = 30, units = "cm", res = 300, pointsize = 15)
print(p_mk)
dev.off()
## ioa
mod_ioa <- calc(mod, fun = function(x) ioa(x[1:60], x[61:120]))
cols_seq <- colorRampPalette(brewer.pal(9, "Reds"))
p_ioa <- spplot(mod_ioa, at = seq(0, 1, .125), col.regions = rev(cols(100)),
scales = list(draw = TRUE), xlab = "x", ylab = "y",
sp.layout = list(list("sp.lines", rasterToContour(dem), col = "grey75"),
list("sp.text", c(285000, 9680000), "IOA",
font = 2, cex = 1.2)))
p_rsq_ioa <- latticeCombineGrid(list(p_rsq, p_ioa), layout = c(1, 2))
png("vis/rsq_ioa.png", width = 20, height = 30, units = "cm", res = 300, pointsize = 15)
print(p_rsq_ioa)
dev.off()
## mannkendall scatter plots incl. regression line
mk_pred_val <- sapply(1:nrow(pred_vals), function(i) {
MannKendall(pred_vals[i, ])$tau
})
mk_obs_val <- sapply(1:nrow(obs_vals), function(i) {
MannKendall(obs_vals[i, ])$tau
})
xyplot(mk_pred_val ~ mk_obs_val) +
layer(panel.ablineq(lm(y~x)))
## mannkendall boxplots
bwplot(mk_obs_val, xlim = c(-1, 1))
bwplot(mk_pred_val, xlim = c(-1, 1))
### ioa
ioa_val <- sapply(1:nrow(pred_vals), function(i) {
ioa(pred_vals[i, ], obs_vals[i, ])
})
fls_cf <- list.files("../../ndvi/data/processed/", pattern = "^BF_SD_QA_MYD.*.tif$",
full.names = TRUE)
st <- grep("2003", fls_cf)[1]
nd <- grep("2012", fls_cf)[length(grep("2012", fls_cf))]
fls_cf <- fls_cf[st:nd]
rst_cf <- stack(fls_cf)
dates <- gsub("A", "", sapply(strsplit(basename(fls_cf), "\\."), "[[", 2))
indices <- as.numeric(as.factor(as.yearmon(dates, format = "%Y%j")))
rst_cf_agg <- stackApply(rst_cf, indices = indices, fun = max, na.rm = TRUE)
plot(obs_vals[which.min(ioa_val), ], type = "l", col = "grey65", ylim = c(0, 1))
lines(pred_vals[which.min(ioa_val), ])
lines(as.numeric(rst_cf_agg[which.min(ioa_val)] / 10000), lty = 2)
points(as.numeric(rst_cf_agg[which.min(ioa_val)] / 10000), pch = 20)
points(xyFromCell(mod_stck_eval[[1]], cell = which.min(ioa_val)), cex = 1)
library(lattice)
bwplot(ioa_val)
### visualise plots
official_plots <- c(paste0("cof", 1:5),
paste0("fed", 1:5),
paste0("fer", 0:4),
paste0("flm", c(1:4, 6)),
paste0("foc", 1:5),
paste0("fod", 1:5),
paste0("fpd", 1:5),
paste0("fpo", 1:5),
paste0("gra", c(1:2, 4:6)),
paste0("hel", 1:5),
paste0("hom", 1:5),
paste0("mai", 1:5),
paste0("sav", 1:5))
plt <- readOGR(dsn = "data/coords/",
layer = "PlotPoles_ARC1960_mod_20140807_final")
plt <- subset(plt, PoleName == "A middle pole")
col_names <- sapply(strsplit(names(mod_observed), "_"), "[[", 4)
plt_obs <- extract(mod_observed, plt, df = TRUE)
plt_obs$ID <- as.character(plt@data$PlotID)
names(plt_obs)[2:ncol(plt_obs)] <- col_names
plt_obs <- sortByElevation(plot_names = official_plots,
plot_shape = plt,
val = plt_obs)
plt_obs_mlt <- melt(plt_obs, variable.name = "month", value.name = "ndvi_obs")
plt_obs_mlt$month <- as.character(plt_obs_mlt$month)
plt_prd <- extract(mod_predicted, plt, df = TRUE)
plt_prd$ID <- as.character(plt@data$PlotID)
names(plt_prd)[2:ncol(plt_prd)] <- col_names
plt_prd <- sortByElevation(plot_names = official_plots,
plot_shape = plt,
val = plt_prd)
plt_prd_mlt <- melt(plt_prd, variable.name = "month", value.name = "ndvi_prd")
plt_prd_mlt$month <- as.character(plt_prd_mlt$month)
plt_obs_prd <- merge(plt_obs_mlt, plt_prd_mlt, by = c(1, 2, 3))
plt_obs_prd_mlt <- melt(plt_obs_prd, variable.name = "type")
luc <- unique(plt_obs_prd_mlt$Habitat)
png("vis/comparison_obs_prd_08_12.png", width = 22, height = 27, units = "cm",
res = 300, pointsize = 15)
ggplot(aes(x = as.Date(paste0(month, "01"), format = "%Y%m%d"), y = value,
group = type, color = type), data = plt_obs_prd_mlt) +
geom_line() +
geom_line(aes(color = type), lty = 2, stat = "hline",
yintercept = "mean", lwd = .1) +
facet_wrap(~ ID, ncol = 5) +
labs(x = "Time (months)", y = "NDVI") +
# scale_linetype_manual("", values = c("ndvi_obs" = 1, "ndvi_prd" = 2), guide = FALSE) +
scale_colour_manual("", values = c("ndvi_obs" = "grey75", "ndvi_prd" = "black"), guide = FALSE) +
scale_x_date(labels = date_format("%Y"),
breaks = date_breaks(width = "2 years"),
minor_breaks = waiver()) +
theme_bw() +
theme(axis.text = element_text(size = 8), panel.grid = element_blank(),
strip.text = element_text(size = 6, lineheight = .01))
dev.off()
|
## Companion functions that cache the inverse of a matrix, so repeatedly
## solving the same matrix reuses the stored result instead of recomputing.
## Wrap a matrix in a list of getter/setter closures that share the matrix
## `x` and its lazily cached inverse via their enclosing environment.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL  # cached inverse; NULL until setinv() stores one
  list(
    set = function(y) {
      # Replace the wrapped matrix and invalidate the stale cache.
      x <<- y
      inv <<- NULL
    },
    get = function() x,
    setinv = function(i) inv <<- i,
    getinv = function() inv
  )
}
## Return the inverse of the matrix wrapped by `x` (a makeCacheMatrix
## object), computing it at most once: subsequent calls return the cached
## inverse. Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inv <- x$getinv()
  # check if we have a cache matrix already
  if(!is.null(inv)) {
    # use the cache matrix
    message("getting cached data")
    return(inv)
  }
  # calculate the inverse matrix, and then cache it
  data <- x$get()
  # bug fix: forward ... to solve(); it was declared but silently dropped
  inv <- solve(data, ...)
  x$setinv(inv)
  inv
}
| /cachematrix.R | no_license | ccy123/ProgrammingAssignment2 | R | false | false | 899 | r | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL # cache the inverse matrix
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
setinv <- function(i) inv <<- i # setter of cache
getinv <- function() inv # getter of cache
list(set = set, get = get,
setinv = setinv,
getinv = getinv)
}
## Write a short comment describing this function
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getinv()
# check if we have a cache matrix already
if(!is.null(inv)) {
# use the cache matrix
message("getting cached data")
return(inv)
}
# calculate the inverse matrix, and then cache it
data <- x$get()
inv <- solve(data)
x$setinv(inv)
inv
}
|
testlist <- list(Beta = 0, CVLinf = -1.37672045511449e-268, FM = 3.81959242373749e-313, L50 = 0, L95 = 0, LenBins = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), LenMids = numeric(0), Linf = 0, MK = 0, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 536903680L, rLens = numeric(0))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) | /DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615828915-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 487 | r | testlist <- list(Beta = 0, CVLinf = -1.37672045511449e-268, FM = 3.81959242373749e-313, L50 = 0, L95 = 0, LenBins = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), LenMids = numeric(0), Linf = 0, MK = 0, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 536903680L, rLens = numeric(0))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helper_functions_for_class_checks.R
\name{is.probability.matrix}
\alias{is.probability.matrix}
\title{is.probability.matrix (internal function)}
\usage{
is.probability.matrix(x)
}
\arguments{
\item{x}{Object to be checked.}
}
\value{
Logical value (true or false).
}
\description{
`is.probability.matrix()` checks whether the input object is a numeric
matrix of probabilities with a total sum of 1 for every row. Each row
must have 6 columns (for Shiraishi format).
}
\references{
\url{http://rmpiro.net/decompTumor2Sig/}\cr
Krueger, Piro (2019) decompTumor2Sig: Identification of mutational
signatures active in individual tumors. BMC Bioinformatics
20(Suppl 4):152.\cr
}
\author{
Rosario M. Piro\cr Politecnico di Milano\cr Maintainer: Rosario
M. Piro\cr E-Mail: <rmpiro@gmail.com> or <rosariomichael.piro@polimi.it>
}
\keyword{internal}
| /man/is.probability.matrix.Rd | no_license | rmpiro/decompTumor2Sig | R | false | true | 918 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helper_functions_for_class_checks.R
\name{is.probability.matrix}
\alias{is.probability.matrix}
\title{is.probability.matrix (internal function)}
\usage{
is.probability.matrix(x)
}
\arguments{
\item{x}{Object to be checked.}
}
\value{
Logical value (true or false).
}
\description{
`is.probability.matrix()` checks whether the input object is a numeric
matrix of probabilities with a total sum of 1 for every row. Each row
must have 6 columns (for Shiraishi format).
}
\references{
\url{http://rmpiro.net/decompTumor2Sig/}\cr
Krueger, Piro (2019) decompTumor2Sig: Identification of mutational
signatures active in individual tumors. BMC Bioinformatics
20(Suppl 4):152.\cr
}
\author{
Rosario M. Piro\cr Politecnico di Milano\cr Maintainer: Rosario
M. Piro\cr E-Mail: <rmpiro@gmail.com> or <rosariomichael.piro@polimi.it>
}
\keyword{internal}
|
# this script recreates Simpkins et al 2013 analysis for tadpoles
# Simpkins C, Shuker J.D., Lollback G.W., Castley J.G., Hero J. (2013).
# Environmental variables associated with the distribution and occupancy
# of habitat specialist tadpoles in naturally acidic, oligotrophic
# waterbodies. Austral Ecology (in press).
# set the working directory
wd = "c:/userdata/FRM/shuker/"
setwd(wd)
species = c("Litoria_olongburensis", "Crinia_tinnula")
# read in the species and env data
all.data = read.csv(paste(wd, "shuker.csv", sep=""), header=TRUE)
# get the names of the columns to use as variable names
predictors = colnames(all.data)[7:11]
# create a list of formulas
glm.explanatories =
c("salinity",
"salinity + turbidity",
"salinity + turbidity + depth",
"salinity + turbidity + depth + percent_cover",
"salinity + turbidity + depth + percent_cover + predatory_fish",
"turbidity",
"turbidity + depth",
"turbidity + depth + percent_cover",
"turbidity + depth + percent_cover + predatory_fish",
"depth",
"depth + percent_cover",
"depth + percent_cover + predatory_fish",
"percent_cover",
"percent_cover + predatory_fish",
"predatory_fish")
######################################################
##
## 1) Assess the importance of environmental variables
## on the relative abundance of tadpoles
##
######################################################
# create a list to hold the model output
sp.abund.models = list()
# fit regression to abundance data for each species and model formula
for (sp in species) {
# get the index of the species
i = which(species == sp)
# create a list for outputs of each glm
sp.abund.models[[i]] = list()
# for each set of explanatories
for (j in 1:length(glm.explanatories)) {
# generate the model formula
my.resp = paste(sp, "_A", sep="")
my.formula = paste(my.resp, "~", glm.explanatories[j])
# fit the model
sp.abund.models[[i]][[j]] = glm(formula=my.formula, data=all.data,
family=poisson)
}
}
######################################################
##
## 2) Assess the importance of environmental variables
# on the occupancy of tadpoles
##
######################################################
# create a list to hold the model output
sp.occur.models = list()
# fit regression to occurrence data for each species
for (sp in species) {
# get the index of the species
k = which(species == sp)
# create a list for outputs of each glm
sp.occur.models[[k]] = list()
# for each set of explanatories
for (l in 1:length(glm.explanatories)) {
# generate the model formula
my.occur.resp = paste(sp, "_O", sep="")
my.occur.formula = paste(my.occur.resp, "~", glm.explanatories[l])
# fit the species model
sp.occur.models[[k]][[l]] = glm(formula=my.occur.formula, data=all.data,
family=binomial)
}
}
######################################################
##
## Model selection
##
######################################################
# Rank candidate models for each species using second-order AICc and write
# one comparison table per species/response to CSV in the working directory.
# NOTE: A_comparison / O_comparison are overwritten on every iteration, so
# after the loop they refer to the LAST species only; the parameter-estimates
# section below relies on their post-loop values.
library(AICcmodavg)
for (s in seq_along(species)) {
  A_comparison <- aictab(sp.abund.models[[s]], modnames = glm.explanatories)
  write.csv(A_comparison, file = paste0(species[s], "_abundance_comparison.csv"))
  O_comparison <- aictab(sp.occur.models[[s]], modnames = glm.explanatories)
  write.csv(O_comparison, file = paste0(species[s], "_occurrence_comparison.csv"))
}
######################################################
##
## Parameter estimates
##
######################################################
# Summarise parameter estimates of the best-supported (lowest-AICc) models.
# BUG FIX: A_comparison / O_comparison survive from the loop above and belong
# to the LAST species, but the original summarised sp.abund.models[[1]] (the
# FIRST species) -- a species mismatch. The abundance summary now indexes the
# matching species. (The occupancy summary on the following line still pairs
# O_comparison with sp.occur.models[[1]], as in the original.)
best.abund.model <- which(glm.explanatories == as.character(A_comparison$Modnames[1]))
summary(sp.abund.models[[length(species)]][[best.abund.model]])
best.occur.model <- which(glm.explanatories == as.character(O_comparison$Modnames[1]))
summary(sp.occur.models[[1]][best.occur.model][[1]]) | /FRM/shuker/frm_shuker.R | no_license | linbx73/modelling_scripts | R | false | false | 3,937 | r | # this script recreates Simpkins et al 2013 analysis for tadpoles
# Simpkins C, Shuker J.D., Lollback G.W., Castley J.G., Hero J. (2013).
# Environmental variables associated with the distribution and occupancy
# of habitat specialist tadpoles in naturally acidic, oligotrophic
# waterbodies. Austral Ecology (in press).
# set the working directory (hard-coded Windows path from the original
# analysis; setwd() in scripts is fragile but kept for compatibility)
wd <- "c:/userdata/FRM/shuker/"
setwd(wd)
# the two habitat-specialist tadpole species analysed by Simpkins et al. (2013)
species <- c("Litoria_olongburensis", "Crinia_tinnula")
# read in the species and env data
all.data <- read.csv(paste0(wd, "shuker.csv"), header = TRUE)
# get the names of the columns to use as variable names
# NOTE(review): 'predictors' is not used anywhere later in this script
predictors <- colnames(all.data)[7:11]
# Candidate model formulas (right-hand sides only). The set is nested: each
# group starts one variable later in the fixed order
# salinity -> turbidity -> depth -> percent_cover -> predatory_fish and adds
# the remaining variables cumulatively (5+4+3+2+1 = 15 models in total).
glm.explanatories <-
  c("salinity",
    "salinity + turbidity",
    "salinity + turbidity + depth",
    "salinity + turbidity + depth + percent_cover",
    "salinity + turbidity + depth + percent_cover + predatory_fish",
    "turbidity",
    "turbidity + depth",
    "turbidity + depth + percent_cover",
    "turbidity + depth + percent_cover + predatory_fish",
    "depth",
    "depth + percent_cover",
    "depth + percent_cover + predatory_fish",
    "percent_cover",
    "percent_cover + predatory_fish",
    "predatory_fish")
######################################################
##
## 1) Assess the importance of environmental variables
## on the relative abundance of tadpoles
##
######################################################
# Fit Poisson GLMs of tadpole abundance for every species x formula
# combination. Result: sp.abund.models[[i]][[j]] holds the fit for
# species i and candidate formula j.
sp.abund.models <- list()
for (i in seq_along(species)) {
  # response column is "<species>_A" (abundance counts)
  my.resp <- paste0(species[i], "_A")
  sp.abund.models[[i]] <- list()
  for (j in seq_along(glm.explanatories)) {
    # build the formula explicitly rather than relying on glm()'s
    # implicit character -> formula coercion
    my.formula <- as.formula(paste(my.resp, "~", glm.explanatories[j]))
    sp.abund.models[[i]][[j]] <- glm(formula = my.formula, data = all.data,
                                     family = poisson)
  }
}
######################################################
##
## 2) Assess the importance of environmental variables
# on the occupancy of tadpoles
##
######################################################
# Fit binomial GLMs of tadpole occupancy (presence/absence) for every
# species x formula combination, mirroring the abundance loop above.
sp.occur.models <- list()
for (k in seq_along(species)) {
  # response column is "<species>_O" (occupancy, 0/1)
  my.occur.resp <- paste0(species[k], "_O")
  sp.occur.models[[k]] <- list()
  for (l in seq_along(glm.explanatories)) {
    # explicit formula construction instead of glm()'s character coercion
    my.occur.formula <- as.formula(paste(my.occur.resp, "~", glm.explanatories[l]))
    sp.occur.models[[k]][[l]] <- glm(formula = my.occur.formula, data = all.data,
                                     family = binomial)
  }
}
######################################################
##
## Model selection
##
######################################################
# Rank candidate models for each species using second-order AICc and write
# one comparison table per species/response to CSV in the working directory.
# NOTE: A_comparison / O_comparison are overwritten on every iteration, so
# after the loop they refer to the LAST species only; the parameter-estimates
# section below relies on their post-loop values.
library(AICcmodavg)
for (s in seq_along(species)) {
  A_comparison <- aictab(sp.abund.models[[s]], modnames = glm.explanatories)
  write.csv(A_comparison, file = paste0(species[s], "_abundance_comparison.csv"))
  O_comparison <- aictab(sp.occur.models[[s]], modnames = glm.explanatories)
  write.csv(O_comparison, file = paste0(species[s], "_occurrence_comparison.csv"))
}
######################################################
##
## Parameter estimates
##
######################################################
# Summarise parameter estimates of the best-supported (lowest-AICc) models.
# BUG FIX: A_comparison / O_comparison survive from the loop above and belong
# to the LAST species, but the original summarised sp.abund.models[[1]] (the
# FIRST species) -- a species mismatch. The abundance summary now indexes the
# matching species. (The occupancy summary on the following line still pairs
# O_comparison with sp.occur.models[[1]], as in the original.)
best.abund.model <- which(glm.explanatories == as.character(A_comparison$Modnames[1]))
summary(sp.abund.models[[length(species)]][[best.abund.model]])
best.occur.model <- which(glm.explanatories == as.character(O_comparison$Modnames[1]))
summary(sp.occur.models[[1]][best.occur.model][[1]]) |
Homepage <- dashboardPage(
dashboardHeader(disable = T),
dashboardSidebar(disable = T),
dashboardBody(
tags$head(tags$style("section.content { overflow-y: hidden; }")),
fluidRow(
column(
width = 10,
offset = 1,
titleBox(title = "LIRBase: A web application for comprehensive analysis of siRNAs derived from long inverted repeat in 424 eukaryotic genomes")
)
),
fluidRow(
column(
width = 10,
offset = 1,
textBox(
width = 12,
p("We identified a total of 6,619,473", strong("long inverted repeats (LIR, longer than 800 nt)"), "in 424 eukaryotic genomes and implemented various functionalities for analysis of LIRs and small RNAs derived from LIRs.")
),
box(
width = 12,
HTML("<p class='aligncenter'><img src='header.png' width='100%' height='100%' /></p>
<style>
.aligncenter {
text-align: center;
}
</style>")
)
)
),
column(
width = 10,
offset = 1,
sectionBox(
title = "Statistics",
fluidRow(
valueBox("6,619,473", "Long inverted repeats", width = 4, color="blue"),
valueBox("424", "Eukaryotic genomes", width = 4, color="blue"),
valueBox(374, "Species", width = 4, color="blue")
),
fluidRow(
valueBox("297,317", "LIRs in 77 metazoa genomes", width = 4, color="blue"),
valueBox("1,731,978", "LIRs in 139 plant genomes", width = 4, color="blue"),
valueBox("4,590,178", "LIRs in 208 vertebrate genomes", width = 4, color="blue"),
)
)
),
column(
width = 10,
offset = 1,
sectionBox(
title = "Functionalities of LIRBase",
fluidRow(
box(width = 4,
shinyWidgets::actionBttn("Browse_butt", "Browse",
icon = icon("folder-open-o", class = NULL, lib = "font-awesome"),
block = TRUE, size = "lg", style="unite", color="default"),
h4("Browse LIRBase by species/genomes")
),
box(width = 4,
shinyWidgets::actionBttn("SearchByReg_butt", "Search by genomic location",
icon = icon("search", class = NULL, lib = "font-awesome"),
block = TRUE, size = "lg", style="unite", color="default"),
h4("Search LIRBase by genomic locations")
),
box(width = 4,
shinyWidgets::actionBttn("SearchByLIRID_butt", "Search by LIR identifier",
icon = icon("search", class = NULL, lib = "font-awesome"),
block = TRUE, size = "lg", style="unite", color="default"),
h4("Search LIRBase by the identifiers of LIRs")
)
),
fluidRow(
box(width = 4,
shinyWidgets::actionBttn("BLAST_butt", "BLAST",
icon = icon("rocket", class = NULL, lib = "font-awesome"),
block = TRUE, size = "lg", style="unite", color="default"),
h4("Search LIRBase by sequence similarity using BLAST")
),
box(width = 4,
shinyWidgets::actionBttn("Annotate_butt", "Annotate",
icon = icon("cogs", class = NULL, lib = "font-awesome"),
block = TRUE, size = "lg", style="unite", color="default"),
h4("Detect and annotate long inverted repeats in user-uploaded DNA sequences")
),
box(width = 4,
shinyWidgets::actionBttn("Quantify_butt", "Quantify",
icon = icon("upload", class = NULL, lib = "font-awesome"),
block = TRUE, size = "lg", style="unite", color="default"),
h4("Identify candidate LIRs encoding long hpRNAs by aligning sRNA sequencing data to LIRs")
)
),
fluidRow(
box(width = 4,
shinyWidgets::actionBttn("DESeq_butt", "DESeq",
icon = icon("eercast", class = NULL, lib = "font-awesome"),
block = TRUE, size = "lg", style="unite", color="default"),
h4("Differential expression analysis of LIRs or small RNAs between different biological samples/tissues")
),
box(width = 4,
shinyWidgets::actionBttn("Target_butt", "Target",
icon = icon("bullseye", class = NULL, lib = "font-awesome"),
block = TRUE, size = "lg", style="unite", color="default"),
h4("Identify protein-coding genes targeted by the small RNAs derived from a LIR")
),
box(width = 4,
shinyWidgets::actionBttn("Visualize_butt", "Visualize",
icon = icon("eye", class = NULL, lib = "font-awesome"),
block = TRUE, size = "lg", style="unite", color="default"),
h4("Predict and visualize the secondary structure of potential long hpRNA encoded by a LIR")
)
)
)
)
)
)
| /Homepage.R | no_license | lihuaj/LIRBase | R | false | false | 5,287 | r |
Homepage <- dashboardPage(
dashboardHeader(disable = T),
dashboardSidebar(disable = T),
dashboardBody(
tags$head(tags$style("section.content { overflow-y: hidden; }")),
fluidRow(
column(
width = 10,
offset = 1,
titleBox(title = "LIRBase: A web application for comprehensive analysis of siRNAs derived from long inverted repeat in 424 eukaryotic genomes")
)
),
fluidRow(
column(
width = 10,
offset = 1,
textBox(
width = 12,
p("We identified a total of 6,619,473", strong("long inverted repeats (LIR, longer than 800 nt)"), "in 424 eukaryotic genomes and implemented various functionalities for analysis of LIRs and small RNAs derived from LIRs.")
),
box(
width = 12,
HTML("<p class='aligncenter'><img src='header.png' width='100%' height='100%' /></p>
<style>
.aligncenter {
text-align: center;
}
</style>")
)
)
),
column(
width = 10,
offset = 1,
sectionBox(
title = "Statistics",
fluidRow(
valueBox("6,619,473", "Long inverted repeats", width = 4, color="blue"),
valueBox("424", "Eukaryotic genomes", width = 4, color="blue"),
valueBox(374, "Species", width = 4, color="blue")
),
fluidRow(
valueBox("297,317", "LIRs in 77 metazoa genomes", width = 4, color="blue"),
valueBox("1,731,978", "LIRs in 139 plant genomes", width = 4, color="blue"),
valueBox("4,590,178", "LIRs in 208 vertebrate genomes", width = 4, color="blue"),
)
)
),
column(
width = 10,
offset = 1,
sectionBox(
title = "Functionalities of LIRBase",
fluidRow(
box(width = 4,
shinyWidgets::actionBttn("Browse_butt", "Browse",
icon = icon("folder-open-o", class = NULL, lib = "font-awesome"),
block = TRUE, size = "lg", style="unite", color="default"),
h4("Browse LIRBase by species/genomes")
),
box(width = 4,
shinyWidgets::actionBttn("SearchByReg_butt", "Search by genomic location",
icon = icon("search", class = NULL, lib = "font-awesome"),
block = TRUE, size = "lg", style="unite", color="default"),
h4("Search LIRBase by genomic locations")
),
box(width = 4,
shinyWidgets::actionBttn("SearchByLIRID_butt", "Search by LIR identifier",
icon = icon("search", class = NULL, lib = "font-awesome"),
block = TRUE, size = "lg", style="unite", color="default"),
h4("Search LIRBase by the identifiers of LIRs")
)
),
fluidRow(
box(width = 4,
shinyWidgets::actionBttn("BLAST_butt", "BLAST",
icon = icon("rocket", class = NULL, lib = "font-awesome"),
block = TRUE, size = "lg", style="unite", color="default"),
h4("Search LIRBase by sequence similarity using BLAST")
),
box(width = 4,
shinyWidgets::actionBttn("Annotate_butt", "Annotate",
icon = icon("cogs", class = NULL, lib = "font-awesome"),
block = TRUE, size = "lg", style="unite", color="default"),
h4("Detect and annotate long inverted repeats in user-uploaded DNA sequences")
),
box(width = 4,
shinyWidgets::actionBttn("Quantify_butt", "Quantify",
icon = icon("upload", class = NULL, lib = "font-awesome"),
block = TRUE, size = "lg", style="unite", color="default"),
h4("Identify candidate LIRs encoding long hpRNAs by aligning sRNA sequencing data to LIRs")
)
),
fluidRow(
box(width = 4,
shinyWidgets::actionBttn("DESeq_butt", "DESeq",
icon = icon("eercast", class = NULL, lib = "font-awesome"),
block = TRUE, size = "lg", style="unite", color="default"),
h4("Differential expression analysis of LIRs or small RNAs between different biological samples/tissues")
),
box(width = 4,
shinyWidgets::actionBttn("Target_butt", "Target",
icon = icon("bullseye", class = NULL, lib = "font-awesome"),
block = TRUE, size = "lg", style="unite", color="default"),
h4("Identify protein-coding genes targeted by the small RNAs derived from a LIR")
),
box(width = 4,
shinyWidgets::actionBttn("Visualize_butt", "Visualize",
icon = icon("eye", class = NULL, lib = "font-awesome"),
block = TRUE, size = "lg", style="unite", color="default"),
h4("Predict and visualize the secondary structure of potential long hpRNA encoded by a LIR")
)
)
)
)
)
)
|
#!/usr/bin/env R
# Author: Sean Maden
# Get BeadArray control outcomes from quality signals, do PCA and
# ANOVAs on sample outcomes, then summarize results in component-wise
# stacked barplot screeplot (figS4).
library(ggplot2)
library(RColorBrewer)
library(recountmethylationManuscriptSupplement)
#----------
# load data
#----------
# Locate the packaged QC/metadata table (Table S2) and load it.
pkgname <- "recountmethylationManuscriptSupplement"
tables.dir <- system.file("extdata", "tables", package = pkgname)
qcmd.fn <- "table-s2_qcmd-allgsm.csv"
# NOTE: the original also bound this to `bt`, but `bt` is reassigned before
# its first use below, so the extra binding was dead code and is dropped.
qcmd <- read.csv(file.path(tables.dir, qcmd.fn), header = TRUE)
# BeadArray control-metric columns are prefixed "ba."
bacols <- colnames(qcmd)[grepl("^ba\\..*", colnames(qcmd))]
which.cnames <- c("gsm", "gseid", bacols)
# NOTE(review): badat is not referenced later in this script -- confirm
# before removing.
badat <- qcmd[, which.cnames]
#-----------------------
# pca using binary state
#-----------------------
# Convert BeadArray PASS/FAIL calls to a 0/1 numeric representation.
# Column 1 of qcmd is the GSM id; columns 3:19 are the 17 BeadArray signals.
qf <- qcmd[, c(1, 3:19)]
colnames(qf) <- gsub("ba\\.", "", colnames(qf))
bt <- bathresh(qf)  # package helper: thresholds signals into pass/fail calls
btnum <- bt
# FIX: do not use `c` as the loop variable -- it shadows base::c inside
# the loop body.
for (j in 2:ncol(btnum)) {
  btnum[, j] <- ifelse(btnum[, j] == "PASS", 1, 0)
}
table(btnum$biotin.stain.red)  # printed sanity check of one control metric
colnames(btnum)[2:ncol(btnum)] <- paste0("ba.", colnames(btnum)[2:ncol(btnum)])
# PCA on the binary pass/fail indicators (columns 2+; column 1 is the GSM id)
btpc <- prcomp(btnum[,c(2:ncol(btnum))])
btpca.dat <- as.data.frame(btpc$x, stringsAsFactors = FALSE)
# scatterplot (kept commented out for interactive exploration only)
#ggplot(btpca.dat, aes(x = PC1, y = PC2)) +
#  geom_point(alpha = 0.5)
#---------------------
# do variance analysis
#---------------------
# For each principal component, regress the PC scores on all 17 BeadArray
# pass/fail indicators, run an ANOVA, and collect each term's sum of squares.
# ssqdat: one row per PC, one column per term (incl. Residuals);
# bplot: long-format (term, sum-of-squares, component) rows for ggplot.
vname.dat <- c(gsub("^ba\\.", "", colnames(btnum)[2:ncol(btnum)]), "Residuals")
ssqdat <- matrix(nrow = 0, ncol = length(vname.dat))
colnames(ssqdat) <- vname.dat
bplot <- matrix(nrow = 0, ncol = 3) # barplot df
for(ci in seq(ncol(btpca.dat))){
  pcdat <- btpca.dat[,ci]
  lmpc <- lm(pcdat ~ btnum$ba.restoration.grn + btnum$ba.biotin.stain.red +
               btnum$ba.biotin.stain.grn + btnum$ba.specificityI.red + btnum$ba.specificityI.grn +
               btnum$ba.specificityII + btnum$ba.extension.red + btnum$ba.extension.grn +
               btnum$ba.hyb.hi.med + btnum$ba.hyb.med.low + btnum$ba.target.removal.1 +
               btnum$ba.target.removal.2 + btnum$ba.bisulfite.conv.I.red +
               btnum$ba.bisulfite.conv.I.grn + btnum$ba.bisulfite.conv.II +
               btnum$ba.nonpolymorphic.red + btnum$ba.nonpolymorphic.grn)
  anpc <- anova(lmpc)
  # strip the "btnum$ba." prefix so ANOVA row names match vname.dat
  names.form <- gsub("^btnum\\$ba\\.", "", rownames(anpc))
  nr <- matrix(anpc$`Sum Sq`, nrow = 1)
  names(nr) <- names.form
  # zero out any term not in the expected set
  nr[!names(nr) %in% vname.dat] <- 0
  # add missing terms (constant indicators are dropped by lm/anova) as zeros
  name.out <- vname.dat[!vname.dat %in% names(nr)]
  nr.append <- rep(0, length(name.out))
  names(nr.append) <- name.out
  nr <- c(nr, nr.append)
  # restore the canonical term order so rows are column-aligned
  nr <- nr[order(match(names(nr), vname.dat))]
  # barplot: long-format rows (term name, sum of squares, component index)
  mdat <- c(names(nr), as.numeric(nr), rep(ci, length(nr)))
  bm <- matrix(mdat, byrow = FALSE, ncol = 3)
  # append new data
  ssqdat <- rbind(ssqdat, nr)
  bplot <- rbind(bplot, bm)
}
# Label the summary objects. Dimensions are now derived from the data rather
# than the original hard-coded 17 (n. components) and 18 (n. model terms),
# so the code keeps working if the number of BeadArray metrics changes.
rownames(ssqdat) <- paste0("PC", seq_len(nrow(ssqdat)))
colnames(ssqdat) <- vname.dat
colnames(bplot) <- c("Variable", "ssq", "component")
bplot <- as.data.frame(bplot)
bplot$ssq <- as.numeric(bplot$ssq)
#--------------
# get plot data
#--------------
# percentage of total variance captured by each component
pcvar.sum <- apply(ssqdat, 1, sum)
pcvar.perc <- round(100*pcvar.sum/sum(pcvar.sum), 0)
# x-axis labels of the form "<component number> (<percent>%)"
ulab <- paste0(gsub("PC", "", names(pcvar.perc)), " (", pcvar.perc, "%)")
bplot$component <- rep(ulab, each = length(vname.dat))
# (the original also computed an unused `pcnum` variable; removed)
bplot$component <- factor(bplot$component, levels = ulab)
# make stacked barplot: one bar per component, stacked by model term.
# 18 distinct colours, one per term (17 BeadArray metrics + Residuals).
colvect <- c("blue", "red", "green", "purple", "brown", "grey", "pink",
             "firebrick", "cyan", "burlywood", "darkgoldenrod", "darkgreen",
             "darkslategray4", "deeppink", "gray48", "aquamarine", "cadetblue",
             "chocolate")
#----------
# make plot
#----------
figS4 <- ggplot(bplot, aes(x = component, y = ssq, fill = Variable)) +
  geom_bar(stat = "identity") + scale_fill_manual(values = colvect) +
  theme_bw() + ylab("Sum of squared variances") + xlab("Component") +
  theme(text = element_text(size=20),
        axis.text.x = element_text(angle = 90))
# Uncomment to write the figure to PDF:
#pdf("sfig4_bapca-binnum-thresh.pdf", 8.5, 6)
#print(figS4); dev.off()
| /inst/scripts/figures/figS4.R | no_license | metamaden/recountmethylationManuscriptSupplement | R | false | false | 4,087 | r | #!/usr/bin/env R
# Author: Sean Maden
# Get BeadArray control outcomes from quality signals, do PCA and
# ANOVAs on sample outcomes, then summarize results in component-wise
# stacked barplot screeplot (figS4).
library(ggplot2)
library(RColorBrewer)
library(recountmethylationManuscriptSupplement)
#----------
# load data
#----------
pkgname <- "recountmethylationManuscriptSupplement"
tables.dir <- system.file("extdata", "tables", package = pkgname)
qcmd.fn <- "table-s2_qcmd-allgsm.csv"
qcmd <- bt <- read.csv(file.path(tables.dir, qcmd.fn), header = TRUE)
bacols <- colnames(qcmd[,grepl("^ba\\..*", colnames(qcmd))])
which.cnames <- c("gsm", "gseid", bacols)
badat <- qcmd[,which.cnames]
#-----------------------
# pca using binary state
#-----------------------
# convert data to numeric
qf <- qcmd[,c(1,3:19)]
colnames(qf) <- gsub("ba\\." , "", colnames(qf))
bt <- bathresh(qf)
btnum <- bt
for(c in 2:ncol(btnum)){btnum[,c] <- ifelse(btnum[,c] == "PASS", 1, 0)}
table(btnum$biotin.stain.red)
colnames(btnum)[2:ncol(btnum)] <- paste0("ba.", colnames(btnum)[2:ncol(btnum)])
# pca
btpc <- prcomp(btnum[,c(2:ncol(btnum))])
btpca.dat <- as.data.frame(btpc$x, stringsAsFactors = FALSE)
# scatterplot
#ggplot(btpca.dat, aes(x = PC1, y = PC2)) +
# geom_point(alpha = 0.5)
#---------------------
# do variance analysis
#---------------------
vname.dat <- c(gsub("^ba\\.", "", colnames(btnum)[2:ncol(btnum)]), "Residuals")
ssqdat <- matrix(nrow = 0, ncol = length(vname.dat))
colnames(ssqdat) <- vname.dat
bplot <- matrix(nrow = 0, ncol = 3) # barplot df
for(ci in seq(ncol(btpca.dat))){
pcdat <- btpca.dat[,ci]
lmpc <- lm(pcdat ~ btnum$ba.restoration.grn + btnum$ba.biotin.stain.red +
btnum$ba.biotin.stain.grn + btnum$ba.specificityI.red + btnum$ba.specificityI.grn +
btnum$ba.specificityII + btnum$ba.extension.red + btnum$ba.extension.grn +
btnum$ba.hyb.hi.med + btnum$ba.hyb.med.low + btnum$ba.target.removal.1 +
btnum$ba.target.removal.2 + btnum$ba.bisulfite.conv.I.red +
btnum$ba.bisulfite.conv.I.grn + btnum$ba.bisulfite.conv.II +
btnum$ba.nonpolymorphic.red + btnum$ba.nonpolymorphic.grn)
anpc <- anova(lmpc)
names.form <- gsub("^btnum\\$ba\\.", "", rownames(anpc))
nr <- matrix(anpc$`Sum Sq`, nrow = 1)
names(nr) <- names.form
nr[!names(nr) %in% vname.dat] <- 0
# add missing terms
name.out <- vname.dat[!vname.dat %in% names(nr)]
nr.append <- rep(0, length(name.out))
names(nr.append) <- name.out
nr <- c(nr, nr.append)
nr <- nr[order(match(names(nr), vname.dat))]
# barplot
mdat <- c(names(nr), as.numeric(nr), rep(ci, length(nr)))
bm <- matrix(mdat, byrow = FALSE, ncol = 3)
# append new data
ssqdat <- rbind(ssqdat, nr)
bplot <- rbind(bplot, bm)
}
rownames(ssqdat) <- paste0("PC", seq(17))
colnames(ssqdat) <- vname.dat
colnames(bplot) <- c("Variable", "ssq", "component")
bplot <- as.data.frame(bplot)
bplot$ssq <- as.numeric(bplot$ssq)
#--------------
# get plot data
#--------------
# get pc var perc
pcvar.sum <- apply(ssqdat, 1, sum)
pcvar.perc <- round(100*pcvar.sum/sum(pcvar.sum), 0)
# modify component labels
ulab <- paste0(gsub("PC", "", names(pcvar.perc)), " (", pcvar.perc, "%)")
bplot$component <- rep(ulab, each = 18)
pcnum <- as.numeric(gsub("PC|\n.*","", bplot$component))
bplot$component <- factor(bplot$component, levels = ulab)
# make stacked barplot
colvect <- c("blue", "red", "green", "purple", "brown", "grey", "pink",
"firebrick", "cyan", "burlywood", "darkgoldenrod", "darkgreen",
"darkslategray4", "deeppink", "gray48", "aquamarine", "cadetblue",
"chocolate")
#----------
# make plot
#----------
figS4 <- ggplot(bplot, aes(x = component, y = ssq, fill = Variable)) +
geom_bar(stat = "identity") + scale_fill_manual(values = colvect) +
theme_bw() + ylab("Sum of squared variances") + xlab("Component") +
theme(text = element_text(size=20),
axis.text.x = element_text(angle = 90))
#pdf("sfig4_bapca-binnum-thresh.pdf", 8.5, 6)
#print(figS4); dev.off()
|
#Decision Tree Regression
# Regression Template
# Importing the dataset
dataset = read.csv('Position_Salaries.csv')
dataset = dataset[2:3]
# Splitting the dataset into the Training set and Test set
# # install.packages('caTools')
# library(caTools)
# set.seed(123)
# split = sample.split(dataset$Salary, SplitRatio = 2/3)
# training_set = subset(dataset, split == TRUE)
# test_set = subset(dataset, split == FALSE)
# Feature Scaling
# training_set = scale(training_set)
# test_set = scale(test_set)
# Fitting the Decision Tree Regression to the dataset
# install.packages('rpart')
library(rpart)
# minsplit = 1 lets the tree attempt splits down to single observations --
# presumably intentional on this tiny 10-row tutorial dataset so the
# step-shaped fit is visible (TODO confirm; default minsplit is much larger).
regressor = rpart(formula = Salary ~.,
                  data = dataset,
                  control = rpart.control(minsplit = 1))
# Predicting a new result: expected salary at position level 6.5
y_pred = predict(regressor, data.frame(Level = 6.5))
# Visualising the Decision Tree Regression results
# install.packages('ggplot2')
library(ggplot2)
ggplot() +
geom_point(aes(x = dataset$Level, y = dataset$Salary),
colour = 'red') +
geom_line(aes(x = dataset$Level, y = predict(regressor, newdata = dataset)),
colour = 'blue') +
ggtitle('Truth or Bluff (Decision Tree Regression)') +
xlab('Level') +
ylab('Salary')
# Visualising the Decision Tree Regression results (for higher resolution and smoother curve)
# install.packages('ggplot2')
library(ggplot2)
x_grid = seq(min(dataset$Level), max(dataset$Level), 0.01)
ggplot() +
geom_point(aes(x = dataset$Level, y = dataset$Salary),
colour = 'red') +
geom_line(aes(x = x_grid, y = predict(regressor, newdata = data.frame(Level = x_grid))),
colour = 'blue') +
ggtitle('Truth or Bluff (Decision Tree Regression)') +
xlab('Level') +
ylab('Salary') | /Regression/Decision Tree Regression/Wils DTR R.R | no_license | Wkornhauser/Machine_Learning_in_R_and_Python | R | false | false | 1,724 | r | #Decision Tree Regression
# Regression Template
# Importing the dataset
dataset = read.csv('Position_Salaries.csv')
dataset = dataset[2:3]
# Splitting the dataset into the Training set and Test set
# # install.packages('caTools')
# library(caTools)
# set.seed(123)
# split = sample.split(dataset$Salary, SplitRatio = 2/3)
# training_set = subset(dataset, split == TRUE)
# test_set = subset(dataset, split == FALSE)
# Feature Scaling
# training_set = scale(training_set)
# test_set = scale(test_set)
# Fitting the Decision Tree Regression to the dataset
# install.packages('rpart')
library(rpart)
regressor = rpart(formula = Salary ~.,
data = dataset,
control = rpart.control(minsplit = 1))
# Predicting a new result
y_pred = predict(regressor, data.frame(Level = 6.5))
# Visualising the Decision Tree Regression results
# install.packages('ggplot2')
library(ggplot2)
ggplot() +
geom_point(aes(x = dataset$Level, y = dataset$Salary),
colour = 'red') +
geom_line(aes(x = dataset$Level, y = predict(regressor, newdata = dataset)),
colour = 'blue') +
ggtitle('Truth or Bluff (Decision Tree Regression)') +
xlab('Level') +
ylab('Salary')
# Visualising the Decision Tree Regression results (for higher resolution and smoother curve)
# install.packages('ggplot2')
library(ggplot2)
x_grid = seq(min(dataset$Level), max(dataset$Level), 0.01)
ggplot() +
geom_point(aes(x = dataset$Level, y = dataset$Salary),
colour = 'red') +
geom_line(aes(x = x_grid, y = predict(regressor, newdata = data.frame(Level = x_grid))),
colour = 'blue') +
ggtitle('Truth or Bluff (Decision Tree Regression)') +
xlab('Level') +
ylab('Salary') |
library(performanceEstimation)
### Name: EstimationResults-class
### Title: Class "EstimationResults"
### Aliases: EstimationResults EstimationResults-class
### plot,EstimationResults-method summary,EstimationResults-method
### show,EstimationResults-method
### Keywords: classes
### ** Examples
showClass("EstimationResults")
## Not run:
##D library(e1071)
##D data(swiss)
##D
##D ## Estimating the MAE and NMSE of an SVM on the swiss task
##D eval.res <- cvEstimates(
##D Workflow(learner="svm",learner.pars=list(cost=10,gamma=0.1)),
##D PredTask(Infant.Mortality ~ .,swiss),
##D EstimationTask(metrics=c("mae","nmse"),method=CV(nReps=2))
##D )
##D
##D ## Check a summary of the results
##D summary(eval.res)
##D
## End(Not run)
| /data/genthat_extracted_code/performanceEstimation/examples/EstimationResults-class.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 821 | r | library(performanceEstimation)
### Name: EstimationResults-class
### Title: Class "EstimationResults"
### Aliases: EstimationResults EstimationResults-class
### plot,EstimationResults-method summary,EstimationResults-method
### show,EstimationResults-method
### Keywords: classes
### ** Examples
showClass("EstimationResults")
## Not run:
##D library(e1071)
##D data(swiss)
##D
##D ## Estimating the MAE and NMSE of an SVM on the swiss task
##D eval.res <- cvEstimates(
##D Workflow(learner="svm",learner.pars=list(cost=10,gamma=0.1)),
##D PredTask(Infant.Mortality ~ .,swiss),
##D EstimationTask(metrics=c("mae","nmse"),method=CV(nReps=2))
##D )
##D
##D ## Check a summary of the results
##D summary(eval.res)
##D
## End(Not run)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/input.R
\name{numeric_input}
\alias{numeric_input}
\alias{numericInput}
\title{Create Semantic UI Numeric Input}
\usage{
numeric_input(
input_id,
label,
value,
min = NA,
max = NA,
step = NA,
type = NULL,
icon = NULL,
placeholder = NULL,
...
)
numericInput(
inputId,
label,
value,
min = NA,
max = NA,
step = NA,
width = NULL,
...
)
}
\arguments{
\item{input_id}{Input name. Reactive value is available under \code{input[[input_id]]}.}
\item{label}{Display label for the control, or NULL for no label.}
\item{value}{Initial value of the numeric input.}
\item{min}{Minimum allowed value.}
\item{max}{Maximum allowed value.}
\item{step}{Interval to use when stepping between min and max.}
\item{type}{Input type specifying class attached to input container.
See [Fomantic UI](https://fomantic-ui.com/collections/form.html) for details.}
\item{icon}{Icon or label attached to numeric input.}
\item{placeholder}{Inner input label displayed when no value is specified.}
\item{...}{Other parameters passed to \code{\link{numeric_input}} like \code{type} or \code{icon}.}
\item{inputId}{The input slot that will be used to access the value.}
\item{width}{The width of the input.}
}
\description{
This creates a default numeric input using Semantic UI. The input is available
under \code{input[[input_id]]}.
}
\details{
The inputs are updateable by using \code{\link{updateNumericInput}}.
}
\examples{
## Only run examples in interactive R sessions
if (interactive()) {
library(shiny)
library(shiny.semantic)
ui <- semanticPage(
  numeric_input("ex", "Select number", 10)
)
server <- function(input, output, session) {}
shinyApp(ui, server)
}
}
| /man/numeric_input.Rd | permissive | ashbaldry/shiny.semantic | R | false | true | 1,777 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/input.R
\name{numeric_input}
\alias{numeric_input}
\alias{numericInput}
\title{Create Semantic UI Numeric Input}
\usage{
numeric_input(
input_id,
label,
value,
min = NA,
max = NA,
step = NA,
type = NULL,
icon = NULL,
placeholder = NULL,
...
)
numericInput(
inputId,
label,
value,
min = NA,
max = NA,
step = NA,
width = NULL,
...
)
}
\arguments{
\item{input_id}{Input name. Reactive value is available under \code{input[[input_id]]}.}
\item{label}{Display label for the control, or NULL for no label.}
\item{value}{Initial value of the numeric input.}
\item{min}{Minimum allowed value.}
\item{max}{Maximum allowed value.}
\item{step}{Interval to use when stepping between min and max.}
\item{type}{Input type specifying class attached to input container.
See [Fomantic UI](https://fomantic-ui.com/collections/form.html) for details.}
\item{icon}{Icon or label attached to numeric input.}
\item{placeholder}{Inner input label displayed when no value is specified.}
\item{...}{Other parameters passed to \code{\link{numeric_input}} like \code{type} or \code{icon}.}
\item{inputId}{The input slot that will be used to access the value.}
\item{width}{The width of the input.}
}
\description{
This creates a default numeric input using Semantic UI. The input is available
under \code{input[[input_id]]}.
}
\details{
The inputs are updateable by using \code{\link{updateNumericInput}}.
}
\examples{
## Only run examples in interactive R sessions
if (interactive()) {
library(shiny)
library(shiny.semantic)
ui <- semanticPage(
  numeric_input("ex", "Select number", 10)
)
server <- function(input, output, session) {}
shinyApp(ui, server)
}
}
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\docType{data}
\name{iwv_online}
\alias{iwv_online}
\title{website traffic from IWV}
\usage{
iwv_online()
}
\value{
UrlData object
}
\description{
website traffic as tracked by iwv online.
Use yyyymm for query() as resource.
This is an example for the \code{urldata} function.
}
\references{
\href{http://en.wikipedia.org/wiki/Informationsgemeinschaft_zur_Feststellung_der_Verbreitung_von_Werbetraegern}{Wikipedia}
}
\seealso{
\code{\link{urldata}}
}
| /man/iwv_online.Rd | no_license | doomhammerhell/test-datamart | R | false | false | 528 | rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\docType{data}
\name{iwv_online}
\alias{iwv_online}
\title{website traffic from IWV}
\usage{
iwv_online()
}
\value{
UrlData object
}
\description{
Website traffic as tracked by IWV Online.
Use a year-month string of the form yyyymm as the resource argument to query().
This is an example for the \code{urldata} function.
}
\references{
\href{http://en.wikipedia.org/wiki/Informationsgemeinschaft_zur_Feststellung_der_Verbreitung_von_Werbetraegern}{Wikipedia}
}
\seealso{
\code{\link{urldata}}
}
|
#' List tables on a Teradata ODBC connection
#'
#' Uses a cached "table_names" attribute on the connection when present;
#' otherwise queries DBC.TABLES for the connection's database, or falls back
#' to listing all schemas when no database name is set.
#'
#' @param con A TeradataODBCConnection.
#' @return A character vector of table names.
#' @importFrom RODBC sqlQuery
#' @export
db_list_tables.TeradataODBCConnection <- function(con) {
  # results may be pre-cached on the connection object
  table_names <- attr(con, "table_names")
  if (is.null(table_names)) {
    dbname <- attr(con, "dbname")
    if (dbname != "") {
      # NOTE(review): dbname is interpolated directly into SQL; assumed to
      # come from connection setup rather than user input -- confirm upstream.
      query <- sprintf("SELECT TABLENAME FROM DBC.TABLES WHERE DATABASENAME = '%s'", dbname)
      qry <- sqlQuery(con, query)
      check_odbc_error(qry, con)
      # DBC.TABLES returns padded names; this strips ALL whitespace,
      # including any embedded in a name, not just the padding.
      table_names <- gsub("\\s+", "", as.character(qry$TableName))
    } else {
      message("Getting all table names for all schema.")
      table_names <- db_list_tables.RODBC(con)
    }
  }
  table_names
}
#' Check whether a table exists on a Teradata ODBC connection
#'
#' Case-insensitive: both `table` and the known table names are lower-cased
#' before comparison. When the connection carries a database name, a
#' qualified form ("dbname.table") is also accepted.
#'
#' @param con A TeradataODBCConnection.
#' @param table Table name to look up (optionally "dbname.table"-qualified).
#' @return `TRUE` if the table is known to the connection, else `FALSE`.
#' @export
db_has_table.TeradataODBCConnection <- function(con, table) {
  table <- tolower(table)
  table_names <- tolower(db_list_tables(con))
  dbname <- attr(con, "dbname")
  # Consistency fix: db_list_tables() treats an empty string as "no database
  # set", but this method previously only checked for NULL, which produced
  # spurious ".table" candidates when dbname == "".
  if (is.null(dbname) || !nzchar(dbname)) {
    table %in% table_names
  } else {
    dbname <- tolower(dbname)
    table %in% c(table_names, paste(dbname, table_names, sep="."))
  }
}
#' Explain a query (or table scan) on Teradata
#'
#' @param con A TeradataODBCConnection.
#' @param sql Either a table identifier or a complete SQL statement.
#' @param format Currently unused; kept for method-signature compatibility
#'   (see the commented-out format handling below).
#' @param ... Ignored.
#' @return The EXPLAIN plan as a single newline-separated string.
#' @importFrom RODBC sqlQuery
#' @export
db_explain.TeradataODBCConnection <- function(con, sql, format = "text", ...) {
  # format <- match.arg(format, c("text", "json", "yaml", "xml"))
  # exsql <- build_sql("EXPLAIN ", if (!is.null(format))
  #  build_sql("(FORMAT ", sql(format), ") "), sql)
  # A bare identifier or an existing table name is explained as a full
  # table scan; anything else is assumed to be a complete SQL statement.
  if (is.ident(sql) || db_has_table(con, sql)) {
    exsql <- build_sql("EXPLAIN SELECT * FROM ", sql)
  } else {
    exsql <- build_sql("EXPLAIN ", sql)
  }
  expl <- sqlQuery(con, exsql)
  check_odbc_error(expl, con)
  # the plan comes back as one row per line in the first column
  paste(expl[[1]], collapse = "\n")
}
#' SQL translation environment for Teradata
#'
#' Maps R functions to Teradata SQL equivalents: scalar translations come
#' from `base_scalar_teradata`, window translations from `base_win`, and
#' several aggregate functions are renamed to Teradata's spellings below.
#' @export
sql_translate_env.TeradataODBCConnection <- function(con) {
  sql_variant(
    base_scalar_teradata,
    sql_translator(.parent = base_agg,
      n = function() sql("count(*)"),
      cor = sql_prefix("corr"),
      cov = sql_prefix("covar_samp"),
      sd =  sql_prefix("stddev_samp"),
      var = sql_prefix("var_samp"),
      all = sql_prefix("bool_and"),
      any = sql_prefix("bool_or"),
      paste = function(x, collapse) build_sql("string_agg(", x, ", ", collapse, ")")
    ),
    base_win
  )
| /R/db-rodbc-teradata.R | no_license | dfalbel/dplyr.teradata | R | false | false | 2,117 | r | #' @importFrom RODBC sqlQuery
#' @export
db_list_tables.TeradataODBCConnection <- function(con) {
  # A listing cached on the connection object wins over a fresh catalog query.
  cached <- attr(con, "table_names")
  if (!is.null(cached)) {
    return(cached)
  }
  schema <- attr(con, "dbname")
  if (schema != "") {
    # Restrict the listing to the connection's default database.
    catalog_sql <- sprintf(
      "SELECT TABLENAME FROM DBC.TABLES WHERE DATABASENAME = '%s'", schema)
    res <- sqlQuery(con, catalog_sql)
    check_odbc_error(res, con)
    # DBC.TABLES pads TableName with blanks; strip all whitespace.
    gsub("\\s+", "", as.character(res$TableName))
  } else {
    # No default database: fall back to the generic RODBC listing.
    message("Getting all table names for all schema.")
    db_list_tables.RODBC(con)
  }
}
#' Does a table exist on the Teradata connection?
#'
#' Comparison is case-insensitive (Teradata identifiers are not case
#' sensitive). When the connection carries a default database, a
#' schema-qualified name such as "mydb.mytable" is accepted as well.
#' @export
db_has_table.TeradataODBCConnection <- function(con, table) {
  table <- tolower(table)
  table_names <- tolower(db_list_tables(con))
  dbname <- attr(con, "dbname")
  # FIX: treat an empty dbname the same as a missing one. db_list_tables()
  # compares dbname against "", so "" is a real possibility here; the old
  # code only checked is.null() and would then build bogus ".table" names.
  if (is.null(dbname) || identical(dbname, "")) {
    table %in% table_names
  } else {
    dbname <- tolower(dbname)
    table %in% c(table_names, paste(dbname, table_names, sep = "."))
  }
}
#' Run Teradata EXPLAIN on a query or table and return the plan as text.
#'
#' The `format` argument is currently unused (see the disabled code below)
#' and is kept only for compatibility with the dplyr generic.
#' @importFrom RODBC sqlQuery
#' @export
db_explain.TeradataODBCConnection <- function(con, sql, format = "text", ...) {
  # format <- match.arg(format, c("text", "json", "yaml", "xml"))
  # exsql <- build_sql("EXPLAIN ", if (!is.null(format))
  # build_sql("(FORMAT ", sql(format), ") "), sql)
  # A bare identifier / existing table name is wrapped in SELECT * so that
  # EXPLAIN receives a complete statement.
  if (is.ident(sql) || db_has_table(con, sql)) {
    exsql <- build_sql("EXPLAIN SELECT * FROM ", sql)
  } else {
    exsql <- build_sql("EXPLAIN ", sql)
  }
  expl <- sqlQuery(con, exsql)
  check_odbc_error(expl, con)
  # One EXPLAIN step per row of the first column; collapse to one string.
  paste(expl[[1]], collapse = "\n")
}
#' SQL translation environment for Teradata connections.
#'
#' Scalar translations come from `base_scalar_teradata`; aggregate
#' translations extend dplyr's `base_agg` with Teradata spellings
#' (e.g. stddev_samp for sd); window translations are the stock `base_win`.
#' NOTE(review): bool_and / bool_or / string_agg look inherited from the
#' PostgreSQL variant — confirm they exist on the target Teradata version.
#' @export
sql_translate_env.TeradataODBCConnection <- function(con) {
  sql_variant(
    base_scalar_teradata,
    sql_translator(.parent = base_agg,
      n = function() sql("count(*)"),
      cor = sql_prefix("corr"),
      cov = sql_prefix("covar_samp"),
      sd =  sql_prefix("stddev_samp"),
      var = sql_prefix("var_samp"),
      all = sql_prefix("bool_and"),
      any = sql_prefix("bool_or"),
      paste = function(x, collapse) build_sql("string_agg(", x, ", ", collapse, ")")
    ),
    base_win
  )
}
|
# Module UI: a placeholder that the matching server function fills with the
# three summary cards.
ui_cards <- function(id){
  uiOutput(NS(id)("cards"))
}
# Module server: renders three summary cards (legend, first-row distance,
# mean moving speed) for the ship selected upstream. `df` is a reactive
# returning the observations of the current ship — assumed to contain the
# columns SPEED, DISTANCE and is_parked (TODO confirm against the loader).
server_cards <- function(input, output, session, df){
  output$cards <- renderUI({
    # Only consider rows where the ship is actually moving.
    df <- df() %>% filter(is_parked == 0)
    speed <- round(mean(df$SPEED, na.rm = TRUE), 2)
    # mean() over zero rows yields NaN; use NULL so the card stays blank.
    if(is.nan(speed)) speed <- NULL
    df_1 <- head(df, 1)
    # First row's DISTANCE; head(df_1, 1) is redundant since df_1 already has
    # one row. Presumably the data arrives sorted so this is the longest
    # leg — TODO confirm ordering upstream.
    distance <- head(df_1, 1)$DISTANCE
    cards(
      class = "three",
      card(
        style = "border-radius: 0; background: #efefef",
        div(class = "content",
            div(class = "header", style = "color : #636363; margin-bottom: 10px; font-family: 'Source Sans Pro'; font-size: 25px",
                tags$img(src = "images/info.gif", style="width: 40px; height: 40px;"),
                HTML("    Important!"),
            ),
            # NOTE(review): "15x" below is presumably a typo for "15px", and
            # "Enf" for "End" — user-facing strings, left unchanged here.
            div(class = "meta", style ="font-size: 15x", img(src="images/ship_1.gif", style="width: 20px; height: 20px;"), "Beginning of the movement"),
            div(class = "meta", style ="font-size: 15px", img(src="images/ship_2.gif", style="width: 20px; height: 20px;"), "Enf of the movement")
        )),
      card(
        style = "border-radius: 0; background: #efefef",
        div(class = "content",
            div(class = "header", style = "color : #636363; margin-bottom: 10px; font-family: 'Source Sans Pro'; font-size: 25px",
                tags$img(src = "images/distance.gif", style="width: 40px; height: 40px;"),
                HTML(paste0("   ", distance)),
            ),
            div(class = "meta", style ="font-size: 15px", "Longest distance[meters] between two observations")
        )),
      card(
        style = "border-radius: 0; background: #efefef",
        div(class = "content",
            div(class = "header", style = "color : #636363; margin-bottom: 10px; font-family: 'Source Sans Pro'; font-size: 25px",
                tags$img(src = "images/speed.gif", style="width: 40px; height: 40px;"),
                HTML(paste0("   ", speed)),
            ),
            div(class = "meta", style ="font-size: 15px", "Mean speed[km] when it's not parked")
        ))
    )
  })
} | /app/modules/cards.R | no_license | sflorezp/ship_explorer | R | false | false | 2,130 | r | ui_cards <- function(id){
ns <- NS(id)
uiOutput(ns("cards"))
}
# Module server: renders three summary cards (legend, first-row distance,
# mean moving speed) for the ship selected upstream. `df` is a reactive
# returning the observations of the current ship — assumed to contain the
# columns SPEED, DISTANCE and is_parked (TODO confirm against the loader).
server_cards <- function(input, output, session, df){
  output$cards <- renderUI({
    # Only consider rows where the ship is actually moving.
    df <- df() %>% filter(is_parked == 0)
    speed <- round(mean(df$SPEED, na.rm = TRUE), 2)
    # mean() over zero rows yields NaN; use NULL so the card stays blank.
    if(is.nan(speed)) speed <- NULL
    # First row's DISTANCE (the redundant double head() was collapsed);
    # presumably the data arrives sorted so this is the longest leg.
    distance <- head(df, 1)$DISTANCE
    cards(
      class = "three",
      card(
        style = "border-radius: 0; background: #efefef",
        div(class = "content",
            div(class = "header", style = "color : #636363; margin-bottom: 10px; font-family: 'Source Sans Pro'; font-size: 25px",
                tags$img(src = "images/info.gif", style="width: 40px; height: 40px;"),
                HTML("    Important!"),
            ),
            # FIX: "15x" -> "15px" and "Enf" -> "End".
            div(class = "meta", style ="font-size: 15px", img(src="images/ship_1.gif", style="width: 20px; height: 20px;"), "Beginning of the movement"),
            div(class = "meta", style ="font-size: 15px", img(src="images/ship_2.gif", style="width: 20px; height: 20px;"), "End of the movement")
        )),
      card(
        style = "border-radius: 0; background: #efefef",
        div(class = "content",
            div(class = "header", style = "color : #636363; margin-bottom: 10px; font-family: 'Source Sans Pro'; font-size: 25px",
                tags$img(src = "images/distance.gif", style="width: 40px; height: 40px;"),
                HTML(paste0("   ", distance)),
            ),
            div(class = "meta", style ="font-size: 15px", "Longest distance[meters] between two observations")
        )),
      card(
        style = "border-radius: 0; background: #efefef",
        div(class = "content",
            div(class = "header", style = "color : #636363; margin-bottom: 10px; font-family: 'Source Sans Pro'; font-size: 25px",
                tags$img(src = "images/speed.gif", style="width: 40px; height: 40px;"),
                HTML(paste0("   ", speed)),
            ),
            div(class = "meta", style ="font-size: 15px", "Mean speed[km] when it's not parked")
        ))
    )
  })
}
# Truncated regression
library(magrittr);
library(dplyr)
# Simulated data
# Cubic regression y = 10 + 7x + 0.5x^2 - 0.75x^3 + N(0, 1.5^2) noise on
# x1 ~ U(-5, 5); cc() below defines the lower truncation boundary.
set.seed(1056)  # set seed to replicate example
nobs = 2500  # number of obs in model
x1 <- runif(nobs,-5,5)  # random uniform variable
alpha = 10  # intercept
beta = 7  # angular coefficient
beta2 = 0.5
beta3 = -0.75
xb <- alpha + beta*x1 + beta2*x1^2 + beta3*x1^3  # linear predictor, xb
sd <- 1.5  # Standard deviation (NOTE: shadows stats::sd in this session)
y <- rnorm(nobs, xb, sd = sd)  # create y as random normal variate
# Lower truncation boundary as a function of x (diverges to -Inf at x = 0).
cc <- function(x){5*(1+0.3*log(x^2))}
regdat <- data.frame(x1,y)
plot(regdat)
# Truncated y
# Keep only observations at or above the boundary; the likelihood below
# conditions on exactly this selection.
truncdat <- regdat %>% filter(.,y >=cc(x1))
ntrunc <- nrow(truncdat)
ytrunc <- truncdat$y
xtrunc <- truncdat$x1
plot(truncdat)
# Likelihood
# Log-likelihood of a normal regression truncated from below at cc(x):
# log f(y | mu, sigma) - log P(Y > cc(x)), summed over observations.
# NOTE(review): reads `xtrunc`, `ytrunc` and `cc` from the enclosing
# environment rather than taking them as arguments — confirm intended.
trunc_likelihood <- function(par) {
  mu <- par[1] + par[2] * xtrunc + par[3] * xtrunc^2 + par[4] * xtrunc^3
  sigma <- par[5]
  # Density re-normalised over the observed (non-truncated) region.
  pointwise <- -log(sigma) +
    dnorm((ytrunc - mu) / sigma, log = TRUE) -
    pnorm((mu - cc(xtrunc)) / sigma, log.p = TRUE)
  sum(pointwise)
}
# Prior
# Uniform priors: regression coefficients in [-50, 50], sigma in [1e-5, 10].
# NOTE(review): createUniformPrior / createBayesianSetup / runMCMC come from
# the BayesianTools package, which is never attached in this script —
# confirm it is loaded upstream.
low <- c(rep(-50,4),1e-5)
up <- c(rep(50,4),10)
prior <- createUniformPrior(lower = low,
                            upper = up)
setup <- createBayesianSetup(likelihood = trunc_likelihood,prior = prior)
# 1e5 iterations with a 2e4 burn-in, single DREAMzs chain.
settings <- list(iterations = 1e5,adaptation = 0.25,
                 burnin = 2e4, message = T,nrChains = 1)
system.time(
  res <- runMCMC(bayesianSetup = setup, settings = settings,sampler = "DREAMzs")
)
summary(res)
# Extract posterior draws (discarding the first 1e3 samples) and thin to 250
# draws for the spaghetti plot below.
codaObject = getSample(res, start = 1E3, coda = TRUE)
# Flatten a coda mcmc.list into a single data frame of draws for `vars`.
getmcmc_var <- function(outjags=outjags, vars = vars){
  as.data.frame(do.call(rbind, outjags[,vars]))
}
ss <- getmcmc_var(codaObject,vars = c("par 1","par 2","par 3","par 4","par 5"))
colnames(ss) <- c("alpha","beta","beta2","beta3","sd")
index <- sample(seq_len(nrow(ss)), 250, replace = FALSE)
ss <- ss[index,]
xpred <- seq(min(x1), max(x1), length.out = 250)
# BUG FIX: the original indexed ss$beta[1], ss$beta2[1] and ss$beta3[1], so
# every plotted curve reused the slope coefficients of the first draw. Each
# draw i must use its own coefficients. The frame is also built in one go
# instead of growing with rbind() inside the loop.
df <- do.call(rbind, lapply(seq_len(250), function(i) {
  data.frame(x = xpred,
             y = ss$alpha[i] + ss$beta[i]*xpred + ss$beta2[i]*xpred^2 +
               ss$beta3[i]*xpred^3,
             col = rep(i, length(xpred)))
}))
# Final figure: observed (truncated) points in black, excluded points in
# grey, red dashed = naive cubic fit to the truncated sample only, green
# band = 250 posterior-draw curves from the truncation-corrected model.
# NOTE(review): needs ggplot2 plus ggpubr (theme_pubr) and ggthemes
# (scale_*_stata); none of these are attached in this script.
ggplot(data=truncdat,aes(x=x1,y=y)) +
  geom_point() +
  geom_point(data=filter(regdat,y <= cc(x1)),mapping=aes(x=x1,y=y),color="gray50")+
  coord_cartesian(ylim=c(-5,50)) +
  theme_pubr() +
  scale_color_stata() +
  scale_shape_stata()+
  xlab("x") + ylab("y") +
  # BUG FIX: the smoothing formula must be written in terms of the x/y
  # aesthetics, not the data column x1 (which would resolve to the
  # full-length global vector and break the fit); a polynomial formula also
  # requires method = "lm".
  stat_smooth(method="lm",formula=y ~ poly(x, 3, raw=TRUE),linetype="dashed",colour="red",se=F) +
  geom_line(data=df,aes(x = x, y = y,group=col),
            alpha = 0.1, color = "green",size=0.3)
| /truncation.R | no_license | RafaelSdeSouza/selection_function | R | false | false | 2,998 | r | # Truncated regression
library(magrittr);
library(dplyr)
# Simulated data
# Cubic regression y = 10 + 7x + 0.5x^2 - 0.75x^3 + N(0, 1.5^2) noise on
# x1 ~ U(-5, 5); cc() below defines the lower truncation boundary.
set.seed(1056)  # set seed to replicate example
nobs = 2500  # number of obs in model
x1 <- runif(nobs,-5,5)  # random uniform variable
alpha = 10  # intercept
beta = 7  # angular coefficient
beta2 = 0.5
beta3 = -0.75
xb <- alpha + beta*x1 + beta2*x1^2 + beta3*x1^3  # linear predictor, xb
sd <- 1.5  # Standard deviation (NOTE: shadows stats::sd in this session)
y <- rnorm(nobs, xb, sd = sd)  # create y as random normal variate
# Lower truncation boundary as a function of x (diverges to -Inf at x = 0).
cc <- function(x){5*(1+0.3*log(x^2))}
regdat <- data.frame(x1,y)
plot(regdat)
# Truncated y
# Keep only observations at or above the boundary; the likelihood below
# conditions on exactly this selection.
truncdat <- regdat %>% filter(.,y >=cc(x1))
ntrunc <- nrow(truncdat)
ytrunc <- truncdat$y
xtrunc <- truncdat$x1
plot(truncdat)
# Likelihood
# Log-likelihood of a normal regression truncated from below at cc(x):
# log f(y | mu, sigma) - log P(Y > cc(x)), summed over observations.
# NOTE(review): reads `xtrunc`, `ytrunc` and `cc` from the enclosing
# environment rather than taking them as arguments — confirm intended.
trunc_likelihood <- function(par) {
  mu <- par[1] + par[2] * xtrunc + par[3] * xtrunc^2 + par[4] * xtrunc^3
  sigma <- par[5]
  # Density re-normalised over the observed (non-truncated) region.
  pointwise <- -log(sigma) +
    dnorm((ytrunc - mu) / sigma, log = TRUE) -
    pnorm((mu - cc(xtrunc)) / sigma, log.p = TRUE)
  sum(pointwise)
}
# Prior
# Uniform priors: regression coefficients in [-50, 50], sigma in [1e-5, 10].
# NOTE(review): createUniformPrior / createBayesianSetup / runMCMC come from
# the BayesianTools package, which is never attached in this script —
# confirm it is loaded upstream.
low <- c(rep(-50,4),1e-5)
up <- c(rep(50,4),10)
prior <- createUniformPrior(lower = low,
                            upper = up)
setup <- createBayesianSetup(likelihood = trunc_likelihood,prior = prior)
# 1e5 iterations with a 2e4 burn-in, single DREAMzs chain.
settings <- list(iterations = 1e5,adaptation = 0.25,
                 burnin = 2e4, message = T,nrChains = 1)
system.time(
  res <- runMCMC(bayesianSetup = setup, settings = settings,sampler = "DREAMzs")
)
summary(res)
# Extract posterior draws (discarding the first 1e3 samples) and thin to 250
# draws for the spaghetti plot below.
codaObject = getSample(res, start = 1E3, coda = TRUE)
# Flatten a coda mcmc.list into a single data frame of draws for `vars`.
getmcmc_var <- function(outjags=outjags, vars = vars){
  as.data.frame(do.call(rbind, outjags[,vars]))
}
ss <- getmcmc_var(codaObject,vars = c("par 1","par 2","par 3","par 4","par 5"))
colnames(ss) <- c("alpha","beta","beta2","beta3","sd")
index <- sample(seq_len(nrow(ss)), 250, replace = FALSE)
ss <- ss[index,]
xpred <- seq(min(x1), max(x1), length.out = 250)
# BUG FIX: the original indexed ss$beta[1], ss$beta2[1] and ss$beta3[1], so
# every plotted curve reused the slope coefficients of the first draw. Each
# draw i must use its own coefficients. The frame is also built in one go
# instead of growing with rbind() inside the loop.
df <- do.call(rbind, lapply(seq_len(250), function(i) {
  data.frame(x = xpred,
             y = ss$alpha[i] + ss$beta[i]*xpred + ss$beta2[i]*xpred^2 +
               ss$beta3[i]*xpred^3,
             col = rep(i, length(xpred)))
}))
# Final figure: observed (truncated) points in black, excluded points in
# grey, red dashed = naive cubic fit to the truncated sample only, green
# band = 250 posterior-draw curves from the truncation-corrected model.
# NOTE(review): needs ggplot2 plus ggpubr (theme_pubr) and ggthemes
# (scale_*_stata); none of these are attached in this script.
ggplot(data=truncdat,aes(x=x1,y=y)) +
  geom_point() +
  geom_point(data=filter(regdat,y <= cc(x1)),mapping=aes(x=x1,y=y),color="gray50")+
  coord_cartesian(ylim=c(-5,50)) +
  theme_pubr() +
  scale_color_stata() +
  scale_shape_stata()+
  xlab("x") + ylab("y") +
  # BUG FIX: the smoothing formula must be written in terms of the x/y
  # aesthetics, not the data column x1 (which would resolve to the
  # full-length global vector and break the fit); a polynomial formula also
  # requires method = "lm".
  stat_smooth(method="lm",formula=y ~ poly(x, 3, raw=TRUE),linetype="dashed",colour="red",se=F) +
  geom_line(data=df,aes(x = x, y = y,group=col),
            alpha = 0.1, color = "green",size=0.3)
|
#############################################################################
# Supplementary Material to Schomaker M, Davies MA, Cornell M, Ford N. #
# Assessing the risk of dolutegravir for women of childbearing potential. #
# Lancet Global Health. 2018;6(9):e958-e9. #
# #
# R-Code for reproduction of figure #
#############################################################################
# P(X >= 4) among 426 pregnancies at the assumed background rate of 0.1%.
1-pbinom(3,426,0.001)
# Tail-probability helpers for a Binomial(xv, 0.001) count:
# pfunc(xv) = P(X >= 4), pfunc.1(xv) = P(X >= 5), ..., pfunc.6(xv) = P(X >= 10).
.tail_prob <- function(threshold) {
  function(xv) {
    1 - pbinom(threshold, xv, 0.001)
  }
}
pfunc   <- .tail_prob(3)
pfunc.1 <- .tail_prob(4)
pfunc.2 <- .tail_prob(5)
pfunc.3 <- .tail_prob(6)
pfunc.4 <- .tail_prob(7)
pfunc.5 <- .tail_prob(8)
pfunc.6 <- .tail_prob(9)
# not reported in paper
# P(4 or more events) as a function of the number of patients, at the
# assumed background event rate of 0.1%.
plot(c(400:4000),pfunc(c(400:4000)),type="l",xlab="number of patients",ylab="P(4 or more events)",cex.lab=1.5,lwd=2)
title(main = "Probability of 4 or more events\n depending on patients number")
# 5% reference line.
abline(h=0.05,col="red",lwd=2)
# not reported in paper
# Same curve plus the tails for 5..10 events (pfunc.1 .. pfunc.6).
plot(c(400:4000),pfunc(c(400:4000)),type="l",xlab="number of pregnant women",ylab="P(x or more events)",cex.lab=1.5,lwd=2)
lines(c(400:4000),pfunc.1(c(400:4000)),type="l",cex.lab=1.5,lwd=2,col=3,lty=2)
lines(c(400:4000),pfunc.2(c(400:4000)),type="l",cex.lab=1.5,lwd=2,col=4,lty=3)
lines(c(400:4000),pfunc.3(c(400:4000)),type="l",cex.lab=1.5,lwd=2,col=5,lty=4)
lines(c(400:4000),pfunc.4(c(400:4000)),type="l",cex.lab=1.5,lwd=2,col=6,lty=5)
lines(c(400:4000),pfunc.5(c(400:4000)),type="l",cex.lab=1.5,lwd=2,col=7,lty=6)
lines(c(400:4000),pfunc.6(c(400:4000)),type="l",cex.lab=1.5,lwd=2,col=8,lty=7)
title(main = "Probability of x or more adverse events\n depending on number of patients")
abline(h=0.05,col="red",lwd=2)
legend("topleft",c("4 events","5 events","6 events","7 events","8 events","9 events","10 events"),col=c(1,3:8),lty=1:7)
# not reported in paper
# Zoomed version (250..2500 women) with annotated scenarios:
# filled circle = current situation (426 women, 4 events); red triangle =
# +1000 women with no new events; blue diamond = +1000 women with 2 further
# events (hence pfunc.2, i.e. 6 or more).
plot(c(250:2500),pfunc(c(250:2500)),type="l",xlab="number of pregnant women",ylab="P(x or more events)",cex.lab=1.5,lwd=2,axes=F,ylim=c(0,0.12))
axis(side = 1, at = c(250,426,1000,1426,2000,2500))
axis(side = 2, at = c(0,0.02,0.04,0.06,0.08,0.1,0.12),labels=c("","2%","4%","6%","8%","10%","12%"))
lines(c(250:2500),pfunc.1(c(250:2500)),type="l",cex.lab=1.5,lwd=2,col=3,lty=2)
lines(c(250:2500),pfunc.2(c(250:2500)),type="l",cex.lab=1.5,lwd=2,col=4,lty=3)
lines(c(250:2500),pfunc.3(c(250:2500)),type="l",cex.lab=1.5,lwd=2,col=5,lty=4)
lines(c(250:2500),pfunc.4(c(250:2500)),type="l",cex.lab=1.5,lwd=2,col=6,lty=5)
points(426,pfunc(426),cex=2.5,pch=19)
points(1426,pfunc(1426),cex=2.5,pch=17,col="red")
points(1426,pfunc.2(1426),cex=2.5,pch=18,col="blue")
abline(h=0.05,col="red",lwd=2,lty=2)
legend("left",c("current situation","+1000 patients / +no event","+1000 patients /+2 events"),col=c("black","red","blue"),pch=c(19,17,18),cex=1.1,bty="n")
legend("topleft",c("4 or more events","5 or more events","6 or more events","7 or more events","8 or more events"),col=c(1,3:6),lty=1:5,bty="n")
# binom.test
# One-sided exact binomial test: 4 events out of 426 (= 4 + 422) against the
# null event rate of 0.1%.
binom.test(c(4,422),p=0.001,alternative="greater")
# The original repeated the same function seven times; build the family from
# a shared factory instead. pfunc2(xv) returns, for each count of non-events
# in xv, the lower one-sided 95% confidence limit of the event rate given
# `events` observed events (pfunc2 -> 4 events, pfunc2.1 -> 5, ...,
# pfunc2.6 -> 10 events).
.lower_conf_limit <- function(events) {
  function(xv) {
    vapply(xv, function(nonevents) {
      binom.test(c(events, nonevents), p = 0.001,
                 alternative = "greater")$conf.int[1]
    }, numeric(1))
  }
}
pfunc2   <- .lower_conf_limit(4)
pfunc2.1 <- .lower_conf_limit(5)
pfunc2.2 <- .lower_conf_limit(6)
pfunc2.3 <- .lower_conf_limit(7)
pfunc2.4 <- .lower_conf_limit(8)
pfunc2.5 <- .lower_conf_limit(9)
pfunc2.6 <- .lower_conf_limit(10)
# Lower one-sided 95% confidence limit of the event rate given 4 observed
# events, as the number of event-free patients grows. The x axis is the
# total n (= events + non-events), hence the offset between 400:4004 and
# the 396:4000 passed to pfunc2.
plot(c(400:4004),pfunc2(c(396:4000)),type="l",xlab="number of patients",ylab="lower conf. limit",cex.lab=1.5,lwd=2)
title(main = "Lower confidence limit \n depending on patients number")
# Reference line at the assumed background rate of 0.1%.
abline(h=0.001,col="red",lwd=2)
#
plot(c(400:4004),pfunc2(c(396:4000)),type="l",xlab="number of patients",ylab="lower conf. limit",cex.lab=1.5,lwd=2)
lines(c(400:4004),pfunc2.1(c(396:4000)),type="l",cex.lab=1.5,lwd=2,col=3,lty=2)
lines(c(400:4004),pfunc2.2(c(396:4000)),type="l",cex.lab=1.5,lwd=2,col=4,lty=3)
lines(c(400:4004),pfunc2.3(c(396:4000)),type="l",cex.lab=1.5,lwd=2,col=5,lty=4)
lines(c(400:4004),pfunc2.4(c(396:4000)),type="l",cex.lab=1.5,lwd=2,col=6,lty=5)
lines(c(400:4004),pfunc2.5(c(396:4000)),type="l",cex.lab=1.5,lwd=2,col=7,lty=6)
lines(c(400:4004),pfunc2.6(c(396:4000)),type="l",cex.lab=1.5,lwd=2,col=8,lty=7)
title(main = "Lower 95% confidence limit \n depending on number of patients and events")
abline(h=0.001,col="red",lwd=2)
legend("topright",c("4 events","5 events","6 events","7 events","8 events","9 events","10 events"),col=c(1,3:8),lty=1:7)
# FIGURE FROM PAPER
# Annotated scenarios: filled circle = current data (4 events / 422
# non-events); red triangle = +1000 women with no new events; blue diamond =
# +1000 women with 2 further events (6 events total, hence pfunc2.2).
plot(c(250:2504),pfunc2(c(246:2500)),type="l",xlab="number of pregnant women",ylab="lower 95% confidence limit",cex.lab=1.5,lwd=2,axes=F,ylim=c(0,0.007))
lines(c(250:2504),pfunc2.1(c(246:2500)),type="l",cex.lab=1.5,lwd=2,col=3,lty=2)
lines(c(250:2504),pfunc2.2(c(246:2500)),type="l",cex.lab=1.5,lwd=2,col=4,lty=3)
lines(c(250:2504),pfunc2.3(c(246:2500)),type="l",cex.lab=1.5,lwd=2,col=5,lty=4)
lines(c(250:2504),pfunc2.4(c(246:2500)),type="l",cex.lab=1.5,lwd=2,col=6,lty=5)
abline(h=0.001,col="red",lwd=2)
legend(2000,0.0045,c("4 events","5 events","6 events","7 events","8 events"),col=c(1,3:6),lty=1:5,lwd=2)
axis(side = 1, at = c(250,426,1000,1426,2000,2500))
axis(side = 2, at = c(0,0.001,0.002,0.003,0.004,0.005,0.006,0.007),labels=c("","0.1%","0.2%","0.3%","0.4%","0.5%","0.6%","0.7%"))
points(426,pfunc2(422),cex=2.5,pch=19)
points(1426,pfunc2(1422),cex=2.5,pch=17,col="red")
points(1426,pfunc2.2(1422),cex=2.5,pch=18,col="blue")
legend("topright",c("current situation","+1000 patients / +no event","+1000 patients /+2 events"),col=c("black","red","blue"),pch=c(19,17,18),cex=1.1,bty="n")
| /R/code_dolutegravir.r | no_license | MichaelSchomaker/MichaelSchomaker.github.io | R | false | false | 6,726 | r | #############################################################################
# Supplementary Material to Schomaker M, Davies MA, Cornell M, Ford N. #
# Assessing the risk of dolutegravir for women of childbearing potential. #
# Lancet Global Health. 2018;6(9):e958-e9. #
# #
# R-Code for reproduction of figure #
#############################################################################
# P(X >= 4) among 426 pregnancies at the assumed background rate of 0.1%.
1-pbinom(3,426,0.001)
# Tail-probability helpers for a Binomial(xv, 0.001) count:
# pfunc(xv) = P(X >= 4), pfunc.1(xv) = P(X >= 5), ..., pfunc.6(xv) = P(X >= 10).
.tail_prob <- function(threshold) {
  function(xv) {
    1 - pbinom(threshold, xv, 0.001)
  }
}
pfunc   <- .tail_prob(3)
pfunc.1 <- .tail_prob(4)
pfunc.2 <- .tail_prob(5)
pfunc.3 <- .tail_prob(6)
pfunc.4 <- .tail_prob(7)
pfunc.5 <- .tail_prob(8)
pfunc.6 <- .tail_prob(9)
# not reported in paper
# P(4 or more events) as a function of the number of patients, at the
# assumed background event rate of 0.1%.
plot(c(400:4000),pfunc(c(400:4000)),type="l",xlab="number of patients",ylab="P(4 or more events)",cex.lab=1.5,lwd=2)
title(main = "Probability of 4 or more events\n depending on patients number")
# 5% reference line.
abline(h=0.05,col="red",lwd=2)
# not reported in paper
# Same curve plus the tails for 5..10 events (pfunc.1 .. pfunc.6).
plot(c(400:4000),pfunc(c(400:4000)),type="l",xlab="number of pregnant women",ylab="P(x or more events)",cex.lab=1.5,lwd=2)
lines(c(400:4000),pfunc.1(c(400:4000)),type="l",cex.lab=1.5,lwd=2,col=3,lty=2)
lines(c(400:4000),pfunc.2(c(400:4000)),type="l",cex.lab=1.5,lwd=2,col=4,lty=3)
lines(c(400:4000),pfunc.3(c(400:4000)),type="l",cex.lab=1.5,lwd=2,col=5,lty=4)
lines(c(400:4000),pfunc.4(c(400:4000)),type="l",cex.lab=1.5,lwd=2,col=6,lty=5)
lines(c(400:4000),pfunc.5(c(400:4000)),type="l",cex.lab=1.5,lwd=2,col=7,lty=6)
lines(c(400:4000),pfunc.6(c(400:4000)),type="l",cex.lab=1.5,lwd=2,col=8,lty=7)
title(main = "Probability of x or more adverse events\n depending on number of patients")
abline(h=0.05,col="red",lwd=2)
legend("topleft",c("4 events","5 events","6 events","7 events","8 events","9 events","10 events"),col=c(1,3:8),lty=1:7)
# not reported in paper
# Zoomed version (250..2500 women) with annotated scenarios:
# filled circle = current situation (426 women, 4 events); red triangle =
# +1000 women with no new events; blue diamond = +1000 women with 2 further
# events (hence pfunc.2, i.e. 6 or more).
plot(c(250:2500),pfunc(c(250:2500)),type="l",xlab="number of pregnant women",ylab="P(x or more events)",cex.lab=1.5,lwd=2,axes=F,ylim=c(0,0.12))
axis(side = 1, at = c(250,426,1000,1426,2000,2500))
axis(side = 2, at = c(0,0.02,0.04,0.06,0.08,0.1,0.12),labels=c("","2%","4%","6%","8%","10%","12%"))
lines(c(250:2500),pfunc.1(c(250:2500)),type="l",cex.lab=1.5,lwd=2,col=3,lty=2)
lines(c(250:2500),pfunc.2(c(250:2500)),type="l",cex.lab=1.5,lwd=2,col=4,lty=3)
lines(c(250:2500),pfunc.3(c(250:2500)),type="l",cex.lab=1.5,lwd=2,col=5,lty=4)
lines(c(250:2500),pfunc.4(c(250:2500)),type="l",cex.lab=1.5,lwd=2,col=6,lty=5)
points(426,pfunc(426),cex=2.5,pch=19)
points(1426,pfunc(1426),cex=2.5,pch=17,col="red")
points(1426,pfunc.2(1426),cex=2.5,pch=18,col="blue")
abline(h=0.05,col="red",lwd=2,lty=2)
legend("left",c("current situation","+1000 patients / +no event","+1000 patients /+2 events"),col=c("black","red","blue"),pch=c(19,17,18),cex=1.1,bty="n")
legend("topleft",c("4 or more events","5 or more events","6 or more events","7 or more events","8 or more events"),col=c(1,3:6),lty=1:5,bty="n")
# binom.test
# One-sided exact binomial test: 4 events out of 426 (= 4 + 422) against the
# null event rate of 0.1%.
binom.test(c(4,422),p=0.001,alternative="greater")
# The original repeated the same function seven times; build the family from
# a shared factory instead. pfunc2(xv) returns, for each count of non-events
# in xv, the lower one-sided 95% confidence limit of the event rate given
# `events` observed events (pfunc2 -> 4 events, pfunc2.1 -> 5, ...,
# pfunc2.6 -> 10 events).
.lower_conf_limit <- function(events) {
  function(xv) {
    vapply(xv, function(nonevents) {
      binom.test(c(events, nonevents), p = 0.001,
                 alternative = "greater")$conf.int[1]
    }, numeric(1))
  }
}
pfunc2   <- .lower_conf_limit(4)
pfunc2.1 <- .lower_conf_limit(5)
pfunc2.2 <- .lower_conf_limit(6)
pfunc2.3 <- .lower_conf_limit(7)
pfunc2.4 <- .lower_conf_limit(8)
pfunc2.5 <- .lower_conf_limit(9)
pfunc2.6 <- .lower_conf_limit(10)
# Lower one-sided 95% confidence limit of the event rate given 4 observed
# events, as the number of event-free patients grows. The x axis is the
# total n (= events + non-events), hence the offset between 400:4004 and
# the 396:4000 passed to pfunc2.
plot(c(400:4004),pfunc2(c(396:4000)),type="l",xlab="number of patients",ylab="lower conf. limit",cex.lab=1.5,lwd=2)
title(main = "Lower confidence limit \n depending on patients number")
# Reference line at the assumed background rate of 0.1%.
abline(h=0.001,col="red",lwd=2)
#
plot(c(400:4004),pfunc2(c(396:4000)),type="l",xlab="number of patients",ylab="lower conf. limit",cex.lab=1.5,lwd=2)
lines(c(400:4004),pfunc2.1(c(396:4000)),type="l",cex.lab=1.5,lwd=2,col=3,lty=2)
lines(c(400:4004),pfunc2.2(c(396:4000)),type="l",cex.lab=1.5,lwd=2,col=4,lty=3)
lines(c(400:4004),pfunc2.3(c(396:4000)),type="l",cex.lab=1.5,lwd=2,col=5,lty=4)
lines(c(400:4004),pfunc2.4(c(396:4000)),type="l",cex.lab=1.5,lwd=2,col=6,lty=5)
lines(c(400:4004),pfunc2.5(c(396:4000)),type="l",cex.lab=1.5,lwd=2,col=7,lty=6)
lines(c(400:4004),pfunc2.6(c(396:4000)),type="l",cex.lab=1.5,lwd=2,col=8,lty=7)
title(main = "Lower 95% confidence limit \n depending on number of patients and events")
abline(h=0.001,col="red",lwd=2)
legend("topright",c("4 events","5 events","6 events","7 events","8 events","9 events","10 events"),col=c(1,3:8),lty=1:7)
# FIGURE FROM PAPER
# Annotated scenarios: filled circle = current data (4 events / 422
# non-events); red triangle = +1000 women with no new events; blue diamond =
# +1000 women with 2 further events (6 events total, hence pfunc2.2).
plot(c(250:2504),pfunc2(c(246:2500)),type="l",xlab="number of pregnant women",ylab="lower 95% confidence limit",cex.lab=1.5,lwd=2,axes=F,ylim=c(0,0.007))
lines(c(250:2504),pfunc2.1(c(246:2500)),type="l",cex.lab=1.5,lwd=2,col=3,lty=2)
lines(c(250:2504),pfunc2.2(c(246:2500)),type="l",cex.lab=1.5,lwd=2,col=4,lty=3)
lines(c(250:2504),pfunc2.3(c(246:2500)),type="l",cex.lab=1.5,lwd=2,col=5,lty=4)
lines(c(250:2504),pfunc2.4(c(246:2500)),type="l",cex.lab=1.5,lwd=2,col=6,lty=5)
abline(h=0.001,col="red",lwd=2)
legend(2000,0.0045,c("4 events","5 events","6 events","7 events","8 events"),col=c(1,3:6),lty=1:5,lwd=2)
axis(side = 1, at = c(250,426,1000,1426,2000,2500))
axis(side = 2, at = c(0,0.001,0.002,0.003,0.004,0.005,0.006,0.007),labels=c("","0.1%","0.2%","0.3%","0.4%","0.5%","0.6%","0.7%"))
points(426,pfunc2(422),cex=2.5,pch=19)
points(1426,pfunc2(1422),cex=2.5,pch=17,col="red")
points(1426,pfunc2.2(1422),cex=2.5,pch=18,col="blue")
legend("topright",c("current situation","+1000 patients / +no event","+1000 patients /+2 events"),col=c("black","red","blue"),pch=c(19,17,18),cex=1.1,bty="n")
|
#
# KnockoutFigure5.R
#
#
#	Automatically handling the control, one-side and two-side I-cell knockout variations.
#
# Clear the workspace.
# NOTE(review): rm(list = ls()) wipes the caller's workspace when this file
# is source()d interactively — acceptable only as a standalone script.
rm(list = ls());
# Load up some libraries and helper functions.
# The plotting helpers used below (ShowTopoMap1, GenOutline1X, ...) are
# defined in this file.
source ( "NMHelperFunctions.R" );
#
# Additional specialized helper functions.
#
#
# Standardized plots.
#
PlotKnockoutRFMap = function ( ref, irefIter, exp, iexpIter, refTitleText, expTitleText,
iTrim, iFilter.E, iFilter.I, iRowList, iColList, x, y, tiffFlag=FALSE, iKnockOutLength=0, iExpFlag=0 ) {
# Set up the parameters so that the "experimental zone" will be outlined on subsequent plots.
y[1] = y[1] - 0.5; y[2] = y[2] + 0.5;
x[1] = x[1] - 0.5; x[2] = x[2] + 0.5;
tmp.x = rbind(c(x[1], x[2]), c(x[1], x[2]), c(x[1], x[1]), c(x[2], x[2]));
tmp.y = rbind(c(y[1], y[1]), c(y[2], y[2]), c(y[1], y[2]), c(y[1], y[2]));
xLabText = "Distal -> Proximal"; yLabText = "Digit 1 -> Digit 3";
N = as.integer ( sqrt ( dim(base$r1.i.rfMap)[1] ) );
boundaryMarks = c ( as.integer(N/3)+0.5, as.integer(N/3)*2+0.5 );
iRFTrackStepSize = 2;
#
# RF Centroids
#
if ( tiffFlag ) {
tiff ( paste("Knock", "Centroids", iKnockOutLength, iExpFlag, "tiff", sep="."), compression="lzw", units="in", width=7.0, height=7.0, res=300);
} else {
x11();
} # if ( tiffFlag )
par ( mfcol=c(2,2) );
ShowTopoMap1 ( ref$rfTrackData.e[[length(ref$rfTrackData.e)]], paste(paste("E-Type","Iter",irefIter,sep=" "),refTitleText, sep=""), FALSE, 0.5, 0 );
GenOutline1X ( tmp.x, tmp.y, 2, 1, 2 );
ShowTopoMap1 ( ref$rfTrackData.i[[length(ref$rfTrackData.i )]], paste(paste("I-Type","Iter",irefIter,sep=" "),refTitleText, sep=""), FALSE, 0.5, 0 );
GenOutline1X ( tmp.x, tmp.y, 2, 1, 2 );
ShowTopoMap1 ( exp$rfTrackData.e[[length(exp$rfTrackData.e)]], paste(paste("E-Type","Iter",iexpIter,sep=" "),expTitleText, sep=""), FALSE, 0.5, 0 );
GenOutline1X ( tmp.x, tmp.y, 2, 1, 2 );
ShowTopoMap1 ( exp$rfTrackData.i[[length(exp$rfTrackData.i)]], paste(paste("I-Type","Iter",iexpIter,sep=" "),expTitleText, sep=""), FALSE, 0.5, 0 );
GenOutline1X ( tmp.x, tmp.y, 2, 1, 2 );
if ( tiffFlag ) {
dev.off();
} # if ( tiffFlag )
#
# RF Tracks
#
if ( tiffFlag ) {
tiff ( paste("Knock", "Tracks", iKnockOutLength, iExpFlag, "tiff", sep="."), compression="lzw", units="in", width=7.0, height=7.0, res=300);
} else {
x11();
} # if ( tiffFlag )
par ( mfcol=c(2,2) );
ShowThreeDigitRFTrackFlex ( ref$rfTrackData.e[[length(ref$rfTrackData.e)]], paste(paste("E-Type","Iter",irefIter,sep=" "),refTitleText, sep=""),
TRUE, 0.5, 0, 1, iRowList, iColList, iRFTrackStepSize );
GenOutline1X ( tmp.x, tmp.y, 2, 1, 2 );
ShowThreeDigitRFTrackFlex ( ref$rfTrackData.i[[length(ref$rfTrackData.i)]], paste(paste("I-Type","Iter",irefIter,sep=" "),refTitleText, sep=""),
TRUE, 0.5, 0, 1, iRowList, iColList, iRFTrackStepSize );
GenOutline1X ( tmp.x, tmp.y, 2, 1, 2 );
ShowThreeDigitRFTrackFlex ( exp$rfTrackData.e[[length(exp$rfTrackData.e)]], paste(paste("E-Type","Iter",iexpIter,sep=" "),expTitleText, sep=""),
TRUE, 0.5, 0, 1, iRowList, iColList, iRFTrackStepSize );
GenOutline1X ( tmp.x, tmp.y, 2, 1, 2 );
ShowThreeDigitRFTrackFlex ( exp$rfTrackData.i[[length(exp$rfTrackData.i)]], paste(paste("I-Type","Iter",iexpIter,sep=" "),expTitleText, sep=""),
TRUE, 0.5, 0, 1, iRowList, iColList, iRFTrackStepSize );
GenOutline1X ( tmp.x, tmp.y, 2, 1, 2 );
if ( tiffFlag ) {
dev.off();
} # if ( tiffFlag )
#
# RF Translocation
#
if ( tiffFlag ) {
tiff ( paste("Knock", "Position_Size", iKnockOutLength, iExpFlag, "tiff", sep="."), compression="lzw", units="in", width=7.0, height=7.0, res=300);
} else {
x11();
} # if ( tiffFlag )
par ( mfcol=c(2,2) );
centroidDelta.e = RFCentroidDelta ( exp$rfTrackData.e[[length(exp$rfTrackData.e)]], ref$rfTrackData.e[[length(ref$rfTrackData.e)]]);
centroidDelta.i = RFCentroidDelta ( exp$rfTrackData.i[[length(exp$rfTrackData.i)]], ref$rfTrackData.i[[length(ref$rfTrackData.i)]]);
centroidDelta.e[c(iTrim,iFilter.E)] = 0;
centroidDelta.i[c(iTrim,iFilter.I)] = 0;
#zmin = min ( centroidDelta.e, centroidDelta.i ); zmax = max ( centroidDelta.e, centroidDelta.i );
ShowVecAsMap2 ( centroidDelta.e, paste(paste("E-Type RF Centroid Shift","Iter",irefIter,sep=" "),expTitleText, sep=""), xLabText, yLabText, min(centroidDelta.e), max(centroidDelta.e) );
abline ( h = boundaryMarks, lty=3, col=3 );
GenOutline1X ( tmp.x, tmp.y, "white", 1, 0.5 );
ShowVecAsMap2 ( centroidDelta.i, paste(paste("I-Type RF Centroid Shift","Iter",irefIter,sep=" "),expTitleText, sep=""), xLabText, yLabText, min(centroidDelta.i), max(centroidDelta.i) );
abline ( h = boundaryMarks, lty=3, col=3 );
GenOutline1X ( tmp.x, tmp.y, "white", 1, 0.5 );
#
# RF Size Change
#
sizeDelta.e = ( QuantRFSize ( exp$r1.e.rfMap, kRFPeakToEdgeDetect ) / QuantRFSize ( ref$r1.e.rfMap, kRFPeakToEdgeDetect ) ) - 1.0;
sizeDelta.i = ( QuantRFSize ( exp$r1.i.rfMap, kRFPeakToEdgeDetect ) / QuantRFSize ( ref$r1.i.rfMap, kRFPeakToEdgeDetect ) ) - 1.0;
sizeDelta.e[c(iTrim,iFilter.E)] = 0;
sizeDelta.i[c(iTrim,iFilter.I)] = 0;
ShowVecAsMap2 ( sizeDelta.e, paste(paste("E-Type % RF Size Change","Iter",irefIter,sep=" "),expTitleText, sep=""), xLabText, yLabText, min(sizeDelta.e), max(sizeDelta.e) );
abline ( h = boundaryMarks, lty=3, col=3 );
GenOutline1X ( tmp.x, tmp.y, "white", 1, 0.5 );
ShowVecAsMap2 ( sizeDelta.i, paste(paste("I-Type % RF Size Change","Iter",irefIter,sep=" "),expTitleText, sep=""), xLabText, yLabText, min(sizeDelta.i), max(sizeDelta.i) );
abline ( h = boundaryMarks, lty=3, col=3 );
GenOutline1X ( tmp.x, tmp.y, "white", 1, 0.5 );
if ( tiffFlag ) {
dev.off();
} # if ( tiffFlag )
#
# RF Max Magnitude Response
#
if ( tiffFlag ) {
tiff ( paste("Knock", "MaxResp", iKnockOutLength, iExpFlag, "tiff", sep="."), compression="lzw", units="in", width=7.0, height=7.0, res=300);
} else {
x11();
} # if ( tiffFlag )
par ( mfcol=c(2,2) );
ref.maxresp.e = apply ( ref$r1.e.rfMap, 1, max );
ref.maxresp.i = apply ( ref$r1.i.rfMap, 1, max );
exp.maxresp.e = apply ( exp$r1.e.rfMap, 1, max );
exp.maxresp.i = apply ( exp$r1.i.rfMap, 1, max );
delta.maxresp.e = ( exp.maxresp.e / ref.maxresp.e ) - 1.0;
delta.maxresp.i = ( exp.maxresp.i / ref.maxresp.i ) - 1.0;
ref.maxresp.e[c(iTrim,iFilter.E)] = 0;
ref.maxresp.i[c(iTrim,iFilter.I)] = 0;
exp.maxresp.e[c(iTrim,iFilter.E)] = 0;
exp.maxresp.i[c(iTrim,iFilter.I)] = 0;
delta.maxresp.e[c(iTrim,iFilter.E)] = 0;
delta.maxresp.i[c(iTrim,iFilter.I)] = 0;
ShowVecAsMap2 ( exp.maxresp.e, paste(paste("E-Type RF Mag Resp","Iter",irefIter,sep=" "),expTitleText, sep=""), xLabText, yLabText,
min(exp.maxresp.e), max(exp.maxresp.e) );
abline ( h = boundaryMarks, lty=3, col=3 );
GenOutline1X ( tmp.x, tmp.y, "white", 1, 0.5 );
ShowVecAsMap2 ( exp.maxresp.i, paste(paste("I-Type RF Mag Resp","Iter",irefIter,sep=" "),expTitleText, sep=""), xLabText, yLabText,
min(exp.maxresp.i), max(exp.maxresp.i) );
abline ( h = boundaryMarks, lty=3, col=3 );
GenOutline1X ( tmp.x, tmp.y, "white", 1, 0.5 );
ShowVecAsMap2 ( delta.maxresp.e, paste(paste("E-Type % RF Mag Resp Diff","Iter",irefIter,sep=" "),expTitleText, sep=""), xLabText, yLabText,
min(delta.maxresp.e), max(delta.maxresp.e) );
abline ( h = boundaryMarks, lty=3, col=3 );
GenOutline1X ( tmp.x, tmp.y, "white", 1, 0.5 );
ShowVecAsMap2 ( delta.maxresp.i, paste(paste("I-Type % RF Mag Resp Diff","Iter",irefIter,sep=" "),expTitleText, sep=""), xLabText, yLabText,
min(delta.maxresp.i), max(delta.maxresp.i) );
abline ( h = boundaryMarks, lty=3, col=3 );
GenOutline1X ( tmp.x, tmp.y, "white", 1, 0.5 );
if ( tiffFlag ) {
dev.off();
} # if ( tiffFlag )
#
# Intracolumnar RF Centroid Divergence
#
if ( tiffFlag ) {
tiff ( paste("Knock", "Divergence", iKnockOutLength, iExpFlag, "tiff", sep="."), compression="lzw", units="in", width=7.0, height=7.0, res=300);
} else {
x11();
} # if ( tiffFlag )
par ( mfcol=c(2,2) );
centroidDelta.ref = RFCentroidDelta ( ref$rfTrackData.e[[length(ref$rfTrackData.e)]], ref$rfTrackData.i[[length(ref$rfTrackData.i)]]);
centroidDelta.exp = RFCentroidDelta ( exp$rfTrackData.e[[length(exp$rfTrackData.e)]], exp$rfTrackData.i[[length(exp$rfTrackData.i)]]);
centroidDelta.ref[c(iTrim,iFilter.E,iFilter.I)] = 0;
centroidDelta.exp[c(iTrim,iFilter.I,iFilter.E)] = 0;
ShowVecAsMap2 ( centroidDelta.ref, paste(paste("Baseline RF Divergence","Iter",irefIter,sep=" "),expTitleText, sep=""), xLabText, yLabText, min(centroidDelta.ref), max(centroidDelta.ref) );
abline ( h = boundaryMarks, lty=3, col=3 );
GenOutline1X ( tmp.x, tmp.y, "white", 1, 0.5 );
ShowVecAsMap2 ( centroidDelta.exp, paste(paste("Knockout RF Divergence","Iter",irefIter,sep=" "),expTitleText, sep=""), xLabText, yLabText, min(centroidDelta.exp), max(centroidDelta.exp) );
abline ( h = boundaryMarks, lty=3, col=3 );
GenOutline1X ( tmp.x, tmp.y, "white", 1, 0.5 );
centroidDelta.ref[ centroidDelta.ref > 20 ] = 0.0;
ShowVecAsMap2 ( centroidDelta.ref, paste(paste("Baseline RF Divergence","Iter",irefIter,sep=" "),expTitleText, sep=""), xLabText, yLabText, min(centroidDelta.ref), max(centroidDelta.ref) );
abline ( h = boundaryMarks, lty=3, col=3 );
GenOutline1X ( tmp.x, tmp.y, "white", 1, 0.5 );
centroidDelta.exp[ centroidDelta.exp > 20 ] = 0.0;
ShowVecAsMap2 ( centroidDelta.exp, paste(paste("Knockout RF Divergence","Iter",irefIter,sep=" "),expTitleText, sep=""), xLabText, yLabText, min(centroidDelta.exp), max(centroidDelta.exp) );
abline ( h = boundaryMarks, lty=3, col=3 );
GenOutline1X ( tmp.x, tmp.y, "white", 1, 0.5 );
if ( tiffFlag ) {
dev.off();
} # if ( tiffFlag )
return ( list ( centroidDelta.e = centroidDelta.e, centroidDelta.i = centroidDelta.i, sizeDelta.e = sizeDelta.e, sizeDelta.i = sizeDelta.i ) );
} # PlotKnockoutRFMap ( ref, exp, refTitleText, expTitleText, iTrim, iFilter.E, iFilter.I, iColList ) {
RFFlyOver = function ( ref, exp, N, iRowParm, iColParm, rfExpZone.x, rfExpZone.y ) {
  # Animated "fly over" of receptive field (RF) maps.
  #
  # For each cortical column in the given row/column ranges, plot the reference
  # and experimental (knockout) I-type and E-type RF maps interleaved into a
  # single 2N x 2N panel, and record each frame into an HTML animation via
  # animation::saveHTML.  All frames share one common log10 color scale.
  #
  # Args:
  #   ref, exp:      objects carrying $r1.i.rfMap / $r1.e.rfMap matrices
  #                  (one row per cortical cell).
  #   N:             linear dimension of the N x N cortical sheet.
  #   iRowParm:      c(start, end, step) rows to visit.
  #   iColParm:      c(start, end, step) columns to visit.
  #   rfExpZone.x/y: extent of the manipulated zone, outlined on every frame
  #                  by GenOutline4X.
  #
  # Returns: nothing; called for its graphical side effects.
  iRowParm = as.integer ( iRowParm );
  iColParm = as.integer ( iColParm );
  numColors = 128;
  xLabText = "Distal -> Proximal"; yLabText = "Digit 1 -> Digit 3";
  graphicalPanelMarks = ( N + 0.5 );
  # Dashed guide lines at the digit boundaries of each N-wide panel.
  digitBorderMarks = c((N/3)+0.5, (2*N/3)+0.5);
  digitBorderMarks = c ( digitBorderMarks, N+digitBorderMarks );
  #
  # Pass 1: global response range over all cells to be shown, so every
  # animation frame uses the same (log10) color scale.  The cell list is built
  # once and the min/max is taken vectorized, replacing the original
  # element-by-element running min/max.
  #
  cellGrid = expand.grid ( iRow = seq ( iRowParm[1], iRowParm[2], iRowParm[3] ),
                           iCol = seq ( iColParm[1], iColParm[2], iColParm[3] ) );
  iCellList = mapply ( GetLin, cellGrid$iRow, cellGrid$iCol, MoreArgs = list ( N ) );
  zlim = log10 ( range ( c ( ref$r1.i.rfMap[iCellList, ], ref$r1.e.rfMap[iCellList, ],
                             exp$r1.i.rfMap[iCellList, ], exp$r1.e.rfMap[iCellList, ] ) ) );
  #
  # Pass 2: render one frame per cortical column.
  #
  x11();
  saveHTML ( {
    for ( iCol in seq ( iColParm[1], iColParm[2], iColParm[3] ) ) {
      for ( iRow in seq ( iRowParm[1], iRowParm[2], iRowParm[3] ) ) {
        iCell = GetLin ( iRow, iCol, N );
        # Interleave the four RF maps into one 2N x 2N display matrix (log scale).
        z = log10 ( InterleaveForDisplay ( ref$r1.i.rfMap[iCell, ], ref$r1.e.rfMap[iCell, ],
                                           exp$r1.i.rfMap[iCell, ], exp$r1.e.rfMap[iCell, ] ) );
        titleText = paste ( "Cortical Column # ", iCell, sep="" );
        image.plot( c(1:(2*N)), c(1:(2*N)), matrix ( (z), nrow=2*N, ncol=2*N, byrow=FALSE ),
                    col=rainbow(numColors, s = 1.0, v = 1.0, start = 0.67, end = 1.0, alpha = 1 ),
                    zlim = zlim, main=titleText, xlab=xLabText, ylab=yLabText );
        abline(h=digitBorderMarks, col=3, lty=3, lwd=0.5 );                           # digit boundaries
        abline(h=graphicalPanelMarks, v=graphicalPanelMarks, col=1, lty=1, lwd=2 );   # panel separators
        GenOutline4X( N, rfExpZone.x, rfExpZone.y, TRUE );                            # outline the manipulated zone
      } # for ( iRow in seq ( iRowParm[1], iRowParm[2], iRowParm[3] ) ) {
    } # for ( iCol in seq ( iColParm[1], iColParm[2], iColParm[3] ) ) {
  }, interval = 1 ); # saveHTML (animation package)
} # RFFlyOver = function ( ref, exp, ... ) {
#
#
# Animated "fly over" of RF maps with sub-threshold responses zeroed out.
# Like RFFlyOver, but each RF is masked by QuantRFSizeA at the given
# peak-to-edge detection threshold before display, and the color scale (zlim)
# is recomputed per frame rather than shared across frames (unlike RFFlyOver).
# Called for its graphical side effects (x11 window + animation::saveHTML).
RFFlyOverA = function ( ref, exp, N, iRowParm, iColParm, rfExpZone.x, rfExpZone.y, kRFPeakToEdgeDetect ) {
iRowParm = as.integer ( iRowParm );
iColParm = as.integer ( iColParm );
numColors = 128;
xLabText = "Distal -> Proximal"; yLabText = "Digit 1 -> Digit 3";
graphicalPanelMarks = ( N + 0.5 );
# Dashed guide lines at the digit boundaries of each N-wide panel.
digitBorderMarks = c((N/3)+0.5, (2*N/3)+0.5);
digitBorderMarks = c ( digitBorderMarks, N+digitBorderMarks );
x11();
saveHTML ( {
# NOTE: outer loop is iRow here, whereas RFFlyOver iterates iCol outermost.
for ( iRow in seq ( iRowParm[1], iRowParm[2], iRowParm[3] ) ) {
for ( iCol in seq ( iColParm[1], iColParm[2], iColParm[3] ) ) {
iCell = GetLin ( iRow, iCol, N );
# Zero out entries flagged by QuantRFSizeA (presumably indices below the
# peak-to-edge threshold — TODO confirm against QuantRFSizeA's contract).
tmp1 = QuantRFSizeA ( ref$r1.i.rfMap, kRFPeakToEdgeDetect );
t.ref.r1.i = ref$r1.i.rfMap[iCell, ];
t.ref.r1.i[tmp1] = 0.0;
tmp1 = QuantRFSizeA ( ref$r1.e.rfMap, kRFPeakToEdgeDetect );
t.ref.r1.e = ref$r1.e.rfMap[iCell, ];
t.ref.r1.e[tmp1] = 0.0;
# NOTE(review): the masks for the experimental maps below are computed from
# the REFERENCE maps (ref$...), not exp$... — confirm whether this is
# intentional (mask exp RFs by the baseline RF extent) or a copy-paste slip.
tmp1 = QuantRFSizeA ( ref$r1.i.rfMap, kRFPeakToEdgeDetect );
t.exp.r1.i = exp$r1.i.rfMap[iCell, ];
t.exp.r1.i[tmp1] = 0.0;
tmp1 = QuantRFSizeA ( ref$r1.e.rfMap, kRFPeakToEdgeDetect );
t.exp.r1.e = exp$r1.e.rfMap[iCell, ];
t.exp.r1.e[tmp1] = 0.0;
# Interleave the four masked RFs into a single 2N x 2N display matrix.
z = InterleaveForDisplay ( t.ref.r1.i, t.ref.r1.e, t.exp.r1.i, t.exp.r1.e );
# Per-frame color scale (linear, not log10 as in RFFlyOver).
zlim = c ( min(z), max(z) );
titleText = paste ( "Cortical Column # ", iCell, sep="" );
image.plot( c(1:(2*N)), c(1:(2*N)), matrix ( z, nrow=2*N, ncol=2*N, byrow=FALSE ),
col=rainbow(numColors, s = 1.0, v = 1.0, start = 0.67, end = 1.0, alpha = 1 ),
zlim = zlim, main=titleText, xlab=xLabText, ylab=yLabText );
abline(h=digitBorderMarks, col=3, lty=3, lwd=0.5 );
abline(h=graphicalPanelMarks, v=graphicalPanelMarks, col=1, lty=1, lwd=2 );
GenOutline4X( N, rfExpZone.x, rfExpZone.y, TRUE );
} # for ( iCol in seq ( iColStart, iColEnd, iColStep ) {
} # for ( iRow in seq ( iRowStart, iRowEnd, iRowStep ) {
}, interval = 1 ); # saveHTML (animation package)
} # RFFlyOverA = function ( ref, exp, ... ) {
#
# MAIN
#
#
# Global constants.
#
# Cortical sheet is N x N cells.
N = 75;
N2 = N * N;
# Peak-to-edge threshold used by the RF size/extent quantification routines.
kRFPeakToEdgeDetect = 0.5;
iTrim = EdgeTrimCellList ( seq ( 1, N2 ), N, 2 ); # Trim the outer-most edges.
# Row positions (in plot coordinates) of the digit 1/2 and 2/3 boundaries.
boundaryMarks = c ( as.integer(N/3)+0.5, as.integer(N/3)*2+0.5 );
iRFTrackStepSize = 3;
# Feature toggles: generate fly-over animations / write TIFF files instead of x11.
makeMovie = FALSE;
tiffFlag = FALSE;
#
# Detailed parameters describing the knockouts.
#
# iBase doubles as the baseline iteration count passed to GetRFMapData2A;
# iKnockLength is the along-row extent (number of cells) of each knockout.
iBase = iKnockLength = 8;
#
#
#
# Column offset of the knockout zone (centered on the sheet, shifted left by 4).
iKnockOffset = round( ( N / 2 ), 0 ) - 4;
# Two longitudinal control tracks well away from the manipulated zone.
controlTrack = c ( round( ( 2 * N / 3), 0 ), N + 1 - round( N / 6, 0 ) );
# Column extent of the manipulated ("experimental") zone.
rfExpZone.x = c ( iKnockOffset + 1, iKnockOffset + iKnockLength );
# Row range (start, end, step) swept by the RF fly-over animations.
rowParmsRFFlyOver = c ( iKnockOffset - 3, iKnockOffset + iKnockLength + 3, 1 );
iRowList = seq ( rfExpZone.x[1]-2, rfExpZone.x[2]+2, 2 );
# Set the directory where the experimental data is sitting.
# NOTE(review): fDir is assigned twice; only the final (T.75.7) assignment
# takes effect.  This looks like a manual toggle between data sets.
fDir = paste ( "G:/NMLab/Working/S.45.7.Lesion.", iKnockLength, "/", sep="" );
#fDir = paste ( "F:/NMLab/Working/S.45.7.Lesion.x0.4.", iKnockLength, "/", sep="" );
fDir = paste ( "F:/NMLab/Working/T.75.7.Lesion.x0.2.", iKnockLength, "/", sep="" );
#
# Get the Random Initial RF Map.
#
# "rnet" = the network state at iteration 0 (random initial condition).
fRoot = "Base.RFMap";
fileRootName = paste(fDir, fRoot, sep="\\");
rnet = GetRFMapData2A ( fileRootName, 15, 0, 0, 15, kRFPeakToEdgeDetect, N2 );
#
# Get the Baseline Refinement RF Map.
#
# "base" = the fully refined baseline map (iteration 15); this is the
# reference against which every knockout condition below is compared.
fRoot = "Base.RFMap";
fileRootName = paste(fDir, fRoot, sep="\\");
base = GetRFMapData2A ( fileRootName, 15, 15, 15, 15, kRFPeakToEdgeDetect, N2 );
##############################################################
##############################################################
#
# 0. Get the Placebo
#
##############################################################
##############################################################
# Placebo control: same protocol as the knockouts, but no cells silenced,
# so no iFilter.E / iFilter.I masks are needed (NULL, NULL below).
fRoot = "BorderKnockout_Control_Placebo.RFMap";
fileRootName = paste(fDir, fRoot, sep="\\");
iStart = 0; iEnd = 0; iStepSize = 1; iExpFlag = 0;
placebo = GetRFMapData2A ( fileRootName, iBase, iStart, iEnd, iStepSize, kRFPeakToEdgeDetect, N2 );
# Set up and do the basic RF Map plots
iColList = c ( controlTrack, as.integer ( N / 6 - 1 ), as.integer ( N / 6 + 2 ) );
rfExpZone.y = c ( round((N/6),0), round((N/6),0)+1 );
rfstats.placebo = PlotKnockoutRFMap ( base, 15, placebo, 15, "\nBaseline Refinement", "\nControl Placebo", iTrim,
NULL, NULL, iRowList, iColList, rfExpZone.x, rfExpZone.y, tiffFlag, iKnockLength, iExpFlag );
# Set up and do the RF "fly over"
if ( makeMovie ) {
colParmsRFFlyOver = c ( as.integer(N/6)-3, as.integer(N/6)+4, 1 );
RFFlyOver ( base, placebo, N, rowParmsRFFlyOver, colParmsRFFlyOver, rfExpZone.x, rfExpZone.y );
} # if ( makeMovie )
# Interactive summaries of the placebo centroid-shift and size-change stats.
summary ( rfstats.placebo$centroidDelta.e )
summary ( rfstats.placebo$centroidDelta.i )
summary ( rfstats.placebo$sizeDelta.e )
summary ( rfstats.placebo$sizeDelta.i )
##############################################################
##############################################################
#
# 1. Get the Knockout RF Map - CONTROL I Only.
#
##############################################################
##############################################################
# Control-zone knockout of I-type cells only (away from a digit boundary).
fRoot = "BorderKnockout_Control_I.RFMap";
fileRootName = paste(fDir, fRoot, sep="\\");
iStart = 1; iEnd = 1; iStepSize = 1; iExpFlag = 1;
test.ctl.I = GetRFMapData2A ( fileRootName, iBase, iStart, iEnd, iStepSize, kRFPeakToEdgeDetect, N2 );
# Set up and do the basic RF Map plots
rfExpZone.y = rfExpZone.y.ctl = c ( round((N/6),0), round((N/6),0)+1 ); # Partially describes the zone that was manipulated.
iColList = c ( controlTrack, as.integer ( N / 6 - 1 ), as.integer ( N / 6 + 2 ) );
# Linear index of the first knocked-out cell (row N/6, offset iKnockOffset).
itmp = as.integer ( N/6 ) * N + 1 + iKnockOffset;
iFilter.E = NULL;
# Two adjacent rows of iKnockLength silenced I cells; these indices are zeroed
# in the difference maps so the knocked-out cells don't dominate the stats.
iFilter.I = c ( seq ( itmp, itmp + iKnockLength - 1 ), seq ( itmp + N, itmp + N + iKnockLength - 1 ) );
rfstats.test.ctl.I = PlotKnockoutRFMap ( base, 15, test.ctl.I, 15, "\nBaseline Refinement", "\nControl I Only", iTrim,
iFilter.E, iFilter.I, iRowList, iColList, rfExpZone.x, rfExpZone.y, tiffFlag, iKnockLength, iExpFlag );
# Set up and do the RF "fly over"
if ( makeMovie ) {
colParmsRFFlyOver = c ( as.integer(N/6)-2, as.integer(N/3) + 1, 1 ); # Partially describes the fly-over zone.
RFFlyOver ( base, test.ctl.I, N, rowParmsRFFlyOver, colParmsRFFlyOver, rfExpZone.x, rfExpZone.y );
} # if ( makeMovie )
# Interactive summaries for this condition.
summary ( rfstats.test.ctl.I$centroidDelta.e )
summary ( rfstats.test.ctl.I$centroidDelta.i )
summary ( rfstats.test.ctl.I$sizeDelta.e )
summary ( rfstats.test.ctl.I$sizeDelta.i )
##############################################################
##############################################################
#
# 2. Get the Knockout RF Map - CONTROL E Only.
#
##############################################################
##############################################################
# Control-zone knockout of E-type cells only (same zone as section 1).
fRoot = "BorderKnockout_Control_E.RFMap";
fileRootName = paste(fDir, fRoot, sep="\\");
iStart = 2; iEnd = 2; iStepSize = 1; iExpFlag = 2;
test.ctl.E = GetRFMapData2A ( fileRootName, iBase, iStart, iEnd, iStepSize, kRFPeakToEdgeDetect, N2 );
# Set up and do the basic RF Map plots
rfExpZone.y = c ( round((N/6),0), round((N/6),0)+1 ); # Partially describes the zone that was manipulated.
iColList = c ( controlTrack, as.integer ( N / 6 - 1 ), as.integer ( N / 6 + 2 ) );
iFilter.I = NULL;
# Linear index of the first knocked-out cell; mask covers two adjacent rows.
itmp = as.integer ( N/6 ) * N + 1 + iKnockOffset;
iFilter.E = c ( seq ( itmp, itmp + iKnockLength - 1 ), seq ( itmp + N, itmp + N + iKnockLength - 1 ) );
rfstats.test.ctl.E = PlotKnockoutRFMap ( base, 15, test.ctl.E, 15, "\nBaseline Refinement", "\nControl E Only", iTrim,
iFilter.E, iFilter.I, iRowList, iColList, rfExpZone.x, rfExpZone.y, tiffFlag, iKnockLength, iExpFlag );
# Set up and do the RF "fly over"
if ( makeMovie ) {
colParmsRFFlyOver = c ( as.integer(N/6)-2, as.integer(N/3) + 1, 1 ); # Partially describes the fly-over zone.
RFFlyOver ( base, test.ctl.E, N, rowParmsRFFlyOver, colParmsRFFlyOver, rfExpZone.x, rfExpZone.y );
} # if ( makeMovie )
##############################################################
##############################################################
#
# 3. Get the Knockout RF Map - CONTROL E and I.
#
##############################################################
##############################################################
# Control-zone knockout of both E- and I-type cells (same zone as sections 1-2).
fRoot = "BorderKnockout_Control_EI.RFMap";
fileRootName = paste(fDir, fRoot, sep="\\");
iStart = 3; iEnd = 3; iStepSize = 1; iExpFlag = 3;
test.ctl.EI = GetRFMapData2A ( fileRootName, iBase, iStart, iEnd, iStepSize, kRFPeakToEdgeDetect, N2 );
# Set up and do the basic RF Map plots
rfExpZone.y = c ( round((N/6),0), round((N/6),0)+1 ); # Partially describes the zone that was manipulated.
iColList = c ( controlTrack, as.integer ( N / 6 - 1 ), as.integer ( N / 6 + 2 ) );
# Same two-row mask applied to both cell types.
itmp = as.integer ( N/6 ) * N + 1 + iKnockOffset;
iFilter.E = c ( seq ( itmp, itmp + iKnockLength - 1 ), seq ( itmp + N, itmp + N + iKnockLength - 1 ) );
iFilter.I = c ( seq ( itmp, itmp + iKnockLength - 1 ), seq ( itmp + N, itmp + N + iKnockLength - 1 ) );
rfstats.test.ctl.EI = PlotKnockoutRFMap ( base, 15, test.ctl.EI, 15, "\nBaseline Refinement", "\nControl E and I", iTrim,
iFilter.E, iFilter.I, iRowList, iColList, rfExpZone.x, rfExpZone.y, tiffFlag, iKnockLength, iExpFlag );
# Set up and do the RF "fly over"
if ( makeMovie ) {
colParmsRFFlyOver = c ( as.integer(N/6)-2, as.integer(N/3) + 1, 1 ); # Partially describes the fly-over zone.
RFFlyOver ( base, test.ctl.EI, N, rowParmsRFFlyOver, colParmsRFFlyOver, rfExpZone.x, rfExpZone.y );
} # if ( makeMovie )
##############################################################
##############################################################
#
# 4. Get the Knockout RF Map - ONE-SIDE I.
#
##############################################################
##############################################################
# One-sided boundary knockout: I cells silenced on one side of the
# digit 1 / digit 2 boundary (rows just below N/3).
fRoot = "BorderKnockout_OneSide_I.RFMap";
fileRootName = paste(fDir, fRoot, sep="\\");
iStart = 4; iEnd = 4; iStepSize = 1; iExpFlag = 4;
test.oneside.I = GetRFMapData2A ( fileRootName, iBase, iStart, iEnd, iStepSize, kRFPeakToEdgeDetect, N2 );
# Set up and do the basic RF Map plots
rfExpZone.y = rfExpZone.y.oneside = c ( round((N/3),0)-1, round((N/3),0)-0 ); # Partially describes the zone that was manipulated.
# NOTE(review): the next three iColList assignments are superseded; only the
# final one takes effect (remnants of earlier track choices).
iColList = c ( controlTrack, 16, 17 ); iColList = c ( controlTrack, 14, 15 );
iColList = c ( controlTrack, as.integer ( N / 3 - 1 ), as.integer ( N / 3 + 0 ) );
iColList = c ( controlTrack, as.integer ( N / 3 - 2 ), as.integer ( N / 3 + 3 ) );
iFilter.E = NULL;
# First knocked-out cell sits two rows below the digit boundary.
itmp = ( as.integer ( N/3 ) - 2) * N + 1 + iKnockOffset;
iFilter.I = c ( seq ( itmp, itmp + iKnockLength - 1 ), seq ( itmp + N, itmp + N + iKnockLength - 1 ) );
rfstats.test.oneside.I = PlotKnockoutRFMap ( base, 15, test.oneside.I, 15, "\nBaseline Refinement", "\nBoundary One Side I Only", iTrim,
iFilter.E, iFilter.I, iRowList, iColList, rfExpZone.x, rfExpZone.y, tiffFlag, iKnockLength, iExpFlag );
# Set up and do the RF "fly over"
if ( makeMovie ) {
colParmsRFFlyOver = c ( as.integer(N/3)-3, as.integer(N/3)+4, 1 ); # Partially describes the fly-over zone.
RFFlyOver ( base, test.oneside.I, N, rowParmsRFFlyOver, colParmsRFFlyOver, rfExpZone.x, rfExpZone.y );
} # if ( makeMovie )
##############################################################
##############################################################
#
# 5. Get the Knockout RF Map - ONE-SIDE E.
#
##############################################################
##############################################################
# One-sided boundary knockout: E cells silenced on one side of the boundary.
fRoot = "BorderKnockout_OneSide_E.RFMap";
fileRootName = paste(fDir, fRoot, sep="\\");
iStart = 5; iEnd = 5; iStepSize = 1; iExpFlag = 5;
test.oneside.E = GetRFMapData2A ( fileRootName, iBase, iStart, iEnd, iStepSize, kRFPeakToEdgeDetect, N2 );
# Set up and do the basic RF Map plots
rfExpZone.y = c ( round((N/3),0)-1, round((N/3),0)-0 ); # Partially describes the zone that was manipulated.
# NOTE(review): first iColList assignment is superseded by the second.
iColList = c ( controlTrack, 16, 17 );
iColList = c ( controlTrack, as.integer ( N / 3 - 2 ), as.integer ( N / 3 + 3 ) );
iFilter.I = NULL;
itmp = ( as.integer ( N/3 ) - 2) * N + 1 + iKnockOffset;
iFilter.E = c ( seq ( itmp, itmp + iKnockLength - 1 ), seq ( itmp + N, itmp + N + iKnockLength - 1 ) );
rfstats.test.oneside.E = PlotKnockoutRFMap ( base, 15, test.oneside.E, 15, "\nBaseline Refinement", "\nBoundary One Side E Only", iTrim,
iFilter.E, iFilter.I, iRowList, iColList, rfExpZone.x, rfExpZone.y, tiffFlag, iKnockLength, iExpFlag );
# Additional longitudinal tracks.
# NOTE(review): assigned after the plot call, so this value is unused here
# (and is overwritten again in the next section) — confirm intent.
iColList = c ( controlTrack, as.integer ( N / 3 - 2 ), as.integer ( N / 3 + 2 ) );
# Set up and do the RF "fly over"
if ( makeMovie ) {
colParmsRFFlyOver = c ( as.integer(N/3)-3, as.integer(N/3)+4, 1 ); # Partially describes the fly-over zone.
RFFlyOver ( base, test.oneside.E, N, rowParmsRFFlyOver, colParmsRFFlyOver, rfExpZone.x, rfExpZone.y );
} # if ( makeMovie )
##############################################################
##############################################################
#
# 6. Get the Knockout RF Map - ONE-SIDE E and I.
#
##############################################################
##############################################################
# One-sided boundary knockout: both E and I cells silenced on one side.
fRoot = "BorderKnockout_OneSide_EI.RFMap";
fileRootName = paste(fDir, fRoot, sep="\\");
iStart = 6; iEnd = 6; iStepSize = 1; iExpFlag = 6;
test.oneside.EI = GetRFMapData2A ( fileRootName, iBase, iStart, iEnd, iStepSize, kRFPeakToEdgeDetect, N2 );
# Set up and do the basic RF Map plots
rfExpZone.y = c ( round((N/3),0)-1, round((N/3),0)-0 ); # Partially describes the zone that was manipulated.
# NOTE(review): the first two iColList assignments are superseded; only the
# final one takes effect.
iColList = c ( controlTrack, 16, 17 ); iColList = c ( controlTrack, 14, 15 );
iColList = c ( controlTrack, as.integer ( N / 3 - 2 ), as.integer ( N / 3 + 3 ) );
# Identical two-row mask applied to both cell types.
itmp = ( as.integer ( N/3 ) - 2) * N + 1 + iKnockOffset;
iFilter.I = c ( seq ( itmp, itmp + iKnockLength - 1 ), seq ( itmp + N, itmp + N + iKnockLength - 1 ) );
iFilter.E = c ( seq ( itmp, itmp + iKnockLength - 1 ), seq ( itmp + N, itmp + N + iKnockLength - 1 ) );
rfstats.test.oneside.EI = PlotKnockoutRFMap ( base, 15, test.oneside.EI, 15, "\nBaseline Refinement", "\nBoundary One Side E and I", iTrim,
iFilter.E, iFilter.I, iRowList, iColList, rfExpZone.x, rfExpZone.y, tiffFlag, iKnockLength, iExpFlag );
# Set up and do the RF "fly over"
if ( makeMovie ) {
colParmsRFFlyOver = c ( as.integer(N/3)-3, as.integer(N/3)+4, 1 ); # Partially describes the fly-over zone.
RFFlyOver ( base, test.oneside.EI, N, rowParmsRFFlyOver, colParmsRFFlyOver, rfExpZone.x, rfExpZone.y );
} # if ( makeMovie )
##############################################################
##############################################################
#
# 7. Get the Knockout RF Map - TWO-SIDE I.
#
##############################################################
##############################################################
# Two-sided boundary knockout: I cells silenced in four contiguous rows
# straddling the digit boundary (two rows on each side).
fRoot = "BorderKnockout_TwoSide_I.RFMap";
fileRootName = paste(fDir, fRoot, sep="\\");
iStart = 7; iEnd = 7; iStepSize = 1; iExpFlag = 7;
test.twoside.I = GetRFMapData2A ( fileRootName, iBase, iStart, iEnd, iStepSize, kRFPeakToEdgeDetect, N2 );
# Set up and do the basic RF Map plots
rfExpZone.y = c ( round((N/3),0)-1, round((N/3),0)+2 ); # Partially describes the zone that was manipulated.
# NOTE(review): the first two iColList assignments are superseded; only the
# final one takes effect.
iColList = c ( controlTrack, 16, 17 ); iColList = c ( controlTrack, 14, 15 );
iColList = c ( controlTrack, as.integer ( N / 3 - 2 ), as.integer ( N / 3 + 3 ) );
itmp = ( as.integer ( N/3 ) - 2) * N + 1 + iKnockOffset;
iFilter.E = NULL;
# Four row-segments of knocked-out I cells (two on each side of the boundary).
iFilter.I = c ( seq ( itmp, itmp + iKnockLength - 1 ), seq ( itmp + N, itmp + N + iKnockLength - 1 ),
seq ( itmp + 2*N, itmp + 2*N + iKnockLength - 1 ), seq ( itmp + 3*N, itmp + 3*N + iKnockLength - 1 ) );
rfstats.test.twoside.I = PlotKnockoutRFMap ( base, 15, test.twoside.I, 15, "\nBaseline Refinement", "\nBoundary Two Side I Only", iTrim,
iFilter.E, iFilter.I, iRowList, iColList, rfExpZone.x, rfExpZone.y, tiffFlag, iKnockLength, iExpFlag );
# Set up and do the RF "fly over"
if ( makeMovie ) {
colParmsRFFlyOver = c ( as.integer(N/3) - (N/6), as.integer(N/3) + (N/6), 1 ); # Partially describes the fly-over zone.
RFFlyOver ( base, test.twoside.I, N, rowParmsRFFlyOver, colParmsRFFlyOver, rfExpZone.x, rfExpZone.y );
} # if ( makeMovie )
##############################################################
##############################################################
#
# 8. Get the Knockout RF Map - TWO-SIDE E.
#
##############################################################
##############################################################
# Two-sided boundary knockout: E cells silenced in four contiguous rows
# straddling the digit boundary (two rows on each side).
fRoot = "BorderKnockout_TwoSide_E.RFMap";
fileRootName = paste(fDir, fRoot, sep="\\");
iStart = 8; iEnd = 8; iStepSize = 1; iExpFlag = 8;
test.twoside.E = GetRFMapData2A ( fileRootName, iBase, iStart, iEnd, iStepSize, kRFPeakToEdgeDetect, N2 );
# Set up and do the basic RF Map plots
rfExpZone.y = c ( round((N/3),0)-1, round((N/3),0)+2 ); # Partially describes the zone that was manipulated.
iColList = c ( controlTrack, as.integer ( N / 3 - 2 ), as.integer ( N / 3 + 3 ) );
itmp = ( as.integer ( N/3 ) - 2) * N + 1 + iKnockOffset;
iFilter.I = NULL;
# Four row-segments of knocked-out E cells (two on each side of the boundary).
iFilter.E = c ( seq ( itmp, itmp + iKnockLength - 1 ), seq ( itmp + N, itmp + N + iKnockLength - 1 ),
seq ( itmp + 2*N, itmp + 2*N + iKnockLength - 1 ), seq ( itmp + 3*N, itmp + 3*N + iKnockLength - 1 ) );
# BUG FIX: these results were previously assigned to rfstats.test.twoside.I,
# clobbering the two-side I-only statistics from section 7.  Store them under
# their own name.
rfstats.test.twoside.E = PlotKnockoutRFMap ( base, 15, test.twoside.E, 15, "\nBaseline Refinement", "\nBoundary Two Side E Only", iTrim,
iFilter.E, iFilter.I, iRowList, iColList, rfExpZone.x, rfExpZone.y, tiffFlag, iKnockLength, iExpFlag );
# Set up and do the RF "fly over"
if ( makeMovie ) {
colParmsRFFlyOver = c ( as.integer(N/3) - (N/6), as.integer(N/3) + (N/6), 1 ); # Partially describes the fly-over zone.
RFFlyOver ( base, test.twoside.E, N, rowParmsRFFlyOver, colParmsRFFlyOver, rfExpZone.x, rfExpZone.y );
} # if ( makeMovie )
##############################################################
##############################################################
#
# 9. Get the Knockout RF Map - TWO-SIDE E and I.
#
##############################################################
##############################################################
# Two-sided boundary knockout: both E and I cells silenced in four contiguous
# rows straddling the digit boundary (two rows on each side).
fRoot = "BorderKnockout_TwoSide_EI.RFMap";
fileRootName = paste(fDir, fRoot, sep="\\");
iStart = 9; iEnd = 9; iStepSize = 1; iExpFlag = 9;
# NOTE: the original code read this data set twice — once with
# kRFPeakToEdgeDetect and immediately again with a 0.1 threshold, the second
# call overwriting the first.  Only the effective (0.1) read is kept here.
test.twoside.EI = GetRFMapData2A ( fileRootName, iBase, iStart, iEnd, iStepSize, 0.1, N2 );
# Set up and do the basic RF Map plots
rfExpZone.y = c ( round((N/3),0)-1, round((N/3),0)+2 ); # Partially describes the zone that was manipulated.
iColList = c ( controlTrack, as.integer ( N / 3 - 2 ), as.integer ( N / 3 + 3 ) );
itmp = ( as.integer ( N/3 ) - 2) * N + 1 + iKnockOffset;
# Identical four-segment mask applied to both cell types.
iFilter.E = iFilter.I = c ( seq ( itmp, itmp + iKnockLength - 1 ), seq ( itmp + N, itmp + N + iKnockLength - 1 ),
seq ( itmp + 2*N, itmp + 2*N + iKnockLength - 1 ), seq ( itmp + 3*N, itmp + 3*N + iKnockLength - 1 ) );
# BUG FIX: these results were previously assigned to rfstats.test.twoside.I,
# clobbering the two-side I-only statistics from section 7.  Store them under
# their own name.
rfstats.test.twoside.EI = PlotKnockoutRFMap ( base, 15, test.twoside.EI, 15, "\nBaseline Refinement", "\nBoundary Two Side E and I", iTrim,
iFilter.E, iFilter.I, iRowList, iColList, rfExpZone.x, rfExpZone.y, tiffFlag, iKnockLength, iExpFlag );
# Set up and do the RF "fly over"
if ( makeMovie ) {
colParmsRFFlyOver = c ( as.integer(N/3) - (N/6), as.integer(N/3) + (N/6), 1 ); # Partially describes the fly-over zone.
RFFlyOver ( base, test.twoside.EI, N, rowParmsRFFlyOver, colParmsRFFlyOver, rfExpZone.x, rfExpZone.y );
} # if ( makeMovie )
| /KnockoutFigure5.R | no_license | kgrajski/KamilGrajski-Somatotopic-Discontinuity-Plasticity | R | false | false | 32,434 | r | #
# KnockoutFigure5.R
#
#
# Automatically handling the control, one-side and two-side I-cell knockout variations.
#
# Clear the workspace.
rm(list = ls());
# Load up some libraries and helper functions.
source ( "NMHelperFunctions.R" );
#
# Additional specialized helper functions.
#
#
# Standardized plots.
#
PlotKnockoutRFMap = function ( ref, irefIter, exp, iexpIter, refTitleText, expTitleText,
iTrim, iFilter.E, iFilter.I, iRowList, iColList, x, y, tiffFlag=FALSE, iKnockOutLength=0, iExpFlag=0 ) {
# Set up the parameters so that the "experimental zone" will be outlined on subsequent plots.
y[1] = y[1] - 0.5; y[2] = y[2] + 0.5;
x[1] = x[1] - 0.5; x[2] = x[2] + 0.5;
tmp.x = rbind(c(x[1], x[2]), c(x[1], x[2]), c(x[1], x[1]), c(x[2], x[2]));
tmp.y = rbind(c(y[1], y[1]), c(y[2], y[2]), c(y[1], y[2]), c(y[1], y[2]));
xLabText = "Distal -> Proximal"; yLabText = "Digit 1 -> Digit 3";
N = as.integer ( sqrt ( dim(base$r1.i.rfMap)[1] ) );
boundaryMarks = c ( as.integer(N/3)+0.5, as.integer(N/3)*2+0.5 );
iRFTrackStepSize = 2;
#
# RF Centroids
#
if ( tiffFlag ) {
tiff ( paste("Knock", "Centroids", iKnockOutLength, iExpFlag, "tiff", sep="."), compression="lzw", units="in", width=7.0, height=7.0, res=300);
} else {
x11();
} # if ( tiffFlag )
par ( mfcol=c(2,2) );
ShowTopoMap1 ( ref$rfTrackData.e[[length(ref$rfTrackData.e)]], paste(paste("E-Type","Iter",irefIter,sep=" "),refTitleText, sep=""), FALSE, 0.5, 0 );
GenOutline1X ( tmp.x, tmp.y, 2, 1, 2 );
ShowTopoMap1 ( ref$rfTrackData.i[[length(ref$rfTrackData.i )]], paste(paste("I-Type","Iter",irefIter,sep=" "),refTitleText, sep=""), FALSE, 0.5, 0 );
GenOutline1X ( tmp.x, tmp.y, 2, 1, 2 );
ShowTopoMap1 ( exp$rfTrackData.e[[length(exp$rfTrackData.e)]], paste(paste("E-Type","Iter",iexpIter,sep=" "),expTitleText, sep=""), FALSE, 0.5, 0 );
GenOutline1X ( tmp.x, tmp.y, 2, 1, 2 );
ShowTopoMap1 ( exp$rfTrackData.i[[length(exp$rfTrackData.i)]], paste(paste("I-Type","Iter",iexpIter,sep=" "),expTitleText, sep=""), FALSE, 0.5, 0 );
GenOutline1X ( tmp.x, tmp.y, 2, 1, 2 );
if ( tiffFlag ) {
dev.off();
} # if ( tiffFlag )
#
# RF Tracks
#
if ( tiffFlag ) {
tiff ( paste("Knock", "Tracks", iKnockOutLength, iExpFlag, "tiff", sep="."), compression="lzw", units="in", width=7.0, height=7.0, res=300);
} else {
x11();
} # if ( tiffFlag )
par ( mfcol=c(2,2) );
ShowThreeDigitRFTrackFlex ( ref$rfTrackData.e[[length(ref$rfTrackData.e)]], paste(paste("E-Type","Iter",irefIter,sep=" "),refTitleText, sep=""),
TRUE, 0.5, 0, 1, iRowList, iColList, iRFTrackStepSize );
GenOutline1X ( tmp.x, tmp.y, 2, 1, 2 );
ShowThreeDigitRFTrackFlex ( ref$rfTrackData.i[[length(ref$rfTrackData.i)]], paste(paste("I-Type","Iter",irefIter,sep=" "),refTitleText, sep=""),
TRUE, 0.5, 0, 1, iRowList, iColList, iRFTrackStepSize );
GenOutline1X ( tmp.x, tmp.y, 2, 1, 2 );
ShowThreeDigitRFTrackFlex ( exp$rfTrackData.e[[length(exp$rfTrackData.e)]], paste(paste("E-Type","Iter",iexpIter,sep=" "),expTitleText, sep=""),
TRUE, 0.5, 0, 1, iRowList, iColList, iRFTrackStepSize );
GenOutline1X ( tmp.x, tmp.y, 2, 1, 2 );
ShowThreeDigitRFTrackFlex ( exp$rfTrackData.i[[length(exp$rfTrackData.i)]], paste(paste("I-Type","Iter",iexpIter,sep=" "),expTitleText, sep=""),
TRUE, 0.5, 0, 1, iRowList, iColList, iRFTrackStepSize );
GenOutline1X ( tmp.x, tmp.y, 2, 1, 2 );
if ( tiffFlag ) {
dev.off();
} # if ( tiffFlag )
#
# RF Translocation
#
if ( tiffFlag ) {
tiff ( paste("Knock", "Position_Size", iKnockOutLength, iExpFlag, "tiff", sep="."), compression="lzw", units="in", width=7.0, height=7.0, res=300);
} else {
x11();
} # if ( tiffFlag )
par ( mfcol=c(2,2) );
centroidDelta.e = RFCentroidDelta ( exp$rfTrackData.e[[length(exp$rfTrackData.e)]], ref$rfTrackData.e[[length(ref$rfTrackData.e)]]);
centroidDelta.i = RFCentroidDelta ( exp$rfTrackData.i[[length(exp$rfTrackData.i)]], ref$rfTrackData.i[[length(ref$rfTrackData.i)]]);
centroidDelta.e[c(iTrim,iFilter.E)] = 0;
centroidDelta.i[c(iTrim,iFilter.I)] = 0;
#zmin = min ( centroidDelta.e, centroidDelta.i ); zmax = max ( centroidDelta.e, centroidDelta.i );
ShowVecAsMap2 ( centroidDelta.e, paste(paste("E-Type RF Centroid Shift","Iter",irefIter,sep=" "),expTitleText, sep=""), xLabText, yLabText, min(centroidDelta.e), max(centroidDelta.e) );
abline ( h = boundaryMarks, lty=3, col=3 );
GenOutline1X ( tmp.x, tmp.y, "white", 1, 0.5 );
ShowVecAsMap2 ( centroidDelta.i, paste(paste("I-Type RF Centroid Shift","Iter",irefIter,sep=" "),expTitleText, sep=""), xLabText, yLabText, min(centroidDelta.i), max(centroidDelta.i) );
abline ( h = boundaryMarks, lty=3, col=3 );
GenOutline1X ( tmp.x, tmp.y, "white", 1, 0.5 );
#
# RF Size Change
#
sizeDelta.e = ( QuantRFSize ( exp$r1.e.rfMap, kRFPeakToEdgeDetect ) / QuantRFSize ( ref$r1.e.rfMap, kRFPeakToEdgeDetect ) ) - 1.0;
sizeDelta.i = ( QuantRFSize ( exp$r1.i.rfMap, kRFPeakToEdgeDetect ) / QuantRFSize ( ref$r1.i.rfMap, kRFPeakToEdgeDetect ) ) - 1.0;
sizeDelta.e[c(iTrim,iFilter.E)] = 0;
sizeDelta.i[c(iTrim,iFilter.I)] = 0;
ShowVecAsMap2 ( sizeDelta.e, paste(paste("E-Type % RF Size Change","Iter",irefIter,sep=" "),expTitleText, sep=""), xLabText, yLabText, min(sizeDelta.e), max(sizeDelta.e) );
abline ( h = boundaryMarks, lty=3, col=3 );
GenOutline1X ( tmp.x, tmp.y, "white", 1, 0.5 );
ShowVecAsMap2 ( sizeDelta.i, paste(paste("I-Type % RF Size Change","Iter",irefIter,sep=" "),expTitleText, sep=""), xLabText, yLabText, min(sizeDelta.i), max(sizeDelta.i) );
abline ( h = boundaryMarks, lty=3, col=3 );
GenOutline1X ( tmp.x, tmp.y, "white", 1, 0.5 );
if ( tiffFlag ) {
dev.off();
} # if ( tiffFlag )
#
# RF Max Magnitude Response
#
if ( tiffFlag ) {
tiff ( paste("Knock", "MaxResp", iKnockOutLength, iExpFlag, "tiff", sep="."), compression="lzw", units="in", width=7.0, height=7.0, res=300);
} else {
x11();
} # if ( tiffFlag )
par ( mfcol=c(2,2) );
ref.maxresp.e = apply ( ref$r1.e.rfMap, 1, max );
ref.maxresp.i = apply ( ref$r1.i.rfMap, 1, max );
exp.maxresp.e = apply ( exp$r1.e.rfMap, 1, max );
exp.maxresp.i = apply ( exp$r1.i.rfMap, 1, max );
delta.maxresp.e = ( exp.maxresp.e / ref.maxresp.e ) - 1.0;
delta.maxresp.i = ( exp.maxresp.i / ref.maxresp.i ) - 1.0;
ref.maxresp.e[c(iTrim,iFilter.E)] = 0;
ref.maxresp.i[c(iTrim,iFilter.I)] = 0;
exp.maxresp.e[c(iTrim,iFilter.E)] = 0;
exp.maxresp.i[c(iTrim,iFilter.I)] = 0;
delta.maxresp.e[c(iTrim,iFilter.E)] = 0;
delta.maxresp.i[c(iTrim,iFilter.I)] = 0;
ShowVecAsMap2 ( exp.maxresp.e, paste(paste("E-Type RF Mag Resp","Iter",irefIter,sep=" "),expTitleText, sep=""), xLabText, yLabText,
min(exp.maxresp.e), max(exp.maxresp.e) );
abline ( h = boundaryMarks, lty=3, col=3 );
GenOutline1X ( tmp.x, tmp.y, "white", 1, 0.5 );
ShowVecAsMap2 ( exp.maxresp.i, paste(paste("I-Type RF Mag Resp","Iter",irefIter,sep=" "),expTitleText, sep=""), xLabText, yLabText,
min(exp.maxresp.i), max(exp.maxresp.i) );
abline ( h = boundaryMarks, lty=3, col=3 );
GenOutline1X ( tmp.x, tmp.y, "white", 1, 0.5 );
ShowVecAsMap2 ( delta.maxresp.e, paste(paste("E-Type % RF Mag Resp Diff","Iter",irefIter,sep=" "),expTitleText, sep=""), xLabText, yLabText,
min(delta.maxresp.e), max(delta.maxresp.e) );
abline ( h = boundaryMarks, lty=3, col=3 );
GenOutline1X ( tmp.x, tmp.y, "white", 1, 0.5 );
ShowVecAsMap2 ( delta.maxresp.i, paste(paste("I-Type % RF Mag Resp Diff","Iter",irefIter,sep=" "),expTitleText, sep=""), xLabText, yLabText,
min(delta.maxresp.i), max(delta.maxresp.i) );
abline ( h = boundaryMarks, lty=3, col=3 );
GenOutline1X ( tmp.x, tmp.y, "white", 1, 0.5 );
if ( tiffFlag ) {
dev.off();
} # if ( tiffFlag )
#
# Intracolumnar RF Centroid Divergence
#
if ( tiffFlag ) {
tiff ( paste("Knock", "Divergence", iKnockOutLength, iExpFlag, "tiff", sep="."), compression="lzw", units="in", width=7.0, height=7.0, res=300);
} else {
x11();
} # if ( tiffFlag )
par ( mfcol=c(2,2) );
centroidDelta.ref = RFCentroidDelta ( ref$rfTrackData.e[[length(ref$rfTrackData.e)]], ref$rfTrackData.i[[length(ref$rfTrackData.i)]]);
centroidDelta.exp = RFCentroidDelta ( exp$rfTrackData.e[[length(exp$rfTrackData.e)]], exp$rfTrackData.i[[length(exp$rfTrackData.i)]]);
centroidDelta.ref[c(iTrim,iFilter.E,iFilter.I)] = 0;
centroidDelta.exp[c(iTrim,iFilter.I,iFilter.E)] = 0;
ShowVecAsMap2 ( centroidDelta.ref, paste(paste("Baseline RF Divergence","Iter",irefIter,sep=" "),expTitleText, sep=""), xLabText, yLabText, min(centroidDelta.ref), max(centroidDelta.ref) );
abline ( h = boundaryMarks, lty=3, col=3 );
GenOutline1X ( tmp.x, tmp.y, "white", 1, 0.5 );
ShowVecAsMap2 ( centroidDelta.exp, paste(paste("Knockout RF Divergence","Iter",irefIter,sep=" "),expTitleText, sep=""), xLabText, yLabText, min(centroidDelta.exp), max(centroidDelta.exp) );
abline ( h = boundaryMarks, lty=3, col=3 );
GenOutline1X ( tmp.x, tmp.y, "white", 1, 0.5 );
centroidDelta.ref[ centroidDelta.ref > 20 ] = 0.0;
ShowVecAsMap2 ( centroidDelta.ref, paste(paste("Baseline RF Divergence","Iter",irefIter,sep=" "),expTitleText, sep=""), xLabText, yLabText, min(centroidDelta.ref), max(centroidDelta.ref) );
abline ( h = boundaryMarks, lty=3, col=3 );
GenOutline1X ( tmp.x, tmp.y, "white", 1, 0.5 );
centroidDelta.exp[ centroidDelta.exp > 20 ] = 0.0;
ShowVecAsMap2 ( centroidDelta.exp, paste(paste("Knockout RF Divergence","Iter",irefIter,sep=" "),expTitleText, sep=""), xLabText, yLabText, min(centroidDelta.exp), max(centroidDelta.exp) );
abline ( h = boundaryMarks, lty=3, col=3 );
GenOutline1X ( tmp.x, tmp.y, "white", 1, 0.5 );
if ( tiffFlag ) {
dev.off();
} # if ( tiffFlag )
return ( list ( centroidDelta.e = centroidDelta.e, centroidDelta.i = centroidDelta.i, sizeDelta.e = sizeDelta.e, sizeDelta.i = sizeDelta.i ) );
} # PlotKnockoutRFMap ( ref, exp, refTitleText, expTitleText, iTrim, iFilter.E, iFilter.I, iColList ) {
RFFlyOver = function ( ref, exp, N, iRowParm, iColParm, rfExpZone.x, rfExpZone.y ) {
# Render an animated HTML "fly over" across a rectangular patch of cortical
# columns, one frame per column, showing the four RF maps (ref/exp x E/I)
# interleaved into a single 2N x 2N panel on a log10 color scale.
#   ref, exp       - result lists holding r1.e.rfMap / r1.i.rfMap matrices (one row per cell)
#   N              - edge length of the N x N cortical sheet
#   iRowParm, iColParm - c(start, end, step) row/column scan ranges
#   rfExpZone.x/.y - manipulated-zone bounds, forwarded to GenOutline4X
iRowParm = as.integer ( iRowParm );
iColParm = as.integer ( iColParm );
nShades = 128;
xText = "Distal -> Proximal"; yText = "Digit 1 -> Digit 3";
panelMarks = N + 0.5;
digitMarks = c ( (N/3) + 0.5, (2*N/3) + 0.5 );
digitMarks = c ( digitMarks, N + digitMarks );
rowSeq = seq ( iRowParm[1], iRowParm[2], iRowParm[3] );
colSeq = seq ( iColParm[1], iColParm[2], iColParm[3] );
# First pass: global z-range over every cell so all frames share one color scale.
zmin = Inf; zmax = -Inf;
for ( iCol in colSeq ) {
for ( iRow in rowSeq ) {
iCell = GetLin ( iRow, iCol, N );
z = c ( ref$r1.i.rfMap[iCell, ], ref$r1.e.rfMap[iCell, ],
exp$r1.i.rfMap[iCell, ], exp$r1.e.rfMap[iCell, ] );
t = range ( z );
if ( t[1] < zmin ) { zmin = t[1]; }
if ( t[2] > zmax ) { zmax = t[2]; }
} # for ( iRow in rowSeq )
} # for ( iCol in colSeq )
zlim = log10 ( c ( zmin, zmax ) );
x11();
# Second pass: emit one animation frame per cell via the animation package.
saveHTML ( {
for ( iCol in colSeq ) {
for ( iRow in rowSeq ) {
iCell = GetLin ( iRow, iCol, N );
z = log10 ( InterleaveForDisplay ( ref$r1.i.rfMap[iCell, ], ref$r1.e.rfMap[iCell, ],
exp$r1.i.rfMap[iCell, ], exp$r1.e.rfMap[iCell, ] ) );
image.plot( c(1:(2*N)), c(1:(2*N)), matrix ( z, nrow=2*N, ncol=2*N, byrow=FALSE ),
col=rainbow(nShades, s = 1.0, v = 1.0, start = 0.67, end = 1.0, alpha = 1 ),
zlim = zlim, main=paste ( "Cortical Column # ", iCell, sep="" ),
xlab=xText, ylab=yText );
abline(h=digitMarks, col=3, lty=3, lwd=0.5 );
abline(h=panelMarks, v=panelMarks, col=1, lty=1, lwd=2 );
GenOutline4X( N, rfExpZone.x, rfExpZone.y, TRUE );
} # for ( iRow in rowSeq )
} # for ( iCol in colSeq )
}, interval = 1 ); # saveHTML
} # RFFlyOver = function ( ref, exp, ... ) {
#
#
RFFlyOverA = function ( ref, exp, N, iRowParm, iColParm, rfExpZone.x, rfExpZone.y, kRFPeakToEdgeDetect ) {
# Animated HTML "fly over" of RF maps across a patch of cortical columns.
# Unlike RFFlyOver: entries at the indices returned by QuantRFSizeA are zeroed
# before display (presumably sub-threshold / outside-RF entries at the
# kRFPeakToEdgeDetect level - confirm against QuantRFSizeA), and the color
# scale (zlim) is recomputed per frame instead of being shared globally.
#   ref, exp           - result lists holding r1.e.rfMap / r1.i.rfMap matrices (one row per cell)
#   N                  - edge length of the N x N cortical sheet
#   iRowParm, iColParm - c(start, end, step) row/column scan ranges
#   rfExpZone.x/.y     - manipulated-zone bounds, forwarded to GenOutline4X
iRowParm = as.integer ( iRowParm );
iColParm = as.integer ( iColParm );
numColors = 128;
xLabText = "Distal -> Proximal"; yLabText = "Digit 1 -> Digit 3";
graphicalPanelMarks = ( N + 0.5 );
digitBorderMarks = c((N/3)+0.5, (2*N/3)+0.5);
digitBorderMarks = c ( digitBorderMarks, N+digitBorderMarks );
x11();
saveHTML ( {
for ( iRow in seq ( iRowParm[1], iRowParm[2], iRowParm[3] ) ) {
for ( iCol in seq ( iColParm[1], iColParm[2], iColParm[3] ) ) {
iCell = GetLin ( iRow, iCol, N );
# Reference maps masked with their own QuantRFSizeA index sets.
tmp1 = QuantRFSizeA ( ref$r1.i.rfMap, kRFPeakToEdgeDetect );
t.ref.r1.i = ref$r1.i.rfMap[iCell, ];
t.ref.r1.i[tmp1] = 0.0;
tmp1 = QuantRFSizeA ( ref$r1.e.rfMap, kRFPeakToEdgeDetect );
t.ref.r1.e = ref$r1.e.rfMap[iCell, ];
t.ref.r1.e[tmp1] = 0.0;
# NOTE(review): the masks applied to the *experimental* maps below are also
# computed from the ref maps (ref$r1.i.rfMap / ref$r1.e.rfMap), not exp$...
# Possibly intentional (same mask for a like-for-like comparison), but it
# looks like a copy-paste slip - confirm before relying on these frames.
tmp1 = QuantRFSizeA ( ref$r1.i.rfMap, kRFPeakToEdgeDetect );
t.exp.r1.i = exp$r1.i.rfMap[iCell, ];
t.exp.r1.i[tmp1] = 0.0;
tmp1 = QuantRFSizeA ( ref$r1.e.rfMap, kRFPeakToEdgeDetect );
t.exp.r1.e = exp$r1.e.rfMap[iCell, ];
t.exp.r1.e[tmp1] = 0.0;
# Interleave the four masked panels into one 2N x 2N display matrix.
z = InterleaveForDisplay ( t.ref.r1.i, t.ref.r1.e, t.exp.r1.i, t.exp.r1.e );
zlim = c ( min(z), max(z) ); # per-frame color scale
titleText = paste ( "Cortical Column # ", iCell, sep="" );
image.plot( c(1:(2*N)), c(1:(2*N)), matrix ( z, nrow=2*N, ncol=2*N, byrow=FALSE ),
col=rainbow(numColors, s = 1.0, v = 1.0, start = 0.67, end = 1.0, alpha = 1 ),
zlim = zlim, main=titleText, xlab=xLabText, ylab=yLabText );
abline(h=digitBorderMarks, col=3, lty=3, lwd=0.5 );
abline(h=graphicalPanelMarks, v=graphicalPanelMarks, col=1, lty=1, lwd=2 );
GenOutline4X( N, rfExpZone.x, rfExpZone.y, TRUE );
} # for ( iCol in seq ( iColStart, iColEnd, iColStep ) {
} # for ( iRow in seq ( iRowStart, iRowEnd, iRowStep ) {
}, interval = 1 ); # saveHTML
} # RFFlyOverA = function ( ref, exp, ... ) {
#
# MAIN
#
#
# Global constants.
#
N = 75;                       # Cortical sheet edge length (sheet is N x N cells).
N2 = N * N;                   # Total cell count / linear index range.
kRFPeakToEdgeDetect = 0.5;    # Peak-to-edge threshold passed to the RF quantification routines.
iTrim = EdgeTrimCellList ( seq ( 1, N2 ), N, 2 ); # Trim the outer-most edges.
boundaryMarks = c ( as.integer(N/3)+0.5, as.integer(N/3)*2+0.5 ); # Horizontal marks at the N/3, 2N/3 boundaries.
iRFTrackStepSize = 3;
makeMovie = FALSE;            # TRUE -> also generate the RF "fly over" animations.
tiffFlag = FALSE;             # TRUE -> write TIFF files; FALSE -> plot to X11 windows.
#
# Detailed parameters describing the knockouts.
#
iBase = iKnockLength = 8;     # iBase feeds GetRFMapData2A; iKnockLength is the knockout width in cells.
#
#
#
iKnockOffset = round( ( N / 2 ), 0 ) - 4;
controlTrack = c ( round( ( 2 * N / 3), 0 ), N + 1 - round( N / 6, 0 ) );
rfExpZone.x = c ( iKnockOffset + 1, iKnockOffset + iKnockLength );
rowParmsRFFlyOver = c ( iKnockOffset - 3, iKnockOffset + iKnockLength + 3, 1 );
iRowList = seq ( rfExpZone.x[1]-2, rfExpZone.x[2]+2, 2 );
# Set the directory where the experimental data is sitting.
# Only the last assignment takes effect; the earlier alternatives were dead
# assignments (immediately overwritten) and are kept here as comments.
#fDir = paste ( "G:/NMLab/Working/S.45.7.Lesion.", iKnockLength, "/", sep="" );
#fDir = paste ( "F:/NMLab/Working/S.45.7.Lesion.x0.4.", iKnockLength, "/", sep="" );
fDir = paste ( "F:/NMLab/Working/T.75.7.Lesion.x0.2.", iKnockLength, "/", sep="" );
#
# Get the Random Initial RF Map.
#
fRoot = "Base.RFMap";
fileRootName = paste(fDir, fRoot, sep="\\");
# Iterations 0..0 of the baseline file: the map before refinement.
# (rnet is not referenced below - presumably kept for interactive inspection.)
rnet = GetRFMapData2A ( fileRootName, 15, 0, 0, 15, kRFPeakToEdgeDetect, N2 );
#
# Get the Baseline Refinement RF Map.
#
# Same file as the initial map above, but iterations 15..15: the fully refined
# baseline map used as the reference in every knockout comparison below.
# (GetRFMapData2A positional args 2-5 correspond to iBase, iStart, iEnd,
# iStepSize - cf. the knockout sections below.)
fRoot = "Base.RFMap";
fileRootName = paste(fDir, fRoot, sep="\\");
base = GetRFMapData2A ( fileRootName, 15, 15, 15, 15, kRFPeakToEdgeDetect, N2 );
##############################################################
##############################################################
#
# 0. Get the Placebo
#
##############################################################
##############################################################
fRoot = "BorderKnockout_Control_Placebo.RFMap";
fileRootName = paste(fDir, fRoot, sep="\\");
iStart = 0; iEnd = 0; iStepSize = 1; iExpFlag = 0;
placebo = GetRFMapData2A ( fileRootName, iBase, iStart, iEnd, iStepSize, kRFPeakToEdgeDetect, N2 );
# Set up and do the basic RF Map plots
# iColList: longitudinal tracks to plot - the two control tracks plus two
# tracks flanking the control zone near row N/6.
iColList = c ( controlTrack, as.integer ( N / 6 - 1 ), as.integer ( N / 6 + 2 ) );
rfExpZone.y = c ( round((N/6),0), round((N/6),0)+1 );
# Placebo: nothing knocked out, so both filter lists (iFilter.E, iFilter.I)
# are NULL. See PlotKnockoutRFMap above for the full argument list.
rfstats.placebo = PlotKnockoutRFMap ( base, 15, placebo, 15, "\nBaseline Refinement", "\nControl Placebo", iTrim,
NULL, NULL, iRowList, iColList, rfExpZone.x, rfExpZone.y, tiffFlag, iKnockLength, iExpFlag );
# Set up and do the RF "fly over"
if ( makeMovie ) {
colParmsRFFlyOver = c ( as.integer(N/6)-3, as.integer(N/6)+4, 1 );
RFFlyOver ( base, placebo, N, rowParmsRFFlyOver, colParmsRFFlyOver, rfExpZone.x, rfExpZone.y );
} # if ( makeMovie )
# Summaries of the centroid-shift / size-change stats returned above.
# NOTE: at top level these auto-print only when run interactively or with echo.
summary ( rfstats.placebo$centroidDelta.e )
summary ( rfstats.placebo$centroidDelta.i )
summary ( rfstats.placebo$sizeDelta.e )
summary ( rfstats.placebo$sizeDelta.i )
##############################################################
##############################################################
#
# 1. Get the Knockout RF Map - CONTROL I Only.
#
##############################################################
##############################################################
fRoot = "BorderKnockout_Control_I.RFMap";
fileRootName = paste(fDir, fRoot, sep="\\");
iStart = 1; iEnd = 1; iStepSize = 1; iExpFlag = 1;
test.ctl.I = GetRFMapData2A ( fileRootName, iBase, iStart, iEnd, iStepSize, kRFPeakToEdgeDetect, N2 );
# Set up and do the basic RF Map plots
rfExpZone.y = rfExpZone.y.ctl = c ( round((N/6),0), round((N/6),0)+1 ); # Partially describes the zone that was manipulated.
iColList = c ( controlTrack, as.integer ( N / 6 - 1 ), as.integer ( N / 6 + 2 ) );
# itmp: linear index of the first knocked-out cell (row N/6, offset iKnockOffset).
itmp = as.integer ( N/6 ) * N + 1 + iKnockOffset;
# Control I-only knockout: iKnockLength I-type cells on each of two adjacent rows.
iFilter.E = NULL;
iFilter.I = c ( seq ( itmp, itmp + iKnockLength - 1 ), seq ( itmp + N, itmp + N + iKnockLength - 1 ) );
rfstats.test.ctl.I = PlotKnockoutRFMap ( base, 15, test.ctl.I, 15, "\nBaseline Refinement", "\nControl I Only", iTrim,
iFilter.E, iFilter.I, iRowList, iColList, rfExpZone.x, rfExpZone.y, tiffFlag, iKnockLength, iExpFlag );
# Set up and do the RF "fly over"
if ( makeMovie ) {
colParmsRFFlyOver = c ( as.integer(N/6)-2, as.integer(N/3) + 1, 1 ); # Partially describes the fly-over zone.
RFFlyOver ( base, test.ctl.I, N, rowParmsRFFlyOver, colParmsRFFlyOver, rfExpZone.x, rfExpZone.y );
} # if ( makeMovie )
summary ( rfstats.test.ctl.I$centroidDelta.e )
summary ( rfstats.test.ctl.I$centroidDelta.i )
summary ( rfstats.test.ctl.I$sizeDelta.e )
summary ( rfstats.test.ctl.I$sizeDelta.i )
##############################################################
##############################################################
#
# 2. Get the Knockout RF Map - CONTROL E Only.
#
##############################################################
##############################################################
fRoot = "BorderKnockout_Control_E.RFMap";
fileRootName = paste(fDir, fRoot, sep="\\");
iStart = 2; iEnd = 2; iStepSize = 1; iExpFlag = 2;
test.ctl.E = GetRFMapData2A ( fileRootName, iBase, iStart, iEnd, iStepSize, kRFPeakToEdgeDetect, N2 );
# Set up and do the basic RF Map plots
rfExpZone.y = c ( round((N/6),0), round((N/6),0)+1 ); # Partially describes the zone that was manipulated.
iColList = c ( controlTrack, as.integer ( N / 6 - 1 ), as.integer ( N / 6 + 2 ) );
# Mirror of section 1: E-type cells knocked out instead of I-type.
iFilter.I = NULL;
itmp = as.integer ( N/6 ) * N + 1 + iKnockOffset;
iFilter.E = c ( seq ( itmp, itmp + iKnockLength - 1 ), seq ( itmp + N, itmp + N + iKnockLength - 1 ) );
rfstats.test.ctl.E = PlotKnockoutRFMap ( base, 15, test.ctl.E, 15, "\nBaseline Refinement", "\nControl E Only", iTrim,
iFilter.E, iFilter.I, iRowList, iColList, rfExpZone.x, rfExpZone.y, tiffFlag, iKnockLength, iExpFlag );
# Set up and do the RF "fly over"
if ( makeMovie ) {
colParmsRFFlyOver = c ( as.integer(N/6)-2, as.integer(N/3) + 1, 1 ); # Partially describes the fly-over zone.
RFFlyOver ( base, test.ctl.E, N, rowParmsRFFlyOver, colParmsRFFlyOver, rfExpZone.x, rfExpZone.y );
} # if ( makeMovie )
##############################################################
##############################################################
#
# 3. Get the Knockout RF Map - CONTROL E and I.
#
##############################################################
##############################################################
fRoot = "BorderKnockout_Control_EI.RFMap";
fileRootName = paste(fDir, fRoot, sep="\\");
iStart = 3; iEnd = 3; iStepSize = 1; iExpFlag = 3;
test.ctl.EI = GetRFMapData2A ( fileRootName, iBase, iStart, iEnd, iStepSize, kRFPeakToEdgeDetect, N2 );
# Set up and do the basic RF Map plots
rfExpZone.y = c ( round((N/6),0), round((N/6),0)+1 ); # Partially describes the zone that was manipulated.
iColList = c ( controlTrack, as.integer ( N / 6 - 1 ), as.integer ( N / 6 + 2 ) );
# Combined knockout: both E- and I-type cells removed over the same indices.
itmp = as.integer ( N/6 ) * N + 1 + iKnockOffset;
iFilter.E = c ( seq ( itmp, itmp + iKnockLength - 1 ), seq ( itmp + N, itmp + N + iKnockLength - 1 ) );
iFilter.I = c ( seq ( itmp, itmp + iKnockLength - 1 ), seq ( itmp + N, itmp + N + iKnockLength - 1 ) );
rfstats.test.ctl.EI = PlotKnockoutRFMap ( base, 15, test.ctl.EI, 15, "\nBaseline Refinement", "\nControl E and I", iTrim,
iFilter.E, iFilter.I, iRowList, iColList, rfExpZone.x, rfExpZone.y, tiffFlag, iKnockLength, iExpFlag );
# Set up and do the RF "fly over"
if ( makeMovie ) {
colParmsRFFlyOver = c ( as.integer(N/6)-2, as.integer(N/3) + 1, 1 ); # Partially describes the fly-over zone.
RFFlyOver ( base, test.ctl.EI, N, rowParmsRFFlyOver, colParmsRFFlyOver, rfExpZone.x, rfExpZone.y );
} # if ( makeMovie )
##############################################################
##############################################################
#
# 4. Get the Knockout RF Map - ONE-SIDE I.
#
##############################################################
##############################################################
fRoot = "BorderKnockout_OneSide_I.RFMap";
fileRootName = paste(fDir, fRoot, sep="\\");
iStart = 4; iEnd = 4; iStepSize = 1; iExpFlag = 4;
test.oneside.I = GetRFMapData2A ( fileRootName, iBase, iStart, iEnd, iStepSize, kRFPeakToEdgeDetect, N2 );
# Set up and do the basic RF Map plots
rfExpZone.y = rfExpZone.y.oneside = c ( round((N/3),0)-1, round((N/3),0)-0 ); # Partially describes the zone that was manipulated.
# Longitudinal tracks to plot: the control tracks plus two tracks flanking the
# knockout zone. (Three dead intermediate reassignments of iColList, each
# immediately overwritten, were removed - only this final value was ever used.)
iColList = c ( controlTrack, as.integer ( N / 3 - 2 ), as.integer ( N / 3 + 3 ) );
# One-side I-only knockout: iKnockLength I-type cells on each of two adjacent
# rows just below the N/3 boundary, as linear indices into the N x N sheet.
iFilter.E = NULL;
itmp = ( as.integer ( N/3 ) - 2) * N + 1 + iKnockOffset;
iFilter.I = c ( seq ( itmp, itmp + iKnockLength - 1 ), seq ( itmp + N, itmp + N + iKnockLength - 1 ) );
rfstats.test.oneside.I = PlotKnockoutRFMap ( base, 15, test.oneside.I, 15, "\nBaseline Refinement", "\nBoundary One Side I Only", iTrim,
iFilter.E, iFilter.I, iRowList, iColList, rfExpZone.x, rfExpZone.y, tiffFlag, iKnockLength, iExpFlag );
# Set up and do the RF "fly over"
if ( makeMovie ) {
colParmsRFFlyOver = c ( as.integer(N/3)-3, as.integer(N/3)+4, 1 ); # Partially describes the fly-over zone.
RFFlyOver ( base, test.oneside.I, N, rowParmsRFFlyOver, colParmsRFFlyOver, rfExpZone.x, rfExpZone.y );
} # if ( makeMovie )
##############################################################
##############################################################
#
# 5. Get the Knockout RF Map - ONE-SIDE E.
#
##############################################################
##############################################################
fRoot = "BorderKnockout_OneSide_E.RFMap";
fileRootName = paste(fDir, fRoot, sep="\\");
iStart = 5; iEnd = 5; iStepSize = 1; iExpFlag = 5;
test.oneside.E = GetRFMapData2A ( fileRootName, iBase, iStart, iEnd, iStepSize, kRFPeakToEdgeDetect, N2 );
# Set up and do the basic RF Map plots
rfExpZone.y = c ( round((N/3),0)-1, round((N/3),0)-0 ); # Partially describes the zone that was manipulated.
# Longitudinal tracks to plot. (A dead "c(controlTrack, 16, 17)" reassignment,
# immediately overwritten, was removed.)
iColList = c ( controlTrack, as.integer ( N / 3 - 2 ), as.integer ( N / 3 + 3 ) );
# One-side E-only knockout: mirror of section 4 with E- and I-roles swapped.
iFilter.I = NULL;
itmp = ( as.integer ( N/3 ) - 2) * N + 1 + iKnockOffset;
iFilter.E = c ( seq ( itmp, itmp + iKnockLength - 1 ), seq ( itmp + N, itmp + N + iKnockLength - 1 ) );
rfstats.test.oneside.E = PlotKnockoutRFMap ( base, 15, test.oneside.E, 15, "\nBaseline Refinement", "\nBoundary One Side E Only", iTrim,
iFilter.E, iFilter.I, iRowList, iColList, rfExpZone.x, rfExpZone.y, tiffFlag, iKnockLength, iExpFlag );
# Additional longitudinal tracks.
# NOTE(review): this reassignment is not consumed by any code below (every
# later section rebuilds iColList before use); kept for interactive re-plotting.
iColList = c ( controlTrack, as.integer ( N / 3 - 2 ), as.integer ( N / 3 + 2 ) );
# Set up and do the RF "fly over"
if ( makeMovie ) {
colParmsRFFlyOver = c ( as.integer(N/3)-3, as.integer(N/3)+4, 1 ); # Partially describes the fly-over zone.
RFFlyOver ( base, test.oneside.E, N, rowParmsRFFlyOver, colParmsRFFlyOver, rfExpZone.x, rfExpZone.y );
} # if ( makeMovie )
##############################################################
##############################################################
#
# 6. Get the Knockout RF Map - ONE-SIDE E and I.
#
##############################################################
##############################################################
fRoot = "BorderKnockout_OneSide_EI.RFMap";
fileRootName = paste(fDir, fRoot, sep="\\");
iStart = 6; iEnd = 6; iStepSize = 1; iExpFlag = 6;
test.oneside.EI = GetRFMapData2A ( fileRootName, iBase, iStart, iEnd, iStepSize, kRFPeakToEdgeDetect, N2 );
# Set up and do the basic RF Map plots
rfExpZone.y = c ( round((N/3),0)-1, round((N/3),0)-0 ); # Partially describes the zone that was manipulated.
# Longitudinal tracks to plot. (Two dead reassignments of iColList, each
# immediately overwritten, were removed - only this final value was used.)
iColList = c ( controlTrack, as.integer ( N / 3 - 2 ), as.integer ( N / 3 + 3 ) );
# One-side combined knockout: both E- and I-type cells removed over the same
# two-row index span used in sections 4 and 5.
itmp = ( as.integer ( N/3 ) - 2) * N + 1 + iKnockOffset;
iFilter.I = c ( seq ( itmp, itmp + iKnockLength - 1 ), seq ( itmp + N, itmp + N + iKnockLength - 1 ) );
iFilter.E = c ( seq ( itmp, itmp + iKnockLength - 1 ), seq ( itmp + N, itmp + N + iKnockLength - 1 ) );
rfstats.test.oneside.EI = PlotKnockoutRFMap ( base, 15, test.oneside.EI, 15, "\nBaseline Refinement", "\nBoundary One Side E and I", iTrim,
iFilter.E, iFilter.I, iRowList, iColList, rfExpZone.x, rfExpZone.y, tiffFlag, iKnockLength, iExpFlag );
# Set up and do the RF "fly over"
if ( makeMovie ) {
colParmsRFFlyOver = c ( as.integer(N/3)-3, as.integer(N/3)+4, 1 ); # Partially describes the fly-over zone.
RFFlyOver ( base, test.oneside.EI, N, rowParmsRFFlyOver, colParmsRFFlyOver, rfExpZone.x, rfExpZone.y );
} # if ( makeMovie )
##############################################################
##############################################################
#
# 7. Get the Knockout RF Map - TWO-SIDE I.
#
##############################################################
##############################################################
fRoot = "BorderKnockout_TwoSide_I.RFMap";
fileRootName = paste(fDir, fRoot, sep="\\");
iStart = 7; iEnd = 7; iStepSize = 1; iExpFlag = 7;
test.twoside.I = GetRFMapData2A ( fileRootName, iBase, iStart, iEnd, iStepSize, kRFPeakToEdgeDetect, N2 );
# Set up and do the basic RF Map plots
rfExpZone.y = c ( round((N/3),0)-1, round((N/3),0)+2 ); # Partially describes the zone that was manipulated.
# Longitudinal tracks to plot. (Two dead reassignments of iColList, each
# immediately overwritten, were removed - only this final value was used.)
iColList = c ( controlTrack, as.integer ( N / 3 - 2 ), as.integer ( N / 3 + 3 ) );
# Two-side I-only knockout: iKnockLength I-type cells on each of FOUR adjacent
# rows spanning the N/3 boundary (two rows per side).
itmp = ( as.integer ( N/3 ) - 2) * N + 1 + iKnockOffset;
iFilter.E = NULL;
iFilter.I = c ( seq ( itmp, itmp + iKnockLength - 1 ), seq ( itmp + N, itmp + N + iKnockLength - 1 ),
seq ( itmp + 2*N, itmp + 2*N + iKnockLength - 1 ), seq ( itmp + 3*N, itmp + 3*N + iKnockLength - 1 ) );
rfstats.test.twoside.I = PlotKnockoutRFMap ( base, 15, test.twoside.I, 15, "\nBaseline Refinement", "\nBoundary Two Side I Only", iTrim,
iFilter.E, iFilter.I, iRowList, iColList, rfExpZone.x, rfExpZone.y, tiffFlag, iKnockLength, iExpFlag );
# Set up and do the RF "fly over"
if ( makeMovie ) {
colParmsRFFlyOver = c ( as.integer(N/3) - (N/6), as.integer(N/3) + (N/6), 1 ); # Partially describes the fly-over zone.
RFFlyOver ( base, test.twoside.I, N, rowParmsRFFlyOver, colParmsRFFlyOver, rfExpZone.x, rfExpZone.y );
} # if ( makeMovie )
##############################################################
##############################################################
#
# 8. Get the Knockout RF Map - TWO-SIDE E.
#
##############################################################
##############################################################
fRoot = "BorderKnockout_TwoSide_E.RFMap";
fileRootName = paste(fDir, fRoot, sep="\\");
iStart = 8; iEnd = 8; iStepSize = 1; iExpFlag = 8;
test.twoside.E = GetRFMapData2A ( fileRootName, iBase, iStart, iEnd, iStepSize, kRFPeakToEdgeDetect, N2 );
# Set up and do the basic RF Map plots
rfExpZone.y = c ( round((N/3),0)-1, round((N/3),0)+2 ); # Partially describes the zone that was manipulated.
# Longitudinal tracks to plot. (Two dead reassignments of iColList, each
# immediately overwritten, were removed - only this final value was used.)
iColList = c ( controlTrack, as.integer ( N / 3 - 2 ), as.integer ( N / 3 + 3 ) );
# Two-side E-only knockout: mirror of section 7 with E- and I-roles swapped.
itmp = ( as.integer ( N/3 ) - 2) * N + 1 + iKnockOffset;
iFilter.I = NULL;
iFilter.E = c ( seq ( itmp, itmp + iKnockLength - 1 ), seq ( itmp + N, itmp + N + iKnockLength - 1 ),
seq ( itmp + 2*N, itmp + 2*N + iKnockLength - 1 ), seq ( itmp + 3*N, itmp + 3*N + iKnockLength - 1 ) );
# Bug fix: this result was previously assigned to rfstats.test.twoside.I,
# silently clobbering section 7's statistics.
rfstats.test.twoside.E = PlotKnockoutRFMap ( base, 15, test.twoside.E, 15, "\nBaseline Refinement", "\nBoundary Two Side E Only", iTrim,
iFilter.E, iFilter.I, iRowList, iColList, rfExpZone.x, rfExpZone.y, tiffFlag, iKnockLength, iExpFlag );
# Set up and do the RF "fly over"
if ( makeMovie ) {
colParmsRFFlyOver = c ( as.integer(N/3) - (N/6), as.integer(N/3) + (N/6), 1 ); # Partially describes the fly-over zone.
RFFlyOver ( base, test.twoside.E, N, rowParmsRFFlyOver, colParmsRFFlyOver, rfExpZone.x, rfExpZone.y );
} # if ( makeMovie )
##############################################################
##############################################################
#
# 9. Get the Knockout RF Map - TWO-SIDE E and I.
#
##############################################################
##############################################################
fRoot = "BorderKnockout_TwoSide_EI.RFMap";
fileRootName = paste(fDir, fRoot, sep="\\");
iStart = 9; iEnd = 9; iStepSize = 1; iExpFlag = 9;
# NOTE(review): this load uses a peak-to-edge threshold of 0.1 rather than the
# global kRFPeakToEdgeDetect (0.5) used by every other section. An identical
# call made with the global value was immediately overwritten by this one;
# that dead call has been removed. Confirm that 0.1 is intentional.
test.twoside.EI = GetRFMapData2A ( fileRootName, iBase, iStart, iEnd, iStepSize, 0.1, N2 );
# Set up and do the basic RF Map plots
rfExpZone.y = c ( round((N/3),0)-1, round((N/3),0)+2 ); # Partially describes the zone that was manipulated.
# Longitudinal tracks to plot. (Two dead reassignments of iColList, each
# immediately overwritten, were removed - only this final value was used.)
iColList = c ( controlTrack, as.integer ( N / 3 - 2 ), as.integer ( N / 3 + 3 ) );
# Two-side combined knockout: both E- and I-type cells removed on four
# adjacent rows spanning the N/3 boundary.
itmp = ( as.integer ( N/3 ) - 2) * N + 1 + iKnockOffset;
iFilter.E = iFilter.I = c ( seq ( itmp, itmp + iKnockLength - 1 ), seq ( itmp + N, itmp + N + iKnockLength - 1 ),
seq ( itmp + 2*N, itmp + 2*N + iKnockLength - 1 ), seq ( itmp + 3*N, itmp + 3*N + iKnockLength - 1 ) );
# Bug fix: this result was previously assigned to rfstats.test.twoside.I,
# clobbering earlier results; it now gets its own name.
rfstats.test.twoside.EI = PlotKnockoutRFMap ( base, 15, test.twoside.EI, 15, "\nBaseline Refinement", "\nBoundary Two Side E and I", iTrim,
iFilter.E, iFilter.I, iRowList, iColList, rfExpZone.x, rfExpZone.y, tiffFlag, iKnockLength, iExpFlag );
# Set up and do the RF "fly over"
if ( makeMovie ) {
colParmsRFFlyOver = c ( as.integer(N/3) - (N/6), as.integer(N/3) + (N/6), 1 ); # Partially describes the fly-over zone.
RFFlyOver ( base, test.twoside.EI, N, rowParmsRFFlyOver, colParmsRFFlyOver, rfExpZone.x, rfExpZone.y );
} # if ( makeMovie )
|
# write a vcf file from a tidy data frame
#' @name write_vcf
#' @title Used internally in stackr to write a vcf file from a tidy
#' data frame
#' @description Write a vcf file from a tidy data frame.
#' Used internally in \href{https://github.com/thierrygosselin/stackr}{stackr}
#' and might be of interest for users.
#'
#' @param data A file in the working directory or object in the global environment
#' in wide or long (tidy) formats. See details for more info.
#' @param pop.info (optional, logical) Should the population information be
#' included in the FORMAT field (alongside the GT info for each sample)? To make
#' the VCF population-ready use \code{pop.info = TRUE}. The population information
#' must be included in the \code{POP_ID} column of the tidy dataset.
#' Default: \code{pop.info = FALSE}.
#' @param filename (optional) The file name prefix for the vcf file
#' written to the working directory. With default: \code{filename = NULL},
#' the date and time is appended to \code{stackr_vcf_file_}.
#' @details \strong{Input data:}
#'
#' To discriminate the long from the wide format,
#' the function \pkg{stackr} \code{\link[stackr]{read_long_tidy_wide}} searches
#' for \code{MARKERS or LOCUS} in column names (TRUE = long format).
#' The data frame is tab delimited.
#' \strong{Wide format:}
#' The wide format cannot store metadata info.
#' The wide format starts with these 2 id columns:
#' \code{INDIVIDUALS}, \code{POP_ID} (that refers to any grouping of individuals),
#' the remaining columns are the markers in separate columns storing genotypes.
#'
#' \strong{Long/Tidy format:}
#' The long format is considered to be a tidy data frame and can store metadata info.
#' (e.g. from a VCF see \pkg{stackr} \code{\link{tidy_genomic_data}}). A minimum of 4 columns
#' are required in the long format: \code{INDIVIDUALS}, \code{POP_ID},
#' \code{MARKERS or LOCUS} and \code{GENOTYPE or GT}. The rest are considered metadata info.
#'
#' \strong{2 genotypes formats are available:}
#' 6 characters no separator: e.g. \code{001002 or 111333} (for a heterozygote individual).
#' 6 characters WITH separator: e.g. \code{001/002 or 111/333} (for a heterozygote individual).
#' The separator can be any of these: \code{"/", ":", "_", "-", "."}.
#'
#' \emph{How to get a tidy data frame?}
#' \pkg{stackr} \code{\link{tidy_genomic_data}} can transform 6 genomic data formats
#' in a tidy data frame.
#' @export
#' @rdname write_vcf
#' @import reshape2
#' @import dplyr
#' @import stringi
#' @importFrom data.table fread
#' @references Danecek P, Auton A, Abecasis G et al. (2011)
#' The variant call format and VCFtools.
#' Bioinformatics, 27, 2156-2158.
#' @author Thierry Gosselin \email{thierrygosselin@@icloud.com}
write_vcf <- function(data, pop.info = FALSE, filename = NULL) {
# Import data ---------------------------------------------------------------
input <- stackr::read_long_tidy_wide(data = data, import.metadata = TRUE)
colnames(input) <- stri_replace_all_fixed(str = colnames(input),
pattern = "GENOTYPE",
replacement = "GT",
vectorize_all = FALSE)
# REF/ALT Alleles and VCF genotype format ------------------------------------
if (!tibble::has_name(input, "GT_VCF")) {
ref.alt.alleles.change <- ref_alt_alleles(data = input)
input <- left_join(input, ref.alt.alleles.change, by = c("MARKERS", "INDIVIDUALS"))
}
# Include CHROM, LOCUS, POS --------------------------------------------------
if (!tibble::has_name(input, "CHROM")) {
input <- mutate(
.data = input,
CHROM = rep("1", n()),
LOCUS = MARKERS,
POS = MARKERS
)
}
# Remove the POP_ID column ---------------------------------------------------
if (tibble::has_name(input, "POP_ID") & (!pop.info)) {
input <- select(.data = input, -POP_ID)
}
# Info field -----------------------------------------------------------------
info.field <- suppressWarnings(
input %>%
group_by(MARKERS) %>%
filter(GT_VCF != "./.") %>%
tally %>%
mutate(INFO = stri_paste("NS=", n, sep = "")) %>%
select(-n)
)
# VCF body ------------------------------------------------------------------
GT_VCF_POP_ID <- NULL
if (pop.info) {
output <- suppressWarnings(
left_join(input, info.field, by = "MARKERS") %>%
select(MARKERS, CHROM, LOCUS, POS, REF, ALT, INFO, INDIVIDUALS, GT_VCF, POP_ID) %>%
mutate(GT_VCF_POP_ID = stri_paste(GT_VCF, POP_ID, sep = ":")) %>%
select(-c(GT_VCF, POP_ID)) %>%
group_by(MARKERS, CHROM, LOCUS, POS, INFO, REF, ALT) %>%
tidyr::spread(data = ., key = INDIVIDUALS, value = GT_VCF_POP_ID) %>%
ungroup() %>%
mutate(
QUAL = rep(".", n()),
FILTER = rep("PASS", n()),
FORMAT = rep("GT:POP", n())
)
)
} else {
output <- suppressWarnings(
left_join(input, info.field, by = "MARKERS") %>%
select(MARKERS, CHROM, LOCUS, POS, REF, ALT, INDIVIDUALS, GT_VCF, INFO) %>%
group_by(MARKERS, CHROM, LOCUS, POS, INFO, REF, ALT) %>%
tidyr::spread(data = ., key = INDIVIDUALS, value = GT_VCF) %>%
ungroup() %>%
mutate(
QUAL = rep(".", n()),
FILTER = rep("PASS", n()),
FORMAT = rep("GT", n())
)
)
}
output <- output %>%
arrange(CHROM, LOCUS, POS) %>%
ungroup() %>%
select(-MARKERS) %>%
select('#CHROM' = CHROM, POS, ID = LOCUS, REF, ALT, QUAL, FILTER, INFO, FORMAT, everything())
# Filename ------------------------------------------------------------------
if (is.null(filename)) {
# Get date and time to have unique filenaming
file.date <- stri_replace_all_fixed(Sys.time(), pattern = " EDT", replacement = "", vectorize_all = FALSE)
file.date <- stri_replace_all_fixed(file.date, pattern = c("-", " ", ":"), replacement = c("", "@", ""), vectorize_all = FALSE)
file.date <- stri_sub(file.date, from = 1, to = 13)
filename <- stri_paste("stackr_vcf_file_", file.date, ".vcf")
} else {
filename <- stri_paste(filename, ".vcf")
}
# File format ----------------------------------------------------------------
write_delim(x = data_frame("##fileformat=VCFv4.2"), path = filename, delim = " ", append = FALSE, col_names = FALSE)
# File date ------------------------------------------------------------------
file.date <- stri_replace_all_fixed(Sys.Date(), pattern = "-", replacement = "")
file.date <- stri_paste("##fileDate=", file.date, sep = "")
write_delim(x = data_frame(file.date), path = filename, delim = " ", append = TRUE, col_names = FALSE)
# Source ---------------------------------------------------------------------
write_delim(x = data_frame(stri_paste("##source=stackr_v.", utils::packageVersion("stackr"))), path = filename, delim = " ", append = TRUE, col_names = FALSE)
# Info field 1 ---------------------------------------------------------------
info1 <- as.data.frame('##INFO=<ID=NS,Number=1,Type=Integer,Description=\"Number of Samples With Data\">')
utils::write.table(x = info1, file = filename, sep = " ", append = TRUE, col.names = FALSE, row.names = FALSE, quote = FALSE)
# Format field 1 -------------------------------------------------------------
format1 <- '##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">'
format1 <- as.data.frame(format1)
utils::write.table(x = format1, file = filename, sep = " ", append = TRUE, col.names = FALSE, row.names = FALSE, quote = FALSE)
# Format field 2 ---------------------------------------------------------------
if (pop.info) {
format2 <- as.data.frame('##FORMAT=<ID=POP_ID,Number=1,Type=Character,Description="Population identification of Sample">')
utils::write.table(x = format2, file = filename, sep = " ", append = TRUE, col.names = FALSE, row.names = FALSE, quote = FALSE)
}
# Write the prunned vcf to the file ------------------------------------------
suppressWarnings(write_tsv(x = output, path = filename, append = TRUE, col_names = TRUE))
} # end write_vcf
| /R/write_vcf.R | no_license | caitiecollins/stackr | R | false | false | 8,251 | r | # write a vcf file from a tidy data frame
#' @name write_vcf
#' @title Used internally in stackr to write a vcf file from a tidy
#' data frame
#' @description Write a vcf file from a tidy data frame.
#' Used internally in \href{https://github.com/thierrygosselin/stackr}{stackr}
#' and might be of interest for users.
#'
#' @param data A file in the working directory or object in the global environment
#' in wide or long (tidy) formats. See details for more info.
#' @param pop.info (optional, logical) Should the population information be
#' included in the FORMAT field (along with the GT info for each sample?). To make
#' the VCF population-ready use \code{pop.info = TRUE}. The population information
#' must be included in the \code{POP_ID} column of the tidy dataset.
#' Default: \code{pop.info = FALSE}.
#' @param filename (optional) The file name prefix for the vcf file
#' written to the working directory. With default: \code{filename = NULL},
#' the date and time is appended to \code{stackr_vcf_file_}.
#' @details \strong{Input data:}
#'
#' To discriminate the long from the wide format,
#' the function \pkg{stackr} \code{\link[stackr]{read_long_tidy_wide}} searches
#' for \code{MARKERS or LOCUS} in column names (TRUE = long format).
#' The data frame is tab-delimited.
#' \strong{Wide format:}
#' The wide format cannot store metadata info.
#' The wide format starts with these 2 id columns:
#' \code{INDIVIDUALS}, \code{POP_ID} (that refers to any grouping of individuals),
#' the remaining columns are the markers in separate columns storing genotypes.
#'
#' \strong{Long/Tidy format:}
#' The long format is considered to be a tidy data frame and can store metadata info.
#' (e.g. from a VCF see \pkg{stackr} \code{\link{tidy_genomic_data}}). A minimum of 4 columns
#' are required in the long format: \code{INDIVIDUALS}, \code{POP_ID},
#' \code{MARKERS or LOCUS} and \code{GENOTYPE or GT}. The rest are considered metadata info.
#'
#' \strong{2 genotypes formats are available:}
#' 6 characters no separator: e.g. \code{001002 or 111333} (for a heterozygote individual).
#' 6 characters WITH separator: e.g. \code{001/002 or 111/333} (for a heterozygote individual).
#' The separator can be any of these: \code{"/", ":", "_", "-", "."}.
#'
#' \emph{How to get a tidy data frame ?}
#' \pkg{stackr} \code{\link{tidy_genomic_data}} can transform 6 genomic data formats
#' in a tidy data frame.
#' @export
#' @rdname write_vcf
#' @import reshape2
#' @import dplyr
#' @import stringi
#' @importFrom data.table fread
#' @references Danecek P, Auton A, Abecasis G et al. (2011)
#' The variant call format and VCFtools.
#' Bioinformatics, 27, 2156-2158.
#' @author Thierry Gosselin \email{thierrygosselin@@icloud.com}
write_vcf <- function(data, pop.info = FALSE, filename = NULL) {
  # Import data ---------------------------------------------------------------
  # Accept wide or long/tidy input; keep metadata columns so an existing
  # GT_VCF column (and CHROM/LOCUS/POS, if present) survives the import.
  input <- stackr::read_long_tidy_wide(data = data, import.metadata = TRUE)
  colnames(input) <- stri_replace_all_fixed(str = colnames(input),
                                            pattern = "GENOTYPE",
                                            replacement = "GT",
                                            vectorize_all = FALSE)
  # REF/ALT Alleles and VCF genotype format ------------------------------------
  # Only derive REF/ALT and VCF-style genotypes when the input does not
  # already carry a GT_VCF column.
  if (!tibble::has_name(input, "GT_VCF")) {
    ref.alt.alleles.change <- ref_alt_alleles(data = input)
    input <- left_join(input, ref.alt.alleles.change, by = c("MARKERS", "INDIVIDUALS"))
  }
  # Include CHROM, LOCUS, POS --------------------------------------------------
  # Datasets without positional info get placeholder CHROM = "1" and reuse
  # the marker id for LOCUS/POS, so the mandatory VCF columns always exist.
  if (!tibble::has_name(input, "CHROM")) {
    input <- mutate(
      .data = input,
      CHROM = rep("1", n()),
      LOCUS = MARKERS,
      POS = MARKERS
    )
  }
  # Remove the POP_ID column ---------------------------------------------------
  # Fix: scalar condition, so use short-circuit && rather than element-wise &.
  if (tibble::has_name(input, "POP_ID") && (!pop.info)) {
    input <- select(.data = input, -POP_ID)
  }
  # Info field -----------------------------------------------------------------
  # NS = number of samples with a called (non-missing, i.e. not "./.")
  # genotype, per marker.
  info.field <- suppressWarnings(
    input %>%
      group_by(MARKERS) %>%
      filter(GT_VCF != "./.") %>%
      tally() %>%
      mutate(INFO = stri_paste("NS=", n, sep = "")) %>%
      select(-n)
  )
  # VCF body ------------------------------------------------------------------
  GT_VCF_POP_ID <- NULL
  if (pop.info) {
    # FORMAT is GT:POP -- each cell stores "genotype:population".
    output <- suppressWarnings(
      left_join(input, info.field, by = "MARKERS") %>%
        select(MARKERS, CHROM, LOCUS, POS, REF, ALT, INFO, INDIVIDUALS, GT_VCF, POP_ID) %>%
        mutate(GT_VCF_POP_ID = stri_paste(GT_VCF, POP_ID, sep = ":")) %>%
        select(-c(GT_VCF, POP_ID)) %>%
        group_by(MARKERS, CHROM, LOCUS, POS, INFO, REF, ALT) %>%
        tidyr::spread(data = ., key = INDIVIDUALS, value = GT_VCF_POP_ID) %>%
        ungroup() %>%
        mutate(
          QUAL = rep(".", n()),
          FILTER = rep("PASS", n()),
          FORMAT = rep("GT:POP", n())
        )
    )
  } else {
    # FORMAT is plain GT -- one genotype column per individual.
    output <- suppressWarnings(
      left_join(input, info.field, by = "MARKERS") %>%
        select(MARKERS, CHROM, LOCUS, POS, REF, ALT, INDIVIDUALS, GT_VCF, INFO) %>%
        group_by(MARKERS, CHROM, LOCUS, POS, INFO, REF, ALT) %>%
        tidyr::spread(data = ., key = INDIVIDUALS, value = GT_VCF) %>%
        ungroup() %>%
        mutate(
          QUAL = rep(".", n()),
          FILTER = rep("PASS", n()),
          FORMAT = rep("GT", n())
        )
    )
  }
  # Order markers and rename columns to the fixed VCF header layout.
  output <- output %>%
    arrange(CHROM, LOCUS, POS) %>%
    ungroup() %>%
    select(-MARKERS) %>%
    select('#CHROM' = CHROM, POS, ID = LOCUS, REF, ALT, QUAL, FILTER, INFO, FORMAT, everything())
  # Filename ------------------------------------------------------------------
  if (is.null(filename)) {
    # Get date and time to have unique filenaming
    file.date <- stri_replace_all_fixed(Sys.time(), pattern = " EDT", replacement = "", vectorize_all = FALSE)
    file.date <- stri_replace_all_fixed(file.date, pattern = c("-", " ", ":"), replacement = c("", "@", ""), vectorize_all = FALSE)
    file.date <- stri_sub(file.date, from = 1, to = 13)
    filename <- stri_paste("stackr_vcf_file_", file.date, ".vcf")
  } else {
    filename <- stri_paste(filename, ".vcf")
  }
  # File format ----------------------------------------------------------------
  write_delim(x = data_frame("##fileformat=VCFv4.2"), path = filename, delim = " ", append = FALSE, col_names = FALSE)
  # File date ------------------------------------------------------------------
  file.date <- stri_replace_all_fixed(Sys.Date(), pattern = "-", replacement = "")
  file.date <- stri_paste("##fileDate=", file.date, sep = "")
  write_delim(x = data_frame(file.date), path = filename, delim = " ", append = TRUE, col_names = FALSE)
  # Source ---------------------------------------------------------------------
  write_delim(x = data_frame(stri_paste("##source=stackr_v.", utils::packageVersion("stackr"))), path = filename, delim = " ", append = TRUE, col_names = FALSE)
  # Info field 1 ---------------------------------------------------------------
  info1 <- as.data.frame('##INFO=<ID=NS,Number=1,Type=Integer,Description=\"Number of Samples With Data\">')
  utils::write.table(x = info1, file = filename, sep = " ", append = TRUE, col.names = FALSE, row.names = FALSE, quote = FALSE)
  # Format field 1 -------------------------------------------------------------
  format1 <- '##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">'
  format1 <- as.data.frame(format1)
  utils::write.table(x = format1, file = filename, sep = " ", append = TRUE, col.names = FALSE, row.names = FALSE, quote = FALSE)
  # Format field 2 ---------------------------------------------------------------
  if (pop.info) {
    format2 <- as.data.frame('##FORMAT=<ID=POP_ID,Number=1,Type=Character,Description="Population identification of Sample">')
    utils::write.table(x = format2, file = filename, sep = " ", append = TRUE, col.names = FALSE, row.names = FALSE, quote = FALSE)
  }
  # Write the pruned vcf to the file -------------------------------------------
  suppressWarnings(write_tsv(x = output, path = filename, append = TRUE, col_names = TRUE))
} # end write_vcf
|
library(shiny)
# UI Interfaz de Usuario #########################################################
ui <- fluidPage(
  # App title ####################################################################
  titlePanel('"Automatic Text Summarization" de textos legales'),
  # App layout ###################################################################
  sidebarLayout(
    # Sidebar panel: inputs ######################################################
    sidebarPanel(
      helpText(h3("Selecciona el texto legal para resumir")),
      # Dropdown of available legal texts (label shown to the user, value is
      # the file path handed to the server).
      # Fix: `selected` must match one of the values in `choices`; the original
      # default ("data/...") did not match the choice value ("../data/..."),
      # so the intended default was ignored and selectize fell back to the
      # first choice.
      selectInput(inputId = "select",
                  label = h3("Select box"),
                  choices = list("BOE-A-1994-26003-consolidado_LAU.pdf" = "../data/BOE-A-1994-26003-consolidado_LAU.pdf"),
                  selected = "../data/BOE-A-1994-26003-consolidado_LAU.pdf"),
    ),
    # End sidebar panel ##########################################################
    # Main panel: outputs ########################################################
    mainPanel(
      textOutput(outputId = "texto"))
    # End main panel #############################################################
  )
  # End layout ###################################################################
)
# Servidor de la App #############################################################
server <- function(input, output) {
  # Render the extracted text of the PDF currently selected in the sidebar.
  # NOTE(review): assumes fulltext::ft_extract() returns an object whose
  # $data slot holds the extracted text -- confirm against the fulltext API.
  output$texto <- renderText({
    fulltext::ft_extract(input$select)$data
  })
}
# End. Servidor de la App #########################################################
# Ejecución de la App #############################################################
shinyApp(ui = ui, server = server) | /shiny/app.R | no_license | japellaniz/NLP_Lectura_Facil | R | false | false | 1,681 | r | library(shiny)
# UI Interfaz de Usuario #########################################################
ui <- fluidPage(
# Título de la App #############################################################
titlePanel('"Automatic Text Summarization" de textos legales'),
# Layout de la App #############################################################
sidebarLayout(
# Panel lateral de la App. Inputs. ###########################################
sidebarPanel(
helpText(h3("Selecciona el texto legal para resumir")),
# Multiple choice
selectInput(inputId = "select",
label = h3("Select box"),
choices = list("BOE-A-1994-26003-consolidado_LAU.pdf" = "../data/BOE-A-1994-26003-consolidado_LAU.pdf"),
selected = "data/BOE-A-1994-26003-consolidado_LAU.pdf"),
),
# End. Panel lateral de la App. Inputs. ######################################
# Panel principal de la App. Outputs. ########################################
mainPanel(
textOutput(outputId = "texto"))
# End. Panel principal de la App. Outputs. ###################################
)
# End. Layout de la App ########################################################
)
# Servidor de la App #############################################################
server <- function(input, output) {
output$texto <- renderText({
fulltext::ft_extract(input$select)$data
})
}
# End. Servidor de la App #########################################################
# Ejecución de la App #############################################################
shinyApp(ui = ui, server = server) |
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 839
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 839
c
c Input Parameter (command line, file):
c input filename QBFLIB/Tentrup/toy/mvs8n.unsat.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 304
c no.of clauses 839
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 839
c
c QBFLIB/Tentrup/toy/mvs8n.unsat.qdimacs 304 839 E1 [] 0 7 297 839 NONE
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Tentrup/toy/mvs8n.unsat/mvs8n.unsat.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 597 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 839
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 839
c
c Input Parameter (command line, file):
c input filename QBFLIB/Tentrup/toy/mvs8n.unsat.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 304
c no.of clauses 839
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 839
c
c QBFLIB/Tentrup/toy/mvs8n.unsat.qdimacs 304 839 E1 [] 0 7 297 839 NONE
|
# Test runner executed by R CMD check: loads the package plus its test
# dependencies, then runs every test under tests/testthat/.
library(testthat)
library(grid)      # grid graphics, used by the gridtext tests
library(gridtext)
test_check("gridtext")
| /tests/testthat.R | permissive | wilkelab/gridtext | R | false | false | 74 | r | library(testthat)
library(grid)
library(gridtext)
test_check("gridtext")
|
#
# DATA 608 - Assignment 3 - Question 2
#
library(tidyverse)
library(shiny)
library(plotly)
library(rsconnect)
# Load the cleaned CDC mortality extract (state / year / cause rows).
data <- read_csv("cleaned-cdc-mortality-1999-2010-2.csv")
# Population-weighted national average crude death rate per year and cause:
# weight each state's crude rate by its population, then divide the summed
# weighted rates by the total population (rounded to 1 decimal).
avgs <- data %>% dplyr::mutate(pop_w = Population * Crude.Rate) %>%
  dplyr::group_by(Year, ICD.Chapter) %>%
  dplyr::summarise(natl_avg = round(sum(pop_w)/sum(Population),1))
# Attach the national average to every state-level row for plotting.
df2 <- data %>% left_join(avgs, by = c("Year","ICD.Chapter"))
# UI: title, a data-source note, then the input controls and the plotly chart.
ui <- fluidPage(
  titlePanel("Longitudinal change in U.S. state death rates by cause of death, 1999-2010"),
  # Data-source attribution panel.
  sidebarPanel(
    print("Data source: WONDER, Centers for Disease Control and Prevention, U.S. Department of Health and Human Services"),
    width = 12
  ),
  # Controls: single cause of death, one or more states (default "AL"),
  # and a toggle for overlaying the national average.
  sidebarPanel(
    selectizeInput(inputId = "choice1", label = "Cause of death", choices = unique(df2$ICD.Chapter)),
    selectizeInput(inputId = "choice2", label = "States", choices = unique(df2$State), multiple = TRUE, selected = "AL"),
    checkboxInput(inputId = "USA", label = strong("National Average (Black Dash)"), value = TRUE),
    plotlyOutput(outputId = "line"),
    width = 12
  )
)
server <- function(input, output) {
  # Reactive subset of df2: rows matching the chosen cause of death and the
  # selected state(s).
  df_filter <- reactive(df2 %>%
                          dplyr::filter(ICD.Chapter == input$choice1) %>%
                          dplyr::filter(State %in% input$choice2))
  output$line <- renderPlotly({
    xlab <- list(title = "Year")
    ylab <- list(title = "Crude death rate (per 100K population)")
    # One line per selected state, colored by state.
    fig <- plot_ly(df_filter(), x = ~Year, y = ~Crude.Rate, color = ~State, type = "scatter", mode = "lines") %>% layout(xaxis = xlab, yaxis = ylab, showlegend = TRUE)
    if(input$USA) {
      # Overlay the national average as a black dashed trace (kept out of the
      # legend) when the checkbox is ticked.
      (fig %>% add_trace(y = ~natl_avg, name = "National Average", mode = "lines", line = list(color = "black", width = 3, dash = "dash"), showlegend = FALSE))
    }
    else {
      (fig)
    }
  })
}
shinyApp(ui = ui, server = server)
| /crosemond_data608_hw3_q2.R | no_license | chrosemo/data608 | R | false | false | 1,891 | r | #
# DATA 608 - Assignment 3 - Question 2
#
library(tidyverse)
library(shiny)
library(plotly)
library(rsconnect)
data <- read_csv("cleaned-cdc-mortality-1999-2010-2.csv")
avgs <- data %>% dplyr::mutate(pop_w = Population * Crude.Rate) %>%
dplyr::group_by(Year, ICD.Chapter) %>%
dplyr::summarise(natl_avg = round(sum(pop_w)/sum(Population),1))
df2 <- data %>% left_join(avgs, by = c("Year","ICD.Chapter"))
ui <- fluidPage(
titlePanel("Longitudinal change in U.S. state death rates by cause of death, 1999-2010"),
sidebarPanel(
print("Data source: WONDER, Centers for Disease Control and Prevention, U.S. Department of Health and Human Services"),
width = 12
),
sidebarPanel(
selectizeInput(inputId = "choice1", label = "Cause of death", choices = unique(df2$ICD.Chapter)),
selectizeInput(inputId = "choice2", label = "States", choices = unique(df2$State), multiple = TRUE, selected = "AL"),
checkboxInput(inputId = "USA", label = strong("National Average (Black Dash)"), value = TRUE),
plotlyOutput(outputId = "line"),
width = 12
)
)
server <- function(input, output) {
df_filter <- reactive(df2 %>%
dplyr::filter(ICD.Chapter == input$choice1) %>%
dplyr::filter(State %in% input$choice2))
output$line <- renderPlotly({
xlab <- list(title = "Year")
ylab <- list(title = "Crude death rate (per 100K population)")
fig <- plot_ly(df_filter(), x = ~Year, y = ~Crude.Rate, color = ~State, type = "scatter", mode = "lines") %>% layout(xaxis = xlab, yaxis = ylab, showlegend = TRUE)
if(input$USA) {
(fig %>% add_trace(y = ~natl_avg, name = "National Average", mode = "lines", line = list(color = "black", width = 3, dash = "dash"), showlegend = FALSE))
}
else {
(fig)
}
})
}
shinyApp(ui = ui, server = server)
|
# Exploratory tutorial on the built-in `faithful` dataset: eruption durations
# and waiting times of the Old Faithful geyser (Yellowstone National Park).
head(faithful)
# Eruption durations in minutes.
duration = faithful$eruptions
duration
range(duration)
# Half-minute class intervals covering the observed duration range.
breaks = seq(1.5,5.5,by=0.5)
breaks
# Bin durations into the half-unit intervals (left-closed, right-open) and
# tabulate the frequency of each class.
duration.cut = cut(duration, breaks, right=FALSE)
duration.freq = table(duration.cut)
duration.freq
# Same treatment for the waiting-time variable, with 4-minute intervals.
waiting = faithful$waiting
range(waiting)
breaks2 = seq(42, 96,by=4)
waiting.cut = cut(waiting, breaks2, right=FALSE)
waiting.freq = table(waiting.cut)
waiting.freq
# Largest class frequency of the waiting-time distribution.
waiting.most = max(waiting.freq)
str(waiting.freq)
# Histograms of both variables.
hist(duration, right=FALSE, main='Old faithful eruptions', xlab='Duration minutes')
hist(waiting)
# Relative frequencies: class counts normalized by the sample size.
duration.relfreq = duration.freq / nrow(faithful)
waiting.relfreq = waiting.freq / nrow(faithful)
# Cumulative frequency of the duration classes.
duration.cumfreq = cumsum(duration.freq)
# Plot the cumulative frequency curve: prepend a zero so the curve starts
# at the lower break, then join the points with lines.
cumfreq0 = c(0, cumsum(duration.freq))
plot(breaks, cumfreq0,
     main = "Old faithful eruptions",
     xlab="Duration minutes",
     ylab="Cumulative eruptions")
# Join the points with lines.
lines(breaks, cumfreq0)
# Empirical cumulative distribution function (a step-function interpolator).
fn = ecdf(duration)
plot(fn)
# Stem-and-leaf display of the durations.
stem(duration)
# Scatter plot of duration vs. waiting time.
plot(duration, waiting, xlab='Eruption Duration', ylab='Time waited')
# Overlay the least-squares regression line of waiting on duration.
abline(lm(waiting ~ duration))
| /quantitative_data.R | no_license | BerenMillidge/R_tutorials | R | false | false | 2,424 | r | # uses the built in faithful dataset which is the observations of the old faithful
# geyser in yellowstone national park!
head(faithful)
duration = faithful$eruptions
duration
range(duration)
# set break point for the scatter plot
breaks = seq(1.5,5.5,by=0.5)
breaks
# so nextclassify the eruption urations according to the half unit sub intervals with cut
duration.cut = cut(duration, breaks, right=FALSE)
duration.freq = table(duration.cut)
duration.freq
# so waiting period is same
waiting = faithful$waiting
range(waiting)
breaks2 = seq(42, 96,by=4)
waiting.cut = cut(waiting, breaks2, right=FALSE)
waiting.freq = table(waiting.cut)
waiting.freq
waiting.most = max(waiting.freq)
str(waiting.freq)
# can also plot a histogram which is pretty cool!
hist(duration, right=FALSE, main='Old faithful eruptions', xlab='Duration minutes')
hist(waiting)
# so relative frequency is just normalized by the sample size
duration.relfreq = duration.freq / nrow(faithful)
waiting.relfreq = waiting.freq / nrow(faithful)
# you can find the cumulative frequeny by th cumsum function
duration.cumfreq = cumsum(duration.freq)
# next you can plot the graph of the cumulative frequency
# add a zero element then plot the graph
cumfreq0 = c(0, cumsum(duration.freq))
plot(breaks, cumfreq0,
main = "Old faithful eruptions",
xlab="Duration minutes",
ylab="Cumulative eruptions")
# then join the points with lines
lines(breaks, cumfreq0)
# it is really really cool how easy and useful most of the graphs in R are.
# the producitivty of R is seriously great in this domain compared to python
# I do think I might be able to be convinced to switch to some extent
# which is amazing!
# can build an interpolation function with a cumulative distibution function ecdf
fn = ecdf(duration)
plot(fn)
# which is cool, and really easy that!
# there are also stem and leaf plots...
# these are trivially created with the stem fucntion
stem(duration)
# a scatter plot is also trivial to impleent here
# just a plot with two input arguments
plot(duration, waiting, xlab='Eruption Duration', ylab='Time waited')
# and you can plot a linear regression with the lm functoin and a line
# between them with the abline
# - R is truly amazing for this but OTOH, Julia can call all r_libraries
# perfectly so really there is no disadvantage there particularly, except
# presumably around interoperability?
abline(lm(waiting ~ duration))
|
# EMF FUTURES ##################################################################
## Installing and loading packages =============================================
# install.packages("urca")
# install.packages("egcm")
# install.packages("EnvStats")
# install.packages("tidyverse")
# install.packages("pacman")
pacman::p_load(
pacman,
dplyr,
tidyr,
stringr,
lubridate,
httr,
ggvis,
ggplot2,
shiny,
rio,
rmarkdown,
tibble
)
## Setting working directory ===================================================
getwd()
# NOTE(review): absolute, machine-specific path -- breaks portability; prefer
# relative paths or the here package. Left unchanged to preserve behavior.
setwd("C:/Users/juanf/OneDrive/GitRepos/emf_futures/data")
## Importing data ==============================================================
# rm(list = ls())
# Price series for the EMF/FMEM ETFs and the VWO/FTSE benchmarks.
dat <- as_tibble(read.csv("MainData01_CSV.csv"))
# dat <- read.csv("MainData01_CSV.csv")
# names(dat)
# head(dat, n = 5)
# attach(dat)
## Data cleaning and sorting ===================================================
dat$Dates <- as.Date(dat$Dates, "%m/%d/%Y") # Convert char into dates
dat <- dat[order(dat$Dates, decreasing = FALSE),] # Sort by column(s)
## Creating key variables ======================================================
# One-period lags of each price series; lag() resolves to dplyr::lag here
# because dplyr is attached via pacman::p_load above.
dat$FMEMAdjPriceLag1 <- lag(dat$FMEMAdjPrice)
dat$EMFAdjPriceLag1 <- lag(dat$EMFAdjPrice)
dat$VWOAdjPriceLag1 <- lag(dat$VWOAdjPrice)
dat$FTSEPriceLag1 <- lag(dat$FTSEPrice)
# Simple daily returns: P_t / P_{t-1} - 1.
dat$emf_dret <- (dat$EMFAdjPrice / dat$EMFAdjPriceLag1) - 1
dat$fmem_dret <- (dat$FMEMAdjPrice / dat$FMEMAdjPriceLag1) - 1
dat$vwo_dret <- (dat$VWOAdjPrice / dat$VWOAdjPriceLag1) - 1
dat$ftse_dret <- (dat$FTSEPrice / dat$FTSEPriceLag1) - 1
# Keep only the return columns; drop the first row (NA from lagging).
keepvars <- c(
  "emf_dret",
  "fmem_dret",
  "vwo_dret",
  "ftse_dret"
)
dat2 <- dat[keepvars]
dat2 <- na.omit(dat2)
## Summary statistics of key variables =========================================
head(dat2)
summary(dat2)
plot(dat2)
## Calculating the Hedge Ratio =================================================
# (Table 3 - Daily)
hedge.ratio <- function(xvar, yvar) {
  # Minimum-variance hedge ratio: cov(x, y) / var(x), i.e. the OLS slope of
  # yvar regressed on xvar.
  covariance <- cov(xvar, yvar)
  hedge_instrument_var <- var(xvar)
  covariance / hedge_instrument_var
}
# Hedge ratios for each ETF/benchmark pairing (first argument is the
# hedging instrument, second the position being hedged).
hr_emf_vwo <- hedge.ratio(dat2$emf_dret, dat2$vwo_dret)
hr_emf_ftse <- hedge.ratio(dat2$emf_dret, dat2$ftse_dret)
hr_fmem_vwo <- hedge.ratio(dat2$fmem_dret, dat2$vwo_dret)
hr_fmem_ftse <- hedge.ratio(dat2$fmem_dret, dat2$ftse_dret)
hr_vwo_ftse <- hedge.ratio(dat2$vwo_dret, dat2$ftse_dret)
## Calculating the hedging effectiveness =======================================
# (Table 12 - OLS Daily)
hedge.effec <- function(xvar, yvar, hr_value) {
  # Hedging effectiveness: proportional variance reduction of the hedged
  # position relative to the unhedged position,
  #   1 - Var(hedged) / Var(unhedged),
  # with Var(hedged) = var(y) + hr^2 * var(x) - 2 * hr * cov(x, y).
  unhedged_var <- var(yvar)
  hedged_var <- var(yvar) + hr_value^2 * var(xvar) - 2 * hr_value * cov(xvar, yvar)
  1 - hedged_var / unhedged_var
}
# Hedging effectiveness for each pairing, evaluated at the hedge ratios
# computed above.
he_emf_vwo <- hedge.effec(dat2$emf_dret, dat2$vwo_dret, hr_emf_vwo)
he_emf_ftse <- hedge.effec(dat2$emf_dret, dat2$ftse_dret, hr_emf_ftse)
he_fmem_vwo <- hedge.effec(dat2$fmem_dret, dat2$vwo_dret, hr_fmem_vwo)
he_fmem_ftse <- hedge.effec(dat2$fmem_dret, dat2$ftse_dret, hr_fmem_ftse)
he_vwo_ftse <- hedge.effec(dat2$vwo_dret, dat2$ftse_dret, hr_vwo_ftse)
## Calculating the mean of the hedged position =================================
# (Table 4)
mean.hedged <- function(fut, spot, hr) {
  # Mean return of the hedged position: mean(spot) + hr * mean(fut).
  # NOTE(review): sign convention (adding, not subtracting, the futures leg)
  # is kept exactly as in the original -- confirm against Table 4's definition.
  spot_mean <- mean(spot)
  futures_mean <- mean(fut)
  spot_mean + hr * futures_mean
}
# Mean return of the hedged position for each pairing (Table 4), using the
# hedge ratios computed above.
mh_emf_vwo <- mean.hedged(dat2$emf_dret, dat2$vwo_dret, hr_emf_vwo)
mh_emf_ftse <- mean.hedged(dat2$emf_dret, dat2$ftse_dret, hr_emf_ftse)
mh_fmem_vwo <- mean.hedged(dat2$fmem_dret, dat2$vwo_dret, hr_fmem_vwo)
mh_fmem_ftse <- mean.hedged(dat2$fmem_dret, dat2$ftse_dret, hr_fmem_ftse)
mh_vwo_ftse <- mean.hedged(dat2$vwo_dret, dat2$ftse_dret, hr_vwo_ftse)
| /code/hedge_ratio.r | no_license | jfcabrera7/emf_futures | R | false | false | 3,529 | r | # EMF FUTURES ##################################################################
## Installing and loading packages =============================================
# install.packages("urca")
# install.packages("egcm")
# install.packages("EnvStats")
# install.packages("tidyverse")
# install.packages("pacman")
pacman::p_load(
pacman,
dplyr,
tidyr,
stringr,
lubridate,
httr,
ggvis,
ggplot2,
shiny,
rio,
rmarkdown,
tibble
)
## Setting working directory ===================================================
getwd()
setwd("C:/Users/juanf/OneDrive/GitRepos/emf_futures/data")
## Importing data ==============================================================
# rm(list = ls())
dat <- as_tibble(read.csv("MainData01_CSV.csv"))
# dat <- read.csv("MainData01_CSV.csv")
# names(dat)
# head(dat, n = 5)
# attach(dat)
## Data cleaning and sorting ===================================================
dat$Dates <- as.Date(dat$Dates, "%m/%d/%Y") # Convert char into dates
dat <- dat[order(dat$Dates, decreasing = FALSE),] # Sort by column(s)
## Creating key variables ======================================================
# Calculating lagged values
dat$FMEMAdjPriceLag1 <- lag(dat$FMEMAdjPrice)
dat$EMFAdjPriceLag1 <- lag(dat$EMFAdjPrice)
dat$VWOAdjPriceLag1 <- lag(dat$VWOAdjPrice)
dat$FTSEPriceLag1 <- lag(dat$FTSEPrice)
# Calculating returns - daily
dat$emf_dret <- (dat$EMFAdjPrice / dat$EMFAdjPriceLag1) - 1
dat$fmem_dret <- (dat$FMEMAdjPrice / dat$FMEMAdjPriceLag1) - 1
dat$vwo_dret <- (dat$VWOAdjPrice / dat$VWOAdjPriceLag1) - 1
dat$ftse_dret <- (dat$FTSEPrice / dat$FTSEPriceLag1) - 1
# Subsetting data to keep relevant variables
keepvars <- c(
"emf_dret",
"fmem_dret",
"vwo_dret",
"ftse_dret"
)
dat2 <- dat[keepvars]
dat2 <- na.omit(dat2)
## Summary statistics of key variables =========================================
head(dat2)
summary(dat2)
plot(dat2)
## Calculating the Hedge Ratio =================================================
# (Table 3 - Daily)
hedge.ratio <- function(xvar, yvar){
cov(xvar, yvar) / var(xvar)
}
hr_emf_vwo <- hedge.ratio(dat2$emf_dret, dat2$vwo_dret)
hr_emf_ftse <- hedge.ratio(dat2$emf_dret, dat2$ftse_dret)
hr_fmem_vwo <- hedge.ratio(dat2$fmem_dret, dat2$vwo_dret)
hr_fmem_ftse <- hedge.ratio(dat2$fmem_dret, dat2$ftse_dret)
hr_vwo_ftse <- hedge.ratio(dat2$vwo_dret, dat2$ftse_dret)
## Calculating the hedging effectiveness =======================================
# (Table 12 - OLS Daily)
hedge.effec <- function(xvar, yvar, hr_value){
1 - (var(yvar) + hr_value^2 * var(xvar) - 2*hr_value * cov(xvar, yvar))/
var(yvar)
}
he_emf_vwo <- hedge.effec(dat2$emf_dret, dat2$vwo_dret, hr_emf_vwo)
he_emf_ftse <- hedge.effec(dat2$emf_dret, dat2$ftse_dret, hr_emf_ftse)
he_fmem_vwo <- hedge.effec(dat2$fmem_dret, dat2$vwo_dret, hr_fmem_vwo)
he_fmem_ftse <- hedge.effec(dat2$fmem_dret, dat2$ftse_dret, hr_fmem_ftse)
he_vwo_ftse <- hedge.effec(dat2$vwo_dret, dat2$ftse_dret, hr_vwo_ftse)
## Calculating the mean of the hedged position =================================
# (Table 4)
mean.hedged <- function(fut, spot, hr){
mean(spot) + hr*mean(fut)
}
mh_emf_vwo <- mean.hedged(dat2$emf_dret, dat2$vwo_dret, hr_emf_vwo)
mh_emf_ftse <- mean.hedged(dat2$emf_dret, dat2$ftse_dret, hr_emf_ftse)
mh_fmem_vwo <- mean.hedged(dat2$fmem_dret, dat2$vwo_dret, hr_fmem_vwo)
mh_fmem_ftse <- mean.hedged(dat2$fmem_dret, dat2$ftse_dret, hr_fmem_ftse)
mh_vwo_ftse <- mean.hedged(dat2$vwo_dret, dat2$ftse_dret, hr_vwo_ftse)
|
% --- Source file: calcMin.Rd ---
\name{calcMin}
\alias{calcMin}
\title{Calculate the Minimum of a User-Defined Function}
\concept{minimization}
\description{
Minimization based on the R-stat functions \code{nlm}, \code{nlminb}, and \code{optim}.
Model parameters are scaled and can be active or not in the minimization.
}
\usage{
calcMin(pvec, func, method="nlm", trace=0, maxit=1000, reltol=1e-8,
steptol=1e-6, temp=10, repN=0, \dots)
}
\arguments{
\item{pvec}{Initial values of the model parameters to be optimized.
\code{pvec} is a data frame comprising four columns (
\code{"val","min","max","active"}) and as many rows as there are model
parameters. The \code{"active"} field (logical) determines whether the
parameters are estimated (\code{T}) or remain fixed (\code{F}).}
\item{func}{The user-defined function to be minimized (or maximized).
The function should return a scalar result.}
\item{method}{The minimization method to use: one of \code{nlm}, \code{nlminb},
\code{Nelder-Mead}, \code{BFGS}, \code{CG}, \code{L-BFGS-B}, or
\code{SANN}. Default is \code{nlm}.}
\item{trace}{Non-negative integer. If positive, tracing information on the
progress of the minimization is produced. Higher values may produce more
tracing information: for method \code{"L-BFGS-B"} there are six levels of
tracing. Default is \code{0}.}
\item{maxit}{The maximum number of iterations. Default is \code{1000}.}
\item{reltol}{Relative convergence tolerance. The algorithm stops if it is
unable to reduce the value by a factor of \code{reltol*(abs(val)+reltol)}
at a step. Default is \code{1e-8}.}
\item{steptol}{A positive scalar providing the minimum allowable relative step length.
Default is \code{1e-6}.}
\item{temp}{Temperature controlling the \code{"SANN"} method. It is the
starting temperature for the cooling schedule. Default is \code{10}.}
\item{repN}{Reports the parameter and objective function values on the R-console
every \code{repN} evaluations. Default is \code{0} for no reporting.}
\item{\dots}{Further arguments to be passed to the optimizing function chosen:
\code{nlm}, \code{nlminb}, or \code{optim}.
Beware of partial matching to earlier arguments.}
}
\details{
See \code{optim} for details on the following methods: \code{Nelder-Mead},
\code{BFGS}, \code{CG}, \code{L-BFGS-B}, and \code{SANN}.
}
\value{
A list with components:
\item{Fout}{The output list from the optimizer function chosen through \code{method}.}
\item{iters}{Number of iterations.}
\item{evals}{Number of evaluations.}
\item{cpuTime}{The user CPU time to execute the minimization.}
\item{elapTime}{The total elapsed time to execute the minimization.}
\item{fminS}{The objective function value calculated at the start of the minimization.}
\item{fminE}{The objective function value calculated at the end of the minimization.}
\item{Pstart}{Starting values for the model parameters.}
\item{Pend}{Final values estimated for the model parameters from the minimization.}
\item{AIC}{Akaike's Information Criterion}
\item{message}{Convergence message from the minimization routine.}
}
\author{
Jon T. Schnute, Pacific Biological Station, Fisheries and Oceans Canada, Nanaimo BC
}
\note{
Some arguments to \code{calcMin} have no effect depending on the \code{method} chosen.
}
\seealso{
\code{\link{scalePar}}, \code{\link{restorePar}}, \code{\link{calcMin}}, \code{\link{GT0}} \cr
In the \code{stats} package: \code{nlm}, \code{nlminb}, and \code{optim}.
}
\examples{
local(envir=.PBSmodEnv,expr={
Ufun <- function(P) {
Linf <- P[1]; K <- P[2]; t0 <- P[3]; obs <- afile$len;
pred <- Linf * (1 - exp(-K*(afile$age-t0)));
n <- length(obs); ssq <- sum((obs-pred)^2 );
return(n*log(ssq)); };
oldpar = par(no.readonly = TRUE)
afile <- data.frame(age=1:16,len=c(7.36,14.3,21.8,27.6,31.5,35.3,39,
41.1,43.8,45.1,47.4,48.9,50.1,51.7,51.7,54.1));
pvec <- data.frame(val=c(70,0.5,0),min=c(40,0.01,-2),max=c(100,2,2),
active=c(TRUE,TRUE,TRUE),row.names=c("Linf","K","t0"),
stringsAsFactors=FALSE);
alist <- calcMin(pvec=pvec,func=Ufun,method="nlm",steptol=1e-4,repN=10);
print(alist[-1]); P <- alist$Pend;
#resetGraph();
expandGraph();
xnew <- seq(afile$age[1],afile$age[nrow(afile)],len=100);
ynew <- P[1] * (1 - exp(-P[2]*(xnew-P[3])) );
plot(afile); lines(xnew,ynew,col="red",lwd=2);
addLabel(.05,.88,paste(paste(c("Linf","K","t0"),round(P,c(2,4,4)),
sep=" = "),collapse="\n"),adj=0,cex=0.9);
par(oldpar)
})
}
\keyword{nonlinear}
\keyword{optimize}
| /PBSmodelling/man/calcMin.Rd | no_license | pbs-software/pbs-modelling | R | false | false | 4,679 | rd | % --- Source file: calcMin.Rd ---
\name{calcMin}
\alias{calcMin}
\title{Calculate the Minimum of a User-Defined Function}
\concept{minimization}
\description{
Minimization based on the R stats functions \code{nlm}, \code{nlminb}, and \code{optim}.
Model parameters are scaled and can be active or not in the minimization.
}
\usage{
calcMin(pvec, func, method="nlm", trace=0, maxit=1000, reltol=1e-8,
steptol=1e-6, temp=10, repN=0, \dots)
}
\arguments{
\item{pvec}{Initial values of the model parameters to be optimized.
\code{pvec} is a data frame comprising four columns (
\code{"val","min","max","active"}) and as many rows as there are model
parameters. The \code{"active"} field (logical) determines whether the
parameters are estimated (\code{T}) or remain fixed (\code{F}).}
\item{func}{The user-defined function to be minimized (or maximized).
The function should return a scalar result.}
\item{method}{The minimization method to use: one of \code{nlm}, \code{nlminb},
\code{Nelder-Mead}, \code{BFGS}, \code{CG}, \code{L-BFGS-B}, or
\code{SANN}. Default is \code{nlm}.}
\item{trace}{Non-negative integer. If positive, tracing information on the
progress of the minimization is produced. Higher values may produce more
tracing information: for method \code{"L-BFGS-B"} there are six levels of
tracing. Default is \code{0}.}
\item{maxit}{The maximum number of iterations. Default is \code{1000}.}
\item{reltol}{Relative convergence tolerance. The algorithm stops if it is
unable to reduce the value by a factor of \code{reltol*(abs(val)+reltol)}
at a step. Default is \code{1e-8}.}
\item{steptol}{A positive scalar providing the minimum allowable relative step length.
Default is \code{1e-6}.}
\item{temp}{Temperature controlling the \code{"SANN"} method. It is the
starting temperature for the cooling schedule. Default is \code{10}.}
\item{repN}{Reports the parameter and objective function values on the R-console
every \code{repN} evaluations. Default is \code{0} for no reporting.}
\item{\dots}{Further arguments to be passed to the optimizing function chosen:
\code{nlm}, \code{nlminb}, or \code{optim}.
Beware of partial matching to earlier arguments.}
}
\details{
See \code{optim} for details on the following methods: \code{Nelder-Mead},
\code{BFGS}, \code{CG}, \code{L-BFGS-B}, and \code{SANN}.
}
\value{
A list with components:
\item{Fout}{The output list from the optimizer function chosen through \code{method}.}
\item{iters}{Number of iterations.}
\item{evals}{Number of evaluations.}
\item{cpuTime}{The user CPU time to execute the minimization.}
\item{elapTime}{The total elapsed time to execute the minimization.}
\item{fminS}{The objective function value calculated at the start of the minimization.}
\item{fminE}{The objective function value calculated at the end of the minimization.}
\item{Pstart}{Starting values for the model parameters.}
\item{Pend}{Final values estimated for the model parameters from the minimization.}
\item{AIC}{Akaike's Information Criterion.}
\item{message}{Convergence message from the minimization routine.}
}
\author{
Jon T. Schnute, Pacific Biological Station, Fisheries and Oceans Canada, Nanaimo BC
}
\note{
Some arguments to \code{calcMin} have no effect depending on the \code{method} chosen.
}
\seealso{
\code{\link{scalePar}}, \code{\link{restorePar}}, \code{\link{calcMin}}, \code{\link{GT0}} \cr
In the \code{stats} package: \code{nlm}, \code{nlminb}, and \code{optim}.
}
\examples{
local(envir=.PBSmodEnv,expr={
Ufun <- function(P) {
Linf <- P[1]; K <- P[2]; t0 <- P[3]; obs <- afile$len;
pred <- Linf * (1 - exp(-K*(afile$age-t0)));
n <- length(obs); ssq <- sum((obs-pred)^2 );
return(n*log(ssq)); };
oldpar = par(no.readonly = TRUE)
afile <- data.frame(age=1:16,len=c(7.36,14.3,21.8,27.6,31.5,35.3,39,
41.1,43.8,45.1,47.4,48.9,50.1,51.7,51.7,54.1));
pvec <- data.frame(val=c(70,0.5,0),min=c(40,0.01,-2),max=c(100,2,2),
active=c(TRUE,TRUE,TRUE),row.names=c("Linf","K","t0"),
stringsAsFactors=FALSE);
alist <- calcMin(pvec=pvec,func=Ufun,method="nlm",steptol=1e-4,repN=10);
print(alist[-1]); P <- alist$Pend;
#resetGraph();
expandGraph();
xnew <- seq(afile$age[1],afile$age[nrow(afile)],len=100);
ynew <- P[1] * (1 - exp(-P[2]*(xnew-P[3])) );
plot(afile); lines(xnew,ynew,col="red",lwd=2);
addLabel(.05,.88,paste(paste(c("Linf","K","t0"),round(P,c(2,4,4)),
sep=" = "),collapse="\n"),adj=0,cex=0.9);
par(oldpar)
})
}
\keyword{nonlinear}
\keyword{optimize}
|
context("get_electronic_filing_by_committees")
test_that("expected errors", {
  # Non-year and pre-1996 cycles are rejected outright.
  expect_warning(
    expect_error(get_electronic_filing_by_committees("UNITED EGG", 16), "Incorrect cycle"),
    "Cycle should be four-digit year"
  )
  expect_warning(
    expect_error(get_electronic_filing_by_committees("UNITED EGG", 1995), "Incorrect cycle"),
    "Cycle should be four-digit year larger than 1996"
  )
  # Every odd-numbered year in the supported range must also be rejected.
  # (Replaces the hand-enumerated year[2], year[4], ..., year[22] checks,
  # which covered exactly the odd years 1997 through 2017.)
  for (cycle in seq(1997, 2017, by = 2)) {
    expect_warning(
      expect_error(get_electronic_filing_by_committees("UNITED EGG", cycle), "Incorrect cycle"),
      "Cycle should be even-numbered year larger than 1996"
    )
  }
})
test_that("expected lengths", {
  expect_true(length(get_electronic_filing_by_committees("UNITED EGG", 2016)$results) > 1)
  # Every even cycle from 1996 through 2018 should return more than one result.
  # NOTE(review): the original file marked the 1996, 1998 and 2000 cases with
  # "# false" (i.e. known to fail) -- confirm whether those cycles actually
  # have data upstream before relying on this test.
  for (cycle in seq(1996, 2018, by = 2)) {
    expect_true(length(get_electronic_filing_by_committees("UNITED EGG", cycle)$results) > 1)
  }
})
| /tests/testthat/test-get_electronic_filing_by_committees.R | no_license | DavytJ/ProPublicaR | R | false | false | 3,704 | r | context("get_electronic_filing_by_committees")
test_that("expected errors", {
  # Non-year and pre-1996 cycles are rejected outright.
  expect_warning(
    expect_error(get_electronic_filing_by_committees("UNITED EGG", 16), "Incorrect cycle"),
    "Cycle should be four-digit year"
  )
  expect_warning(
    expect_error(get_electronic_filing_by_committees("UNITED EGG", 1995), "Incorrect cycle"),
    "Cycle should be four-digit year larger than 1996"
  )
  # Every odd-numbered year in the supported range must also be rejected.
  # (Replaces the hand-enumerated year[2], year[4], ..., year[22] checks,
  # which covered exactly the odd years 1997 through 2017.)
  for (cycle in seq(1997, 2017, by = 2)) {
    expect_warning(
      expect_error(get_electronic_filing_by_committees("UNITED EGG", cycle), "Incorrect cycle"),
      "Cycle should be even-numbered year larger than 1996"
    )
  }
})
test_that("expected lengths", {
  expect_true(length(get_electronic_filing_by_committees("UNITED EGG", 2016)$results) > 1)
  # Every even cycle from 1996 through 2018 should return more than one result.
  # NOTE(review): the original file marked the 1996, 1998 and 2000 cases with
  # "# false" (i.e. known to fail) -- confirm whether those cycles actually
  # have data upstream before relying on this test.
  for (cycle in seq(1996, 2018, by = 2)) {
    expect_true(length(get_electronic_filing_by_committees("UNITED EGG", cycle)$results) > 1)
  }
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{last_true}
\alias{last_true}
\title{Index of the last TRUE value}
\usage{
last_true(x)
}
\description{
Index of the last TRUE value
}
| /R/yuez/man/last_true.Rd | no_license | giantwhale/yuez | R | false | true | 232 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{last_true}
\alias{last_true}
\title{Index of the last TRUE value}
\usage{
last_true(x)
}
\description{
Index of the last TRUE value
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/ridgereg_print.R
\name{print.ridgereg}
\alias{print.ridgereg}
\title{Print values}
\usage{
\method{print}{ridgereg}(x, ...)
}
\arguments{
\item{x}{an object used to select a method.}
\item{...}{further arguments passed to or from other methods.}
}
\value{
print out the coefficients and coefficient names.
}
\description{
\code{print} prints its argument and returns it out for class \code{"ridgereg"}.
}
| /lab7/man/print.ridgereg.Rd | no_license | ClaraSchartner/lab7 | R | false | false | 493 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/ridgereg_print.R
\name{print.ridgereg}
\alias{print.ridgereg}
\title{Print values}
\usage{
\method{print}{ridgereg}(x, ...)
}
\arguments{
\item{x}{an object used to select a method.}
\item{...}{further arguments passed to or from other methods.}
}
\value{
print out the coefficients and coefficient names.
}
\description{
\code{print} prints its argument and returns it out for class \code{"ridgereg"}.
}
|
#Load 'dplyr' & 'plotly' & 'ggplot2' & 'leaflet' packages
library(dplyr)
library(plotly)
library(ggplot2)
library(leaflet)
#Load dataset into a dataframe, without reading strings in as factors
shootings2018_data <- read.csv("data/shootings-2018.csv", stringsAsFactors = FALSE)
#Summary Information -----------------------------------------------------
#Original data source for the shootings dataset
shootings_source <- "http://www.shootingtracker.com"
#Analyze how many shootings occurred,
#defining "shootings" as the number of shooting incidents (rows)
total_num_shootings <- nrow(shootings2018_data)
#Analyze how many lives were lost,
#defining "lives lost" as the total number of people killed
total_lives_lost <- shootings2018_data %>%
  select(num_killed) %>%
  #Deal with potential NA values
  sum(na.rm = TRUE)
#Analyze which city was impacted most by mass shootings,
#defining "impacted" as the city with the most people killed plus injured
city_most_impacted <- shootings2018_data %>%
  #Ensure identical city names in different states are not counted
  #together, by appending the state to the city name
  mutate(location = paste(city, ", ", state, sep = "")) %>%
  #Calculate the number of people impacted per incident
  mutate(impacted_num = num_killed + num_injured) %>%
  group_by(location) %>%
  summarize(
    #Deal with potential NA values
    total_impacted = sum(impacted_num, na.rm = TRUE)
  ) %>%
  #Find the most impacted location and pull the location name
  filter(total_impacted == max(total_impacted)) %>%
  pull(location)
#First insight: month in which the most shootings occurred,
#defining "most shootings" as the total number of incidents
most_shootings_month <- shootings2018_data %>%
  #Extract the month name from the date string by dropping the last 8
  #characters (assumes dates look like "Month d, yyyy" -- TODO confirm)
  mutate(month = gsub(" ", "", substr(date, 1, nchar(date) - 8))) %>%
  count(month) %>%
  #Pull the month with the most shootings
  filter(n == max(n)) %>%
  pull(month)
#Second insight: state that was most impacted by mass shootings,
#defining "most impacted" as the state with the most killed plus injured
state_most_impacted <- shootings2018_data %>%
  #Find the number of people impacted per incident
  mutate(impacted_num = num_killed + num_injured) %>%
  group_by(state) %>%
  summarize(
    #Deal with potential NA values
    total_impacted = sum(impacted_num, na.rm = TRUE)
  ) %>%
  #Find the most impacted state and pull the state name
  filter(total_impacted == max(total_impacted)) %>%
  pull(state)
#Summary Table --------------------------------------------------------
#This table summarizes the number of people impacted every month.
#Add a column for the total number of people impacted per incident and a
#column indicating the month of the incident; then total the number of
#people impacted by month and sort in descending order of that total.
summarize_shootings_data <- shootings2018_data %>%
  #Get the total number of people impacted per incident
  mutate(impacted_num = num_killed + num_injured) %>%
  #Extract the month name (dates assumed formatted like "Month d, yyyy")
  mutate(Month = gsub(" ", "", substr(date, 1, nchar(date) - 8))) %>%
  group_by(Month) %>%
  summarize(
    #na.rm = TRUE for consistency with the other aggregations in this
    #script, so a single NA count cannot blank out an entire month
    total_impacted = sum(impacted_num, na.rm = TRUE)
  ) %>%
  #Sort in descending order of the total number of people impacted
  arrange(-total_impacted)
#Description of an Incident -------------------------------------------
#This description focuses on the shooting that occurred in Seattle
incident <- shootings2018_data %>%
  filter(city == "Seattle (Skyway)")
#Get the date of the incident
date_of_incident <- incident %>%
  pull(date)
#Get the location of the incident,
#defining the location as 'city, state'
location_of_incident <- paste(pull(incident, city), ", ",
                              pull(incident, state), sep = "")
#Get the geolocation of the incident,
#defining geolocation as its latitude and longitude
geolocation_of_incident <- paste("lat = ", pull(incident, lat),
                                 ", long = ", pull(incident, long), sep = "")
#Get the number of people injured
num_injured_of_incident <- incident %>%
  pull(num_injured)
#Get the number of people killed
num_killed_of_incident <- incident %>%
  pull(num_killed)
#External resource with more details about the incident
incident_extresource <- paste("https://www.kiro7.com/news/local/2-dead-others-injured-",
                              "after-motorcycle-club-shooting-in-skyway/740813121",
                              sep = "")
#Interactive Map ------------------------------------------------------
#Create an interactive map from the shooting data, where the size of each
#marker is proportional to the number of people impacted by the shooting
#(people killed plus people injured). Each marker sits at the incident's
#latitude and longitude, and clicking a marker shows the city and state
#along with the number of people killed and the number injured.
map_of_shootings <- shootings2018_data %>%
  #Compute the marker radius, scaled relative to the most impacted incident
  mutate(radius = 5 * ( (num_killed + num_injured) /
                          max(num_killed + num_injured))) %>%
  #Create the popup text for each marker
  mutate(popup_data = paste("Location: ", city, ", ", state, "</br>Killed: ",
                            num_killed, "</br>Injured:", num_injured,
                            sep = "")) %>%
  leaflet() %>%
  addTiles() %>%
  addCircleMarkers(
    lat = ~lat, #Latitude for each data point
    lng = ~long, #Longitude for each data point
    popup = ~popup_data, #Add popup data for each data point
    stroke = TRUE, #Add borders to each circle
    radius = ~radius, #Add the calculated radius
    fillOpacity = 0.5 #Circles' opacity
  ) %>%
  setMaxBounds(-130, 24, -60, 50) #Keep the map view fitted to the US
#Fifteen Biggest Shootings --------------------------------------------------
#Create a bar chart of the 15 biggest shootings by number of impacted people
fifteen_biggest_shootings <- shootings2018_data %>%
  #Create an impacted_num column: total number injured plus killed
  mutate(impacted_num = num_killed + num_injured) %>%
  #Keep the top 15 incidents by number of people impacted
  top_n(15, impacted_num) %>%
  #Sort in ascending order
  arrange(impacted_num) %>%
  #Create a "city, state" label column
  mutate(city_state = paste(city, ", ", state, sep = "")) %>%
  #Freeze the factor level order so ggplot keeps the sorted ordering
  mutate(location = factor(city_state, city_state))
#Create a horizontal bar chart with
#location on the vertical (y) axis and impacted_num on the horizontal (x) axis
ggplot(fifteen_biggest_shootings) +
  geom_col(mapping = aes(x = location, y = impacted_num)) +
  labs(
    #Add labels to the plot and axes
    #Plot title
    title = "Fifteen Biggest Mass Shootings in 2018",
    #Vertical X axis title
    x = "Location",
    #Horizontal Y axis title
    y = "Number of People Impacted"
  ) +
  coord_flip() #Make horizontal bars with cities on the side for better visibility
| /Foundational Skills for Data Science/Lab5/analysis.R | permissive | KhoaDTran/UW-Data-Science-Coursework-Projects- | R | false | false | 6,727 | r | #Load 'dplyr' & 'plotly' & 'ggplot2' & 'leaflet' packages
library(dplyr)
library(plotly)
library(ggplot2)
library(leaflet)
#Load dataset into a dataframe, with not reading strings as factors
shootings2018_data <- read.csv("data/shootings-2018.csv", stringsAsFactors = FALSE)
#Summary Information -----------------------------------------------------
#Getting original data source
shootings_source <- "http://www.shootingtracker.com"
#Analyze how many shootings occurred
#by defining shootings as number of shooting incidents
total_num_shootings <- nrow(shootings2018_data)
#Analyze how many lives were lost
#by defining lives lost as number of people killed
total_lives_lost <- shootings2018_data %>%
select(num_killed) %>%
#Deal with potential NA values
sum(na.rm = TRUE)
#Analyze which city was impacted most by mass shootings
#by defining impacted as city with most killed and most injured
city_most_impacted <- shootings2018_data %>%
#Checking for multiple states with the same city don't count for
#double by specifing the state of the city
mutate(location = paste(city, ", ", state, sep = "")) %>%
#Calculate the number of people impacted
mutate(impacted_num = num_killed + num_injured) %>%
group_by(location) %>%
summarize(
#Deal with potential NA values
total_impacted = sum(impacted_num, na.rm = TRUE)
) %>%
#Find the most impacted location and pull the location
filter(total_impacted == max(total_impacted)) %>%
pull(location)
#First insight: month with the most shootings occurred
#by defining most shootings as total number of incidents
most_shootings_month <- shootings2018_data %>%
#Get month name
mutate(month = gsub(" ", "", substr(date, 1, nchar(date) - 8))) %>%
count(month) %>%
#Pull month with most shootings
filter(n == max(n)) %>%
pull(month)
#Second insight: State that wass most impacted by mass shootings
#by defining most impacted as state with most killed and most injured
state_most_impacted <- shootings2018_data %>%
#Find number of people impacted
mutate(impacted_num = num_killed + num_injured) %>%
group_by(state) %>%
summarize(
#Deal with potential NA values
total_impacted = sum(impacted_num, na.rm = TRUE)
) %>%
#Find the most impacted state and pull the state
filter(total_impacted == max(total_impacted)) %>%
pull(state)
#Summary Table --------------------------------------------------------
#This table summarizes the number of people impacted every month
#Add a column for the total number of people impacted per incident and a column
#indicating the month of the incident. After, group by month the number of people impacted,
#and sort it in descending order of the number of people impacted
summarize_shootings_data <- shootings2018_data %>%
#Get total number of people impacted
mutate(impacted_num = num_killed + num_injured) %>%
#Get month name
mutate(Month = gsub(" ", "", substr(date, 1, nchar(date) - 8))) %>%
group_by(Month) %>%
summarize(
total_impacted = sum(impacted_num)
) %>%
#Sort in descending order of total number of people impacted
arrange(-total_impacted)
#Description of an Indcident -------------------------------------------
#The incident will be focused on the shooting occued in Seattle
incident <- shootings2018_data %>%
filter(city == "Seattle (Skyway)")
#Get the date of the indcident
date_of_incident <- incident %>%
pull(date)
#Get the location of the incident
#by defining the location as 'city, state'
location_of_incident <- paste(pull(incident, city), ", ",
pull(incident, state), sep = "")
#Get the geoLocation of the incident
#by defining geolocation as latitude and longitude
geolocation_of_incident <- paste("lat = ", pull(incident, lat),
", long = ", pull(incident, long), sep = "")
#Get the number of people injured
num_injured_of_incident <- incident %>%
pull(num_injured)
#Get the number of people killed
num_killed_of_incident <- incident %>%
pull(num_killed)
#External resoure about the incident
incident_extresource <- paste("https://www.kiro7.com/news/local/2-dead-others-injured-",
"after-motorcycle-club-shooting-in-skyway/740813121",
sep = "")
#Interactive Map ------------------------------------------------------
#Create an interactive map from the shooting data, where the size of each data
#is related to the number of people impacted by the shooting as in the number
#of people killed and the number of people injured. For each data point, it
#has the exact latitude and longitude of the city, and the hovering data point
#displays the city and state, with the number of people killed and the people injured
map_of_shootings <- shootings2018_data %>%
#Gets the desired radius
mutate(radius = 5 * ( (num_killed + num_injured) /
max(num_killed + num_injured))) %>%
#Creates the popup data
mutate(popup_data = paste("Location: ", city, ", ", state, "</br>Killed: ",
num_killed, "</br>Injured:", num_injured,
sep = "")) %>%
leaflet() %>%
addTiles() %>%
addCircleMarkers(
lat = ~lat, #Latitude for each data point
lng = ~long, #Longitude for each data point
popup = ~popup_data, #Add Popup data for each data point
stroke = TRUE, #Add borders to each circle
radius = ~radius, #Add the calculated radius
fillOpacity = 0.5 #Circles' opacity
) %>%
setMaxBounds(-130, 24, -60, 50) #Keeping view of the map to fit the US
#Fifteen Biggest Shootings --------------------------------------------------
#Create a bar chart of the 15 biggest shootings by most number of impacted people
fifteen_biggest_shootings <- shootings2018_data %>%
#Create a impacted_num column which total number injured and killed
mutate(impacted_num = num_killed + num_injured) %>%
#Filter the top 15
top_n(15, impacted_num) %>%
#Sort in ascending order
arrange(impacted_num) %>%
#Create a city_and_state column
mutate(city_state = paste(city, ", ", state, sep = "")) %>%
#Set row order
mutate(location = factor(city_state, city_state))
#Create a horizontal bar chart with
#location as vertical(y) and impacted_num as horizontal(x)
#Creates a horizontal bar chart
ggplot(fifteen_biggest_shootings) +
geom_col(mapping = aes(x = location, y = impacted_num)) +
labs(
#Add labels to x and y axis
#Plot title
title = "Fifteen Biggest Mass Shootings in 2018",
#Vertical X axis title
x = "Location",
#Horizontal Y axis title
y = "Number of People Impacted"
) +
coord_flip() #Make horizontal bars with cities on the side for better visibility
|
context("Shiny inputs")
# NOTE(review): test_input() is a project-level test helper; from its use
# here it appears to take an input definition, a list of candidate values
# to assign, and the list of values expected after validation/clamping --
# confirm against the helper's definition in the test setup files.
# Slider: out-of-range values are clamped to [min, max]
test_input(mwSlider(0, 10, 0), c(5, -20, 20), c(5, 0, 10))
# Slider with two values: each endpoint is clamped independently
test_input(
  mwSlider(0, 10, 0),
  list(c(5, 7), c(-20, 20), c(-20, 5), c(5, 20)),
  list(c(5, 7), c(0, 10), c(0, 5), c(5, 10))
)
# Text: numbers are coerced to character; NULL becomes ""
test_input(mwText(), list("1", 1, NULL), list("1", "1", ""))
# Numeric: range is clamped; non-numeric input yields NULL
test_input(mwNumeric(0), list(5, -20, 20, NULL, "a"), list(5, -20, 20, NULL, NULL))
test_input(mwNumeric(0, min = 0, max = 10), c(5, -20, 20), c(5, 0, 10))
# Password: behaves like a text input
test_input(mwPassword(), list("1", 1, NULL), list("1", "1", ""))
# Select: invalid or NULL values fall back to the first choice
test_input(mwSelect(1:4), list(1, 2, 5, NULL), list(1, 2, 1, 1))
# Select with multiple = TRUE: invalid choices are dropped
test_input(
  mwSelect(1:4, multiple = TRUE),
  list(1, 5, 3:5),
  list(1, integer(0), 3:4)
)
# Select where choices have distinct label and values
test_input(
  mwSelect(list(a = 1, b = 2)),
  list(1, 2, 5, NULL),
  list(1, 2, 1, 1)
)
test_input(
  mwSelect(list(a = 1, b = 2), multiple = TRUE),
  list(1, 2, 5, 1:3),
  list(1, 2, integer(0), 1:2)
)
# Checkbox: anything that is not TRUE becomes FALSE
test_input(
  mwCheckbox(),
  list(TRUE, FALSE, NULL, NA, "test"),
  list(TRUE, FALSE, FALSE, FALSE, FALSE)
)
# Radio buttons: invalid or NULL values fall back to the first choice
test_input(mwRadio(1:4), list(1, 2, 5, NULL), list(1, 2, 1, 1))
test_input(
  mwRadio(list(a = 1, b = 2)),
  list(1, 2, 5, NULL),
  list(1, 2, 1, 1)
)
# Date picker: strings are parsed as dates; NULL falls back to today
test_input(
  mwDate(),
  list(Sys.Date(), "2017-01-01", NULL),
  list(Sys.Date(), as.Date("2017-01-01"), Sys.Date())
)
# Date with min and max dates: values outside the window are clamped
test_input(
  mwDate(min = "2017-01-01", max = "2017-12-31"),
  list("2017-06-01", "2016-06-01", "2018-06-01"),
  list(as.Date("2017-06-01"), as.Date("2017-01-01"), as.Date("2017-12-31"))
)
# Date range: character ranges are parsed; NULL falls back to the default
defaultRange <- c(Sys.Date(), Sys.Date())
test_input(
  mwDateRange(),
  list(defaultRange, as.character(defaultRange), NULL),
  list(defaultRange, defaultRange, defaultRange)
)
# Date range with min and max dates: both endpoints are clamped
test_input(
  mwDateRange(min = "2017-01-01", max = "2017-12-31"),
  list(c("2016-01-01", "2018-01-01")),
  list(as.Date(c("2017-01-01", "2017-12-31")))
)
# Checkbox group: invalid choices are dropped
test_input(
  mwCheckboxGroup(1:4),
  list(1, 5, 3:5),
  list(1, integer(0), 3:4)
)
test_input(
  mwCheckboxGroup(list(a = 1, b = 2)),
  list(1, 2, 5, 1:3),
  list(1, 2, integer(0), 1:2)
)
# Groups of input: construction only (no candidate/expected values)
test_input(mwGroup(a = mwText(), b = mwText()))
| /tests/testthat/test-inputs.R | no_license | cran/manipulateWidget | R | false | false | 2,388 | r | context("Shiny inputs")
# Slider
test_input(mwSlider(0, 10, 0), c(5, -20, 20), c(5, 0, 10))
# Slider with two values
test_input(
mwSlider(0, 10, 0),
list(c(5, 7), c(-20, 20), c(-20, 5), c(5, 20)),
list(c(5, 7), c(0, 10), c(0, 5), c(5, 10))
)
# Text
test_input(mwText(), list("1", 1, NULL), list("1", "1", ""))
# Numeric
test_input(mwNumeric(0), list(5, -20, 20, NULL, "a"), list(5, -20, 20, NULL, NULL))
test_input(mwNumeric(0, min = 0, max = 10), c(5, -20, 20), c(5, 0, 10))
# Password
test_input(mwPassword(), list("1", 1, NULL), list("1", "1", ""))
# Select
test_input(mwSelect(1:4), list(1, 2, 5, NULL), list(1, 2, 1, 1))
test_input(
mwSelect(1:4, multiple = TRUE),
list(1, 5, 3:5),
list(1, integer(0), 3:4)
)
# Select where choices have distinct label and values
test_input(
mwSelect(list(a = 1, b = 2)),
list(1, 2, 5, NULL),
list(1, 2, 1, 1)
)
test_input(
mwSelect(list(a = 1, b = 2), multiple = TRUE),
list(1, 2, 5, 1:3),
list(1, 2, integer(0), 1:2)
)
# Checkbox
test_input(
mwCheckbox(),
list(TRUE, FALSE, NULL, NA, "test"),
list(TRUE, FALSE, FALSE, FALSE, FALSE)
)
# Radio buttons
test_input(mwRadio(1:4), list(1, 2, 5, NULL), list(1, 2, 1, 1))
test_input(
mwRadio(list(a = 1, b = 2)),
list(1, 2, 5, NULL),
list(1, 2, 1, 1)
)
# Date picker
test_input(
mwDate(),
list(Sys.Date(), "2017-01-01", NULL),
list(Sys.Date(), as.Date("2017-01-01"), Sys.Date())
)
# Date with min and max dates
test_input(
mwDate(min = "2017-01-01", max = "2017-12-31"),
list("2017-06-01", "2016-06-01", "2018-06-01"),
list(as.Date("2017-06-01"), as.Date("2017-01-01"), as.Date("2017-12-31"))
)
# Date range
defaultRange <- c(Sys.Date(), Sys.Date())
test_input(
mwDateRange(),
list(defaultRange, as.character(defaultRange), NULL),
list(defaultRange, defaultRange, defaultRange)
)
# Date range with min and max dates
test_input(
mwDateRange(min = "2017-01-01", max = "2017-12-31"),
list(c("2016-01-01", "2018-01-01")),
list(as.Date(c("2017-01-01", "2017-12-31")))
)
# Checkbox group
test_input(
mwCheckboxGroup(1:4),
list(1, 5, 3:5),
list(1, integer(0), 3:4)
)
test_input(
mwCheckboxGroup(list(a = 1, b = 2)),
list(1, 2, 5, 1:3),
list(1, 2, integer(0), 1:2)
)
# Groups of input
test_input(mwGroup(a = mwText(), b = mwText()))
|
args <- commandArgs(trailingOnly = TRUE)
# The command-line arguments are split on commas into
# <timestamp>, <lat>, <lon>
list<-unlist(strsplit(args,","))
# The timestamp is space-separated; the 5th field is taken as hh:mm:ss
# (assumes a format like "Www Mmm dd yyyy hh:mm:ss" -- TODO confirm caller)
todList<-unlist(strsplit(list[1]," "))
hhmmss<-todList[5]
hh<-as.numeric(unlist(strsplit(hhmmss,":"))[1])
mm<-as.numeric(unlist(strsplit(hhmmss,":"))[2])
# Fraction of the day elapsed, in [0, 1); seconds are ignored
timeOfDay<-(hh+mm/60.0)/24.0
#print(timeOfDay)
library("neuralnet")
# NOTE(review): `lat` is later compared against ~103.6-104.0 (a longitude
# in Singapore) and `lon` against ~1.2-1.5 (a latitude); the two names
# appear swapped relative to their contents -- verify against the caller.
lat <-as.numeric(list[2])
lon<-as.numeric( list[3])
# Load the pre-trained per-cell grid model for the given coordinates and
# print the neural-network prediction for the given time of day.
# Prints -1 when the point falls outside the supported bounding box or
# when the grid model file cannot be loaded.
# NOTE(review): despite their names, `lat` is checked against ~103.6-104.0
# (a longitude in Singapore) and `lon` against ~1.2-1.5 (a latitude); the
# names appear swapped relative to their contents -- verify with callers.
loadModel <- function(lat, lon, timeOfDay) {
  if (lat > 104.01696 | lat < 103.61323 | lon > 1.46993 | lon < 1.23348) {
    print(-1)
  } else {
    lngGridSize <- 0.05
    latGridSize <- 0.05
    # Linear index of the 0.05-degree grid cell (5 cells per row)
    num <- floor((lat - 103.61323) / latGridSize) * (5) +
      floor((lon - 1.23348) / lngGridSize)
    name <- paste("~/TaxiDataPlotPackage/grid", num, ".rda", sep = "")
    # load() brings `trainModel` into this function's frame on success.
    # Bug fix: previously a failed load printed -1 but execution then
    # continued into compute(), crashing on the missing `trainModel`.
    loaded <- tryCatch({
      load(file = name)
      TRUE
    }, error = function(cond) {
      print(-1)
      FALSE
    })
    if (loaded) {
      test.results <- compute(trainModel, timeOfDay)
      print(test.results$net.result)
    }
  }
}
loadModel(lat, lon, timeOfDay)
| /RScripts/getPrediction.r | no_license | paavoap/sgslhackfest2016 | R | false | false | 891 | r | args <- commandArgs(trailingOnly = TRUE)
list<-unlist(strsplit(args,","))
todList<-unlist(strsplit(list[1]," "))
hhmmss<-todList[5]
hh<-as.numeric(unlist(strsplit(hhmmss,":"))[1])
mm<-as.numeric(unlist(strsplit(hhmmss,":"))[2])
timeOfDay<-(hh+mm/60.0)/24.0
#print(timeOfDay)
library("neuralnet")
lat <-as.numeric(list[2])
lon<-as.numeric( list[3])
loadModel<-function(lat,lon, timeOfDay){
if(lat>104.01696 | lat<103.61323 | lon>1.46993 | lon<1.23348){
print(-1)
}
else{
lngGridSize = 0.05
latGridSize = 0.05
num<-floor((lat-103.61323)/latGridSize)*(5)+floor((lon-1.23348)/lngGridSize)
name<-paste("~/TaxiDataPlotPackage/grid",num,".rda",sep='')
tryCatch(load(file = name),error=function(cond) { print(-1)})
test.results <- compute(trainModel,timeOfDay)
print(test.results$net.result)
}
}
loadModel(lat,lon,timeOfDay)
|
# Find the x-locations of prominent peaks in the kernel density of a
# numeric vector (e.g. per-cell UMI counts).
#
# Approach: estimate the density, pad both tails with zero-density
# points so boundary maxima can be detected, slide a small window along
# the curve keeping positions that are the maximum of their window, and
# finally drop peaks below 10% of the global density maximum.
#
# @param data_umi Numeric vector of observations.
# @return Numeric vector of peak x-positions (possibly empty), in
#   ascending order.
peakfinder <- function(data_umi) {
  dd <- density(data_umi)
  # Window size: ~1% of the number of density grid points, forced odd so
  # the window has a well-defined centre.
  smallBins <- round(length(dd$x) * 0.01, 0)
  if (smallBins %% 2 == 0) {
    smallBins <- smallBins + 1
  }
  half <- (smallBins - 1) / 2   # grid points on each side of the centre
  pad  <- half + 1              # length of each zero tail, (1 + smallBins) / 2
  # Pad the curve with zero-density tails just outside the data range.
  # (The original built the right tail in descending x order, leaving
  # dd$x non-monotone; both tails are ascending here. The padded y
  # values are all zero, so detected peak positions are unaffected.)
  zero_beg <- seq(min(dd$x) - 0.01, min(dd$x), length.out = pad)
  zero_end <- seq(max(dd$x), max(dd$x) + 0.01, length.out = pad)
  dd$x <- c(zero_beg, dd$x, zero_end)
  dd$y <- c(rep(0, pad), dd$y, rep(0, pad))
  # Candidate centre positions where the window fits inside the curve.
  isRange <- pad:(length(dd$y) - half)
  # TRUE where the centre of the window is the window's maximum.
  isLocalMax <- vapply(isRange, function(i) {
    which.max(dd$y[(i - half):(i + half)]) == half + 1
  }, logical(1))
  # Bug fix: the original indexed dd$x/dd$y with which(isLocalMax),
  # i.e. positions relative to isRange, which shifted every reported
  # peak left by `half` grid points. Map back to absolute indices.
  peak_idx <- isRange[isLocalMax]
  # Keep only peaks taller than 10% of the global density maximum.
  # (Base subsetting also drops the original's implicit dplyr
  # dependency: it called %>%/filter() without loading dplyr.)
  keep <- dd$y[peak_idx] > max(dd$y) * 0.1
  dd$x[peak_idx[keep]]
}
| /templates/peakfinder.R | no_license | mariafiruleva/automated_processing_scrnaseq | R | false | false | 1,164 | r |
peakfinder <- function(data_umi) {
dd <- density(data_umi)
#define window size
smallBins <-
ifelse(round(length(dd$x) * 0.01, 0) %% 2,
round(length(dd$x) * 0.01, 0),
round(length(dd$x) * 0.01, 0) + 1)
#add tails to begining and end of the density plot
zero_beg <-
seq(min(dd$x) - 0.01, min(dd$x), length.out = ((1 + smallBins) / 2))
zero_end <-
seq(max(dd$x) + 0.01, max(dd$x), length.out = ((1 + smallBins) / 2))
dd$x <- c(zero_beg, dd$x, zero_end)
dd$y <-
c(rep(0, (1 + smallBins) / 2), dd$y, rep(0, (1 + smallBins) / 2))
#define range for local maximums searching
isRange <-
((1 + smallBins) / 2):(length(dd$y) - (smallBins - 1) / 2)
#find local maximums inside defined window
isLocalMax <-
sapply(isRange, function(i)
which.max(dd$y[(i - ((smallBins / 2) - 0.5)):(i + ((smallBins / 2) - 0.5))]) == (smallBins / 2) + 0.5)
#filter local maximums with too small values
peaks_x <- dd$x[which(isLocalMax)]
peaks_y <- dd$y[which(isLocalMax)]
peaks <- as.data.frame(cbind(peaks_x, peaks_y))
peaks <- peaks %>% filter(peaks_y > max(dd$y) * 0.1)
peaks <- peaks$peaks_x
peaks
}
|
# Explore Galton's parent/child height data: compare the two height
# distributions side by side, then use manipulate() to interactively
# show how the mean squared error changes as a candidate centre `mu`
# slides across the child heights.
library(UsingR)
data(galton)
library(ggplot2)
library(reshape2)
library(manipulate)
# Plotting histograms of Childs and Parents height
# melt() stacks the two height columns into long format so the
# histograms can be faceted by variable.
longGalton <- melt(galton, measure.vars = c("child","parent"))
g <- ggplot(longGalton, aes(x=value)) +
geom_histogram(aes(y=..density.., fill = variable), binwidth=1, color="black") +
geom_density() +
facet_grid(. ~ variable)
g
# Using manipulate to explore the mean
# Draw the child-height density histogram with a vertical line at `mu`
# and report the mean squared error of `mu` as a centre estimate in the
# plot title.
myHist <- function(mu){
g <- ggplot(galton, aes(x=child)) +
geom_histogram(fill = "salmon", binwidth=1, aes(y=..density..), color="black") +
geom_density() +
geom_vline(xintercept=mu, size=2)
mse <- round(mean((galton$child - mu)^2), 3)
g <- g + labs(title=paste('mu = ', mu, ' MSE = ', mse))
g
}
# Slider over plausible heights (inches); the MSE is minimized when mu
# equals the sample mean.
manipulate(myHist(mu), mu = slider(62,74, step = 0.1))
| /statistical-inference/centerOfMass.R | no_license | rudra-shukla/datasciencecoursera | R | false | false | 809 | r | library(UsingR)
data(galton)
library(ggplot2)
library(reshape2)
library(manipulate)
# Plotting histograms of Childs and Parents height
longGalton <- melt(galton, measure.vars = c("child","parent"))
g <- ggplot(longGalton, aes(x=value)) +
geom_histogram(aes(y=..density.., fill = variable), binwidth=1, color="black") +
geom_density() +
facet_grid(. ~ variable)
g
# Using manipulate to explore the mean
myHist <- function(mu){
g <- ggplot(galton, aes(x=child)) +
geom_histogram(fill = "salmon", binwidth=1, aes(y=..density..), color="black") +
geom_density() +
geom_vline(xintercept=mu, size=2)
mse <- round(mean((galton$child - mu)^2), 3)
g <- g + labs(title=paste('mu = ', mu, ' MSE = ', mse))
g
}
manipulate(myHist(mu), mu = slider(62,74, step = 0.1))
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
rcpparma_hello_world <- function() {
.Call(`_DMVI_rcpparma_hello_world`)
}
rcpparma_outerproduct <- function(x) {
.Call(`_DMVI_rcpparma_outerproduct`, x)
}
rcpparma_innerproduct <- function(x) {
.Call(`_DMVI_rcpparma_innerproduct`, x)
}
rcpparma_bothproducts <- function(x) {
.Call(`_DMVI_rcpparma_bothproducts`, x)
}
| /R/RcppExports.R | no_license | mkoslovsky/DMVI | R | false | false | 465 | r | # Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
rcpparma_hello_world <- function() {
.Call(`_DMVI_rcpparma_hello_world`)
}
rcpparma_outerproduct <- function(x) {
.Call(`_DMVI_rcpparma_outerproduct`, x)
}
rcpparma_innerproduct <- function(x) {
.Call(`_DMVI_rcpparma_innerproduct`, x)
}
rcpparma_bothproducts <- function(x) {
.Call(`_DMVI_rcpparma_bothproducts`, x)
}
|
# Declare symbols used via non-standard evaluation (or bundled data) so
# that `R CMD check` does not flag them as undefined global variables.
utils::globalVariables(c("packmeta", "deptable", "sysdata"))
| /R/globalvars.R | no_license | talegari/pkggraph | R | false | false | 61 | r | utils::globalVariables(c("packmeta", "deptable", "sysdata"))
|
# Sentiment-arc analysis of "Alice in Wonderland" with the syuzhet package.
library(syuzhet)
filename <- "alice.txt"
# Read the whole file as a single string and collapse line breaks to spaces
# so sentence splitting is not broken by hard wraps.
book = gsub( "[\r\n]", " ", readChar(filename, file.info(filename)$size) )
sentences <- get_sentences(book)
# Per-sentence sentiment scores using the AFINN lexicon.
sentiment_vector <- get_sentiment( sentences, method="afinn" )
# plot( sentiment_vector, type="l", main="Example Plot Trajectory", xlab = "Narrative Time", ylab= "Emotional Valence" )
# Smooth the sentiment series to 100 narrative-time points; per the
# low_pass_size argument this appears to apply low-pass (Fourier)
# filtering of the trajectory.
ft_values <- get_transformed_values(sentiment_vector, low_pass_size = 3, x_reverse_len = 100,scale_vals = TRUE,scale_range = FALSE)
percent_vals <- get_percentage_values(sentiment_vector)
#plot(percent_vals, type="l", main="Sentiment Arches in Macbeth", xlab = "Narrative Time", ylab= "Emotional Valence", col="red")
#var(sentiment_vector)
plot(ft_values, type ="h", main ="Sentiment Arches in Carroll's Alice in Wonderland", xlab = "Narrative Time", ylab = "Emotional Valence", col = "red")
| /timbr/source/read.r | no_license | JessieSalas/tambr | R | false | false | 836 | r | library(syuzhet)
filename <- "alice.txt"
book = gsub( "[\r\n]", " ", readChar(filename, file.info(filename)$size) )
sentences <- get_sentences(book)
sentiment_vector <- get_sentiment( sentences, method="afinn" )
# plot( sentiment_vector, type="l", main="Example Plot Trajectory", xlab = "Narrative Time", ylab= "Emotional Valence" )
ft_values <- get_transformed_values(sentiment_vector, low_pass_size = 3, x_reverse_len = 100,scale_vals = TRUE,scale_range = FALSE)
percent_vals <- get_percentage_values(sentiment_vector)
#plot(percent_vals, type="l", main="Sentiment Arches in Macbeth", xlab = "Narrative Time", ylab= "Emotional Valence", col="red")
#var(sentiment_vector)
plot(ft_values, type ="h", main ="Sentiment Arches in Carroll's Alice in Wonderland", xlab = "Narrative Time", ylab = "Emotional Valence", col = "red")
|
# Exploratory Data Analysis course project: fetch the UCI household
# power-consumption data (download + unzip on first run), read it with
# "?" treated as NA, subset to 2007-02-01/2007-02-02, and coerce the
# measurement columns to numeric for the plotting scripts.
setwd("C:/ARCHIVE/DATA_SCIENTIST/EXPLORATORY_DATA/ASSIGNMENT1")
if (!file.exists("household_power_consumption.txt")) {
  temp <- tempfile()
  fileurl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
  download.file(fileurl, temp)
  dir()
  file1 <- unzip(temp)  # path(s) of the extracted file(s)
  unlink(temp)
  cat("Unzip file in working Dir ,", getwd())
  dir()
} else {
  file1 <- "household_power_consumption.txt"
}
cat("Read data in Current Dir")
dir()
# Bug fix: the original passed `na = "?"`, silently relying on partial
# matching of read.table()'s `na.strings` argument; spell it out.
power_table <- read.table(file1, header = TRUE, sep = ";", na.strings = "?")
object.size(power_table)
str(power_table)
head(power_table)
power_table$Date <- as.Date(power_table$Date, format = "%d/%m/%Y")
head(power_table)
# Keep only the two days required by the assignment.
power_table_subset <- power_table[(power_table$Date == "2007-02-01") | (power_table$Date == "2007-02-02"), ]
head(power_table_subset)
summary(power_table_subset)
str(power_table_subset)
# Defensive numeric coercion; with na.strings = "?" these columns are
# normally read as numeric already, so these are no-ops kept for safety.
power_table_subset$Global_active_power <- as.numeric(as.character(power_table_subset$Global_active_power))
power_table_subset$Global_reactive_power <- as.numeric(as.character(power_table_subset$Global_reactive_power))
power_table_subset$Voltage <- as.numeric(as.character(power_table_subset$Voltage))
# Combine Date and Time into one timestamp column.
date_time <- paste(power_table_subset$Date, power_table_subset$Time)
head(date_time)  # fix: was a bare `date_time`, which echoed the entire vector
# NOTE(review): strptime() yields POSIXlt; POSIXct is usually preferred
# inside data frames -- confirm the downstream plot scripts before changing.
power_table_subset$DateTime <- strptime(date_time, format = "%Y-%m-%d %H:%M:%S")
head(power_table_subset$DateTime)
power_table_subset$Sub_metering_1 <- as.numeric(as.character(power_table_subset$Sub_metering_1))
power_table_subset$Sub_metering_2 <- as.numeric(as.character(power_table_subset$Sub_metering_2))
power_table_subset$Sub_metering_3 <- as.numeric(as.character(power_table_subset$Sub_metering_3))
| /loadData.R | no_license | abbahb/ExData_Plotting1 | R | false | false | 1,697 | r | setwd("C:/ARCHIVE/DATA_SCIENTIST/EXPLORATORY_DATA/ASSIGNMENT1")
if (!file.exists("household_power_consumption.txt")) {
## setwd("C:/ARCHIVE/DATA_SCIENTIST/EXPLORATORY_DATA/ASSIGNMENT1")
temp <- tempfile()
fileurl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileurl,temp)
dir()
file1 <- unzip(temp)
unlink(temp)
cat("Unzip file in working Dir ,",getwd())
dir() } else
{ file1 <- "household_power_consumption.txt"}
cat("Read data in Current Dir")
dir()
power_table <- read.table(file1,header = TRUE,sep=";",na = "?")
object.size(power_table)
str(power_table)
head(power_table)
power_table$Date <- as.Date(power_table$Date, format="%d/%m/%Y")
head(power_table)
power_table_subset <- power_table[(power_table$Date == "2007-02-01") | (power_table$Date == "2007-02-02"),]
head(power_table_subset)
summary(power_table_subset)
str(power_table_subset)
power_table_subset$Global_active_power <- as.numeric(as.character(power_table_subset$Global_active_power))
power_table_subset$Global_reactive_power <- as.numeric(as.character(power_table_subset$Global_reactive_power))
power_table_subset$Voltage <- as.numeric(as.character(power_table_subset$Voltage))
date_time <- paste(power_table_subset$Date,power_table_subset$Time)
date_time
power_table_subset$DateTime <- strptime(date_time,format = "%Y-%m-%d %H:%M:%S")
head(power_table_subset$DateTime)
power_table_subset$Sub_metering_1 <- as.numeric(as.character(power_table_subset$Sub_metering_1))
power_table_subset$Sub_metering_2 <- as.numeric(as.character(power_table_subset$Sub_metering_2))
power_table_subset$Sub_metering_3 <- as.numeric(as.character(power_table_subset$Sub_metering_3))
|
# This script runs regressions to compare within-, between-, and pooled estimator of income effect on overall expenditure.
library(ggplot2)
library(reshape2)
library(data.table)
library(plm)        # panel-data estimators: pooling / between / within
library(gridExtra)
library(scales)
library(systemfit)
library(stargazer)  # regression table export (HTML / LaTeX)
# (fix: duplicate library(gridExtra) call removed -- it was loaded twice)
# On error, dump frames to file for post-mortem debugging of batch runs.
options(error = quote({dump.frames(to.file = TRUE)}))
# Historical working directories on other machines, kept for reference:
# setwd("~/Documents/Research/Store switching/processed data")
# plot.wd <- '/Users/chaoqunchen/Desktop'
# source('~/Documents/Research/Store switching/Exercise/main/income_effect/plm_vcovHC.R')
# setwd("/home/brgordon/ccv103/Exercise/run")
# setwd("/kellogg/users/marketing/2661703/Expenditure")
# setwd("/sscc/home/c/ccv103/Exercise/run")
setwd("U:/Users/ccv103/Documents/Research/Store switching/run")
plot.wd <- paste(getwd(), "/results", sep="")
# Plot dimensions (inches) and aspect ratio used by the plotting code.
ww <- 6.5
ww1 <- 8.5
ar <- .8 #.6
# Analysis switches: biweekly prices, CPI adjustment, CSV/plot output.
week.price <- FALSE
cpi.adj <- TRUE
write2csv <- FALSE
make_plot <- TRUE
# Output file stem; suffixes record which switches were active.
fname <- "expenditure_reg_btw"
if(cpi.adj) { fname <- paste(fname, "_cpi", sep="")}
if(week.price){ fname <- paste(fname, "_wkprc", sep="")}
# outxls <- paste(plot.wd, "/", fname, "_", gsub("-", "", as.character(Sys.Date())), ".xlsx", sep="")
# mywb <- createWorkbook()
# sht1 <- createSheet(mywb, "Regression")
# sht2 <- createSheet(mywb, "SUR")
# Load the household biweekly expenditure data matching the price switch.
if(week.price){
load("hh_biweek_exp_20150812.rdata")
}else{
load("hh_biweek_exp.rdata")
}
codebook <- read.csv("code_book.csv")
source("plm_vcovHC.R")   # clustered-SE helper (vcovHC.plm.new) used below
# Extract 5% random sample
# length(unique(hh_exp$household_code))
# sel <- sample(unique(hh_exp$household_code), .01*length(unique(hh_exp$household_code)) )
# hh_exp_save <- hh_exp
# hh_exp <- subset(hh_exp, household_code %in% sel)
#############
# Functions #
#############
# One-step "lead": element i of the result is element i + 1 of x, and
# the final slot is padded with NA.
my_forward <- function(x) {
  lead_values <- x[-1]
  c(lead_values, NA)
}
# One-step lag: element i of the result is element i - 1 of x, and the
# first slot is padded with NA.
my_lag <- function(x) {
  lag_values <- x[-length(x)]
  c(NA, lag_values)
}
# Row-normalized transition matrix between two categorical vectors.
#
# Cross-tabulates x1 (origin state) against x2 (destination state) and
# normalizes each row to sum to 1. A level of x1 with no observations
# gets the corresponding identity-matrix row, i.e. an absorbing
# "stay put" transition.
#
# NOTE(review): the identity-row fallback assumes x1 and x2 have the
# same number of levels (a square table) -- confirm before asymmetric use.
#
# @param x1 Factor (or vector coercible to factor) of origin states.
# @param x2 Factor (or vector coercible to factor) of destination states.
# @return A table of transition proportions whose rows sum to 1.
mytransition <- function(x1, x2){
  # Bug fix: `class(x) != "factor"` yields a length-2 logical for ordered
  # factors (class c("ordered", "factor")), which errors as an `if`
  # condition in modern R; use is.factor() with scalar `||` instead.
  if (!is.factor(x1) || !is.factor(x2)) {
    x1 <- factor(x1)
    x2 <- factor(x2)
  }
  onem <- diag(length(levels(x1)))
  out <- table(x1, x2)
  # Rows with no observations at all get an identity-row fallback.
  sel <- which(apply(out, 1, function(x) all(x == 0)))
  if (length(sel) > 0) { out[sel, ] <- onem[sel, ] }
  return(out / rowSums(out))
}
# Extract the legend grob ("guide-box") from a ggplot object, e.g. so a
# single shared legend can be placed on a multi-panel grid layout.
get_legend <- function(myggplot){
  gtab <- ggplot_gtable(ggplot_build(myggplot))
  grob_names <- vapply(gtab$grobs, function(g) g$name, character(1))
  gtab$grobs[[which(grob_names == "guide-box")]]
}
#################
# Organize data #
#################
# Retail formats
fmt_name <- as.character(sort(unique(fmt_attr$channel_type)))
R <- length(fmt_name)
# Conver some date and factor variables
if(week.price){
# Segment households based on their initial income level
panelist <- data.table(hh_exp)
setkeyv(panelist, c("household_code","year","biweek"))
panelist <- panelist[,list(income = first_income[1], first_famsize = famsize[1]), by=list(household_code)]
tmp <- quantile(panelist$income, c(0, .33, .67, 1))
num_grp <- 3
panelist <- panelist[, first_incomeg := cut(panelist$income, tmp, labels = paste("T", 1:num_grp, sep=""), include.lowest = T)]
hh_exp <- merge(hh_exp, data.frame(panelist)[,c("household_code", "first_incomeg")], by = "household_code", all.x=T )
hh_exp$first_incomeg <- factor(hh_exp$first_incomeg, levels = c("T1", "T2", "T3"))
cat("Table of initial income distribution:\n"); print(table(panelist$first_incomeg)); cat("\n")
cat("Table of segments in the expenditure data:\n"); print(table(hh_exp$first_incomeg)); cat("\n")
}
hh_exp$month <- month(as.Date("2004-1-1", format="%Y-%m-%d") + 14*(hh_exp$biweek-1))
hh_exp$famsize <- factor(hh_exp$famsize, levels = c("Single","Two", "Three+"))
hh_exp$condo <- factor(hh_exp$condo, levels = c(0, 1))
hh_exp$employed <- factor(hh_exp$employed, levels = c(0, 1))
hh_exp$first_incomeg <- as.character(hh_exp$first_incomeg)
hh_exp$first_incomeg <- factor(hh_exp$first_incomeg, levels = paste("T", 1:3, sep=""), labels = c("Low", "Med", "High"))
cat("Table of segments in the expenditure data:\n"); print(table(hh_exp$first_incomeg)); cat("\n")
# Compute expenditure share
sel <- paste("DOL_", gsub("\\s", "_", fmt_name), sep="")
sel1 <- gsub("DOL", "SHR", sel)
for(i in 1:length(fmt_name)){
hh_exp[,sel1[i]] <- hh_exp[,sel[i]]/hh_exp$dol
}
# ------------------- #
# Household-year data #
pan_yr <- data.table(hh_exp)
pan_yr <- pan_yr[,list(Income = unique(income_midvalue), Inc = unique(income_real)), by = list(first_incomeg, household_code, year, cpi)]
setkeyv(pan_yr, c("household_code", "year"))
pan_yr <- pan_yr[, ':='(ln_income = log(Income), cpi_adj_income = Income/cpi, recession = 1*(year >= 2008))]
pan_yr <- pan_yr[,':='(year_id= year - year[1] + 1, tenure = length(year), move = c(0, 1*(diff(Inc)!=0)))
# move = 1*(!all(Inc==Inc[1])))
, by = list(household_code)]
pan_yr <- pan_yr[,move_all := sum(move), by = list(household_code)]
# --------------- #
# Regression data #
mydata <- hh_exp
if(cpi.adj){
mydata$income_midvalue <- mydata$income_midvalue/mydata$cpi
mydata$stone_price <- mydata$stone_price/mydata$cpi
}
mydata$ln_income <- log(mydata$income_midvalue)
sum(mydata$dol == 0 )/nrow(mydata)
annual.week <- 26
mydata$week_income <- mydata$income_midvalue/annual.week
mydata$month <- factor(mydata$month)
mydata$ln_dol <- log(mydata$dol)
mydata$recession <- factor(mydata$recession)
# Add price
if(week.price){
tmp <- dcast(price_dat, scantrack_market_descr+biweek ~ channel_type, value.var = "bsk_price_paid_2004")
colnames(tmp) <- c("scantrack_market_descr", "biweek", paste("PRC_", gsub(" ", "_", fmt_name), sep=""))
mydata <- merge(mydata, tmp, by = c("scantrack_market_descr", "biweek"), all.x = T)
}else{
tmp <- dcast(price_dat, scantrack_market_descr+year ~ channel_type, value.var = "bsk_price_paid_2004")
colnames(tmp) <- c("scantrack_market_descr", "year", paste("PRC_", gsub(" ", "_", fmt_name), sep=""))
dim(mydata)
mydata <- merge(mydata, tmp, by = c("scantrack_market_descr", "year"), all.x = T)
}
dim(mydata)
mydata$year <- as.factor(mydata$year)
# Shopping incidence
tmp_dv <- paste("SHR_", gsub("\\s", "_", fmt_name), sep="")
# for(i in tmp_dv){
# mydata[,i] <- mydata[,i]*100
# }
sel <- paste("DOL_", gsub("\\s", "_", fmt_name), sep="")
sel1 <- gsub("DOL", "IC", sel)
for(i in 1:length(fmt_name)){
mydata[,sel1[i]] <- 1*(mydata[,sel[i]]>0)
}
# Add lagged purchases
mydata <- data.table(mydata)
setkeyv(mydata, c("household_code", "biweek"))
mydata <- mydata[,lag_dol := my_lag(dol), by = list(household_code)]
mydata <- data.frame(mydata)
# mydata <- subset(mydata, dol > 0)
# mydata <- subset(mydata, !is.na(lag_dol))
mydata$ln_income_low <- 1*(mydata$first_incomeg == "Low")*mydata$ln_income
mydata$ln_income_med <- 1*(mydata$first_incomeg == "Med")*mydata$ln_income
mydata$ln_income_high <- 1*(mydata$first_incomeg == "High")*mydata$ln_income
mydata$hh_year <- paste(mydata$household_code, mydata$year, sep = "*")
# Only keep relevant columns
mydata <- mydata[, c("household_code", "biweek","dol", "ln_dol",
paste("DOL_", gsub("\\s", "_", fmt_name), sep=""),
paste("SHR_", gsub("\\s", "_", fmt_name), sep=""),
paste("PRC_", gsub("\\s", "_", fmt_name), sep=""),
paste("IC_", gsub("\\s", "_", fmt_name), sep=""),
"first_incomeg", "ln_income", "ln_income_low", "ln_income_med", "ln_income_high", "week_income",
"year", "month", "lag_dol", "hh_year",
"stone_price", "famsize", "condo", "employed", "NumChild")]
cat("Number of observations with 0 expenditure =", sum(mydata$dol==0), ", or, ", round(sum(mydata$dol==0)/nrow(mydata), 4), ".\n")
############################################
# Single-equation fixed effect regressions #
############################################
#---------------------------------------#
# Set up DV and IV, regression models
dv_vec <- "ln_dol"
myfml <- list(Homogeneity = as.formula("y ~ ln_income + month + famsize + NumChild"),
HomoYear = as.formula("y ~ ln_income + year + month + famsize + NumChild"),
HomoPrice = as.formula("y ~ ln_income + year + month + stone_price + famsize + NumChild"),
Heterogeneity = as.formula("y ~ ln_income*first_incomeg + year + month + famsize + NumChild"),
HeteroYear = as.formula("y ~ ln_income*first_incomeg + month + famsize + NumChild"),
HeteroPrice = as.formula("y ~ ln_income*first_incomeg + year + month + stone_price + famsize + NumChild"))
# Run regressions
regrs <- data.frame()
prc <- proc.time()
sel <- mydata$dol > 0 # Only focus on positive expenditure
modn <- c("pooling", "between", "within")
reg.ls <- setNames(vector("list", length(myfml)*3), paste(rep(names(myfml), 3), modn, sep="*"))
for(j in 1:length(myfml)){
rm(list = intersect(ls(), "myfit"))
tmp <- as.formula(substitute(y ~ x, list(y = as.name(dv_vec), x = terms(myfml[[j]])[[3]])) )
if(!dv_vec %in% c("ln_dol", "dol")){
tmp <- update(tmp, . ~ . + lag_dol)
}
for(k in 1:length(modn)){
myfit <- plm(tmp, data = mydata[sel,], index = c("household_code","biweek"), model=modn[k])
tmpidx <- (j-1)*length(modn) + k
reg.ls[[tmpidx]] <- myfit
tmp2 <- summary(myfit)
tmp3 <- data.frame(model = names(myfml)[j], DV = dv_vec, method = modn[k], tmp2$coefficients)
tmp3$Var<- rownames(tmp3)
rownames(tmp3) <- NULL
regrs <- rbind(regrs, tmp3)
}
print(j)
}
# Compute cluster-robust se
# NOTE: between estimator does not have clustered-se
tmp.coef <- lapply(reg.ls, coeftest)
unc.se <- lapply(tmp.coef, function(x) x[, "Std. Error"])
sel <- grep("between", names(reg.ls))
cls.se1 <- vector("list", length(reg.ls))
for(i in 1:length(reg.ls)){
if(i %in% sel){
cls.se1[[i]] <- unc.se[[i]]
}else{
cls.se1[[i]] <- sqrt(diag(vcovHC.plm.new(reg.ls[[i]], method = "arellano", type = "HC1", cluster = "define", clustervec = mydata[sel,"household_code"])))
}
}
# Export to HTML
stargazer(reg.ls, type = "html", title = "Single-equation fixed effects regression",
align = TRUE, no.space = TRUE,
se = cls.se1, omit.stat = "f",
column.labels = rep(modn, length(myfml)),
omit = c("year", "month"), order=c("ln_income"),
covariate.labels = c("log(I)", "log(I)*Med", "log(I)*High", "Med", "High", "Stone price"),
add.lines = list(c("Year FE", rep(rep(c("N","Y","Y"), each=3),2)),
c("Month FE", rep("Y", length(reg.ls))),
c("HH FE", rep(c("N", "N", "Y"), length(myfml)))),
notes = c("se clustered over households"),
out = paste(plot.wd, "/", fname, "_se_", dv_vec, ".html", sep=""))
# Export to Latex
stargazer(reg.ls, type = "latex", title = "Single-equation fixed effects regression",
align = TRUE, no.space = TRUE,
se = cls.se1, omit.stat = "f",
column.labels = rep(modn, length(myfml)),
omit = c("year", "month"), order=c("ln_income"),
covariate.labels = c("log(I)", "log(I)*Med", "log(I)*High", "Med", "High", "Stone price"),
add.lines = list(c("Year FE", rep(rep(c("N","Y","Y"), each=3),2)),
c("Month FE", rep("Y", length(reg.ls))),
c("HH FE", rep(c("N", "N", "Y"), length(myfml)))),
notes = c("se clustered over households"),
out = paste(plot.wd, "/", fname, "_se_", dv_vec, ".tex", sep=""))
use.time <- proc.time() - prc
cat("Regressions for log(dol) finishes, using", use.time[3]/60, "min.\n")
#############################################################################
# Single-equation fixed effect regressions measureing propensity to consume #
#############################################################################
#---------------------------------------#
# Set up DV and IV, regression models
dv_vec <- "dol"
myfml <- list(Homogeneity = as.formula("y ~ week_income + month"),
HomoYear = as.formula("y ~ week_income + year + month"),
HomoPrice = as.formula("y ~ week_income + year + month + stone_price"),
Heterogeneity = as.formula("y ~ week_income*first_incomeg + month"),
HeteroYear = as.formula("y ~ week_income*first_incomeg + year + month"),
HeteroPrice = as.formula("y ~ week_income*first_incomeg + year + month + stone_price")
)
# Run regressions
regrs.ptc <- data.frame()
prc <- proc.time()
sel <- mydata$dol > 0
reg.linear.ls <- setNames(vector("list", length(myfml)*3), paste(rep(names(myfml), 3), modn, sep="*"))
for(j in 1:length(myfml)){
rm(list = "myfit")
tmp <- as.formula(substitute(y ~ x, list(y = as.name(dv_vec), x = terms(myfml[[j]])[[3]])) )
if(!dv_vec %in% c("ln_dol", "dol")){
tmp <- update(tmp, . ~ . + lag_dol)
}
for(k in 1:length(modn)){
myfit <- plm(tmp, data = mydata[sel,], index = c("household_code","biweek"), model=modn[k])
tmpidx <- (j-1)*length(modn) + k
reg.linear.ls[[tmpidx]] <- myfit
tmp2 <- summary(myfit)
tmp3 <- data.frame(model = names(myfml)[j], DV = dv_vec, method = modn[k], tmp2$coefficients)
tmp3$Var<- rownames(tmp3)
rownames(tmp3) <- NULL
regrs.ptc <- rbind(regrs.ptc, tmp3)
}
}
# Compute cluster-robust se
tmp.coef <- lapply(reg.linear.ls, coeftest)
unc.se <- lapply(tmp.coef, function(x) x[, "Std. Error"])
sel <- grep("between", names(reg.ls))
cls.se1 <- vector("list", length(reg.ls))
for(i in 1:length(reg.ls)){
if(i %in% sel){
cls.se1[[i]] <- unc.se[[i]]
}else{
cls.se1[[i]] <- sqrt(diag(vcovHC.plm.new(reg.linear.ls[[i]], method = "arellano", type = "HC1", cluster = "define", clustervec = mydata[sel,"household_code"])))
}
}
# Export estimation table
stargazer(reg.linear.ls, type = "html", title = "Single-equation fixed effects regression of expenditure on weekly income",
align = TRUE, no.space = TRUE, se = cls.se1, omit.stat = "f",
column.labels = rep(modn, length(myfml)),
omit = c("year", "month"), order=c("week_income"),
covariate.labels = c("I", "I*Med", "I*High", "Med", "High", "Stone price"),
add.lines = list(c("Year FE", rep(rep(c("N","Y","Y"), each=3),2)),
c("Month FE", rep("Y", length(reg.ls))),
c("HH FE", rep(c("N", "N", "Y"), length(myfml)))),
notes = c("se clustered over households"),
out = paste(plot.wd, "/", fname, "_septc_", dv_vec, ".html", sep=""))
stargazer(reg.linear.ls, type = "latex", title = "Single-equation fixed effects regression of expenditure on weekly income",
align = TRUE, no.space = TRUE, se = cls.se1, omit.stat = "f",
column.labels = rep(modn, length(myfml)),
omit = c("year", "month"), order=c("week_income"),
covariate.labels = c("I", "I*Med", "I*High", "Med", "High", "Stone price"),
add.lines = list(c("Year FE", rep(rep(c("N","Y","Y"), each=3),2)),
c("Month FE", rep("Y", length(reg.ls))),
c("HH FE", rep(c("N", "N", "Y"), length(myfml)))),
notes = c("se clustered over households"),
out = paste(plot.wd, "/", fname, "_septc_", dv_vec, ".tex", sep=""))
use.time <- proc.time() - prc
cat("Regressions for", dv_vec, "finishes, using", use.time[3]/60, "min.\n")
# Save results
rm(list = intersect(ls(), c("tmp", "tmp1", "tmp2", "tmp3", "tmp4", "use.time", "make_plot", "prc","tmp.coef", "tmp_dv",
"i", "j", "sel", "sel1", "myfml", "tmpidx")))
save.image(file = paste(plot.wd, "/", fname, "_", as.character(Sys.Date()), ".rdata", sep=""))
cat("This program is done.") | /main/income_effect/income_effect_reg_exp_between.R | no_license | Superet/Expenditure | R | false | false | 15,056 | r | # This script runs regressions to compare within-, between-, and pooled estimator of income effect on overall expenditure.
library(ggplot2)
library(reshape2)
library(data.table)
library(plm)
library(gridExtra)
library(scales)
library(systemfit)
library(stargazer)
library(gridExtra)
options(error = quote({dump.frames(to.file = TRUE)}))
# setwd("~/Documents/Research/Store switching/processed data")
# plot.wd <- '/Users/chaoqunchen/Desktop'
# source('~/Documents/Research/Store switching/Exercise/main/income_effect/plm_vcovHC.R')
# setwd("/home/brgordon/ccv103/Exercise/run")
# setwd("/kellogg/users/marketing/2661703/Expenditure")
# setwd("/sscc/home/c/ccv103/Exercise/run")
setwd("U:/Users/ccv103/Documents/Research/Store switching/run")
plot.wd <- paste(getwd(), "/results", sep="")
ww <- 6.5
ww1 <- 8.5
ar <- .8 #.6
week.price <- FALSE
cpi.adj <- TRUE
write2csv <- FALSE
make_plot <- TRUE
fname <- "expenditure_reg_btw"
if(cpi.adj) { fname <- paste(fname, "_cpi", sep="")}
if(week.price){ fname <- paste(fname, "_wkprc", sep="")}
# outxls <- paste(plot.wd, "/", fname, "_", gsub("-", "", as.character(Sys.Date())), ".xlsx", sep="")
# mywb <- createWorkbook()
# sht1 <- createSheet(mywb, "Regression")
# sht2 <- createSheet(mywb, "SUR")
if(week.price){
load("hh_biweek_exp_20150812.rdata")
}else{
load("hh_biweek_exp.rdata")
}
codebook <- read.csv("code_book.csv")
source("plm_vcovHC.R")
# Extract 5% random sample
# length(unique(hh_exp$household_code))
# sel <- sample(unique(hh_exp$household_code), .01*length(unique(hh_exp$household_code)) )
# hh_exp_save <- hh_exp
# hh_exp <- subset(hh_exp, household_code %in% sel)
#############
# Functions #
#############
my_forward <- function(x){
return(c(x[-1], NA))
}
my_lag <- function(x){
return(c(NA, x[-length(x)]))
}
mytransition <- function(x1, x2){
if(class(x1)!="factor" | class(x2) != "factor"){
x1 <- factor(x1)
x2 <- factor(x2)
}
onem <- diag(length(levels(x1)))
out <- table(x1, x2)
sel <- which(apply(out, 1, function(x) all(x==0)))
if(length(sel)>0) { out[sel,] <- onem[sel,] }
return(out/rowSums(out))
}
get_legend<-function(myggplot){
tmp <- ggplot_gtable(ggplot_build(myggplot))
leg <- which(sapply(tmp$grobs, function(x) x$name) == "guide-box")
legend <- tmp$grobs[[leg]]
return(legend)
}
#################
# Organize data #
#################
# Retail formats
fmt_name <- as.character(sort(unique(fmt_attr$channel_type)))
R <- length(fmt_name)
# Conver some date and factor variables
if(week.price){
# Segment households based on their initial income level
panelist <- data.table(hh_exp)
setkeyv(panelist, c("household_code","year","biweek"))
panelist <- panelist[,list(income = first_income[1], first_famsize = famsize[1]), by=list(household_code)]
tmp <- quantile(panelist$income, c(0, .33, .67, 1))
num_grp <- 3
panelist <- panelist[, first_incomeg := cut(panelist$income, tmp, labels = paste("T", 1:num_grp, sep=""), include.lowest = T)]
hh_exp <- merge(hh_exp, data.frame(panelist)[,c("household_code", "first_incomeg")], by = "household_code", all.x=T )
hh_exp$first_incomeg <- factor(hh_exp$first_incomeg, levels = c("T1", "T2", "T3"))
cat("Table of initial income distribution:\n"); print(table(panelist$first_incomeg)); cat("\n")
cat("Table of segments in the expenditure data:\n"); print(table(hh_exp$first_incomeg)); cat("\n")
}
hh_exp$month <- month(as.Date("2004-1-1", format="%Y-%m-%d") + 14*(hh_exp$biweek-1))
hh_exp$famsize <- factor(hh_exp$famsize, levels = c("Single","Two", "Three+"))
hh_exp$condo <- factor(hh_exp$condo, levels = c(0, 1))
hh_exp$employed <- factor(hh_exp$employed, levels = c(0, 1))
hh_exp$first_incomeg <- as.character(hh_exp$first_incomeg)
hh_exp$first_incomeg <- factor(hh_exp$first_incomeg, levels = paste("T", 1:3, sep=""), labels = c("Low", "Med", "High"))
cat("Table of segments in the expenditure data:\n"); print(table(hh_exp$first_incomeg)); cat("\n")
# Compute expenditure share
sel <- paste("DOL_", gsub("\\s", "_", fmt_name), sep="")
sel1 <- gsub("DOL", "SHR", sel)
for(i in 1:length(fmt_name)){
hh_exp[,sel1[i]] <- hh_exp[,sel[i]]/hh_exp$dol
}
# ------------------- #
# Household-year data #
pan_yr <- data.table(hh_exp)
pan_yr <- pan_yr[,list(Income = unique(income_midvalue), Inc = unique(income_real)), by = list(first_incomeg, household_code, year, cpi)]
setkeyv(pan_yr, c("household_code", "year"))
pan_yr <- pan_yr[, ':='(ln_income = log(Income), cpi_adj_income = Income/cpi, recession = 1*(year >= 2008))]
pan_yr <- pan_yr[,':='(year_id= year - year[1] + 1, tenure = length(year), move = c(0, 1*(diff(Inc)!=0)))
# move = 1*(!all(Inc==Inc[1])))
, by = list(household_code)]
# Household-level total move count (data.table by-reference update per household).
pan_yr <- pan_yr[,move_all := sum(move), by = list(household_code)]
# --------------- #
# Regression data #
mydata <- hh_exp
# Deflate dollar-valued variables to real terms when CPI adjustment is requested.
if(cpi.adj){
mydata$income_midvalue <- mydata$income_midvalue/mydata$cpi
mydata$stone_price <- mydata$stone_price/mydata$cpi
}
mydata$ln_income <- log(mydata$income_midvalue)
# Share of observations with zero expenditure (bare expression; value is
# printed interactively and discarded when sourced).
sum(mydata$dol == 0 )/nrow(mydata)
# Convert annual income to a biweekly figure (26 biweeks per year).
annual.week <- 26
mydata$week_income <- mydata$income_midvalue/annual.week
mydata$month <- factor(mydata$month)
mydata$ln_dol <- log(mydata$dol)
mydata$recession <- factor(mydata$recession)
# Add price
# Merge basket prices (one PRC_<format> column per channel) by market, at
# either the biweek or the year level.
# NOTE(review): gsub(" ", ...) is used here while later code uses
# gsub("\\s", ...); equivalent for plain spaces, but confirm fmt_name never
# contains tabs.
if(week.price){
tmp <- dcast(price_dat, scantrack_market_descr+biweek ~ channel_type, value.var = "bsk_price_paid_2004")
colnames(tmp) <- c("scantrack_market_descr", "biweek", paste("PRC_", gsub(" ", "_", fmt_name), sep=""))
mydata <- merge(mydata, tmp, by = c("scantrack_market_descr", "biweek"), all.x = T)
}else{
tmp <- dcast(price_dat, scantrack_market_descr+year ~ channel_type, value.var = "bsk_price_paid_2004")
colnames(tmp) <- c("scantrack_market_descr", "year", paste("PRC_", gsub(" ", "_", fmt_name), sep=""))
dim(mydata)
mydata <- merge(mydata, tmp, by = c("scantrack_market_descr", "year"), all.x = T)
}
dim(mydata)
mydata$year <- as.factor(mydata$year)
# Shopping incidence
tmp_dv <- paste("SHR_", gsub("\\s", "_", fmt_name), sep="")
# for(i in tmp_dv){
# mydata[,i] <- mydata[,i]*100
# }
# Incidence dummies: IC_<format> = 1 when the DOL_<format> expenditure is positive.
sel <- paste("DOL_", gsub("\\s", "_", fmt_name), sep="")
sel1 <- gsub("DOL", "IC", sel)
for(i in 1:length(fmt_name)){
mydata[,sel1[i]] <- 1*(mydata[,sel[i]]>0)
}
# Add lagged purchases
# Lag computed within household after sorting by (household_code, biweek).
mydata <- data.table(mydata)
setkeyv(mydata, c("household_code", "biweek"))
mydata <- mydata[,lag_dol := my_lag(dol), by = list(household_code)]
mydata <- data.frame(mydata)
# mydata <- subset(mydata, dol > 0)
# mydata <- subset(mydata, !is.na(lag_dol))
# log(income) interacted with the household's initial income group, for
# heterogeneous-effect specifications below.
mydata$ln_income_low <- 1*(mydata$first_incomeg == "Low")*mydata$ln_income
mydata$ln_income_med <- 1*(mydata$first_incomeg == "Med")*mydata$ln_income
mydata$ln_income_high <- 1*(mydata$first_incomeg == "High")*mydata$ln_income
mydata$hh_year <- paste(mydata$household_code, mydata$year, sep = "*")
# Only keep relevant columns
mydata <- mydata[, c("household_code", "biweek","dol", "ln_dol",
paste("DOL_", gsub("\\s", "_", fmt_name), sep=""),
paste("SHR_", gsub("\\s", "_", fmt_name), sep=""),
paste("PRC_", gsub("\\s", "_", fmt_name), sep=""),
paste("IC_", gsub("\\s", "_", fmt_name), sep=""),
"first_incomeg", "ln_income", "ln_income_low", "ln_income_med", "ln_income_high", "week_income",
"year", "month", "lag_dol", "hh_year",
"stone_price", "famsize", "condo", "employed", "NumChild")]
cat("Number of observations with 0 expenditure =", sum(mydata$dol==0), ", or, ", round(sum(mydata$dol==0)/nrow(mydata), 4), ".\n")
############################################
# Single-equation fixed effect regressions #
############################################
#---------------------------------------#
# Set up DV and IV, regression models
dv_vec <- "ln_dol"
# FIX(review): the "Heterogeneity" and "HeteroYear" formulas were swapped in
# the original (Heterogeneity carried year FE while HeteroYear did not),
# contradicting the Homo* naming pattern, the second section's formulas, and
# the "Year FE" annotation rows (N,Y,Y per triple) passed to stargazer below.
myfml <- list(Homogeneity = as.formula("y ~ ln_income + month + famsize + NumChild"),
HomoYear = as.formula("y ~ ln_income + year + month + famsize + NumChild"),
HomoPrice = as.formula("y ~ ln_income + year + month + stone_price + famsize + NumChild"),
Heterogeneity = as.formula("y ~ ln_income*first_incomeg + month + famsize + NumChild"),
HeteroYear = as.formula("y ~ ln_income*first_incomeg + year + month + famsize + NumChild"),
HeteroPrice = as.formula("y ~ ln_income*first_incomeg + year + month + stone_price + famsize + NumChild"))
# Run regressions
regrs <- data.frame()
prc <- proc.time()
sel <- mydata$dol > 0 # Only focus on positive expenditure
modn <- c("pooling", "between", "within")
# FIX(review): use rep(..., each = 3) so list names follow the fill order in
# the loop below (model1*pooling, model1*between, model1*within, ...);
# rep(names(myfml), 3) produced duplicated, misaligned names.
reg.ls <- setNames(vector("list", length(myfml)*3), paste(rep(names(myfml), each = 3), modn, sep="*"))
for(j in 1:length(myfml)){
rm(list = intersect(ls(), "myfit"))
# Substitute the chosen DV for `y` and the j-th model's RHS for `x`.
tmp <- as.formula(substitute(y ~ x, list(y = as.name(dv_vec), x = terms(myfml[[j]])[[3]])) )
if(!dv_vec %in% c("ln_dol", "dol")){
tmp <- update(tmp, . ~ . + lag_dol)
}
for(k in 1:length(modn)){
# Panel regression on positive-expenditure rows only.
myfit <- plm(tmp, data = mydata[sel,], index = c("household_code","biweek"), model=modn[k])
tmpidx <- (j-1)*length(modn) + k
reg.ls[[tmpidx]] <- myfit
tmp2 <- summary(myfit)
tmp3 <- data.frame(model = names(myfml)[j], DV = dv_vec, method = modn[k], tmp2$coefficients)
tmp3$Var <- rownames(tmp3)
rownames(tmp3) <- NULL
regrs <- rbind(regrs, tmp3)
}
print(j)
}
# Compute cluster-robust se
# NOTE: between estimator does not have clustered-se
tmp.coef <- lapply(reg.ls, coeftest)
unc.se <- lapply(tmp.coef, function(x) x[, "Std. Error"])
# FIX(review): the original overwrote `sel` (the positive-expenditure row
# filter) with grep() indices and then used it to subset the cluster vector,
# so vcovHC.plm.new received a clustervec of the wrong rows and length.
# Keep the between-model positions in a separate variable instead.
btw.idx <- grep("between", names(reg.ls))
cls.se1 <- vector("list", length(reg.ls))
for(i in 1:length(reg.ls)){
if(i %in% btw.idx){
cls.se1[[i]] <- unc.se[[i]]
}else{
cls.se1[[i]] <- sqrt(diag(vcovHC.plm.new(reg.ls[[i]], method = "arellano", type = "HC1", cluster = "define", clustervec = mydata[sel,"household_code"])))
}
}
# Export to HTML
stargazer(reg.ls, type = "html", title = "Single-equation fixed effects regression",
align = TRUE, no.space = TRUE,
se = cls.se1, omit.stat = "f",
column.labels = rep(modn, length(myfml)),
omit = c("year", "month"), order=c("ln_income"),
covariate.labels = c("log(I)", "log(I)*Med", "log(I)*High", "Med", "High", "Stone price"),
add.lines = list(c("Year FE", rep(rep(c("N","Y","Y"), each=3),2)),
c("Month FE", rep("Y", length(reg.ls))),
c("HH FE", rep(c("N", "N", "Y"), length(myfml)))),
notes = c("se clustered over households"),
out = paste(plot.wd, "/", fname, "_se_", dv_vec, ".html", sep=""))
# Export to Latex
stargazer(reg.ls, type = "latex", title = "Single-equation fixed effects regression",
align = TRUE, no.space = TRUE,
se = cls.se1, omit.stat = "f",
column.labels = rep(modn, length(myfml)),
omit = c("year", "month"), order=c("ln_income"),
covariate.labels = c("log(I)", "log(I)*Med", "log(I)*High", "Med", "High", "Stone price"),
add.lines = list(c("Year FE", rep(rep(c("N","Y","Y"), each=3),2)),
c("Month FE", rep("Y", length(reg.ls))),
c("HH FE", rep(c("N", "N", "Y"), length(myfml)))),
notes = c("se clustered over households"),
out = paste(plot.wd, "/", fname, "_se_", dv_vec, ".tex", sep=""))
use.time <- proc.time() - prc
cat("Regressions for log(dol) finishes, using", use.time[3]/60, "min.\n")
#############################################################################
# Single-equation fixed effect regressions measuring propensity to consume #
#############################################################################
#---------------------------------------#
# Set up DV and IV, regression models
dv_vec <- "dol"
myfml <- list(Homogeneity = as.formula("y ~ week_income + month"),
HomoYear = as.formula("y ~ week_income + year + month"),
HomoPrice = as.formula("y ~ week_income + year + month + stone_price"),
Heterogeneity = as.formula("y ~ week_income*first_incomeg + month"),
HeteroYear = as.formula("y ~ week_income*first_incomeg + year + month"),
HeteroPrice = as.formula("y ~ week_income*first_incomeg + year + month + stone_price")
)
# Run regressions
regrs.ptc <- data.frame()
prc <- proc.time()
sel <- mydata$dol > 0
# FIX(review): names built with rep(..., each = 3) so they track the fill
# order below (model1*pooling, model1*between, model1*within, ...).
reg.linear.ls <- setNames(vector("list", length(myfml)*3), paste(rep(names(myfml), each = 3), modn, sep="*"))
for(j in 1:length(myfml)){
# FIX(review): rm(list = "myfit") errored on the first iteration when
# `myfit` did not yet exist; guard with intersect() as in the section above.
rm(list = intersect(ls(), "myfit"))
tmp <- as.formula(substitute(y ~ x, list(y = as.name(dv_vec), x = terms(myfml[[j]])[[3]])) )
if(!dv_vec %in% c("ln_dol", "dol")){
tmp <- update(tmp, . ~ . + lag_dol)
}
for(k in 1:length(modn)){
myfit <- plm(tmp, data = mydata[sel,], index = c("household_code","biweek"), model=modn[k])
tmpidx <- (j-1)*length(modn) + k
reg.linear.ls[[tmpidx]] <- myfit
tmp2 <- summary(myfit)
tmp3 <- data.frame(model = names(myfml)[j], DV = dv_vec, method = modn[k], tmp2$coefficients)
tmp3$Var <- rownames(tmp3)
rownames(tmp3) <- NULL
regrs.ptc <- rbind(regrs.ptc, tmp3)
}
}
# Compute cluster-robust se
tmp.coef <- lapply(reg.linear.ls, coeftest)
unc.se <- lapply(tmp.coef, function(x) x[, "Std. Error"])
# FIX(review): this section referred to `reg.ls` (the previous section's
# model list) instead of `reg.linear.ls`, and clobbered `sel` with grep()
# indices before using it as the row filter for the cluster vector.
btw.idx <- grep("between", names(reg.linear.ls))
cls.se1 <- vector("list", length(reg.linear.ls))
for(i in 1:length(reg.linear.ls)){
if(i %in% btw.idx){
cls.se1[[i]] <- unc.se[[i]]
}else{
cls.se1[[i]] <- sqrt(diag(vcovHC.plm.new(reg.linear.ls[[i]], method = "arellano", type = "HC1", cluster = "define", clustervec = mydata[sel,"household_code"])))
}
}
# Export estimation table
stargazer(reg.linear.ls, type = "html", title = "Single-equation fixed effects regression of expenditure on weekly income",
align = TRUE, no.space = TRUE, se = cls.se1, omit.stat = "f",
column.labels = rep(modn, length(myfml)),
omit = c("year", "month"), order=c("week_income"),
covariate.labels = c("I", "I*Med", "I*High", "Med", "High", "Stone price"),
add.lines = list(c("Year FE", rep(rep(c("N","Y","Y"), each=3),2)),
c("Month FE", rep("Y", length(reg.linear.ls))),
c("HH FE", rep(c("N", "N", "Y"), length(myfml)))),
notes = c("se clustered over households"),
out = paste(plot.wd, "/", fname, "_septc_", dv_vec, ".html", sep=""))
stargazer(reg.linear.ls, type = "latex", title = "Single-equation fixed effects regression of expenditure on weekly income",
align = TRUE, no.space = TRUE, se = cls.se1, omit.stat = "f",
column.labels = rep(modn, length(myfml)),
omit = c("year", "month"), order=c("week_income"),
covariate.labels = c("I", "I*Med", "I*High", "Med", "High", "Stone price"),
add.lines = list(c("Year FE", rep(rep(c("N","Y","Y"), each=3),2)),
c("Month FE", rep("Y", length(reg.linear.ls))),
c("HH FE", rep(c("N", "N", "Y"), length(myfml)))),
notes = c("se clustered over households"),
out = paste(plot.wd, "/", fname, "_septc_", dv_vec, ".tex", sep=""))
use.time <- proc.time() - prc
cat("Regressions for", dv_vec, "finishes, using", use.time[3]/60, "min.\n")
# Save results
rm(list = intersect(ls(), c("tmp", "tmp1", "tmp2", "tmp3", "tmp4", "use.time", "make_plot", "prc","tmp.coef", "tmp_dv",
"i", "j", "sel", "sel1", "myfml", "tmpidx", "btw.idx")))
save.image(file = paste(plot.wd, "/", fname, "_", as.character(Sys.Date()), ".rdata", sep=""))
cat("This program is done.")
#######################################################
# Script models optimisation
#
# By Menyssa CHERIFA
# GITHUB : https://github.com/afroinshape/AHE.git
#######################################################
#######################################
# # Setup & data loading
#######################################
# Paths to the shared R toolbox (packages/functions) and the cleaned data.
dest_r <- "/home/mcherifa/Mimic/scripts/R/toolbox/"
dest_d <- "/home/mcherifa/Mimic/data/clean/"
source(paste0(dest_r,"packages.R"))
source(paste0(dest_r,"fonctions.R"))
# Load the data
df <- readRDS(paste0(dest_d,"fichier_wide_periode.rds"))
# Management
# Keep only rows with eevent == 0, then drop identifier/bookkeeping columns.
df <- df[which(df$eevent == 0),]
df <- subset(df, select = c( - id, - periode, - identif.x,- identif.y,-eevent))
# Variables factors
# Coerce the first five columns to factors.
df[,c(1:5)] <- data.frame(lapply(df[,c(1:5)], as.factor))
# First rows of the data
# head(df)
#######################################
# Deep learning optimisation
#######################################
# 1 - Function f( size, decay)
# Fit a single-hidden-layer neural net with `i` hidden units and weight
# decay `j`, then return its AUC on the held-out test set.
#
# NOTE(review): depends on globals `train`, `test`, and `graine` (RNG seed).
# nnet::nnet() is single-threaded, so the cluster created here is kept only
# to preserve the original side effects (doParallel registration); it is now
# guaranteed to be released even if fitting or scoring fails.
#
# @param i integer, number of hidden units (nnet `size`).
# @param j numeric, weight-decay penalty (nnet `decay`).
# @return numeric AUC of the fitted model on `test`.
deep_optimisation <- function(i,j){
cl <- makeCluster(detectCores()-1)
registerDoParallel(cl)
on.exit(stopCluster(cl), add = TRUE)  # release workers on exit or error
set.seed(graine)  # reproducible weight initialisation
rn <- nnet::nnet(event ~ ., data = train, size = i,
decay = j, MaxNWts = 100000, maxit = 100, trace = FALSE)
# roc(response, predictor): AUC of predicted probabilities vs observed events.
AUC <- pROC::roc(as.numeric(test$event),
as.numeric(predict(rn, newdata = test, type = "raw")))$auc
AUC
}
# 2 - Function f(maxit)
# Same as deep_optimisation() but with size/decay fixed at the grid-search
# optimum (13 units, decay 1.6) and a variable iteration budget.
#
# NOTE(review): depends on globals `train`, `test`, and `graine`; the unused
# cluster is kept for parity with the original, but is now always released.
#
# @param i integer, maximum number of training iterations (nnet `maxit`).
# @return numeric AUC of the fitted model on `test`.
deep_optimisation_maxit <- function(i){
cl <- makeCluster(detectCores()-1)
registerDoParallel(cl)
on.exit(stopCluster(cl), add = TRUE)  # release workers on exit or error
set.seed(graine)
rn <- nnet::nnet(event ~ ., data = train, size = 13,
decay = 1.6, MaxNWts = 100000, maxit = i, trace = FALSE)
AUC <- pROC::roc(as.numeric(test$event),
as.numeric(predict(rn, newdata = test, type = "raw")))$auc
AUC
}
############ 1
# Grid search over hidden-layer size (i) and weight decay (j).
i <- seq(1, 20, by = 1)
j <- seq(0, 5, by = 0.1)
# `beta` is a matrix of AUCs: the inner loop over `j` (.combine = 'c')
# fills rows, the outer loop over `i` (.combine = 'cbind') fills columns,
# so rows index decay values and columns index size values.
beta <- foreach(siz = i, .combine = 'cbind') %:%
foreach(dec = j, .combine = 'c') %dopar% {
deep_optimisation(siz, dec)
}
# FIX(review): the original indexed `i` by the row and `j` by the column of
# the best cell, i.e. size and decay were swapped. Rows correspond to j
# (decay) and columns to i (size); take the first maximum if there are ties.
best <- which(beta == max(beta), arr.ind = TRUE)
size <- i[best[1, 2]]
decay <- j[best[1, 1]]
print(size) # 13
print(decay) # 1.6
############ 2
#i <- seq(100, 1000,by = 50)
#beta <- foreach(iter = i,.combine = 'c') %dopar% {
# deep_optimisation_maxit(iter)
#}
| /scripts/R/toolbox/optimisation.R | no_license | mcherifa/MIMIC-II | R | false | false | 2,312 | r | #######################################################
# Script models optimisation
#
# By Menyssa CHERIFA
# GITHUB : https://github.com/afroinshape/AHE.git
#######################################################
#######################################
# # Setup & data loading
#######################################
# Paths to the shared R toolbox (packages/functions) and the cleaned data.
dest_r <- "/home/mcherifa/Mimic/scripts/R/toolbox/"
dest_d <- "/home/mcherifa/Mimic/data/clean/"
source(paste0(dest_r,"packages.R"))
source(paste0(dest_r,"fonctions.R"))
# Load the data
df <- readRDS(paste0(dest_d,"fichier_wide_periode.rds"))
# Management
# Keep only rows with eevent == 0, then drop identifier/bookkeeping columns.
df <- df[which(df$eevent == 0),]
df <- subset(df, select = c( - id, - periode, - identif.x,- identif.y,-eevent))
# Variables factors
df[,c(1:5)] <- data.frame(lapply(df[,c(1:5)], as.factor))
# First rows of the data
# head(df)
#######################################
# Deep learning optimisation
#######################################
# 1 - Function f( size, decay)
# Fits a net with `size` = i hidden units and `decay` = j, returns AUC on
# the held-out set. NOTE(review): relies on globals `train`, `test`,
# `graine`; the cluster is not closed if nnet/pROC errors (consider on.exit()),
# and nnet itself does not use the cluster.
deep_optimisation <- function(i,j){
cl <- makeCluster(detectCores()-1)
registerDoParallel(cl)
set.seed(graine)
rn <- nnet::nnet(event ~ ., data = train ,size = i,
decay = j ,MaxNWts = 100000, maxit = 100, trace = F)
AUC <- pROC::roc(as.numeric(test$event),
as.numeric(predict(rn,newdata = test,type = "raw")))$auc
stopCluster(cl)
return(AUC)
}
# 2 - Function f(maxit)
# Same as above with size/decay fixed at the grid-search optimum (13, 1.6);
# varies the number of training iterations.
deep_optimisation_maxit <- function(i){
cl <- makeCluster(detectCores()-1)
registerDoParallel(cl)
set.seed(graine)
rn <- nnet::nnet(event ~ ., data = train ,size = 13,
decay = 1.6 ,MaxNWts = 100000, maxit = i, trace = F)
AUC <- pROC::roc(as.numeric(test$event),
as.numeric(predict(rn,newdata = test,type = "raw")))$auc
stopCluster(cl)
return(AUC)
}
############ 1
# Grid search: inner loop over j combines with 'c' (rows), outer over i with
# 'cbind' (columns), so rows of `beta` index decay and columns index size.
i <- seq(1, 20,by = 1)
j <- seq(0, 5,by = 0.1 )
beta <- foreach(siz = i,.combine = 'cbind') %:%
foreach(dec = j, .combine ='c') %dopar% {
deep_optimisation(siz,dec)
}
# NOTE(review): rows of `beta` index j (decay) and columns index i (size),
# so the two lines below look row/column swapped -- verify before reuse.
size <- i[which(beta == max(beta), arr.ind = T)[1]]
decay <- j[which(beta == max(beta), arr.ind = T)[2]]
print(size) # 13
print(decay) # 1.6
############ 2
#i <- seq(100, 1000,by = 50)
#beta <- foreach(iter = i,.combine = 'c') %dopar% {
# deep_optimisation_maxit(iter)
#}
|
#' Grade Level Readability by Grouping Variables
#'
#' Calculate the Flesch Kincaid, Gunning Fog Index, Coleman Liau, SMOG,
#' Automated Readability Index and an average of the 5 readability scores.
#'
#' @param x A character vector.
#' @param grouping.var The grouping variable(s). Takes a single grouping
#' variable or a list of 1 or more grouping variables.
#' @param order.by.readability logical. If \code{TRUE} orders the results
#' descending by readability score.
#' @param group.names A vector of names that corresponds to group. Generally
#' for internal use.
#' @param \ldots ignored
#' @return Returns a \code{\link[base]{data.frame}}
#' (\code{\link[data.table]{data.table}}) of readability scores.
#' @export
#' @references Coleman, M., & Liau, T. L. (1975). A computer readability formula
#' designed for machine scoring. Journal of Applied Psychology, Vol. 60,
#' pp. 283-284.
#'
#' Flesch R. (1948). A new readability yardstick. Journal of Applied Psychology.
#' Vol. 32(3), pp. 221-233. doi: 10.1037/h0057532.
#'
#' Gunning, Robert (1952). The Technique of Clear Writing. McGraw-Hill. pp. 36-37.
#'
#' McLaughlin, G. H. (1969). SMOG Grading: A New Readability Formula.
#' Journal of Reading, Vol. 12(8), pp. 639-646.
#'
#' Smith, E. A. & Senter, R. J. (1967) Automated readability index.
#' Technical Report AMRLTR-66-220, University of Cincinnati, Cincinnati, Ohio.
#' @keywords readability, Automated Readability Index, Coleman Liau, SMOG,
#' Flesch-Kincaid, Fry, Linsear Write
#' @export
#' @importFrom data.table :=
#' @examples
#' \dontrun{
#' library(syllable)
#'
#' (x1 <- with(presidential_debates_2012, readability(dialogue, NULL)))
#'
#' (x2 <- with(presidential_debates_2012, readability(dialogue, list(person, time))))
#' plot(x2)
#'
#' (x2b <- with(presidential_debates_2012, readability(dialogue, list(person, time),
#' order.by.readability = FALSE)))
#'
#' (x3 <- with(presidential_debates_2012, readability(dialogue, TRUE)))
#' }
readability <- function(x, grouping.var, order.by.readability = TRUE, group.names, ...){
# NULL out column names used in data.table j-expressions below to avoid
# "no visible binding for global variable" notes in R CMD check.
n.sents <- n.words <- n.complexes <- n.polys <- n.chars <- Flesch_Kincaid <-
Gunning_Fog_Index <- Coleman_Liau <- SMOG <- Automated_Readability_Index <-
Average_Grade_Level <- n.sylls <- NULL
# Resolve the grouping specification:
# NULL -> one group ("all") spanning every element of x;
# TRUE -> one group per element of x (element-wise scores);
# list of >1 vars -> multi-variable grouping, names recovered from the call;
# otherwise -> a single grouping variable.
if(is.null(grouping.var)) {
G <- "all"
grouping <- rep("all", length(x))
} else {
if (isTRUE(grouping.var)) {
G <- "id"
grouping <- seq_along(x)
} else {
if (is.list(grouping.var) & length(grouping.var) > 1) {
# Recover variable names from the unevaluated call, e.g. list(person, time).
m <- unlist(as.character(substitute(grouping.var))[-1])
# Keep only the final component of df$var-style references.
G <- sapply(strsplit(m, "$", fixed=TRUE), function(x) {
x[length(x)]
}
)
grouping <- grouping.var
} else {
G <- as.character(substitute(grouping.var))
G <- G[length(G)]
grouping <- unlist(grouping.var)
}
}
}
# Explicit group names override anything derived above.
if(!missing(group.names)) {
G <- group.names
}
# Per-group word/sentence/character/syllable counts from the syllable package.
y <- syllable::readability_word_stats_by(x, grouping, group.names = G)
grouping <- attributes(y)[["groups"]]
# First j: compute the five grade-level scores per group. Second j: average
# the five scores; grouping by the group columns plus all five score columns
# keeps one row per group while mean() sees that row's five values.
out <- y[, list(
Flesch_Kincaid = flesch_kincaid_(n.words, n.sents, n.sylls),
Gunning_Fog_Index = gunning_fog_(n.words, n.sents, n.complexes),
Coleman_Liau = coleman_liau_(n.words, n.sents, n.chars),
SMOG = smog_(n.sents, n.polys),
Automated_Readability_Index = automated_readability_index_(n.words, n.sents, n.chars)
), by = grouping][, list(
Average_Grade_Level = mean(c(Flesch_Kincaid, Gunning_Fog_Index, Coleman_Liau, SMOG, Automated_Readability_Index), na.rm=TRUE)
), by = c(
grouping, "Flesch_Kincaid", "Gunning_Fog_Index", "Coleman_Liau", "SMOG", "Automated_Readability_Index"
)]
# Optionally order groups from highest to lowest average grade level.
if (isTRUE(order.by.readability)){
data.table::setorder(out, -Average_Grade_Level)
}
# Tag the result so the print/plot methods dispatch, and stash the group columns.
class(out) <- unique(c("readability", class(out)))
attributes(out)[["groups"]] <- grouping
out
}
#' Plots a readability Object
#'
#' Plots a readability object
#'
#' @param x A \code{readability} object.
#' @param \ldots ignored.
#' @method plot readability
#' @export
plot.readability <- function(x, ...){
  Value <- NULL  # silence R CMD check note for the data.table column below
  # Defensive copy: `:=` below modifies a data.table by reference, so without
  # a copy plotting could strip the grouping columns from the caller's object.
  x <- data.table::copy(x)
  # Collapse the grouping columns into one label per row, keeping input order.
  x[["grouping.var"]] <- apply(x[, attributes(x)[["groups"]], with = FALSE], 1, paste, collapse = ".")
  x[["grouping.var"]] <- factor(x[["grouping.var"]], levels = rev(x[["grouping.var"]]))
  x <- x[, attributes(x)[["groups"]]:=NULL]
  # Long format: one row per (group, readability measure).
  y <- tidyr::gather_(x, "Measure", "Value", c("Flesch_Kincaid", "Gunning_Fog_Index",
    "Coleman_Liau", "SMOG", "Automated_Readability_Index"))
  y[["Measure"]] <- gsub("_", " ", y[["Measure"]])
  data.table::setDT(y)
  # Per-group mean of the five scores with +/- one standard error.
  center_dat <- y[, list(upper = mean(Value) + SE(Value), lower = mean(Value) - SE(Value),
    means = mean(Value)), keyby = "grouping.var"]
  # Title-case the grouping variable names for the axis label.
  nms <- gsub("(^|[[:space:]])([[:alpha:]])", "\\1\\U\\2", attributes(x)[["groups"]], perl=TRUE)
  xaxis <- floor(min(y[["Value"]])):ceiling(max(y[["Value"]]))
  ggplot2::ggplot(y, ggplot2::aes_string(y = "grouping.var")) +
    ggplot2::geom_vline(xintercept = mean(center_dat[["means"]]), size=.75, alpha = .25, linetype="dashed") +
    ggplot2::geom_point(ggplot2::aes_string(color = "Measure", x = "Value"), size=2, shape=1) +
    ggplot2::geom_errorbarh(data = center_dat, size=.75, alpha=.4,
      ggplot2::aes_string(x = "means", xmin="upper", xmax="lower"), height = .3) +
    ggplot2::geom_point(data = center_dat, ggplot2::aes_string(x = "means"), alpha = .5, shape=15, size=3) +
    ggplot2::geom_point(data = center_dat, ggplot2::aes_string(x = "means"), size=1) +
    ggplot2::scale_x_continuous(breaks = xaxis) +
    ggplot2::ylab(paste(nms, collapse = " & ")) +
    ggplot2::xlab("Grade Level") +
    ggplot2::theme_bw() +
    ggplot2::scale_color_discrete(name="Readability\nScore")
}
#' Prints a readability Object
#'
#' Prints a readability object
#'
#' @param x A \code{readability} object.
#' @param digits The number of digits to print.
#' @param \ldots ignored.
#' @method print readability
#' @export
print.readability <- function(x, digits = 1, ...){
  key_id <- NULL  # silence R CMD check note for the data.table column below
  colord <- colnames(x)
  cols <- c("Flesch_Kincaid", "Gunning_Fog_Index", "Coleman_Liau",
    "SMOG", "Automated_Readability_Index", "Average_Grade_Level")
  # FIX: work on a copy so printing never alters the caller's object, and
  # use seq_len() so a zero-row input does not produce c(1, 0).
  x <- data.table::copy(x)
  x[["key_id"]] <- seq_len(nrow(x))  # remember the original row order
  # Format all score columns at once via long format, then reshape back.
  y <- tidyr::gather_(x, "measure", "value", cols)
  y[["value"]] <- digit_format(y[["value"]], digits)
  y <- tidyr::spread_(y, "measure", "value")
  data.table::setDT(y)
  y <- y[order(key_id)]  # restore the original row order
  y[, "key_id"] <- NULL
  data.table::setcolorder(y, colord)
  print(y)
}
| /R/readability.R | no_license | ctzn-vishal/readability | R | false | false | 6,865 | r | #' Grade Level Readability by Grouping Variables
#'
#' Calculate the Flesch Kincaid, Gunning Fog Index, Coleman Liau, SMOG,
#' Automated Readability Index and an average of the 5 readability scores.
#'
#' @param x A character vector.
#' @param grouping.var The grouping variable(s). Takes a single grouping
#' variable or a list of 1 or more grouping variables.
#' @param order.by.readability logical. If \code{TRUE} orders the results
#' descending by readability score.
#' @param group.names A vector of names that corresponds to group. Generally
#' for internal use.
#' @param \ldots ignored
#' @return Returns a \code{\link[base]{data.frame}}
#' (\code{\link[data.table]{data.table}}) of readability scores.
#' @export
#' @references Coleman, M., & Liau, T. L. (1975). A computer readability formula
#' designed for machine scoring. Journal of Applied Psychology, Vol. 60,
#' pp. 283-284.
#'
#' Flesch R. (1948). A new readability yardstick. Journal of Applied Psychology.
#' Vol. 32(3), pp. 221-233. doi: 10.1037/h0057532.
#'
#' Gunning, Robert (1952). The Technique of Clear Writing. McGraw-Hill. pp. 36-37.
#'
#' McLaughlin, G. H. (1969). SMOG Grading: A New Readability Formula.
#' Journal of Reading, Vol. 12(8), pp. 639-646.
#'
#' Smith, E. A. & Senter, R. J. (1967) Automated readability index.
#' Technical Report AMRLTR-66-220, University of Cincinnati, Cincinnati, Ohio.
#' @keywords readability, Automated Readability Index, Coleman Liau, SMOG,
#' Flesch-Kincaid, Fry, Linsear Write
#' @export
#' @importFrom data.table :=
#' @examples
#' \dontrun{
#' library(syllable)
#'
#' (x1 <- with(presidential_debates_2012, readability(dialogue, NULL)))
#'
#' (x2 <- with(presidential_debates_2012, readability(dialogue, list(person, time))))
#' plot(x2)
#'
#' (x2b <- with(presidential_debates_2012, readability(dialogue, list(person, time),
#' order.by.readability = FALSE)))
#'
#' (x3 <- with(presidential_debates_2012, readability(dialogue, TRUE)))
#' }
readability <- function(x, grouping.var, order.by.readability = TRUE, group.names, ...){
# NULL out column names used in data.table j-expressions below to avoid
# "no visible binding for global variable" notes in R CMD check.
n.sents <- n.words <- n.complexes <- n.polys <- n.chars <- Flesch_Kincaid <-
Gunning_Fog_Index <- Coleman_Liau <- SMOG <- Automated_Readability_Index <-
Average_Grade_Level <- n.sylls <- NULL
# Resolve the grouping specification:
# NULL -> one group ("all") spanning every element of x;
# TRUE -> one group per element of x (element-wise scores);
# list of >1 vars -> multi-variable grouping, names recovered from the call;
# otherwise -> a single grouping variable.
if(is.null(grouping.var)) {
G <- "all"
grouping <- rep("all", length(x))
} else {
if (isTRUE(grouping.var)) {
G <- "id"
grouping <- seq_along(x)
} else {
if (is.list(grouping.var) & length(grouping.var) > 1) {
# Recover variable names from the unevaluated call, e.g. list(person, time).
m <- unlist(as.character(substitute(grouping.var))[-1])
# Keep only the final component of df$var-style references.
G <- sapply(strsplit(m, "$", fixed=TRUE), function(x) {
x[length(x)]
}
)
grouping <- grouping.var
} else {
G <- as.character(substitute(grouping.var))
G <- G[length(G)]
grouping <- unlist(grouping.var)
}
}
}
# Explicit group names override anything derived above.
if(!missing(group.names)) {
G <- group.names
}
# Per-group word/sentence/character/syllable counts from the syllable package.
y <- syllable::readability_word_stats_by(x, grouping, group.names = G)
grouping <- attributes(y)[["groups"]]
# First j: compute the five grade-level scores per group. Second j: average
# the five scores; grouping by the group columns plus all five score columns
# keeps one row per group while mean() sees that row's five values.
out <- y[, list(
Flesch_Kincaid = flesch_kincaid_(n.words, n.sents, n.sylls),
Gunning_Fog_Index = gunning_fog_(n.words, n.sents, n.complexes),
Coleman_Liau = coleman_liau_(n.words, n.sents, n.chars),
SMOG = smog_(n.sents, n.polys),
Automated_Readability_Index = automated_readability_index_(n.words, n.sents, n.chars)
), by = grouping][, list(
Average_Grade_Level = mean(c(Flesch_Kincaid, Gunning_Fog_Index, Coleman_Liau, SMOG, Automated_Readability_Index), na.rm=TRUE)
), by = c(
grouping, "Flesch_Kincaid", "Gunning_Fog_Index", "Coleman_Liau", "SMOG", "Automated_Readability_Index"
)]
# Optionally order groups from highest to lowest average grade level.
if (isTRUE(order.by.readability)){
data.table::setorder(out, -Average_Grade_Level)
}
# Tag the result so the print/plot methods dispatch, and stash the group columns.
class(out) <- unique(c("readability", class(out)))
attributes(out)[["groups"]] <- grouping
out
}
#' Plots a readability Object
#'
#' Plots a readability object
#'
#' @param x A \code{readability} object.
#' @param \ldots ignored.
#' @method plot readability
#' @export
plot.readability <- function(x, ...){
  Value <- NULL  # silence R CMD check note for the data.table column below
  # Defensive copy: `:=` below modifies a data.table by reference, so without
  # a copy plotting could strip the grouping columns from the caller's object.
  x <- data.table::copy(x)
  # Collapse the grouping columns into one label per row, keeping input order.
  x[["grouping.var"]] <- apply(x[, attributes(x)[["groups"]], with = FALSE], 1, paste, collapse = ".")
  x[["grouping.var"]] <- factor(x[["grouping.var"]], levels = rev(x[["grouping.var"]]))
  x <- x[, attributes(x)[["groups"]]:=NULL]
  # Long format: one row per (group, readability measure).
  y <- tidyr::gather_(x, "Measure", "Value", c("Flesch_Kincaid", "Gunning_Fog_Index",
    "Coleman_Liau", "SMOG", "Automated_Readability_Index"))
  y[["Measure"]] <- gsub("_", " ", y[["Measure"]])
  data.table::setDT(y)
  # Per-group mean of the five scores with +/- one standard error.
  center_dat <- y[, list(upper = mean(Value) + SE(Value), lower = mean(Value) - SE(Value),
    means = mean(Value)), keyby = "grouping.var"]
  # Title-case the grouping variable names for the axis label.
  nms <- gsub("(^|[[:space:]])([[:alpha:]])", "\\1\\U\\2", attributes(x)[["groups"]], perl=TRUE)
  xaxis <- floor(min(y[["Value"]])):ceiling(max(y[["Value"]]))
  ggplot2::ggplot(y, ggplot2::aes_string(y = "grouping.var")) +
    ggplot2::geom_vline(xintercept = mean(center_dat[["means"]]), size=.75, alpha = .25, linetype="dashed") +
    ggplot2::geom_point(ggplot2::aes_string(color = "Measure", x = "Value"), size=2, shape=1) +
    ggplot2::geom_errorbarh(data = center_dat, size=.75, alpha=.4,
      ggplot2::aes_string(x = "means", xmin="upper", xmax="lower"), height = .3) +
    ggplot2::geom_point(data = center_dat, ggplot2::aes_string(x = "means"), alpha = .5, shape=15, size=3) +
    ggplot2::geom_point(data = center_dat, ggplot2::aes_string(x = "means"), size=1) +
    ggplot2::scale_x_continuous(breaks = xaxis) +
    ggplot2::ylab(paste(nms, collapse = " & ")) +
    ggplot2::xlab("Grade Level") +
    ggplot2::theme_bw() +
    ggplot2::scale_color_discrete(name="Readability\nScore")
}
#' Prints a readability Object
#'
#' Prints a readability object
#'
#' @param x A \code{readability} object.
#' @param digits The number of digits to print.
#' @param \ldots ignored.
#' @method print readability
#' @export
print.readability <- function(x, digits = 1, ...){
  key_id <- NULL  # silence R CMD check note for the data.table column below
  colord <- colnames(x)
  cols <- c("Flesch_Kincaid", "Gunning_Fog_Index", "Coleman_Liau",
    "SMOG", "Automated_Readability_Index", "Average_Grade_Level")
  # FIX: work on a copy so printing never alters the caller's object, and
  # use seq_len() so a zero-row input does not produce c(1, 0).
  x <- data.table::copy(x)
  x[["key_id"]] <- seq_len(nrow(x))  # remember the original row order
  # Format all score columns at once via long format, then reshape back.
  y <- tidyr::gather_(x, "measure", "value", cols)
  y[["value"]] <- digit_format(y[["value"]], digits)
  y <- tidyr::spread_(y, "measure", "value")
  data.table::setDT(y)
  y <- y[order(key_id)]  # restore the original row order
  y[, "key_id"] <- NULL
  data.table::setcolorder(y, colord)
  print(y)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DataDesc.R
\docType{data}
\name{data.benchmark}
\alias{data.benchmark}
\title{MiRNA Sequencing Benchmark Data}
\format{
A data frame with 1033 rows and 54 columns. Here are examples of the column and row naming rules:
\describe{
\item{MXF2516_D13}{One sample belonging to MXF and library D with sample ID MXF2516 and library ID D13.}
\item{hsa-let-7a-2*}{Gene ID.}
}
}
\usage{
data.benchmark
}
\description{
Myxofibrosarcoma (MXF) and pleomorphic malignant fibrous histiocytoma (PMFH) are the two most common and aggressive subtypes of genetically complex soft tissue sarcoma.
This dataset includes three libraries used for sequencing 54 individual tumor samples. Library preparation and read capture were each processed by a single experienced technician in one run.
}
\keyword{datasets}
| /man/data.benchmark.Rd | no_license | LXQin/PRECISION.seq.DATA | R | false | true | 874 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DataDesc.R
\docType{data}
\name{data.benchmark}
\alias{data.benchmark}
\title{MiRNA Sequencing Benchmark Data}
\format{
A data frame with 1033 rows and 54 columns. Here are examples of the column and row naming rules:
\describe{
\item{MXF2516_D13}{One sample belonging to MXF and library D with sample ID MXF2516 and library ID D13.}
\item{hsa-let-7a-2*}{Gene ID.}
}
}
\usage{
data.benchmark
}
\description{
Myxofibrosarcoma (MXF) and pleomorphic malignant fibrous histiocytoma (PMFH) are the two most common and aggressive subtypes of genetically complex soft tissue sarcoma.
This dataset includes three libraries used for sequencing 54 individual tumor samples. Library preparation and read capture were each processed by a single experienced technician in one run.
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fitR-package.r
\name{mcmcSeitlTheta2}
\alias{mcmcSeitlTheta2}
\title{Markov-chain Monte Carlo (MCMC) outputs of a fit of the SEITL model to the
influenza outbreak on Tristan da Cunha (long run)}
\format{
A list with three elements describing the MCMC outputs
}
\description{
A dataset containing the MCMC outputs of a fit of the `seitlDeter` model to
the `fluTdc1971` data set (50000 iterations). This differs from the
`mcmcSeitlTheta1` dataset in the choice of initial parameter set theta.
}
\details{
\itemize{
\item \code{trace} data frame containing the MCMC trace
\item \code{acceptanceRate} a single value denoting the acceptance rate
\item \code{covmatEmprical} the covariance matrix of parameter samples
}
}
\keyword{internal}
| /man/mcmcSeitlTheta2.Rd | permissive | sbfnk/fitR | R | false | true | 819 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fitR-package.r
\name{mcmcSeitlTheta2}
\alias{mcmcSeitlTheta2}
\title{Markov-chain Monte Carlo (MCMC) outputs of a fit of the SEITL model to the
influenza outbreak on Tristan da Cunha (long run)}
\format{
A list with three elements describing the MCMC outputs
}
\description{
A dataset containing the MCMC outputs of a fit of the `seitlDeter` model to
the `fluTdc1971` data set (50000 iterations). This differs from the
`mcmcSeitlTheta1` dataset in the choice of initial parameter set theta.
}
\details{
\itemize{
\item \code{trace} data frame containing the MCMC trace
\item \code{acceptanceRate} a single value denoting the acceptance rate
\item \code{covmatEmprical} the covariance matrix of parameter samples
}
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.ssra.R
\name{plot.ssra}
\alias{plot.ssra}
\title{Plot ssra}
\usage{
\method{plot}{ssra}(x, r.crt = NULL, r.sig = TRUE, d.sq = NULL,
m.sig = TRUE, sig.col = TRUE,
col = c("red2", "green4", "blue3", "black"),
pch = c(1, 2, 0, 4), mar = c(3.5, 3.5, 1.5, 1), ...)
}
\arguments{
\item{x}{requires the return object from the SSRA function}
\item{r.crt}{minimal absolute correlation to be judged 'sequential'}
\item{r.sig}{plot statistically significant correlations}
\item{d.sq}{minimal effect size Cohen's d to be judged 'sequential'}
\item{m.sig}{plot statistically significant mean difference}
\item{sig.col}{significance in different colors}
\item{col}{color code or name}
\item{pch}{plotting character}
\item{mar}{number of lines of margin to be specified on the four sides of the plot}
\item{...}{further arguments passed to or from other methods}
}
\description{
Function for plotting the ssra object
}
\details{
Using this function, all item pairs are plotted on a graph by their correlation coefficients and
their mean differences (Cohen's d). This graph is useful for defining (or changing) criteria regarding
correlation coefficient and mean difference to judge whether an item pair is 'sequential' or 'equal'.
}
\examples{
# Example data based on Takeya (1991)
# Sakai Sequential Relation Analysis
# ordering assessed according to the correlation coefficient and mean difference
exdat.ssra <- SSRA(exdat, output = FALSE)
plot(exdat.ssra)
}
\author{
Takuya Yanagida \email{takuya.yanagida@univie.ac.at},
Keiko Sakai \email{keiko.sakai@oit.ac.jp}
}
\references{
Takeya, M. (1991). \emph{A new test theory: Structural analyses for educational information}.
Tokyo: Waseda University Press.
}
\seealso{
\code{\link{SSRA}}, \code{\link{treegram}}, \code{\link{scatterplot}}
}
| /man/plot.ssra.Rd | no_license | cran/SSRA | R | false | true | 1,935 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.ssra.R
\name{plot.ssra}
\alias{plot.ssra}
\title{Plot ssra}
\usage{
\method{plot}{ssra}(x, r.crt = NULL, r.sig = TRUE, d.sq = NULL,
m.sig = TRUE, sig.col = TRUE,
col = c("red2", "green4", "blue3", "black"),
pch = c(1, 2, 0, 4), mar = c(3.5, 3.5, 1.5, 1), ...)
}
\arguments{
\item{x}{requires the return object from the SSRA function}
\item{r.crt}{minimal absolute correlation to be judged 'sequential'}
\item{r.sig}{plot statistically significant correlations}
\item{d.sq}{minimal effect size Cohen's d to be judged 'sequential'}
\item{m.sig}{plot statistically significant mean difference}
\item{sig.col}{significance in different colors}
\item{col}{color code or name}
\item{pch}{plotting character}
\item{mar}{number of lines of margin to be specified on the four sides of the plot}
\item{...}{further arguments passed to or from other methods}
}
\description{
Function for plotting the ssra object
}
\details{
Using this function, all item pairs are plotted on a graph by their correlation coefficients and
their mean differences (Cohen's d). This graph is useful for defining (or changing) criteria regarding
correlation coefficient and mean difference to judge whether an item pair is 'sequential' or 'equal'.
}
\examples{
# Example data based on Takeya (1991)
# Sakai Sequential Relation Analysis
# ordering assesed according to the correlation coefficient and mean difference
exdat.ssra <- SSRA(exdat, output = FALSE)
plot(exdat.ssra)
}
\author{
Takuya Yanagida \email{takuya.yanagida@univie.ac.at},
Keiko Sakai \email{keiko.sakai@oit.ac.jp}
}
\references{
Takeya, M. (1991). \emph{A new test theory: Structural analyses for educational information}.
Tokyo: Waseda University Press.
}
\seealso{
\code{\link{SSRA}}, \code{\link{treegram}}, \code{\link{scatterplot}}
}
|
### spatial difference - 3 columns by two rows
#1. scatter
#2. chla averaged across time-series. Maybe draw transition line
#3. chla difference
#4. blshtrk difference
#5. swordifish difference
#6. ecocast difference
# Output directory for the figure panels.
outputDir="/Users/heatherwelch/Dropbox/JPSS/plots_03.05.19/"
library(scales)
# Shared project functions for the JPSS/VIIRS analyses.
source("/Users/heatherwelch/Dropbox/JPSS/JPSS_VIIRS/code/load_functions.R")
library(plotly)
# Static EcoCast inputs; `studyarea` is the study-area polygon shapefile.
path = "/Volumes/EcoCast_SeaGate/ERD_DOM/EcoCast_CodeArchive"
staticdir=paste0(path,"/static_variables/")
studyarea=readOGR(dsn=staticdir,layer="sa_square_coast3")
#' Rescale all values of `i` to the [0, 1] interval using its own min/max.
#'
#' @param i A Raster* object or plain numeric vector; `i[]` extracts the
#'   cell values in either case, so both work. NA values are ignored when
#'   finding the range.
#' @return An object of the same type as `i` with values mapped to [0, 1].
#'   If all values are equal the denominator is 0 and the result is NaN.
fcnRescale <- function(i) {
  # One pass for both extremes instead of separate min()/max() calls
  rng <- range(i[], na.rm = TRUE)
  (i - rng[1]) / (rng[2] - rng[1])
}
modisDir="/Users/heatherwelch/Dropbox/JPSS/modis_8Day/EcoCastRuns/output/mean_mask"
viirsDir="/Users/heatherwelch/Dropbox/JPSS/viirs_8Day/EcoCastRuns/output/mean_mask"
dates_m=list.files(modisDir,full.names = T,recursive = T,pattern = "mean.grd")%>% grep("2016-10-11",.,invert=T,value=T) %>% gsub("/Users/heatherwelch/Dropbox/JPSS/modis_8Day/EcoCastRuns/output/mean_mask/EcoCast_-0.1_-0.1_-0.05_-0.9_0.9_","",.) %>% gsub("_mean.grd","",.)
dates_v=list.files(viirsDir,full.names = T,recursive = T,pattern = "mean.grd")%>% grep("2016-09-07",.,invert=T,value=T) %>% gsub("/Users/heatherwelch/Dropbox/JPSS/viirs_8Day/EcoCastRuns/output/mean_mask/EcoCast_-0.1_-0.1_-0.05_-0.9_0.9_","",.) %>% gsub("_mean.grd","",.)
to_match_date=intersect(dates_m,dates_v)
to_match_date
#1. scatter ####
modisDir="/Users/heatherwelch/Dropbox/JPSS/modis_8Day/Satellite_mask"
viirsDir="/Users/heatherwelch/Dropbox/JPSS/viirs_8Day/Satellite_mask"
dates_m=list.files(modisDir,full.names = T,recursive = T,pattern = ".grd")%>% grep("2016-10-11",.,invert=T,value=T) %>% gsub("/Users/heatherwelch/Dropbox/JPSS/modis_8Day/Satellite_mask/","",.) %>% gsub("/l.blendChl.grd","",.)
dates_v=list.files(viirsDir,full.names = T,recursive = T,pattern = ".grd")%>% grep("2016-09-07",.,invert=T,value=T) %>% gsub("/Users/heatherwelch/Dropbox/JPSS/viirs_8Day/Satellite_mask/","",.) %>% gsub("/l.blendChl.grd","",.)
to_match=intersect(dates_m,dates_v)
sat_m=list.files(modisDir,full.names = T,recursive = T,pattern = ".grd") %>% grep("2016-10-11",.,invert=T,value=T)%>% stack() %>%mask(.,studyarea) %>% crop(.,extent(studyarea))
dates_m=list.files(modisDir,full.names = T,recursive = T,pattern = ".grd")%>% grep("2016-10-11",.,invert=T,value=T) %>% gsub("/Users/heatherwelch/Dropbox/JPSS/modis_8Day/Satellite_mask/","",.) %>% gsub("/l.blendChl.grd","",.)
names(sat_m)=dates_m
m_stats=cellStats(sat_m,stat="mean")
b=m_stats %>% as.data.frame() %>% mutate(date=as.Date(dates_m))
colnames(b)=c("chla","date")
b=b %>% mutate(year=as.factor(strtrim(as.character(date),4))) %>% filter(year!=2012&year!=2019)%>% mutate(month=as.factor(substr(as.character(date),6,7)))%>% filter(month!="01"&month!="07")
m_df=b %>% mutate(sensor="MODIS") #%>% left_join(a,.,by=c("full_TS"="date"))
m_df=m_df[m_df$date %in% as.Date(to_match),]
sat_v=list.files(viirsDir,full.names = T,recursive = T,pattern = ".grd") %>% stack() %>%mask(.,studyarea) %>% crop(.,extent(studyarea))
dates_v=list.files(viirsDir,full.names = T,recursive = T,pattern = ".grd") %>% gsub("/Users/heatherwelch/Dropbox/JPSS/viirs_8Day/Satellite_mask/","",.) %>% gsub("/l.blendChl.grd","",.)
names(sat_v)=dates_v
v_stats=cellStats(sat_v,stat="mean")
b=v_stats %>% as.data.frame() %>% mutate(date=as.Date(dates_v))
colnames(b)=c("chla","date")
b=b %>% mutate(year=as.factor(strtrim(as.character(date),4))) %>% filter(year!=2012&year!=2019)%>% mutate(month=as.factor(substr(as.character(date),6,7)))%>% filter(month!="01"&month!="07")
v_df=b %>% mutate(sensor="VIIRS") #%>% left_join(a,.,by=c("full_TS"="date"))
v_df=v_df[v_df$date %in% as.Date(to_match),]
master=do.call("rbind",list(m_df,v_df))
master$sensor=as.factor(master$sensor)
chla_scatter=master %>% spread(sensor,chla)
### colored/shaped by month and year. not doing this for the moment
# a=ggplot(chla_scatter,aes(x=MODIS,y=VIIRS))+geom_point(aes(color=year,shape=month))+geom_abline(slope=1,intercept = 0)+xlim(0,1)+ylim(0,1)+stat_smooth(method="lm",se = F,linetype="dashed",color="black")+
# scale_color_manual("year",values=c("2015"="darkgoldenrod","2016"="coral1","2017"="gray","2018"="cadetblue3"))+
# ylab("VIIRS Chl-a (mg^3)")+xlab("MODIS Chl-a (mg^3)")+
# theme(legend.key.size = unit(.5,'lines'),legend.position=c(.1,.8))
# a
a=ggplot(chla_scatter,aes(x=MODIS,y=VIIRS))+geom_point(shape=1)+geom_abline(slope=1,intercept = 0,color="blue")+xlim(-2,-.44)+ylim(-2,-.44)+stat_smooth(method="lm",se = F,linetype="dashed",color="blue")+
ylab("VIIRS Chl-a (mg^3)")+xlab("MODIS Chl-a (mg^3)")+
theme(legend.key.size = unit(.5,'lines'),legend.position=c(.1,.8))+
theme(axis.text = element_text(size=6),axis.title = element_text(size=6),legend.text=element_text(size=6),legend.title = element_text(size=6),strip.text.y = element_text(size = 6),strip.text.x = element_text(size = 6), strip.background = element_blank())
scatterplot=a
scatterplot
datatype="scatterplot"
png(paste(outputDir,datatype,".png",sep=''),width=10,height=10,units='cm',res=400)
par(ps=10)
par(mar=c(4,4,1,1))
par(cex=1)
scatterplot
dev.off()
#2. chla averaged across time-series. Maybe draw transition line ####
modisDir="/Users/heatherwelch/Dropbox/JPSS/modis_8Day/Satellite_mask"
viirsDir="/Users/heatherwelch/Dropbox/JPSS/viirs_8Day/Satellite_mask"
#1. time series of spatial average, just mean
modisE=list.files(modisDir,full.names = T,recursive = T,pattern = ".grd")%>% grep("2016-10-11",.,invert=T,value=T)
modisE=unique (grep(paste(to_match_date,collapse="|"),modisE, value=TRUE)) %>% stack() %>%mask(.,studyarea) %>% crop(.,extent(studyarea))
viirsE=list.files(viirsDir,full.names = T,recursive = T,pattern = ".grd")%>% grep("2016-09-07",.,invert=T,value=T)
viirsE=unique (grep(paste(to_match_date,collapse="|"),viirsE, value=TRUE)) %>% stack() %>%mask(.,studyarea) %>% crop(.,extent(studyarea))
#' Write a single-panel PNG map of the temporal mean of a raster stack.
#'
#' @param stack RasterStack/RasterBrick of per-date layers (here chl-a).
#' @param datatype Label drawn on the map; also used as the output file name.
#' @param outputDir Directory (with trailing slash) the PNG is written to.
#' @return Called for its side effect only: writes <outputDir><datatype>.png.
PlotPNGs <- function(stack, datatype, outputDir) {
  # Green chlorophyll palette (unused EcoCols/SpCols palettes removed)
  ChlorCols <- colorRampPalette(brewer.pal(9, 'YlGn'))
  ####### produce png ####
  png(paste0(outputDir, datatype, ".png"), width = 24, height = 24, units = 'cm', res = 400)
  # Guarantee the graphics device closes even if plotting errors out
  on.exit(dev.off(), add = TRUE)
  par(mar = c(3, 3, .5, .5), las = 1, font = 2)
  par(mfrow = c(1, 1))
  col <- ChlorCols(255)
  # Temporal mean across layers; z-limits come from that mean layer's own range
  stackM <- stack %>% calc(., fun = mean, na.rm = T)
  H <- maxValue(stackM) %>% max(na.rm = T)
  L <- minValue(stackM) %>% min(na.rm = T)
  zlimits <- c(L, H)
  image.plot(stackM, col = col, xlim = c(-130, -115), ylim = c(30, 47), zlim = zlimits,
             legend.args = list(text = "mg/m3", cex = 1, side = 3, line = 0, adj = -.1))
  maps::map('worldHires', add = TRUE, col = grey(0.7), fill = TRUE)
  # -1.14 contour: presumably a log10 chl-a "transition line" isoline -- TODO confirm
  contour(stackM, add = TRUE, col = "black", levels = c(-1.14), labcex = 1, lwd = 2)
  text(-122, 45, datatype, adj = c(0, 0), cex = 1.5)
  box()
}
PlotPNGs(stack=modisE,datatype = "MODIS Chl-a",outputDir = outputDir)
PlotPNGs(stack=viirsE,datatype = "VIIRS Chl-a",outputDir = outputDir)
#3. spatial differences ####
### chla ####
modisDir="/Users/heatherwelch/Dropbox/JPSS/modis_8Day/Satellite_mask"
viirsDir="/Users/heatherwelch/Dropbox/JPSS/viirs_8Day/Satellite_mask"
modisE=list.files(modisDir,full.names = T,recursive = T,pattern = ".grd")%>% grep("2016-10-11",.,invert=T,value=T)
modisE=unique (grep(paste(to_match_date,collapse="|"),modisE, value=TRUE)) %>% stack() %>%mask(.,studyarea) %>% crop(.,extent(studyarea))
viirsE=list.files(viirsDir,full.names = T,recursive = T,pattern = ".grd")%>% grep("2016-09-07",.,invert=T,value=T)
viirsE=unique (grep(paste(to_match_date,collapse="|"),viirsE, value=TRUE)) %>% stack() %>%mask(.,studyarea) %>% crop(.,extent(studyarea))
rescaledStack=fcnRescale(stack(modisE,viirsE))
modisE=rescaledStack[[grep(".1$",names(rescaledStack))]]
viirsE=rescaledStack[[grep(".2$",names(rescaledStack))]]
chla=modisE-viirsE
### blshTr ####
modisDir="/Users/heatherwelch/Dropbox/JPSS/modis_8Day/EcoCastRuns/blshTr/predCIs_mask"
viirsDir="/Users/heatherwelch/Dropbox/JPSS/viirs_8Day/EcoCastRuns/blshTr/predCIs_mask"
modisE=list.files(modisDir,full.names = T,recursive = T,pattern = "mean.grd")%>% grep("2016-10-11",.,invert=T,value=T)
modisE=unique (grep(paste(to_match_date,collapse="|"),modisE, value=TRUE)) %>% stack() %>%mask(.,studyarea) %>% crop(.,extent(studyarea))
viirsE=list.files(viirsDir,full.names = T,recursive = T,pattern = "mean.grd")%>% grep("2016-09-07",.,invert=T,value=T)
viirsE=unique (grep(paste(to_match_date,collapse="|"),viirsE, value=TRUE)) %>% stack() %>%mask(.,studyarea) %>% crop(.,extent(studyarea))
rescaledStack=fcnRescale(stack(modisE,viirsE))
modisE=rescaledStack[[grep(".1$",names(rescaledStack))]]
viirsE=rescaledStack[[grep(".2$",names(rescaledStack))]]
blshtrk=modisE-viirsE
### swor ####
modisDir="/Users/heatherwelch/Dropbox/JPSS/modis_8Day/EcoCastRuns/swor/predCIs_mask"
viirsDir="/Users/heatherwelch/Dropbox/JPSS/viirs_8Day/EcoCastRuns/swor/predCIs_mask"
modisE=list.files(modisDir,full.names = T,recursive = T,pattern = "mean.grd")%>% grep("2016-10-11",.,invert=T,value=T)
modisE=unique (grep(paste(to_match_date,collapse="|"),modisE, value=TRUE)) %>% stack() %>%mask(.,studyarea) %>% crop(.,extent(studyarea))
viirsE=list.files(viirsDir,full.names = T,recursive = T,pattern = "mean.grd")%>% grep("2016-09-07",.,invert=T,value=T)
viirsE=unique (grep(paste(to_match_date,collapse="|"),viirsE, value=TRUE)) %>% stack() %>%mask(.,studyarea) %>% crop(.,extent(studyarea))
rescaledStack=fcnRescale(stack(modisE,viirsE))
modisE=rescaledStack[[grep(".1$",names(rescaledStack))]]
viirsE=rescaledStack[[grep(".2$",names(rescaledStack))]]
swor=modisE-viirsE
### ecocast ####
modisDir="/Users/heatherwelch/Dropbox/JPSS/modis_8Day/EcoCastRuns/output/mean_mask"
viirsDir="/Users/heatherwelch/Dropbox/JPSS/viirs_8Day/EcoCastRuns/output/mean_mask"
modisE=list.files(modisDir,full.names = T,recursive = T,pattern = "mean.grd")%>% grep("2016-10-11",.,invert=T,value=T)
modisE=unique (grep(paste(to_match_date,collapse="|"),modisE, value=TRUE)) %>% stack() %>%mask(.,studyarea) %>% crop(.,extent(studyarea))
viirsE=list.files(viirsDir,full.names = T,recursive = T,pattern = "mean.grd")%>% grep("2016-09-07",.,invert=T,value=T)
viirsE=unique (grep(paste(to_match_date,collapse="|"),viirsE, value=TRUE)) %>% stack() %>%mask(.,studyarea) %>% crop(.,extent(studyarea))
rescaledStack=fcnRescale(stack(modisE,viirsE))
modisE=rescaledStack[[grep(".1$",names(rescaledStack))]]
viirsE=rescaledStack[[grep(".2$",names(rescaledStack))]]
ecocast=modisE-viirsE
#### plot all ####
plotting=stack(calc(chla,mean,na.rm=T),calc(swor,mean,na.rm=T),calc(blshtrk,mean,na.rm=T),calc(ecocast,mean,na.rm=T))
H=max(plotting[],na.rm=T)
L=min(plotting[],na.rm=T)
#' Write a PNG map of the temporal-mean MODIS-minus-VIIRS difference for one product.
#'
#' @param stack RasterStack of per-date (MODIS - VIIRS) difference layers.
#' @param product Label for the plot and output file name.
#' @param outputDir Directory (with trailing slash) the PNG is written to.
#' @param H,L Global max/min across all products so every map shares one scale.
#' @param countour_ras Raster whose -1.14 contour is overlaid (param name typo
#'   kept: callers pass it by name as `countour_ras`).
#' @return Called for its side effect only: writes
#'   <outputDir><product>_M-V_difference2.png.
PlotPNGs_difference <- function(stack, product, outputDir, H, L, countour_ras) {
  col <- colorRamps:::blue2red(255)
  # Temporal mean difference layer
  stackM <- stack %>% calc(., mean, na.rm = T)
  # Lower colour bound is -max(|L|, H) so zero sits at the palette midpoint
  MaxVal <- max(c(H, abs(L)))
  zlimits <- c(-MaxVal, H)
  ####### produce png ####
  png(paste0(outputDir, product, "_M-V_difference2.png"), width = 24, height = 24, units = 'cm', res = 400)
  # Close the device even if plotting fails part-way
  on.exit(dev.off(), add = TRUE)
  par(mar = c(3, 3, .5, .5), las = 1, font = 2)
  par(mfrow = c(1, 1))
  par(oma = c(0, 0, 0, 1))
  #### MODIS - VIIRS ####
  # Cells more than one spatial SD from the spatial mean are stippled.
  # Reuse stackM instead of recomputing calc(stack, mean) a second time.
  stackSD <- cellStats(stackM, sd)
  stackMP <- stackM %>% cellStats(., mean)
  plusSD <- rasterToPoints(stackM, fun = function(x) x > (stackMP + stackSD))
  minusSD <- rasterToPoints(stackM, fun = function(x) x < (stackMP - stackSD))
  image.plot(stackM, col = col, xlim = c(-130, -115), ylim = c(30, 47), zlim = zlimits, legend.cex = .5)
  maps::map('worldHires', add = TRUE, col = grey(0.7), fill = TRUE)
  points(plusSD, cex = 5, pch = ".")
  points(minusSD, cex = 5, pch = ".")
  # -1.14 contour: presumably a log10 chl-a transition isoline -- TODO confirm
  contour(countour_ras, add = TRUE, col = "black", levels = c(-1.14), labcex = 1, lwd = 2)
  text(-122, 45, product, adj = c(0, 0), cex = 1.5)
  box()
}
# Build the contour raster (temporal mean of VIIRS chl-a over the matched dates)
# that every difference map overlays, then render one PNG per product.
viirsDir="/Users/heatherwelch/Dropbox/JPSS/viirs_8Day/Satellite_mask"
viirsE=list.files(viirsDir,full.names = T,recursive = T,pattern = ".grd")%>% grep("2016-09-07",.,invert=T,value=T)
countour_ras=unique (grep(paste(to_match_date,collapse="|"),viirsE, value=TRUE)) %>% stack() %>%mask(.,studyarea) %>% crop(.,extent(studyarea))%>% calc(.,fun=mean,na.rm=T)
# H and L are the global extremes computed above so all four maps share one scale
PlotPNGs_difference(stack = chla,product = "Chl-a",outputDir = outputDir,H=H,L=L,countour_ras = countour_ras)
PlotPNGs_difference(stack = swor,product = "Swordfish",outputDir = outputDir,H=H,L=L,countour_ras = countour_ras)
PlotPNGs_difference(stack = blshtrk,product = "Blueshark - Tracking",outputDir = outputDir,H=H,L=L,countour_ras = countour_ras)
PlotPNGs_difference(stack = ecocast,product = "EcoCast",outputDir = outputDir,H=H,L=L,countour_ras = countour_ras)
| /code_03.05.19/spatial_differences.R | no_license | HeatherWelch/JPSS_VIIRS | R | false | false | 12,679 | r | ### spatial difference - 3 columns by two rows
#1. scatter
#2. chla averaged across time-series. Maybe draw transition line
#3. chla difference
#4. blshtrk difference
#5. swordfish difference
#6. ecocast difference
# Directory where all output PNGs from this script are written
outputDir="/Users/heatherwelch/Dropbox/JPSS/plots_03.05.19/"
library(scales)
# load_functions.R presumably attaches raster/rgdal/fields/ggplot2 etc. used below -- not visible here
source("/Users/heatherwelch/Dropbox/JPSS/JPSS_VIIRS/code/load_functions.R")
library(plotly)
path = "/Volumes/EcoCast_SeaGate/ERD_DOM/EcoCast_CodeArchive"
staticdir=paste0(path,"/static_variables/")
# Study-area polygon used to mask/crop every raster stack in this script
studyarea=readOGR(dsn=staticdir,layer="sa_square_coast3")
# Rescale values to [0, 1] using the object's own min/max (NA-aware).
# Works on Raster* objects and plain numeric vectors, since `i[]` extracts values.
# NOTE(review): the assignment to `a` is redundant -- its value is returned invisibly.
fcnRescale=function(i){
a <- (i - min(i[], na.rm=TRUE))/(max(i[], na.rm=TRUE)-min(i[], na.rm=TRUE))
}
modisDir="/Users/heatherwelch/Dropbox/JPSS/modis_8Day/EcoCastRuns/output/mean_mask"
viirsDir="/Users/heatherwelch/Dropbox/JPSS/viirs_8Day/EcoCastRuns/output/mean_mask"
dates_m=list.files(modisDir,full.names = T,recursive = T,pattern = "mean.grd")%>% grep("2016-10-11",.,invert=T,value=T) %>% gsub("/Users/heatherwelch/Dropbox/JPSS/modis_8Day/EcoCastRuns/output/mean_mask/EcoCast_-0.1_-0.1_-0.05_-0.9_0.9_","",.) %>% gsub("_mean.grd","",.)
dates_v=list.files(viirsDir,full.names = T,recursive = T,pattern = "mean.grd")%>% grep("2016-09-07",.,invert=T,value=T) %>% gsub("/Users/heatherwelch/Dropbox/JPSS/viirs_8Day/EcoCastRuns/output/mean_mask/EcoCast_-0.1_-0.1_-0.05_-0.9_0.9_","",.) %>% gsub("_mean.grd","",.)
to_match_date=intersect(dates_m,dates_v)
to_match_date
#1. scatter ####
modisDir="/Users/heatherwelch/Dropbox/JPSS/modis_8Day/Satellite_mask"
viirsDir="/Users/heatherwelch/Dropbox/JPSS/viirs_8Day/Satellite_mask"
dates_m=list.files(modisDir,full.names = T,recursive = T,pattern = ".grd")%>% grep("2016-10-11",.,invert=T,value=T) %>% gsub("/Users/heatherwelch/Dropbox/JPSS/modis_8Day/Satellite_mask/","",.) %>% gsub("/l.blendChl.grd","",.)
dates_v=list.files(viirsDir,full.names = T,recursive = T,pattern = ".grd")%>% grep("2016-09-07",.,invert=T,value=T) %>% gsub("/Users/heatherwelch/Dropbox/JPSS/viirs_8Day/Satellite_mask/","",.) %>% gsub("/l.blendChl.grd","",.)
to_match=intersect(dates_m,dates_v)
sat_m=list.files(modisDir,full.names = T,recursive = T,pattern = ".grd") %>% grep("2016-10-11",.,invert=T,value=T)%>% stack() %>%mask(.,studyarea) %>% crop(.,extent(studyarea))
dates_m=list.files(modisDir,full.names = T,recursive = T,pattern = ".grd")%>% grep("2016-10-11",.,invert=T,value=T) %>% gsub("/Users/heatherwelch/Dropbox/JPSS/modis_8Day/Satellite_mask/","",.) %>% gsub("/l.blendChl.grd","",.)
names(sat_m)=dates_m
m_stats=cellStats(sat_m,stat="mean")
b=m_stats %>% as.data.frame() %>% mutate(date=as.Date(dates_m))
colnames(b)=c("chla","date")
b=b %>% mutate(year=as.factor(strtrim(as.character(date),4))) %>% filter(year!=2012&year!=2019)%>% mutate(month=as.factor(substr(as.character(date),6,7)))%>% filter(month!="01"&month!="07")
m_df=b %>% mutate(sensor="MODIS") #%>% left_join(a,.,by=c("full_TS"="date"))
m_df=m_df[m_df$date %in% as.Date(to_match),]
sat_v=list.files(viirsDir,full.names = T,recursive = T,pattern = ".grd") %>% stack() %>%mask(.,studyarea) %>% crop(.,extent(studyarea))
dates_v=list.files(viirsDir,full.names = T,recursive = T,pattern = ".grd") %>% gsub("/Users/heatherwelch/Dropbox/JPSS/viirs_8Day/Satellite_mask/","",.) %>% gsub("/l.blendChl.grd","",.)
names(sat_v)=dates_v
v_stats=cellStats(sat_v,stat="mean")
b=v_stats %>% as.data.frame() %>% mutate(date=as.Date(dates_v))
colnames(b)=c("chla","date")
b=b %>% mutate(year=as.factor(strtrim(as.character(date),4))) %>% filter(year!=2012&year!=2019)%>% mutate(month=as.factor(substr(as.character(date),6,7)))%>% filter(month!="01"&month!="07")
v_df=b %>% mutate(sensor="VIIRS") #%>% left_join(a,.,by=c("full_TS"="date"))
v_df=v_df[v_df$date %in% as.Date(to_match),]
master=do.call("rbind",list(m_df,v_df))
master$sensor=as.factor(master$sensor)
chla_scatter=master %>% spread(sensor,chla)
### colored/shaped by month and year. not doing this for the moment
# a=ggplot(chla_scatter,aes(x=MODIS,y=VIIRS))+geom_point(aes(color=year,shape=month))+geom_abline(slope=1,intercept = 0)+xlim(0,1)+ylim(0,1)+stat_smooth(method="lm",se = F,linetype="dashed",color="black")+
# scale_color_manual("year",values=c("2015"="darkgoldenrod","2016"="coral1","2017"="gray","2018"="cadetblue3"))+
# ylab("VIIRS Chl-a (mg^3)")+xlab("MODIS Chl-a (mg^3)")+
# theme(legend.key.size = unit(.5,'lines'),legend.position=c(.1,.8))
# a
a=ggplot(chla_scatter,aes(x=MODIS,y=VIIRS))+geom_point(shape=1)+geom_abline(slope=1,intercept = 0,color="blue")+xlim(-2,-.44)+ylim(-2,-.44)+stat_smooth(method="lm",se = F,linetype="dashed",color="blue")+
ylab("VIIRS Chl-a (mg^3)")+xlab("MODIS Chl-a (mg^3)")+
theme(legend.key.size = unit(.5,'lines'),legend.position=c(.1,.8))+
theme(axis.text = element_text(size=6),axis.title = element_text(size=6),legend.text=element_text(size=6),legend.title = element_text(size=6),strip.text.y = element_text(size = 6),strip.text.x = element_text(size = 6), strip.background = element_blank())
scatterplot=a
scatterplot
datatype="scatterplot"
png(paste(outputDir,datatype,".png",sep=''),width=10,height=10,units='cm',res=400)
par(ps=10)
par(mar=c(4,4,1,1))
par(cex=1)
scatterplot
dev.off()
#2. chla averaged across time-series. Maybe draw transition line ####
modisDir="/Users/heatherwelch/Dropbox/JPSS/modis_8Day/Satellite_mask"
viirsDir="/Users/heatherwelch/Dropbox/JPSS/viirs_8Day/Satellite_mask"
#1. time series of spatial average, just mean
modisE=list.files(modisDir,full.names = T,recursive = T,pattern = ".grd")%>% grep("2016-10-11",.,invert=T,value=T)
modisE=unique (grep(paste(to_match_date,collapse="|"),modisE, value=TRUE)) %>% stack() %>%mask(.,studyarea) %>% crop(.,extent(studyarea))
viirsE=list.files(viirsDir,full.names = T,recursive = T,pattern = ".grd")%>% grep("2016-09-07",.,invert=T,value=T)
viirsE=unique (grep(paste(to_match_date,collapse="|"),viirsE, value=TRUE)) %>% stack() %>%mask(.,studyarea) %>% crop(.,extent(studyarea))
# Write a single-panel PNG map of the temporal mean of `stack` (chl-a layers).
# datatype: label drawn on the map and used as the output file name.
# outputDir: directory (with trailing slash) the PNG is written to.
# Side effects only: writes <outputDir><datatype>.png.
PlotPNGs<-function(stack,datatype,outputDir){
# NOTE(review): EcoCols and SpCols are built but never used in this function
EcoCols<-colorRampPalette(c("red","orange","white","cyan","blue"))
ChlorCols<-colorRampPalette(brewer.pal(9,'YlGn'))
SpCols<-colorRampPalette(brewer.pal(9,'GnBu'))
####### produce png ####
# NOTE(review): no on.exit(dev.off()), so an error mid-plot leaks the device
png(paste(outputDir,datatype,".png",sep=''),width=24,height=24,units='cm',res=400)
par(mar=c(3,3,.5,.5),las=1,font=2)
par(mfrow=c(1,1))
col=ChlorCols(255)
# Temporal mean across layers; z-limits taken from that mean layer's own range
stackM=stack %>% calc(.,fun=mean,na.rm=T)
H=maxValue(stackM) %>% max(na.rm=T)
L=minValue(stackM) %>% min(na.rm=T)
zlimits=c(L,H)
image.plot(stackM,col=col,xlim=c(-130,-115),ylim=c(30,47),zlim=zlimits,legend.args = list(text="mg/m3",cex=1, side=3, line=0,adj=-.1))
maps::map('worldHires',add=TRUE,col=grey(0.7),fill=TRUE)
# -1.14 contour: presumably a log10 chl-a "transition line" isoline -- TODO confirm
contour(stackM, add=TRUE, col="black",levels=c(-1.14),labcex = 1,lwd=2)
text(-122,45,datatype,adj=c(0,0),cex=1.5)
box()
dev.off()
}
PlotPNGs(stack=modisE,datatype = "MODIS Chl-a",outputDir = outputDir)
PlotPNGs(stack=viirsE,datatype = "VIIRS Chl-a",outputDir = outputDir)
#3. spatial differences ####
### chla ####
modisDir="/Users/heatherwelch/Dropbox/JPSS/modis_8Day/Satellite_mask"
viirsDir="/Users/heatherwelch/Dropbox/JPSS/viirs_8Day/Satellite_mask"
modisE=list.files(modisDir,full.names = T,recursive = T,pattern = ".grd")%>% grep("2016-10-11",.,invert=T,value=T)
modisE=unique (grep(paste(to_match_date,collapse="|"),modisE, value=TRUE)) %>% stack() %>%mask(.,studyarea) %>% crop(.,extent(studyarea))
viirsE=list.files(viirsDir,full.names = T,recursive = T,pattern = ".grd")%>% grep("2016-09-07",.,invert=T,value=T)
viirsE=unique (grep(paste(to_match_date,collapse="|"),viirsE, value=TRUE)) %>% stack() %>%mask(.,studyarea) %>% crop(.,extent(studyarea))
rescaledStack=fcnRescale(stack(modisE,viirsE))
modisE=rescaledStack[[grep(".1$",names(rescaledStack))]]
viirsE=rescaledStack[[grep(".2$",names(rescaledStack))]]
chla=modisE-viirsE
### blshTr ####
modisDir="/Users/heatherwelch/Dropbox/JPSS/modis_8Day/EcoCastRuns/blshTr/predCIs_mask"
viirsDir="/Users/heatherwelch/Dropbox/JPSS/viirs_8Day/EcoCastRuns/blshTr/predCIs_mask"
modisE=list.files(modisDir,full.names = T,recursive = T,pattern = "mean.grd")%>% grep("2016-10-11",.,invert=T,value=T)
modisE=unique (grep(paste(to_match_date,collapse="|"),modisE, value=TRUE)) %>% stack() %>%mask(.,studyarea) %>% crop(.,extent(studyarea))
viirsE=list.files(viirsDir,full.names = T,recursive = T,pattern = "mean.grd")%>% grep("2016-09-07",.,invert=T,value=T)
viirsE=unique (grep(paste(to_match_date,collapse="|"),viirsE, value=TRUE)) %>% stack() %>%mask(.,studyarea) %>% crop(.,extent(studyarea))
rescaledStack=fcnRescale(stack(modisE,viirsE))
modisE=rescaledStack[[grep(".1$",names(rescaledStack))]]
viirsE=rescaledStack[[grep(".2$",names(rescaledStack))]]
blshtrk=modisE-viirsE
### swor ####
modisDir="/Users/heatherwelch/Dropbox/JPSS/modis_8Day/EcoCastRuns/swor/predCIs_mask"
viirsDir="/Users/heatherwelch/Dropbox/JPSS/viirs_8Day/EcoCastRuns/swor/predCIs_mask"
modisE=list.files(modisDir,full.names = T,recursive = T,pattern = "mean.grd")%>% grep("2016-10-11",.,invert=T,value=T)
modisE=unique (grep(paste(to_match_date,collapse="|"),modisE, value=TRUE)) %>% stack() %>%mask(.,studyarea) %>% crop(.,extent(studyarea))
viirsE=list.files(viirsDir,full.names = T,recursive = T,pattern = "mean.grd")%>% grep("2016-09-07",.,invert=T,value=T)
viirsE=unique (grep(paste(to_match_date,collapse="|"),viirsE, value=TRUE)) %>% stack() %>%mask(.,studyarea) %>% crop(.,extent(studyarea))
rescaledStack=fcnRescale(stack(modisE,viirsE))
modisE=rescaledStack[[grep(".1$",names(rescaledStack))]]
viirsE=rescaledStack[[grep(".2$",names(rescaledStack))]]
swor=modisE-viirsE
### ecocast ####
modisDir="/Users/heatherwelch/Dropbox/JPSS/modis_8Day/EcoCastRuns/output/mean_mask"
viirsDir="/Users/heatherwelch/Dropbox/JPSS/viirs_8Day/EcoCastRuns/output/mean_mask"
modisE=list.files(modisDir,full.names = T,recursive = T,pattern = "mean.grd")%>% grep("2016-10-11",.,invert=T,value=T)
modisE=unique (grep(paste(to_match_date,collapse="|"),modisE, value=TRUE)) %>% stack() %>%mask(.,studyarea) %>% crop(.,extent(studyarea))
viirsE=list.files(viirsDir,full.names = T,recursive = T,pattern = "mean.grd")%>% grep("2016-09-07",.,invert=T,value=T)
viirsE=unique (grep(paste(to_match_date,collapse="|"),viirsE, value=TRUE)) %>% stack() %>%mask(.,studyarea) %>% crop(.,extent(studyarea))
rescaledStack=fcnRescale(stack(modisE,viirsE))
modisE=rescaledStack[[grep(".1$",names(rescaledStack))]]
viirsE=rescaledStack[[grep(".2$",names(rescaledStack))]]
ecocast=modisE-viirsE
#### plot all ####
plotting=stack(calc(chla,mean,na.rm=T),calc(swor,mean,na.rm=T),calc(blshtrk,mean,na.rm=T),calc(ecocast,mean,na.rm=T))
H=max(plotting[],na.rm=T)
L=min(plotting[],na.rm=T)
# Write a PNG map of the temporal-mean MODIS-minus-VIIRS difference for one product.
# stack: per-date difference layers; product: label/file name; H, L: global
# extremes across all products so every map shares one colour scale;
# countour_ras: raster whose -1.14 contour is overlaid.
# Side effects only: writes <outputDir><product>_M-V_difference2.png.
PlotPNGs_difference<-function(stack,product,outputDir,H,L,countour_ras){
col=colorRamps:::blue2red(255)
# Temporal mean difference layer
stackM=stack %>% calc(.,mean,na.rm=T)
# Lower colour bound is -max(|L|, H) so zero sits at the palette midpoint
Labs=abs(L)
both=c(H,Labs)
MaxVal=max(both)
# zlimits=c(L,H)
zlimits=c((MaxVal*-1),H)
####### produce png ####
# NOTE(review): no on.exit(dev.off()), so an error mid-plot leaks the device
png(paste(outputDir,product,"_M-V_difference2.png",sep=''),width=24,height=24,units='cm',res=400)
par(mar=c(3,3,.5,.5),las=1,font=2)
par(mfrow=c(1,1))
par(oma=c(0,0,0,1))
#### MODIS - VIIRS ####
# NOTE(review): recomputes calc(stack, mean) although stackM already holds it
stackSD=stack %>% calc(.,mean,na.rm=T) %>% cellStats(.,sd)
stackMP=stackM%>% cellStats(.,mean)
#postive=stackM[stackM>(stackM+stackSD)]
# Cells more than one spatial SD from the spatial mean are stippled below
plusSD=rasterToPoints(stackM,fun=function(x)x>(stackMP+stackSD))
minusSD=rasterToPoints(stackM,fun=function(x)x<(stackMP-stackSD))
image.plot(stackM,col=col,xlim=c(-130,-115),ylim=c(30,47),zlim=zlimits,legend.cex=.5)
maps::map('worldHires',add=TRUE,col=grey(0.7),fill=TRUE)
points(plusSD,cex=5,pch = ".")
points(minusSD,cex=5,pch = ".")
# -1.14 contour: presumably a log10 chl-a transition isoline -- TODO confirm
contour(countour_ras, add=TRUE, col="black",levels=c(-1.14),labcex = 1,lwd=2)
text(-122,45,product,adj=c(0,0),cex=1.5)
box()
dev.off()
}
viirsDir="/Users/heatherwelch/Dropbox/JPSS/viirs_8Day/Satellite_mask"
viirsE=list.files(viirsDir,full.names = T,recursive = T,pattern = ".grd")%>% grep("2016-09-07",.,invert=T,value=T)
countour_ras=unique (grep(paste(to_match_date,collapse="|"),viirsE, value=TRUE)) %>% stack() %>%mask(.,studyarea) %>% crop(.,extent(studyarea))%>% calc(.,fun=mean,na.rm=T)
PlotPNGs_difference(stack = chla,product = "Chl-a",outputDir = outputDir,H=H,L=L,countour_ras = countour_ras)
PlotPNGs_difference(stack = swor,product = "Swordfish",outputDir = outputDir,H=H,L=L,countour_ras = countour_ras)
PlotPNGs_difference(stack = blshtrk,product = "Blueshark - Tracking",outputDir = outputDir,H=H,L=L,countour_ras = countour_ras)
PlotPNGs_difference(stack = ecocast,product = "EcoCast",outputDir = outputDir,H=H,L=L,countour_ras = countour_ras)
|
library('quanteda')
library('tibble')
library('dplyr')
# d: document data frame with at least Name (city), doc (text), winner (party)
load("rfiles/d.Rdata")
d <- d[d$Name%in%c("Attica", "Auburn", "Brazil", "Gary"),]
#tf-idf for the whole corpus
#create corpus
crps <- corpus(d$doc)
#set party docvar
docvars(crps, "Party") <- d$winner
docvars(crps, "City") <- d$Name
#convert to dfm (trigram document-feature matrix)
dfm_in3 <- dfm(crps, ngrams = 3)
#calculate tf-idf
tfidf_in3 <- tfidf(dfm_in3)
#tfidf_in3[1:5,1:5]
tfidf_in3[900:905,1:5]
# Mean tf-idf per trigram across all documents, sorted descending
tri_tfidf <- colMeans(tfidf_in3)
#names(tri_tfidf)
a <- tibble(tfidf = tri_tfidf, token = names(tri_tfidf))
a <- a[order(a$tfidf, decreasing = T),]
a
# tf-idf for individual cities: one mean-tf-idf vector per city subcorpus
b <- sapply(unique(docvars(crps, "City")),
function(x){corpus_subset(crps, City == x) %>%
dfm(ngrams = 3) %>%
tfidf() %>%
colMeans})
Attica <- b$Attica
Attica <- tibble(tfidf = Attica, token = names(Attica))
Brazil <- b$Brazil
Brazil <- tibble(tfidf = Brazil, token = names(Brazil))
# Compare Attica's per-city tf-idf (tfidf.x) against the corpus-wide value (tfidf.y)
Att <- merge(Attica, a, by = "token")
Att$tfidfRatio <- Att$tfidf.x/Att$tfidf.y
Att$tfidfDiff <- Att$tfidf.y-Att$tfidf.x
#########################################
# NOTE(review): dfm_in31 is never defined in this script -- presumably created
# interactively or a typo for dfm_in3; verify before running.
dfm_in3 <- dfm_in31
# NOTE(review): rep(1, length(dfm_in3@x)-1) yields one value fewer than the
# slot it replaces -- looks like an off-by-one; likely intended length(dfm_in3@x).
dfm_in3@x <- as.numeric(rep(1,length(dfm_in3@x)-1))
# Ratio of term counts to boolean (0/1) presence = mean within-doc frequency
aab <- colSums(dfm_in31)/colSums(dfm_in3)
range(colSums(dfm_in3))
AttBool <- tibble(tfidf = aab, token = names(aab))
AttBool | /old/quantedaTFIDF.R | no_license | desmarais-lab/govWebsites | R | false | false | 1,316 | r | library('quanteda')
# Compute tf-idf statistics over city-council website documents with quanteda.
# NOTE(review): written against an old quanteda API (dfm(x, ngrams = ...) and
# tfidf()); in quanteda >= 1.4 these became tokens_ngrams()/dfm_tfidf().
library('tibble')
library('dplyr')
# Load the pre-built document data frame `d` -- presumably one row per
# document with columns doc, Name, winner; verify against rfiles/d.Rdata.
load("rfiles/d.Rdata")
# Restrict to the four cities of interest.
d <- d[d$Name%in%c("Attica", "Auburn", "Brazil", "Gary"),]
#tf-idf for the whole corpus
#create corpus
crps <- corpus(d$doc)
#set party docvar
docvars(crps, "Party") <- d$winner
# City docvar, used below to subset the corpus per city.
docvars(crps, "City") <- d$Name
#convert to dfm (trigram document-feature matrix)
dfm_in3 <- dfm(crps, ngrams = 3)
#calculate tf-idf
tfidf_in3 <- tfidf(dfm_in3)
#tfidf_in3[1:5,1:5]
tfidf_in3[900:905,1:5]
# Mean tf-idf per trigram across all documents.
tri_tfidf <- colMeans(tfidf_in3)
#names(tri_tfidf)
a <- tibble(tfidf = tri_tfidf, token = names(tri_tfidf))
# Rank trigrams by mean tf-idf, highest first.
a <- a[order(a$tfidf, decreasing = T),]
a
# tf-idf for individual cities
# Per-city mean tf-idf per trigram; each city has its own vocabulary, so
# sapply returns a list of named numeric vectors.
b <- sapply(unique(docvars(crps, "City")),
function(x){corpus_subset(crps, City == x) %>%
dfm(ngrams = 3) %>%
tfidf() %>%
colMeans})
Attica <- b$Attica
Attica <- tibble(tfidf = Attica, token = names(Attica))
Brazil <- b$Brazil
Brazil <- tibble(tfidf = Brazil, token = names(Brazil))
# Compare Attica's per-trigram tf-idf against the corpus-wide values.
Att <- merge(Attica, a, by = "token")
Att$tfidfRatio <- Att$tfidf.x/Att$tfidf.y
Att$tfidfDiff <- Att$tfidf.y-Att$tfidf.x
#########################################
# NOTE(review): `dfm_in31` is never defined before this line -- it looks like
# a raw-count snapshot (dfm_in31 <- dfm_in3) was intended BEFORE binarizing;
# confirm against the original analysis.
dfm_in3 <- dfm_in31
# NOTE(review): rep(1, length(dfm_in3@x)-1) is one element short of the @x
# slot -- likely an off-by-one; rep(1, length(dfm_in3@x)) would binarize.
dfm_in3@x <- as.numeric(rep(1,length(dfm_in3@x)-1))
# Mean count per document containing each trigram: raw sums / binary sums.
aab <- colSums(dfm_in31)/colSums(dfm_in3)
range(colSums(dfm_in3))
AttBool <- tibble(tfidf = aab, token = names(aab))
AttBool
AttBool |
# Page no 186
# Large-sample (Wald) confidence interval for a population proportion:
# p-hat +/- z_{1 - alpha/2} * sqrt(p-hat * (1 - p-hat) / n).
n <- 1220      # sample size
pcap <- 0.18   # sample proportion (p-hat)
alpha <- 0.05  # significance level -> 95% confidence
# Estimated standard error of the sample proportion.
std.error <- sqrt(pcap * (1 - pcap) / n)
std.error
# Reliability coefficient: the standard-normal quantile z_{1 - alpha/2}.
reliability.coeff <- qnorm(1 - alpha / 2, 0, 1)
reliability.coeff
# Lower and upper confidence limits via a symmetric margin of error.
confidence.interval <- pcap + c(-1, 1) * reliability.coeff * std.error
round(confidence.interval, 3) | /Chapter 6/Ex6_5_1.R | no_license | SaksheePhade/R-Programming | R | false | false | 334 | r | # Page no 186
n <- 1220
pcap <- 0.18
alpha <- 0.05
std.error <- sqrt(pcap * (1-pcap) / n)
std.error
relaibility.coeff <- qnorm(1- alpha/2 ,0 ,1)
relaibility.coeff
confidence.interval <- c(pcap - relaibility.coeff*std.error ,
pcap + relaibility.coeff*std.error )
round(confidence.interval, 3) |
LoadData <- function(dataset = "D1") {
  # Read one of five benchmark feature-selection datasets from disk and return
  # it with a binary class column `Y` (1 vs 0) as the last column.
  #
  # dataset: one of "D1".."D5" (ALLAML, arcene, GLI85, Prostate_GE,
  #          SMK_CAN_187); any other value raises an error via match.arg.
  # Returns: data.frame of predictors followed by the recoded class column Y.
  dataset <- match.arg(dataset, c("D1", "D2", "D3", "D4", "D5"))
  # One path per dataset id instead of a repeated if/else chain.
  files <- c(D1 = "E:/TO_upload/1.Datasets/D1_ALLAML.csv",
             D2 = "E:/TO_upload/1.Datasets/D2_arcene.csv",
             D3 = "E:/TO_upload/1.Datasets/D3_GLI85.csv",
             D4 = "E:/TO_upload/1.Datasets/D4_Prostate_GE.csv",
             D5 = "E:/TO_upload/1.Datasets/D5_SMK_CAN_187.csv")
  Dataset <- read.csv(files[[dataset]], row.names = NULL)
  # D3 and D5 store Y in column 2; move it to the end so all datasets agree.
  if (dataset %in% c("D3", "D5")) {
    Y <- Dataset$Y
    Dataset <- Dataset[, -2]
    Dataset <- cbind(Dataset, Y)
  }
  # rownames(Dataset) <- Dataset[,1]
  # Dataset<-Dataset[,-1]
  # Drop the row-index column "X" if present.
  # BUG FIX: the original did Dataset[, -which(...)] unconditionally; when no
  # "X" column exists, which() is integer(0) and Dataset[, -integer(0)]
  # silently drops EVERY column.
  index <- which(colnames(Dataset) == "X")
  if (length(index) > 0) {
    Dataset <- Dataset[, -index]
  }
  # Binarize the class label: keep 1, map everything else to 0 (vectorized
  # replacement of the original row-by-row loop).
  Dataset$Y[Dataset$Y != 1] <- 0
  return(Dataset)
}
if(dataset == "D1")
{
Dataset <- read.csv("E:/TO_upload/1.Datasets/D1_ALLAML.csv",row.names = NULL)
}else if(dataset == "D2")
{
Dataset <- read.csv("E:/TO_upload/1.Datasets/D2_arcene.csv",row.names = NULL)
}else if(dataset == "D3")
{
Dataset <- read.csv("E:/TO_upload/1.Datasets/D3_GLI85.csv",row.names = NULL)
Y<-Dataset$Y
Dataset<-Dataset[,-2]
Dataset<-cbind(Dataset,Y)
}else if(dataset == "D4")
{
Dataset <- read.csv("E:/TO_upload/1.Datasets/D4_Prostate_GE.csv",row.names = NULL)
}else if(dataset == "D5")
{
Dataset <- read.csv("E:/TO_upload/1.Datasets/D5_SMK_CAN_187.csv",row.names = NULL)
Y<-Dataset$Y
Dataset<-Dataset[,-2]
Dataset<-cbind(Dataset,Y)
}
# rownames(Dataset) <- Dataset[,1]
# Dataset<-Dataset[,-1]
index<-which(colnames(Dataset)=="X")
Dataset<-Dataset[,-index]
for(i in 1:nrow(Dataset)){
if(Dataset$Y[i]!=1){Dataset$Y[i]<-0}
}
return(Dataset)
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/HomeAndConstructionBusiness.R
\name{HomeAndConstructionBusiness}
\alias{HomeAndConstructionBusiness}
\title{HomeAndConstructionBusiness}
\usage{
HomeAndConstructionBusiness(id = NULL, priceRange = NULL,
paymentAccepted = NULL, openingHours = NULL, currenciesAccepted = NULL,
branchOf = NULL, telephone = NULL,
specialOpeningHoursSpecification = NULL, smokingAllowed = NULL,
reviews = NULL, review = NULL, publicAccess = NULL, photos = NULL,
photo = NULL, openingHoursSpecification = NULL,
maximumAttendeeCapacity = NULL, maps = NULL, map = NULL, logo = NULL,
isicV4 = NULL, isAccessibleForFree = NULL, hasMap = NULL,
globalLocationNumber = NULL, geo = NULL, faxNumber = NULL,
events = NULL, event = NULL, containsPlace = NULL,
containedInPlace = NULL, containedIn = NULL, branchCode = NULL,
amenityFeature = NULL, aggregateRating = NULL, address = NULL,
additionalProperty = NULL, url = NULL, sameAs = NULL,
potentialAction = NULL, name = NULL, mainEntityOfPage = NULL,
image = NULL, identifier = NULL, disambiguatingDescription = NULL,
description = NULL, alternateName = NULL, additionalType = NULL)
}
\arguments{
\item{id}{identifier for the object (URI)}
\item{priceRange}{(Text type.) The price range of the business, for example ```$$$```.}
\item{paymentAccepted}{(Text type.) Cash, Credit Card, Cryptocurrency, Local Exchange Tradings System, etc.}
\item{openingHours}{(Text or Text type.) The general opening hours for a business. Opening hours can be specified as a weekly time range, starting with days, then times per day. Multiple days can be listed with commas ',' separating each day. Day or time ranges are specified using a hyphen '-'.* Days are specified using the following two-letter combinations: ```Mo```, ```Tu```, ```We```, ```Th```, ```Fr```, ```Sa```, ```Su```.* Times are specified using 24:00 time. For example, 3pm is specified as ```15:00```. * Here is an example: <code><time itemprop="openingHours" datetime="Tu,Th 16:00-20:00">Tuesdays and Thursdays 4-8pm</time></code>.* If a business is open 7 days a week, then it can be specified as <code><time itemprop="openingHours" datetime="Mo-Su">Monday through Sunday, all day</time></code>.}
\item{currenciesAccepted}{(Text type.) The currency accepted.Use standard formats: [ISO 4217 currency format](http://en.wikipedia.org/wiki/ISO_4217) e.g. "USD"; [Ticker symbol](https://en.wikipedia.org/wiki/List_of_cryptocurrencies) for cryptocurrencies e.g. "BTC"; well known names for [Local Exchange Tradings Systems](https://en.wikipedia.org/wiki/Local_exchange_trading_system) (LETS) and other currency types e.g. "Ithaca HOUR".}
\item{branchOf}{(Organization type.) The larger organization that this local business is a branch of, if any. Not to be confused with (anatomical)[[branch]].}
\item{telephone}{(Text or Text or Text or Text type.) The telephone number.}
\item{specialOpeningHoursSpecification}{(OpeningHoursSpecification type.) The special opening hours of a certain place.Use this to explicitly override general opening hours brought in scope by [[openingHoursSpecification]] or [[openingHours]].}
\item{smokingAllowed}{(Boolean type.) Indicates whether it is allowed to smoke in the place, e.g. in the restaurant, hotel or hotel room.}
\item{reviews}{(Review or Review or Review or Review or Review type.) Review of the item.}
\item{review}{(Review or Review or Review or Review or Review or Review or Review or Review type.) A review of the item.}
\item{publicAccess}{(Boolean type.) A flag to signal that the [[Place]] is open to public visitors. If this property is omitted there is no assumed default boolean value}
\item{photos}{(Photograph or ImageObject type.) Photographs of this place.}
\item{photo}{(Photograph or ImageObject type.) A photograph of this place.}
\item{openingHoursSpecification}{(OpeningHoursSpecification type.) The opening hours of a certain place.}
\item{maximumAttendeeCapacity}{(Integer or Integer type.) The total number of individuals that may attend an event or venue.}
\item{maps}{(URL type.) A URL to a map of the place.}
\item{map}{(URL type.) A URL to a map of the place.}
\item{logo}{(URL or ImageObject or URL or ImageObject or URL or ImageObject or URL or ImageObject or URL or ImageObject type.) An associated logo.}
\item{isicV4}{(Text or Text or Text type.) The International Standard of Industrial Classification of All Economic Activities (ISIC), Revision 4 code for a particular organization, business person, or place.}
\item{isAccessibleForFree}{(Boolean or Boolean or Boolean or Boolean type.) A flag to signal that the item, event, or place is accessible for free.}
\item{hasMap}{(URL or Map type.) A URL to a map of the place.}
\item{globalLocationNumber}{(Text or Text or Text type.) The [Global Location Number](http://www.gs1.org/gln) (GLN, sometimes also referred to as International Location Number or ILN) of the respective organization, person, or place. The GLN is a 13-digit number used to identify parties and physical locations.}
\item{geo}{(GeoShape or GeoCoordinates type.) The geo coordinates of the place.}
\item{faxNumber}{(Text or Text or Text or Text type.) The fax number.}
\item{events}{(Event or Event type.) Upcoming or past events associated with this place or organization.}
\item{event}{(Event or Event or Event or Event or Event or Event or Event type.) Upcoming or past event associated with this place, organization, or action.}
\item{containsPlace}{(Place type.) The basic containment relation between a place and another that it contains.}
\item{containedInPlace}{(Place type.) The basic containment relation between a place and one that contains it.}
\item{containedIn}{(Place type.) The basic containment relation between a place and one that contains it.}
\item{branchCode}{(Text type.) A short textual code (also called "store code") that uniquely identifies a place of business. The code is typically assigned by the parentOrganization and used in structured URLs.For example, in the URL http://www.starbucks.co.uk/store-locator/etc/detail/3047 the code "3047" is a branchCode for a particular branch.}
\item{amenityFeature}{(LocationFeatureSpecification or LocationFeatureSpecification or LocationFeatureSpecification type.) An amenity feature (e.g. a characteristic or service) of the Accommodation. This generic property does not make a statement about whether the feature is included in an offer for the main accommodation or available at extra costs.}
\item{aggregateRating}{(AggregateRating or AggregateRating or AggregateRating or AggregateRating or AggregateRating or AggregateRating or AggregateRating or AggregateRating type.) The overall rating, based on a collection of reviews or ratings, of the item.}
\item{address}{(Text or PostalAddress or Text or PostalAddress or Text or PostalAddress or Text or PostalAddress or Text or PostalAddress type.) Physical address of the item.}
\item{additionalProperty}{(PropertyValue or PropertyValue or PropertyValue or PropertyValue type.) A property-value pair representing an additional characteristic of the entity, e.g. a product feature or another characteristic for which there is no matching property in schema.org. Note: Publishers should be aware that applications designed to use specific schema.org properties (e.g. http://schema.org/width, http://schema.org/color, http://schema.org/gtin13, ...) will typically expect such data to be provided using those properties, rather than using the generic property/value mechanism.}
\item{url}{(URL type.) URL of the item.}
\item{sameAs}{(URL type.) URL of a reference Web page that unambiguously indicates the item's identity. E.g. the URL of the item's Wikipedia page, Wikidata entry, or official website.}
\item{potentialAction}{(Action type.) Indicates a potential Action, which describes an idealized action in which this thing would play an 'object' role.}
\item{name}{(Text type.) The name of the item.}
\item{mainEntityOfPage}{(URL or CreativeWork type.) Indicates a page (or other CreativeWork) for which this thing is the main entity being described. See [background notes](/docs/datamodel.html#mainEntityBackground) for details.}
\item{image}{(URL or ImageObject type.) An image of the item. This can be a [[URL]] or a fully described [[ImageObject]].}
\item{identifier}{(URL or Text or PropertyValue type.) The identifier property represents any kind of identifier for any kind of [[Thing]], such as ISBNs, GTIN codes, UUIDs etc. Schema.org provides dedicated properties for representing many of these, either as textual strings or as URL (URI) links. See [background notes](/docs/datamodel.html#identifierBg) for more details.}
\item{disambiguatingDescription}{(Text type.) A sub property of description. A short description of the item used to disambiguate from other, similar items. Information from other properties (in particular, name) may be necessary for the description to be useful for disambiguation.}
\item{description}{(Text type.) A description of the item.}
\item{alternateName}{(Text type.) An alias for the item.}
\item{additionalType}{(URL type.) An additional type for the item, typically used for adding more specific types from external vocabularies in microdata syntax. This is a relationship between something and a class that the thing is in. In RDFa syntax, it is better to use the native RDFa syntax - the 'typeof' attribute - for multiple types. Schema.org tools may have only weaker understanding of extra types, in particular those defined externally.}
}
\value{
a list object corresponding to a schema:HomeAndConstructionBusiness
}
\description{
A construction business.A HomeAndConstructionBusiness is a [[LocalBusiness]] that provides services around homes and buildings.As a [[LocalBusiness]] it can be described as a [[provider]] of one or more [[Service]](s).
}
| /man/HomeAndConstructionBusiness.Rd | no_license | cboettig/schemar | R | false | true | 9,998 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/HomeAndConstructionBusiness.R
\name{HomeAndConstructionBusiness}
\alias{HomeAndConstructionBusiness}
\title{HomeAndConstructionBusiness}
\usage{
HomeAndConstructionBusiness(id = NULL, priceRange = NULL,
paymentAccepted = NULL, openingHours = NULL, currenciesAccepted = NULL,
branchOf = NULL, telephone = NULL,
specialOpeningHoursSpecification = NULL, smokingAllowed = NULL,
reviews = NULL, review = NULL, publicAccess = NULL, photos = NULL,
photo = NULL, openingHoursSpecification = NULL,
maximumAttendeeCapacity = NULL, maps = NULL, map = NULL, logo = NULL,
isicV4 = NULL, isAccessibleForFree = NULL, hasMap = NULL,
globalLocationNumber = NULL, geo = NULL, faxNumber = NULL,
events = NULL, event = NULL, containsPlace = NULL,
containedInPlace = NULL, containedIn = NULL, branchCode = NULL,
amenityFeature = NULL, aggregateRating = NULL, address = NULL,
additionalProperty = NULL, url = NULL, sameAs = NULL,
potentialAction = NULL, name = NULL, mainEntityOfPage = NULL,
image = NULL, identifier = NULL, disambiguatingDescription = NULL,
description = NULL, alternateName = NULL, additionalType = NULL)
}
\arguments{
\item{id}{identifier for the object (URI)}
\item{priceRange}{(Text type.) The price range of the business, for example ```$$$```.}
\item{paymentAccepted}{(Text type.) Cash, Credit Card, Cryptocurrency, Local Exchange Tradings System, etc.}
\item{openingHours}{(Text or Text type.) The general opening hours for a business. Opening hours can be specified as a weekly time range, starting with days, then times per day. Multiple days can be listed with commas ',' separating each day. Day or time ranges are specified using a hyphen '-'.* Days are specified using the following two-letter combinations: ```Mo```, ```Tu```, ```We```, ```Th```, ```Fr```, ```Sa```, ```Su```.* Times are specified using 24:00 time. For example, 3pm is specified as ```15:00```. * Here is an example: <code><time itemprop="openingHours" datetime="Tu,Th 16:00-20:00">Tuesdays and Thursdays 4-8pm</time></code>.* If a business is open 7 days a week, then it can be specified as <code><time itemprop="openingHours" datetime="Mo-Su">Monday through Sunday, all day</time></code>.}
\item{currenciesAccepted}{(Text type.) The currency accepted.Use standard formats: [ISO 4217 currency format](http://en.wikipedia.org/wiki/ISO_4217) e.g. "USD"; [Ticker symbol](https://en.wikipedia.org/wiki/List_of_cryptocurrencies) for cryptocurrencies e.g. "BTC"; well known names for [Local Exchange Tradings Systems](https://en.wikipedia.org/wiki/Local_exchange_trading_system) (LETS) and other currency types e.g. "Ithaca HOUR".}
\item{branchOf}{(Organization type.) The larger organization that this local business is a branch of, if any. Not to be confused with (anatomical)[[branch]].}
\item{telephone}{(Text or Text or Text or Text type.) The telephone number.}
\item{specialOpeningHoursSpecification}{(OpeningHoursSpecification type.) The special opening hours of a certain place.Use this to explicitly override general opening hours brought in scope by [[openingHoursSpecification]] or [[openingHours]].}
\item{smokingAllowed}{(Boolean type.) Indicates whether it is allowed to smoke in the place, e.g. in the restaurant, hotel or hotel room.}
\item{reviews}{(Review or Review or Review or Review or Review type.) Review of the item.}
\item{review}{(Review or Review or Review or Review or Review or Review or Review or Review type.) A review of the item.}
\item{publicAccess}{(Boolean type.) A flag to signal that the [[Place]] is open to public visitors. If this property is omitted there is no assumed default boolean value}
\item{photos}{(Photograph or ImageObject type.) Photographs of this place.}
\item{photo}{(Photograph or ImageObject type.) A photograph of this place.}
\item{openingHoursSpecification}{(OpeningHoursSpecification type.) The opening hours of a certain place.}
\item{maximumAttendeeCapacity}{(Integer or Integer type.) The total number of individuals that may attend an event or venue.}
\item{maps}{(URL type.) A URL to a map of the place.}
\item{map}{(URL type.) A URL to a map of the place.}
\item{logo}{(URL or ImageObject or URL or ImageObject or URL or ImageObject or URL or ImageObject or URL or ImageObject type.) An associated logo.}
\item{isicV4}{(Text or Text or Text type.) The International Standard of Industrial Classification of All Economic Activities (ISIC), Revision 4 code for a particular organization, business person, or place.}
\item{isAccessibleForFree}{(Boolean or Boolean or Boolean or Boolean type.) A flag to signal that the item, event, or place is accessible for free.}
\item{hasMap}{(URL or Map type.) A URL to a map of the place.}
\item{globalLocationNumber}{(Text or Text or Text type.) The [Global Location Number](http://www.gs1.org/gln) (GLN, sometimes also referred to as International Location Number or ILN) of the respective organization, person, or place. The GLN is a 13-digit number used to identify parties and physical locations.}
\item{geo}{(GeoShape or GeoCoordinates type.) The geo coordinates of the place.}
\item{faxNumber}{(Text or Text or Text or Text type.) The fax number.}
\item{events}{(Event or Event type.) Upcoming or past events associated with this place or organization.}
\item{event}{(Event or Event or Event or Event or Event or Event or Event type.) Upcoming or past event associated with this place, organization, or action.}
\item{containsPlace}{(Place type.) The basic containment relation between a place and another that it contains.}
\item{containedInPlace}{(Place type.) The basic containment relation between a place and one that contains it.}
\item{containedIn}{(Place type.) The basic containment relation between a place and one that contains it.}
\item{branchCode}{(Text type.) A short textual code (also called "store code") that uniquely identifies a place of business. The code is typically assigned by the parentOrganization and used in structured URLs.For example, in the URL http://www.starbucks.co.uk/store-locator/etc/detail/3047 the code "3047" is a branchCode for a particular branch.}
\item{amenityFeature}{(LocationFeatureSpecification or LocationFeatureSpecification or LocationFeatureSpecification type.) An amenity feature (e.g. a characteristic or service) of the Accommodation. This generic property does not make a statement about whether the feature is included in an offer for the main accommodation or available at extra costs.}
\item{aggregateRating}{(AggregateRating or AggregateRating or AggregateRating or AggregateRating or AggregateRating or AggregateRating or AggregateRating or AggregateRating type.) The overall rating, based on a collection of reviews or ratings, of the item.}
\item{address}{(Text or PostalAddress or Text or PostalAddress or Text or PostalAddress or Text or PostalAddress or Text or PostalAddress type.) Physical address of the item.}
\item{additionalProperty}{(PropertyValue or PropertyValue or PropertyValue or PropertyValue type.) A property-value pair representing an additional characteristic of the entity, e.g. a product feature or another characteristic for which there is no matching property in schema.org. Note: Publishers should be aware that applications designed to use specific schema.org properties (e.g. http://schema.org/width, http://schema.org/color, http://schema.org/gtin13, ...) will typically expect such data to be provided using those properties, rather than using the generic property/value mechanism.}
\item{url}{(URL type.) URL of the item.}
\item{sameAs}{(URL type.) URL of a reference Web page that unambiguously indicates the item's identity. E.g. the URL of the item's Wikipedia page, Wikidata entry, or official website.}
\item{potentialAction}{(Action type.) Indicates a potential Action, which describes an idealized action in which this thing would play an 'object' role.}
\item{name}{(Text type.) The name of the item.}
\item{mainEntityOfPage}{(URL or CreativeWork type.) Indicates a page (or other CreativeWork) for which this thing is the main entity being described. See [background notes](/docs/datamodel.html#mainEntityBackground) for details.}
\item{image}{(URL or ImageObject type.) An image of the item. This can be a [[URL]] or a fully described [[ImageObject]].}
\item{identifier}{(URL or Text or PropertyValue type.) The identifier property represents any kind of identifier for any kind of [[Thing]], such as ISBNs, GTIN codes, UUIDs etc. Schema.org provides dedicated properties for representing many of these, either as textual strings or as URL (URI) links. See [background notes](/docs/datamodel.html#identifierBg) for more details.}
\item{disambiguatingDescription}{(Text type.) A sub property of description. A short description of the item used to disambiguate from other, similar items. Information from other properties (in particular, name) may be necessary for the description to be useful for disambiguation.}
\item{description}{(Text type.) A description of the item.}
\item{alternateName}{(Text type.) An alias for the item.}
\item{additionalType}{(URL type.) An additional type for the item, typically used for adding more specific types from external vocabularies in microdata syntax. This is a relationship between something and a class that the thing is in. In RDFa syntax, it is better to use the native RDFa syntax - the 'typeof' attribute - for multiple types. Schema.org tools may have only weaker understanding of extra types, in particular those defined externally.}
}
\value{
a list object corresponding to a schema:HomeAndConstructionBusiness
}
\description{
A construction business.A HomeAndConstructionBusiness is a [[LocalBusiness]] that provides services around homes and buildings.As a [[LocalBusiness]] it can be described as a [[provider]] of one or more [[Service]](s).
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.