content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
#### SET-UP --------------------------------------------------------------------
## NOTE(review): fread(), %>%, ggplot2 functions and plot_grid() are assumed to
## be loaded by the sourced helper scripts below -- confirm before running
## this file standalone.
library(here)
source(here('scripts/genomics/admixtools/admixtools_plot_fun.R'))
source(here('scripts/genomics/gphocs/gphocs_plot_fun.R'))
source(here('scripts/gphocs/gphocs_plot_fun_msp3.R'))

## Output file:
figfile <- here('figs/ms/SuppFig_geneflow-clade1.eps')

#### G-PHOCS MIGRATION PLOTS ---------------------------------------------------
## (this header previously said "D-STATS PLOT", duplicating the section below)
## Input files:
infile_gphocs <- here('analyses/gphocs/output/msp3_3sp/processed/msp3_3sp_mergedlogs.txt')

## Migration patterns (plot order) and their display labels:
migpatterns <- c("mac_2_sp3E", "sp3E_2_mac",
                 "mac_2_sp3W", "sp3W_2_mac",
                 "mac_2_sp3", "sp3_2_mac")
miglabs <- c('mac > sp3S', 'sp3S > mac',
             'mac > sp3N', 'sp3N > mac',
             'mac > anc_sp3', 'anc_sp3 > mac')

## Load merged G-PhoCS logs and keep the migration-rate rows of interest:
Log <- as.data.frame(fread(infile_gphocs, stringsAsFactors = FALSE))
mlog <- Log %>%
  filter(var %in% c('2Nm', 'm.prop'),
         !grepl('leh', migpattern),
         !migpattern %in% c('sp3E_2_sp3W', 'sp3W_2_sp3E'),
         migtype.run == 'mult') %>%  # was `== c('mult')`: plain scalar comparison
  mutate(migpattern = factor(migpattern, levels = migpatterns))

## Violin plot of population-scaled migration rates (2Nm):
p_m1 <- vplot(
  data = filter(mlog, var == '2Nm'),
  xvar = 'migpattern', fillvar = 'cn', colvar = 'cn', yvar = 'val',
  xlab = "", ylab = '2Nm',
  legpos = 'top', legcolname = "migration in model", rm.leg.col = FALSE,
  yticks.by = 0.1, rotate.x.ann = TRUE, linecols = NULL) +
  scale_x_discrete(labels = miglabs) +
  theme(plot.margin = margin(0.2, 0.2, 0, 0.2, 'cm'))
p_m1

## Violin plot of the migrant proportion:
p_m2 <- vplot(
  data = filter(mlog, var == 'm.prop'),
  xvar = 'migpattern', fillvar = 'cn', colvar = 'cn', yvar = 'val',
  xlab = "", ylab = 'migrant percentage',
  legpos = 'top', legcolname = "migration in model", rm.leg.col = FALSE,
  yticks.by = 0.03, rotate.x.ann = TRUE, linecols = NULL) +
  scale_x_discrete(labels = miglabs) +
  theme(plot.margin = margin(0.2, 0.2, 0, 0.5, 'cm'))
p_m2

#### D-STATS PLOT --------------------------------------------------------------
## Input files:
infile_atools <- here('analyses/admixtools/output/msp3proj.mac3.FS6.dstat_msp3.msp3pops.dmode.out')

## D-stats df -- relabel population combinations for display:
d <- prep_d(infile_atools) %>%
  mutate(popcomb = gsub('\\(m', '(', popcomb)) %>%
  mutate(popcomb = gsub(',m', ',', popcomb)) %>%
  mutate(popcomb = gsub('Mt3', '*', popcomb)) %>%
  mutate(popcomb = gsub('east', '-S', popcomb)) %>%
  mutate(popcomb = gsub('west', '-N', popcomb)) %>%
  mutate(popcomb = gsub('mac$', 'mac ', popcomb))

## Create D-stats plot:
mylabs <- c(expression(paste('(sp3-S,', bold("sp3-N"), ')', bold(',mac*'))),
            '(sp3-S,sp3-N),mac ')
p_d <- plot_d(d, marg_sig = TRUE, ylab = 'D', zero_line = FALSE) +
  scale_x_discrete(labels = mylabs) +
  scale_y_continuous(
    limits = c(0, 0.11), breaks = c(0, 0.05, 0.1), expand = c(0, 0)
  ) +
  theme(
    panel.grid.minor = element_blank(),
    legend.text = element_text(size = 16),
    legend.margin = margin(1, 5, 2, 1),
    legend.box.margin = margin(0, 0, -5, 0),
    plot.margin = margin(0.2, 0.2, 0, 0.2, 'cm')
  )
p_d

#### COMBINE AND SAVE ----------------------------------------------------------
p <- plot_grid(p_m1, p_m2, p_d,
               labels = c('A', 'B', 'C'), label_size = 20,
               rel_heights = c(1, 0.5))
## BUG FIX: pass `p` explicitly. ggsave() otherwise saves the last *displayed*
## ggplot (p_d), not the combined figure, because `p` is never printed.
ggsave(figfile, p, width = 12, height = 10,
       device = cairo_ps, fallback_resolution = 150)
system(paste0('xdg-open ', figfile))
| /scripts/figs_ms/SuppFig_geneflow-clade1.R | no_license | jelmerp/lemurs_msp3 | R | false | false | 3,423 | r | #### SET-UP --------------------------------------------------------------------
library(here)
source(here('scripts/genomics/admixtools/admixtools_plot_fun.R'))
source(here('scripts/genomics/gphocs/gphocs_plot_fun.R'))
source(here('scripts/gphocs/gphocs_plot_fun_msp3.R'))
## Output file:
figfile <- here('figs/ms/SuppFig_geneflow-clade1.eps')
#### D-STATS PLOT --------------------------------------------------------------
## Input files:
infile_gphocs <- here('analyses/gphocs/output/msp3_3sp/processed/msp3_3sp_mergedlogs.txt')
## Pops:
migpatterns <- c("mac_2_sp3E", "sp3E_2_mac",
"mac_2_sp3W", "sp3W_2_mac",
"mac_2_sp3", "sp3_2_mac")
miglabs <- c('mac > sp3S', 'sp3S > mac',
'mac > sp3N', 'sp3N > mac',
'mac > anc_sp3', 'anc_sp3 > mac')
## Load logs:
Log <- as.data.frame(fread(infile_gphocs, stringsAsFactors = FALSE))
mlog <- Log %>%
filter(var %in% c('2Nm', 'm.prop'),
!grepl('leh', migpattern),
!migpattern %in% c('sp3E_2_sp3W', 'sp3W_2_sp3E'),
migtype.run == c('mult')) %>%
mutate(migpattern = factor(migpattern, levels = migpatterns))
## Plot:
p_m1 <- vplot(
data = filter(mlog, var == '2Nm'),
xvar = 'migpattern', fillvar = 'cn', colvar = 'cn', yvar = 'val',
xlab = "", ylab = '2Nm',
legpos = 'top', legcolname = "migration in model", rm.leg.col = FALSE,
yticks.by = 0.1, rotate.x.ann = TRUE, linecols = NULL) +
scale_x_discrete(labels = miglabs) +
theme(plot.margin = margin(0.2, 0.2, 0, 0.2, 'cm'))
p_m1
p_m2 <- vplot(
data = filter(mlog, var == 'm.prop'),
xvar = 'migpattern', fillvar = 'cn', colvar = 'cn', yvar = 'val',
xlab = "", ylab = 'migrant percentage',
legpos = 'top', legcolname = "migration in model", rm.leg.col = FALSE,
yticks.by = 0.03, rotate.x.ann = TRUE, linecols = NULL) +
scale_x_discrete(labels = miglabs) +
theme(plot.margin = margin(0.2, 0.2, 0, 0.5, 'cm'))
p_m2
#### D-STATS PLOT --------------------------------------------------------------
## Input files:
infile_atools <- here('analyses/admixtools/output/msp3proj.mac3.FS6.dstat_msp3.msp3pops.dmode.out')
## D-stats df:
d <- prep_d(infile_atools) %>%
mutate(popcomb = gsub('\\(m', '(', popcomb)) %>%
mutate(popcomb = gsub(',m', ',', popcomb)) %>%
mutate(popcomb = gsub('Mt3', '*', popcomb)) %>%
mutate(popcomb = gsub('east', '-S', popcomb)) %>%
mutate(popcomb = gsub('west', '-N', popcomb)) %>%
mutate(popcomb = gsub('mac$', 'mac ', popcomb))
## Create D-stats plot:
mylabs <- c(expression(paste('(sp3-S,', bold("sp3-N"), ')', bold(',mac*'))),
'(sp3-S,sp3-N),mac ')
p_d <- plot_d(d, marg_sig = TRUE, ylab = 'D', zero_line = FALSE) +
scale_x_discrete(labels = mylabs) +
scale_y_continuous(
limits = c(0, 0.11), breaks = c(0, 0.05, 0.1), expand = c(0, 0)
) +
theme(
panel.grid.minor = element_blank(),
legend.text = element_text(size = 16),
legend.margin = margin(1, 5, 2, 1),
legend.box.margin = margin(0, 0, -5, 0),
plot.margin = margin(0.2, 0.2, 0, 0.2, 'cm')
)
p_d
#### COMBINE AND SAVE ----------------------------------------------------------
p <- plot_grid(p_m1, p_m2, p_d,
labels = c('A', 'B', 'C'), label_size = 20,
rel_heights = c(1, 0.5))
ggsave(figfile, width = 12, height = 10,
device = cairo_ps, fallback_resolution = 150)
system(paste0('xdg-open ', figfile))
|
library(gridExtra)
library(vcd)
#' Test all pairwise associations between the columns of a data frame.
#'
#' Chooses a test per column-type pair: Kruskal-Wallis for categorical vs.
#' numeric, linear-regression ANOVA for numeric vs. numeric, and Fisher's
#' exact test (simulated p-value) for categorical vs. categorical. Results
#' are sorted by p-value and, for pairs passing 'sig.level', a diagnostic
#' plot is drawn on the active graphics device.
#'
#' @param df data frame to analyse (factor/logical columns = categorical).
#' @param include if set, keep only pairs where at least one name is listed.
#' @param exclude if set, drop pairs where either name is listed.
#' @param sig.level only plot pairs with p <= sig.level (NULL = plot all).
#' @param include.group if set, keep only pairs where BOTH names are listed.
#' @param exclude.group list of character vectors; pairs falling inside the
#'   same vector (within-group comparisons) are skipped.
#' @return data frame with columns a, b, type, p, R, ordered by p-value.
test_pairwise_assoc <- function (df, include=NULL, exclude=NULL, sig.level=NULL, include.group=NULL, exclude.group=NULL) {
  tests <- data.frame(a = character(0), b = character(0), type = character(0), p = numeric(0), R = numeric(0), stringsAsFactors=F)
  # seq_len() is empty for a 0/1-column df, where 1:(ncol(df)-1) would crash
  for (i in seq_len(ncol(df) - 1)) {
    for (j in (i+1):ncol(df)) {
      name.a <- names(df)[i]
      name.b <- names(df)[j]
      # pair-level include/exclude filters
      if (!is.null(include) && !(name.a %in% include) && !(name.b %in% include)) next
      if (!is.null(exclude) && (name.a %in% exclude || name.b %in% exclude)) next
      if (!is.null(include.group) && (!(name.a %in% include.group) || !(name.b %in% include.group))) next
      if (!is.null(exclude.group)) {
        # skip comparisons where both columns belong to the same group
        found <- FALSE
        for (g in seq_along(exclude.group)) {
          if (name.a %in% exclude.group[[g]] && name.b %in% exclude.group[[g]]) {
            found <- TRUE
            break
          }
        }
        if (found) {
          print(paste0("Excluding within-group comparison '", name.a, "' and '", name.b, "'"))
          next
        }
      }
      a <- df[,i]
      b <- df[,j]
      if ((is.factor(a) || is.logical(a)) && is.numeric(b)) {
        print(paste0("CATEGORIAL VS. NUMERIC: ", names(df)[i], " <--> ", names(df)[j]))
        if (length(levels(as.factor(as.character(a[!is.na(b)])))) > 1) { # we need more than one group
          test <- kruskal.test(b~a, na.action=na.exclude)
          tests[nrow(tests)+1,] <- c(name.a, name.b, "CAT-vs-NUM", test$p.value, NA)
        } else {
          print(paste0("  NO TEST PERFORMED: only single factor level: ", unique(a[!is.na(b)])));
        }
      } else if (is.numeric(a) && (is.factor(b) || is.logical(b))) {
        print(paste0("NUMERIC VS. CATEGORIAL: ", names(df)[i], " <--> ", names(df)[j]))
        if (length(levels(as.factor(as.character(b[!is.na(a)])))) > 1) { # we need more than one group
          test <- kruskal.test(a~b, na.action=na.exclude)
          tests[nrow(tests)+1,] <- c(name.b, name.a, "CAT-vs-NUM", test$p.value, NA)
        } else {
          print(paste0("  NO TEST PERFORMED: only single factor level: ", unique(b[!is.na(a)])));
        }
      } else if (is.numeric(a) && is.numeric(b)) {  # && (scalar), not & (vectorized)
        print(paste0("NUMERIC VS. NUMERIC: ", names(df)[i], " <--> ", names(df)[j]))
        fit <- lm(b~a)
        p <- anova(fit)$'Pr(>F)'[1]
        tests[nrow(tests)+1,] <- c(name.a, name.b, "NUM-vs-NUM", p, summary(fit)$r.squared)
      } else if ((is.factor(a) || is.logical(a)) && (is.factor(b) || is.logical(b))) {
        print(paste0("CATEGORIAL VS. CATEGORIAL: ", names(df)[i], " <--> ", names(df)[j]))
        pair.table <- table(df[,c(i, j)])
        # explicit logical test (was `if (sum(pair.table > 0))`, a bare count)
        if (sum(pair.table) > 0) {
          # simulated p-value keeps large/sparse tables tractable
          p <- fisher.test(pair.table, simulate.p.value = TRUE, B = 10000)$p.value
          tests[nrow(tests)+1,] <- c(name.a, name.b, "CAT-vs-CAT", p, NA)
        }
      }
    }
  }
  # sort results by p-value (stored as character above, so convert first)
  tests$p <- as.numeric(tests$p)
  tests$R <- as.numeric(tests$R)
  tests <- tests[order(tests$p),]
  # plot results; seq_len() avoids the 1:0 crash when no tests were performed
  for (i in seq_len(nrow(tests))) {
    if (!is.null(sig.level) && tests[i,"p"] > sig.level) break
    name.a <- tests[i, "a"]
    name.b <- tests[i, "b"]
    a <- df[,name.a]
    b <- df[,name.b]
    if (tests[i,"type"] == "CAT-vs-NUM") {
      boxplot(b~a, xlab=name.a, ylab=name.b, main=sprintf("p=%.2g", tests[i, "p"]), na.action=na.exclude, outline=F, cex.axis=0.6, las=2)
      stripchart(b~a, method="jitter", na.action=na.exclude, vertical=T, pch=19, col=1:length(levels(as.factor(a))), cex=0.5, add=T)
    } else if (tests[i,"type"] == "NUM-vs-NUM") {
      plot(a, b, xlab=name.a, ylab=name.b, main=sprintf("R=%.2f, p=%.2g", tests[i, "R"], tests[i, "p"]))
      abline(lm(b~a), col="red")
    } else if (tests[i,"type"] == "CAT-vs-CAT") {
      pair.table <- table(df[,c(name.a, name.b)])
      mosaic(pair.table, pop=F, main=sprintf("p=%.2g", tests[i, "p"]))  # vcd::mosaic
      labeling_cells(text=pair.table)(pair.table)
    }
  }
  return(tests)
}
## Pairwise-association screen of HOMER-annotated ChIP peaks.
## Reads the annotated peak table, keeps a subset of informative columns,
## recodes them for testing, and runs test_pairwise_assoc() with the
## diagnostic plots written to a PDF.
peaks <- read.delim("/mnt/projects/fiona/results/homer/ChIP24_AT2_ER_peaks.annotated.with-expr.tsv", check.names = F)
## Drop peaks on unplaced contigs (chromosome names starting "GL") and weak peaks.
peaks <- peaks[!grepl("^GL", peaks$Chr),]
peaks <- peaks[peaks$`Peak Score` >= 10,]
## Keep only the columns entering the association screen.
peaks <- peaks[,c("Chr", "Peak Score", "Annotation", "Distance to TSS", "Gene Type", "CpG%", "GC%",
"RUNX1(Runt)/Jurkat-RUNX1-ChIP-Seq(GSE29180)/Homer No. motifs",
"ETS1(ETS)/Jurkat-ETS1-ChIP-Seq(GSE17954)/Homer No. motifs",
"overlaps_enhancer_in_celllines", "runx1_overlap")]
peaks$Chr <- factor(peaks$Chr)
## Collapse the annotation to its leading keyword, e.g. "intron (NM_...)" -> "intron".
peaks$Annotation <- gsub(" \\(.*", "", peaks$Annotation)
peaks$Annotation[peaks$Annotation==""] <- "other"
peaks$Annotation <- factor(peaks$Annotation)
#peaks$`Detailed Annotation` <- gsub(" \\(.*", "", peaks$`Detailed Annotation`)
#peaks$`Detailed Annotation` <- gsub(".*\\|", "", peaks$`Detailed Annotation`)
#peaks$`Detailed Annotation` <- gsub("CpG.*", "CpG", peaks$`Detailed Annotation`)
#peaks$`Detailed Annotation`[peaks$`Detailed Annotation`==""] <- "other"
#peaks$`Detailed Annotation` <- factor(peaks$`Detailed Annotation`)
## Use absolute distance; the direction relative to the TSS is not of interest here.
peaks$`Distance to TSS` <- abs(peaks$`Distance to TSS`)
## RUNX motif counts: NA means no motif found; also derive a yes/no factor.
names(peaks)[names(peaks)=="RUNX1(Runt)/Jurkat-RUNX1-ChIP-Seq(GSE29180)/Homer No. motifs"] <- "numRUNXmotifs"
peaks$numRUNXmotifs[is.na(peaks$numRUNXmotifs)] <- 0
peaks$hasRUNXmotif <- factor(ifelse(peaks$numRUNXmotifs > 0, "yes", "no"))
## Same recoding for ETS motifs.
names(peaks)[names(peaks)=="ETS1(ETS)/Jurkat-ETS1-ChIP-Seq(GSE17954)/Homer No. motifs"] <- "numETSmotifs"
peaks$numETSmotifs[is.na(peaks$numETSmotifs)] <- 0
peaks$hasETSmotif <- factor(ifelse(peaks$numETSmotifs > 0, "yes", "no"))
## Any non-empty cell-line overlap string counts as "yes".
peaks$overlaps_enhancer_in_celllines <- factor(ifelse(peaks$overlaps_enhancer_in_celllines != "", "yes", "no"))
names(peaks)[names(peaks)=="overlaps_enhancer_in_celllines"] <- "inEnhancer"
peaks$runx1_overlap <- factor(peaks$runx1_overlap)
## Run the screen; plot pairs with p <= 0.1 and skip within-group comparisons
## of redundant variables. NOTE(review): "Detailed Annotation" is no longer
## among the selected columns, so that exclude.group entry is inert.
pdf("/mnt/projects/fiona/results/associations.pdf")
tests <- test_pairwise_assoc(
peaks,
sig.level=0.1,
exclude.group=list(c("Annotation", "Detailed Annotation"),
c("numRUNXmotifs", "hasRUNXmotif"),
c("numETSmotifs", "hasETSmotif"))
)
dev.off() | /associations.R | no_license | Gig77/ccri-fiona | R | false | false | 6,617 | r | library(gridExtra)
library(vcd)
test_pairwise_assoc <- function (df, include=NULL, exclude=NULL, sig.level=NULL, include.group=NULL, exclude.group=NULL) {
tests <- data.frame(a = character(0), b = character(0), type = character(0), p = numeric(0), R = numeric(0), stringsAsFactors=F)
for (i in 1:(ncol(df)-1)) {
for (j in (i+1):ncol(df)) {
name.a <- names(df)[i]
name.b <- names(df)[j]
if (!is.null(include) && !(name.a %in% include) && !(name.b %in% include)) next
if (!is.null(exclude) && (name.a %in% exclude || name.b %in% exclude)) next
if (!is.null(include.group) && (!(name.a %in% include.group) || !(name.b %in% include.group))) next
if (!is.null(exclude.group)) {
found = FALSE
for (g in 1:length(exclude.group)) {
if (name.a %in% exclude.group[[g]] && name.b %in% exclude.group[[g]]) {
found = TRUE
break
}
}
if (found) {
print(paste0("Excluding within-group comparison '", name.a, "' and '", name.b, "'"))
next
}
}
a <- df[,i]
b <- df[,j]
if ((is.factor(a) || is.logical(a)) && is.numeric(b)) {
print(paste0("CATEGORIAL VS. NUMERIC: ", names(df)[i], " <--> ", names(df)[j]))
if (length(levels(as.factor(as.character(a[!is.na(b)])))) > 1) { # we need more than one group
test <- kruskal.test(b~a, na.action=na.exclude)
tests[nrow(tests)+1,] <- c(name.a, name.b, "CAT-vs-NUM", test$p.value, NA)
} else {
print(paste0(" NO TEST PERFORMED: only single factor level: ", unique(a[!is.na(b)])));
}
} else if (is.numeric(a) && (is.factor(b) || is.logical(b))) {
print(paste0("NUMERIC VS. CATEGORIAL: ", names(df)[i], " <--> ", names(df)[j]))
if (length(levels(as.factor(as.character(b[!is.na(a)])))) > 1) { # we need more than one group
test <- kruskal.test(a~b, na.action=na.exclude)
tests[nrow(tests)+1,] <- c(name.b, name.a, "CAT-vs-NUM", test$p.value, NA)
} else {
print(paste0(" NO TEST PERFORMED: only single factor level: ", unique(b[!is.na(a)])));
}
} else if (is.numeric(a) & is.numeric(b)) {
print(paste0("NUMERIC VS. NUMERIC: ", names(df)[i], " <--> ", names(df)[j]))
fit <- lm(b~a)
p <- anova(fit)$'Pr(>F)'[1]
tests[nrow(tests)+1,] <- c(name.a, name.b, "NUM-vs-NUM", p, summary(fit)$r.squared)
} else if ((is.factor(a) || is.logical(a)) && (is.factor(b) || is.logical(b))) {
print(paste0("CATEGORIAL VS. CATEGORIAL: ", names(df)[i], " <--> ", names(df)[j]))
# if (length(levels(as.factor(as.character(a[!is.na(b)])))) > 1 | length(levels(as.factor(as.character(b[!is.na(a)])))) > 1) { # we need more than one group in each category
pair.table <- table(df[,c(i, j)])
if (sum(pair.table > 0)) {
p <- fisher.test(pair.table, simulate.p.value = TRUE, B = 10000)$p.value
# p <- fisher.test(pair.table, workspace=200000000)$p.value
tests[nrow(tests)+1,] <- c(name.a, name.b, "CAT-vs-CAT", p, NA)
}
# } else {
# print(paste0(" NO TEST PERFORMED: only single factor level: ", unique(a[!is.na(b)]), " VS. ", unique(b[!is.na(a)])));
# }
}
}
}
# sort results by p-value
tests$p <- as.numeric(tests$p)
tests$R <- as.numeric(tests$R)
tests <- tests[order(tests$p),]
# plot results
for (i in 1:nrow(tests)) {
if (!is.null(sig.level) && tests[i,"p"] > sig.level) break
name.a <- tests[i, "a"]
name.b <- tests[i, "b"]
a <- df[,name.a]
b <- df[,name.b]
if (tests[i,"type"] == "CAT-vs-NUM") {
boxplot(b~a, xlab=name.a, ylab=name.b, main=sprintf("p=%.2g", tests[i, "p"]), na.action=na.exclude, outline=F, cex.axis=0.6, las=2)
stripchart(b~a, method="jitter", na.action=na.exclude, vertical=T, pch=19, col=1:length(levels(as.factor(a))), cex=0.5, add=T)
} else if (tests[i,"type"] == "NUM-vs-NUM") {
plot(a, b, xlab=name.a, ylab=name.b, main=sprintf("R=%.2f, p=%.2g", tests[i, "R"], tests[i, "p"]))
abline(lm(b~a), col="red")
} else if (tests[i,"type"] == "CAT-vs-CAT") {
pair.table <- table(df[,c(name.a, name.b)])
mosaic(pair.table, pop=F, main=sprintf("p=%.2g", tests[i, "p"]))
labeling_cells(text=pair.table)(pair.table)
}
}
return(tests)
}
peaks <- read.delim("/mnt/projects/fiona/results/homer/ChIP24_AT2_ER_peaks.annotated.with-expr.tsv", check.names = F)
peaks <- peaks[!grepl("^GL", peaks$Chr),]
peaks <- peaks[peaks$`Peak Score` >= 10,]
peaks <- peaks[,c("Chr", "Peak Score", "Annotation", "Distance to TSS", "Gene Type", "CpG%", "GC%",
"RUNX1(Runt)/Jurkat-RUNX1-ChIP-Seq(GSE29180)/Homer No. motifs",
"ETS1(ETS)/Jurkat-ETS1-ChIP-Seq(GSE17954)/Homer No. motifs",
"overlaps_enhancer_in_celllines", "runx1_overlap")]
peaks$Chr <- factor(peaks$Chr)
peaks$Annotation <- gsub(" \\(.*", "", peaks$Annotation)
peaks$Annotation[peaks$Annotation==""] <- "other"
peaks$Annotation <- factor(peaks$Annotation)
#peaks$`Detailed Annotation` <- gsub(" \\(.*", "", peaks$`Detailed Annotation`)
#peaks$`Detailed Annotation` <- gsub(".*\\|", "", peaks$`Detailed Annotation`)
#peaks$`Detailed Annotation` <- gsub("CpG.*", "CpG", peaks$`Detailed Annotation`)
#peaks$`Detailed Annotation`[peaks$`Detailed Annotation`==""] <- "other"
#peaks$`Detailed Annotation` <- factor(peaks$`Detailed Annotation`)
peaks$`Distance to TSS` <- abs(peaks$`Distance to TSS`)
names(peaks)[names(peaks)=="RUNX1(Runt)/Jurkat-RUNX1-ChIP-Seq(GSE29180)/Homer No. motifs"] <- "numRUNXmotifs"
peaks$numRUNXmotifs[is.na(peaks$numRUNXmotifs)] <- 0
peaks$hasRUNXmotif <- factor(ifelse(peaks$numRUNXmotifs > 0, "yes", "no"))
names(peaks)[names(peaks)=="ETS1(ETS)/Jurkat-ETS1-ChIP-Seq(GSE17954)/Homer No. motifs"] <- "numETSmotifs"
peaks$numETSmotifs[is.na(peaks$numETSmotifs)] <- 0
peaks$hasETSmotif <- factor(ifelse(peaks$numETSmotifs > 0, "yes", "no"))
peaks$overlaps_enhancer_in_celllines <- factor(ifelse(peaks$overlaps_enhancer_in_celllines != "", "yes", "no"))
names(peaks)[names(peaks)=="overlaps_enhancer_in_celllines"] <- "inEnhancer"
peaks$runx1_overlap <- factor(peaks$runx1_overlap)
pdf("/mnt/projects/fiona/results/associations.pdf")
tests <- test_pairwise_assoc(
peaks,
sig.level=0.1,
exclude.group=list(c("Annotation", "Detailed Annotation"),
c("numRUNXmotifs", "hasRUNXmotif"),
c("numETSmotifs", "hasETSmotif"))
)
dev.off() |
# IGVsnapshots.R
# Generate an IGV batch file to look at all regions with peaks (just do one snapshot if they overlap)
# Randy Johnson
# CCR Collaborative Bioinformatics Resource at Frederick National Laboratory
# Leidos Biomedical Research, Inc

## Parse "key=value" command line arguments into a named list.
vars <- commandArgs(TRUE)
tmp <- strsplit(vars, '=')
vars <- as.list(sapply(tmp, `[`, 2))
names(vars) <- sapply(tmp, `[`, 1)

##### testing #####
if(FALSE)
{
    vars$wtIns <- '../Data/Hughes_TAF3_WT_q20ct2.bed'
    vars$wtPeaks <- '../Results/Hughes_TAF3_WT_q20ct2_peaks.bed'
    vars$mtIns <- '../Data/Hughes_TAF3mutM880A_q20ct2.bed'
    vars$mtPeaks <- '../Results/Hughes_TAF3mutM880A_q20ct2_peaks.bed'
}

##### Defaults #####

##### Read in data #####
## wtIns <- read.table(vars$wtIns, skip = 1, stringsAsFactors = FALSE, header = FALSE)
wtPeaks <- read.table(vars$wtPeaks, skip = 1, stringsAsFactors = FALSE, header = FALSE)
names(wtPeaks) <- c('chr', 'start', 'end', 'genes', 'V5', 'strand', 'thickStart', 'thickEnd')
## mtIns <- read.table(vars$mtIns, skip = 1, stringsAsFactors = FALSE, header = FALSE)
mtPeaks <- read.table(vars$mtPeaks, skip = 1, stringsAsFactors = FALSE, header = FALSE)
names(mtPeaks) <- c('chr', 'start', 'end', 'genes', 'V5', 'strand', 'thickStart', 'thickEnd')

Peaks <- list(wt = wtPeaks,
              mt = mtPeaks)

##### Get snapshot coordinates #####
snapshots <- matrix(nrow = 0, ncol = 3, dimnames = list(character(), c("chr", "start", "end")))

allchrs <- unique(c(wtPeaks$chr, mtPeaks$chr))
chr.curr <- allchrs[1]

## BUG FIXES relative to the original version:
##   * the loop condition used `&`, silently dropping all remaining peaks of
##     one track once the other was exhausted -- changed to `|` (the body
##     already contained unreachable handling for the "one track empty" case)
##   * a stray `both <- FALSE` discarded the overlap detection, so overlapping
##     peaks were never merged into a single snapshot
##   * `end` was never assigned when the other track was empty
##   * equal start positions left `pick` unset and crashed
while(nrow(Peaks$wt) > 0 | nrow(Peaks$mt) > 0)
{
    # does either track still have peaks on the current chromosome?
    wt.here <- nrow(Peaks$wt) > 0 && Peaks$wt$chr[1] == chr.curr
    mt.here <- nrow(Peaks$mt) > 0 && Peaks$mt$chr[1] == chr.curr

    # move to next chromosome if we need to
    if(!wt.here && !mt.here)
    {
        allchrs <- allchrs[-1]
        if(length(allchrs) == 0)
            stop('Possible problem in sorting of bed files detected')
        chr.curr <- allchrs[1]
        next
    }

    # pick the track whose next peak starts first (ties go to wt)
    if(wt.here && (!mt.here || Peaks$wt$start[1] <= Peaks$mt$start[1]))
    {
        pick <- 'wt'
        nopick <- 'mt'
    }else{
        pick <- 'mt'
        nopick <- 'wt'
    }

    # identify overlap with the other track's next peak (same chromosome only)
    both <- FALSE
    end <- Peaks[[pick]]$end[1]
    if(nrow(Peaks[[nopick]]) > 0 && Peaks[[nopick]]$chr[1] == chr.curr &&
       Peaks[[pick]]$end[1] > Peaks[[nopick]]$start[1])
    {
        both <- TRUE
        end <- max(c(Peaks[[nopick]]$end[1], Peaks[[pick]]$end[1]))
    }

    # construct location for snapshot
    snapshots <- rbind(snapshots,
                       c(chr.curr,
                         as.character(Peaks[[pick]]$start[1]),
                         as.character(end)))

    # remove included peaks
    Peaks[[pick]] <- Peaks[[pick]][-1,]
    if(both)
        Peaks[[nopick]] <- Peaks[[nopick]][-1,]
}

##### Write file #####
cat("snapshotDirectory ~/Documents/Work/CCBR-Commons/Projects/ccbr577/Results/peaks/", '\n', file = 'IGVbatch.txt')

# seq_len() keeps this loop safe when no snapshots were collected
for(i in seq_len(nrow(snapshots)))
{
    cat("goto ", snapshots[i,1], ":", snapshots[i,2], "-", snapshots[i,3], "\n",
        file = 'IGVbatch.txt', sep = '', append = TRUE)
    cat("snapshot\n", file = 'IGVbatch.txt', append = TRUE)
}
| /Projects/ccbr577/Scripts/IGVsnapshots.R | no_license | CCBR/CCBR-Commons | R | false | false | 3,755 | r | # IGVsnapshots.R
# Generate an IGV batch file to look at all regions with peaks (just do one snapshot if they overlap)
# Randy Johnson
# CCR Collaborative Bioinformatics Resource at Frederick National Laboratory
# Leidos Biomedical Research, Inc
vars <- commandArgs(TRUE)
tmp <- strsplit(vars, '=')
vars <- as.list(sapply(tmp, `[`, 2))
names(vars) <- sapply(tmp, `[`, 1)
##### testing #####
if(FALSE)
{
vars$wtIns <- '../Data/Hughes_TAF3_WT_q20ct2.bed'
vars$wtPeaks <- '../Results/Hughes_TAF3_WT_q20ct2_peaks.bed'
vars$mtIns <- '../Data/Hughes_TAF3mutM880A_q20ct2.bed'
vars$mtPeaks <- '../Results/Hughes_TAF3mutM880A_q20ct2_peaks.bed'
}
##### Defaults #####
##### Read in data #####
## wtIns <- read.table(vars$wtIns, skip = 1, stringsAsFactors = FALSE, header = FALSE)
wtPeaks <- read.table(vars$wtPeaks, skip = 1, stringsAsFactors = FALSE, header = FALSE)
names(wtPeaks) <- c('chr', 'start', 'end', 'genes', 'V5', 'strand', 'thickStart', 'thickEnd')
## mtIns <- read.table(vars$mtIns, skip = 1, stringsAsFactors = FALSE, header = FALSE)
mtPeaks <- read.table(vars$mtPeaks, skip = 1, stringsAsFactors = FALSE, header = FALSE)
names(mtPeaks) <- c('chr', 'start', 'end', 'genes', 'V5', 'strand', 'thickStart', 'thickEnd')
Peaks <- list(wt = wtPeaks,
mt = mtPeaks)
##### Get snapshot coordinates #####
snapshots <- matrix(nrow = 0, ncol = 3, dimnames = list(character(), c("chr", "start", "end")))
allchrs <- unique(c(wtPeaks$chr, mtPeaks$chr))
chr.curr <- allchrs[1]
while(dim(Peaks$wt)[1] > 0 & dim(Peaks$mt)[1] > 0)
{
# reset pick
pick <- ''
nopick <- ''
# move to next chromosome if we need to
if(Peaks$wt$chr[1] != chr.curr & Peaks$mt$chr[1] != chr.curr)
{
allchrs <- allchrs[-1]
chr.curr <- allchrs[1]
# this should be the next one ... could be buggy!
if(Peaks$wt$chr[1] != allchrs[1] | Peaks$mt$chr[1] != allchrs[1])
stop('Possible problem in sorting of bed files detected')
}
# find next peak start
if(dim(Peaks$wt)[1] == 0)
{
pick <- 'mt'
nopick <- 'wt'
}
if(dim(Peaks$mt)[1] == 0 & pick == '')
{
pick <- 'wt'
nopick <- 'mt'
}
if(Peaks$wt$chr[1] != chr.curr & pick == '')
{
pick <- 'mt'
nopick <- 'wt'
}
if(Peaks$mt$chr[1] != chr.curr & pick == '')
{
pick <- 'wt'
nopick <- 'mt'
}
if(Peaks$wt$start[1] < Peaks$mt$start[1] & pick == '')
{
pick <- 'wt'
nopick <- 'mt'
}
if(Peaks$wt$start[1] > Peaks$mt$start[1] & pick == '')
{
pick <- 'mt'
nopick <- 'wt'
}
# identify overlaps between the two
if(dim(Peaks[[nopick]])[1] > 0)
{
if(Peaks[[pick]]$end[1] > Peaks[[nopick]]$start[1])
{
both <- TRUE
end <- max(c(Peaks[[nopick]]$end[1], Peaks[[pick]]$end[1]))
}else{
both <- FALSE
end <- Peaks[[pick]]$end[1]
}
both <- FALSE
}
# construct location for snapshot
snapshots <- rbind(snapshots,
c(chr.curr,
as.character(Peaks[[pick]]$start[1]),
as.character(end)))
# remove included peaks
Peaks[[pick]] <- Peaks[[pick]][-1,]
if(both)
Peaks[[nopick]] <- Peaks[[nopick]][-1,]
}
##### Write file #####
cat("snapshotDirectory ~/Documents/Work/CCBR-Commons/Projects/ccbr577/Results/peaks/", '\n', file = 'IGVbatch.txt')
for(i in 1:dim(snapshots)[1])
{
cat("goto ", snapshots[i,1], ":", snapshots[i,2], "-", snapshots[i,3], "\n",
file = 'IGVbatch.txt', sep = '', append = TRUE)
cat("snapshot\n", file = 'IGVbatch.txt', append = TRUE)
}
|
\alias{gtkTextIterCanInsert}
\name{gtkTextIterCanInsert}
\title{gtkTextIterCanInsert}
\description{Considering the default editability of the buffer, and tags that
affect editability, determines whether text inserted at \code{iter} would
be editable. If text inserted at \code{iter} would be editable then the
user should be allowed to insert text at \code{iter}.
\code{\link{gtkTextBufferInsertInteractive}} uses this function to decide
whether insertions are allowed at a given position.}
\usage{gtkTextIterCanInsert(object, default.editability)}
\arguments{
\item{\code{object}}{[\code{\link{GtkTextIter}}] an iterator}
\item{\code{default.editability}}{[logical] \code{TRUE} if text is editable by default}
}
\value{[logical] whether text inserted at \code{iter} would be editable}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
| /man/gtkTextIterCanInsert.Rd | no_license | cran/RGtk2.10 | R | false | false | 860 | rd | \alias{gtkTextIterCanInsert}
\name{gtkTextIterCanInsert}
\title{gtkTextIterCanInsert}
\description{Considering the default editability of the buffer, and tags that
affect editability, determines whether text inserted at \code{iter} would
be editable. If text inserted at \code{iter} would be editable then the
user should be allowed to insert text at \code{iter}.
\code{\link{gtkTextBufferInsertInteractive}} uses this function to decide
whether insertions are allowed at a given position.}
\usage{gtkTextIterCanInsert(object, default.editability)}
\arguments{
\item{\code{object}}{[\code{\link{GtkTextIter}}] an iterator}
\item{\code{default.editability}}{[logical] \code{TRUE} if text is editable by default}
}
\value{[logical] whether text inserted at \code{iter} would be editable}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
# Per-state crime summaries from the USArrests data set
# (columns: state, Murder, Assault, UrbanPop, Rape).
dadosCrimes <- read.csv("USArrests.csv")
par(mfrow = c(1, 1))

# Vectorized replacement of the original element-wise loop: sum of the three
# crime-rate columns (2 = Murder, 3 = Assault, 5 = Rape) scaled by UrbanPop
# (column 4), one value per state. unname() keeps the plain numeric vector
# the loop produced.
vetorSomaCrimes <- unname(rowSums(dadosCrimes[, c(2, 3, 5)]) / dadosCrimes[, 4])

# States ranked by overall (UrbanPop-scaled) crime
bpEstadosMaisCrimes <- barplot(vetorSomaCrimes, main = "Estados com mais crimes", xaxt = "none")
axis(1, at = bpEstadosMaisCrimes, labels = dadosCrimes[, 1], las = 2, cex.axis = 0.7)

# Murder rate by state
bpCrimeAssassinato <- barplot(dadosCrimes[, 2], main = "Estados com mais assassinato", xaxt = "none")
axis(1, at = bpCrimeAssassinato, labels = dadosCrimes[, 1], las = 2, cex.axis = 0.7)

# Assault rate by state
bpCrimeAssalto <- barplot(dadosCrimes[, 3], main = "Estados com mais assalto", xaxt = "none")
axis(1, at = bpCrimeAssalto, labels = dadosCrimes[, 1], las = 2, cex.axis = 0.7)

# Rape rate by state
bpCrimeEstupro <- barplot(dadosCrimes[, 5], main = "Estados com mais estupro", xaxt = "none")
axis(1, at = bpCrimeEstupro, labels = dadosCrimes[, 1], las = 2, cex.axis = 0.7)
| /companias/crimesEUA.r | no_license | bernardomsvieira/Rproject | R | false | false | 918 | r | dadosCrimes = read.csv("USArrests.csv")
par(mfrow=c(1,1))
vetorSomaCrimes = c()
for(i in 1:length(dadosCrimes[,1]))
{
pop = as.numeric(dadosCrimes[i,][4])
soma = sum(dadosCrimes[i,][2],dadosCrimes[i,][3],dadosCrimes[i,][5])
vetorSomaCrimes[i] = soma/pop
}
bpEstadosMaisCrimes = barplot(vetorSomaCrimes,main="Estados com mais crimes",xaxt="none")
axis(1,at=bpEstadosMaisCrimes,labels=dadosCrimes[,1],las=2,cex.axis=0.7)
bpCrimeAssassinato = barplot(dadosCrimes[,2],main="Estados com mais assassinato",xaxt="none")
axis(1,at=bpCrimeAssassinato,labels=dadosCrimes[,1],las=2,cex.axis=0.7)
bpCrimeAssalto = barplot(dadosCrimes[,3],main="Estados com mais assalto",xaxt="none")
axis(1,at=bpCrimeAssalto,labels=dadosCrimes[,1],las=2,cex.axis=0.7)
bpCrimeEstupro = barplot(dadosCrimes[,5],main="Estados com mais estupro",xaxt="none")
axis(1,at=bpCrimeEstupro,labels=dadosCrimes[,1],las=2,cex.axis=0.7)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/blblm.R
\name{sigma.blblm}
\alias{sigma.blblm}
\title{obtain sigma}
\usage{
\method{sigma}{blblm}(object, confidence = FALSE, level = 0.95, ...)
}
\arguments{
\item{object}{blblm class}
\item{confidence}{need confidence interval or not}
\item{level}{1-alpha, confidence level}
\item{...}{more arguments}
}
\description{
Extract the estimated residual standard deviation (sigma) from a fitted
\code{blblm} (bag of little bootstraps linear model) object, optionally
together with a confidence interval at the requested level.
}
| /man/sigma.blblm.Rd | permissive | shihanjing/sta141c-project | R | false | true | 416 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/blblm.R
\name{sigma.blblm}
\alias{sigma.blblm}
\title{obtain sigma}
\usage{
\method{sigma}{blblm}(object, confidence = FALSE, level = 0.95, ...)
}
\arguments{
\item{object}{blblm class}
\item{confidence}{need confidence interval or not}
\item{level}{1-alpha, confidence level}
\item{...}{more arguments}
}
\description{
obtain sigma
}
|
## S3 generic collecting per-variant association statistics from a fitted
## model m1 (with the genotype term) and its null model m0.
## Dispatches on m1. Implements the original TODO: coxph() tags an
## intercept-only fit with the extra class "coxph.null", so a coxph/coxph.null
## pairing is accepted as a matching model family.
blockstats <- function(m1, m0, coefname = "GENOTYPE") {
  classes.match <- identical(class(m1), class(m0)) ||
    ("coxph" %in% class(m1) && "coxph.null" %in% class(m0))
  stopifnot(classes.match)
  UseMethod("blockstats", m1)
}
## Linear model method: Wald estimate and two-sided t-test p-value for the
## coefficient of interest. m0 is unused; no LRT is reported for lm fits.
blockstats.lm <- function(m1, m0, coefname = "GENOTYPE") {
  est <- coef(m1)[[coefname]]
  stderr <- sqrt(vcov(m1)[coefname, coefname])
  nobs <- length(na.omit(residuals(m1)))  # seemingly no direct way to extract
  p.two.sided <- 2 * pt(-abs(est) / stderr, df = m1$df.residual)
  c(n = nobs, beta = est, se = stderr, lrt = NA, pval = p.two.sided)
}
## GLM method: Wald estimate plus likelihood-ratio statistic taken as the
## deviance drop from the null model m0 (clamped at 0 for numerical noise).
## The p-value is left NA; callers derive it from the 1-df LRT.
blockstats.glm <- function(m1, m0, coefname = "GENOTYPE") {
  est <- coef(m1)[[coefname]]
  stderr <- sqrt(vcov(m1)[coefname, coefname])
  nobs <- length(na.omit(residuals(m1)))  # seemingly no direct way to extract
  dev.drop <- max(m0$deviance - m1$deviance, 0)
  c(n = nobs, beta = est, se = stderr, lrt = dev.drop, pval = NA)
}
## Cox model method: Wald estimate plus likelihood-ratio test against m0.
## BUG FIX: an intercept-only coxph fit (class "coxph.null") has a length-1
## $loglik, so the original hard-coded m0$loglik[2] yielded NA; take the last
## element instead, which is the final log-likelihood in both cases.
## (Assumes survival's documented loglik layout c(initial, final) -- confirm.)
blockstats.coxph <- function(m1, m0, coefname = "GENOTYPE") {
  beta <- unname(coef(m1)[coefname])
  se <- unname(sqrt(vcov(m1)[coefname, coefname]))
  ll1 <- m1$loglik[length(m1$loglik)]
  ll0 <- m0$loglik[length(m0$loglik)]
  lrt <- max(2*(ll1 - ll0), 0)  # clamp tiny negative values
  n <- m1$n
  return(c(n = n, beta = beta, se = se, lrt = lrt, pval = NA))
}
## Cumulative-link model method (ordinal::clm): Wald estimate plus
## likelihood-ratio statistic against the null model m0 (clamped at 0).
## The p-value is left NA; callers derive it from the 1-df LRT.
blockstats.clm <- function(m1, m0, coefname = "GENOTYPE") {
  est <- coef(m1)[[coefname]]
  stderr <- sqrt(vcov(m1)[coefname, coefname])
  lr <- max(2 * (m1$logLik - m0$logLik), 0)
  c(n = m1$n, beta = est, se = stderr, lrt = lr, pval = NA)
}
## Run single-variant association tests for one block (chunk) of imputed
## genotypes against an arbitrary model-fitting call.
##
## Arguments:
##   qcall          quoted model-fitting call (e.g. quote(lm(y ~ age)));
##                  evaluated in 'data' to obtain the null model m0, then each
##                  variant is tested by adding a GENOTYPE term via update()
##   data           data frame of phenotypes/covariates, one row per subject
##   minimac        path prefix of minimac output; "<prefix>.info.gz" and
##                  "<prefix>.dose.gz" are read (required; no other formats)
##   usubjid        column of 'data' holding the unique subject identifier
##   threshold.MAF  minimum minor allele frequency (recomputed from dosages)
##   threshold.Rsq  minimum imputation quality (recomputed from dosages)
##   threshold.pass SNP names analysed regardless of the thresholds
##   message.begin  prefix for progress messages written with cat()
##   out.signif     significant digits for the reported statistics
##   use.compiled   reserved; the compiled path is not implemented
##
## Returns a data frame combining the minimac info rows, the recomputed
## frequency/quality statistics, and per-variant association statistics.
blockassoc <- function(qcall, data, minimac,
                       usubjid = getOption("gtx.usubjid", "USUBJID"),
                       threshold.MAF = 0, threshold.Rsq = 0,
                       threshold.pass = NULL,
                       message.begin = "blockassoc",
                       out.signif = 6, use.compiled = FALSE) {
  stopifnot(is.expression(qcall) || is.call(qcall))
  if (use.compiled) {
    ## Placeholder: always stops, so the sketch below is currently unreachable.
    stop("Compiled functions not yet implemented")
    ## test for special cases that can be handled by compiled C++ code
    ## PUT ALL THIS INSIDE A tryCatch()
    if (all.equal(qcall[1], quote(lm()))) { # qcall is a call to lm()
      mc <- match.call(definition = lm, call = qcall, expand.dots = TRUE)
      ## only proceed if all arguments are ones we know how to handle:
      if (all(names(mc)[-1] %in% c("formula", "subset"))) {
        mc[1] <- quote(model.frame())
        mf <- eval(mc, envir = data) #rownames contain indices after subset and NA omission
        ## call my C++ f(data[as.integer(rownames(mf)), usubjid], mf)
      }
      ## fall through
    }
    ## fall through
    ## otherwise fall through to interpreted code
  }
  ## Null model, fitted once on the phenotype/covariate data only.
  m0 <- eval(qcall, envir = data)
  if (!missing(minimac)) {
    ## should loop over multiple files if supplied
    info <- read.table(gzfile(paste(minimac, "info.gz", sep = ".")),
                       comment.char = "", quote = "", header = TRUE, as.is = TRUE)
    dose <- read.table(gzfile(paste(minimac, "dose.gz", sep = ".")),
                       comment.char = "", quote = "", header = FALSE,
                       colClasses = c(rep("character", 2), rep("numeric", nrow(info))))
    stopifnot(all(dose[ , 2] == "DOSE"))
  } else {
    stop("Missing minimac argument; other genotype formats not supported")
  }
  ## Subject IDs must be unique; minimac dosage IDs have the form "ID->ID".
  stopifnot(length(unique(data[[usubjid]])) == nrow(data))
  data.machid <- paste(data[[usubjid]], data[[usubjid]], sep = "->")
  ## Keep subjects with complete data that are present in the dosage file.
  data.include <- apply(!is.na(data), 1, all) & data.machid %in% dose[ , 1]
  cat(message.begin, ": Analysing ", sum(data.include), "/", nrow(data),
      " subjects with non-missing phenotypes, covariates, and genotypes\n", sep = "")
  data <- data[data.include, , drop = FALSE]
  data.machid <- paste(data[[usubjid]], data[[usubjid]], sep = "->") # update
  cat(message.begin, ": Using ", length(data.machid), "/", nrow(dose),
      " genotyped subjects\n", sep = "")
  ## Reorder dosages to match 'data' and drop the two leading ID columns.
  dose <- dose[match(data.machid, dose[ , 1]), 3:ncol(dose)]
  stopifnot(nrow(dose) == nrow(data))
  stopifnot(ncol(dose) == nrow(info))
  stopifnot(!any(is.na(dose)))
  ## this will not match the Rsq calculated by minimac, because minimac calculates the *ALLELIC* r2hat
  xbar <- colMeans(dose)
  x2bar <- colMeans(dose^2)
  info2 <- data.frame(analysed.Freq1 = signif(0.5*xbar, out.signif),
                      analysed.Rsq = signif((x2bar - xbar^2)/(xbar*(1-0.5*xbar)), out.signif),
                      SNP = info$SNP,
                      stringsAsFactors = FALSE)
  ## which to analyse?
  ## should there be an option to choose whether filtering on calculated or minimac-reported statistics?
  #cat(basename(minimac), ": Analysing n candidate variants\n", sep = "")
  ww <- which((pmin(info2$analysed.Freq1, 1 - info2$analysed.Freq1) >= threshold.MAF &
               info2$analysed.Rsq >= threshold.Rsq) |
              info2$SNP %in% threshold.pass)
  cat(message.begin, ": Analysing ", length(ww), "/", nrow(info2), " variants\n", sep = "")
  ## Per-variant fit: add the dosage as GENOTYPE, refit, and collect the
  ## statistics via the blockstats() S3 generic; any error yields an NA row.
  assoc <- as.data.frame(t(vapply(ww, function(idx) {
    return(tryCatch({
      data$GENOTYPE <- dose[ , idx]
      m1 <- suppressWarnings(update(m0, formula = . ~ . + GENOTYPE, data = data))
      blockstats(m1, m0, coefname = "GENOTYPE")},
      error = function(e) return(c(NA, NA, NA, NA, NA))))},
    c(analysed.N = 0L, beta = 0., SE = 0., LRT = 0., pvalue = 0.))))
  ## If no method reported a p-value directly, derive one from the 1-df LRT.
  if (all(is.na(assoc$pvalue))) {
    assoc$pvalue <- signif(pchisq(assoc$LRT, df = 1, lower.tail = FALSE), out.signif)
  }
  ## Round for output and add a two-sided Wald p-value.
  assoc <- within(assoc, {
    pwald <- signif(pnorm(-abs(beta)/SE)*2, out.signif)
    beta <- signif(beta, out.signif)
    SE <- signif(SE, out.signif)
    analysed.N <- as.integer(analysed.N)
    LRT <- signif(LRT, out.signif)
    pvalue <- signif(pvalue, out.signif)
  })
  return(cbind(info[ww, ], info2[ww, ], assoc))
}
| /R/blockassoc.R | no_license | tengfei/gtx | R | false | false | 5,733 | r | blockstats <- function(m1, m0, coefname = "GENOTYPE") {
stopifnot(all.equal(class(m1), class(m0)))
## should handle special case where class(m0)=="coxph.null"
UseMethod("blockstats", m1)
}
#' blockstats method for lm fits.
#'
#' Extracts the estimate and standard error of the `coefname` coefficient
#' from the fitted model `m1` and derives a two-sided p-value from the t
#' distribution on the residual degrees of freedom.  The LRT slot is left
#' NA for linear models; `m0` is unused but kept for a uniform signature.
blockstats.lm <- function(m1, m0, coefname = "GENOTYPE") {
  est <- unname(coef(m1)[coefname])
  stderr <- unname(sqrt(vcov(m1)[coefname, coefname]))
  # observations actually used in the fit (no direct accessor on lm objects)
  nobs_used <- length(na.omit(residuals(m1)))
  # two-sided Wald-type p-value from the t distribution
  p2 <- 2 * pt(-abs(est) / stderr, df = m1$df.residual)
  c(n = nobs_used, beta = est, se = stderr, lrt = NA, pval = p2)
}
#' blockstats method for glm fits.
#'
#' Reports the Wald estimate/SE of the `coefname` coefficient plus a
#' likelihood-ratio statistic computed as the deviance drop from the null
#' model `m0` to the full model `m1`, floored at zero against numerical
#' noise.  The p-value slot is NA; the caller derives it from the LRT.
blockstats.glm <- function(m1, m0, coefname = "GENOTYPE") {
  est <- unname(coef(m1)[coefname])
  stderr <- unname(sqrt(vcov(m1)[coefname, coefname]))
  # observations actually used in the fit (no direct accessor on glm objects)
  nobs_used <- length(na.omit(residuals(m1)))
  dev_drop <- m0$deviance - m1$deviance
  if (dev_drop < 0) dev_drop <- 0
  c(n = nobs_used, beta = est, se = stderr, lrt = dev_drop, pval = NA)
}
#' blockstats method for coxph fits (survival package).
#'
#' Wald estimate/SE of the `coefname` coefficient plus a likelihood-ratio
#' statistic from the stored partial log-likelihoods of `m1` versus the
#' null model `m0` (element 2 of `$loglik` holds the value at the fitted
#' coefficients, per the survival package).  The p-value slot is NA; the
#' caller derives it from the LRT.  `$n` is the subject count used.
blockstats.coxph <- function(m1, m0, coefname = "GENOTYPE") {
  est <- unname(coef(m1)[coefname])
  stderr <- unname(sqrt(vcov(m1)[coefname, coefname]))
  # 2 * log-likelihood ratio, floored at zero against numerical noise
  lr_stat <- 2 * (m1$loglik[2] - m0$loglik[2])
  if (lr_stat < 0) lr_stat <- 0
  c(n = m1$n, beta = est, se = stderr, lrt = lr_stat, pval = NA)
}
#' blockstats method for clm fits (ordinal package).
#'
#' Wald estimate/SE of the `coefname` coefficient plus a likelihood-ratio
#' statistic from the stored log-likelihoods of `m1` versus the null model
#' `m0`, floored at zero against numerical noise.  The p-value slot is NA;
#' the caller derives it from the LRT.  `$n` is the observation count.
blockstats.clm <- function(m1, m0, coefname = "GENOTYPE") {
  est <- unname(coef(m1)[coefname])
  stderr <- unname(sqrt(vcov(m1)[coefname, coefname]))
  lr_stat <- 2 * (m1$logLik - m0$logLik)
  if (lr_stat < 0) lr_stat <- 0
  c(n = m1$n, beta = est, se = stderr, lrt = lr_stat, pval = NA)
}
## Run single-variant association tests for a block of minimac-imputed
## genotypes.
##
## qcall         unevaluated model call (lm/glm/coxph/clm) evaluated once in
##               `data` as the null model m0, then refitted per variant with
##               the dosage added as a GENOTYPE term.
## data          phenotype/covariate data frame, one row per subject; the
##               column named by `usubjid` must hold unique subject ids.
## minimac       path prefix of minimac output; <minimac>.info.gz and
##               <minimac>.dose.gz are read (the only supported format here).
## threshold.MAF, threshold.Rsq
##               filters on the *recalculated* allele frequency and Rsq;
##               variants listed in `threshold.pass` are analysed regardless.
## message.begin prefix for progress messages.
## out.signif    significant digits used for the reported statistics.
## use.compiled  placeholder; currently stops with an error.
##
## Returns a data frame with one row per analysed variant: the minimac info
## columns, the recalculated analysed.Freq1/analysed.Rsq, and the
## association statistics produced by the blockstats() methods.
blockassoc <- function(qcall, data, minimac,
                       usubjid = getOption("gtx.usubjid", "USUBJID"),
                       threshold.MAF = 0, threshold.Rsq = 0,
                       threshold.pass = NULL,
                       message.begin = "blockassoc",
                       out.signif = 6, use.compiled = FALSE) {
  stopifnot(is.expression(qcall) || is.call(qcall))
  if (use.compiled) {
    stop("Compiled functions not yet implemented")
    ## everything below in this branch is unreachable until the stop() above
    ## is removed; NB the all.equal() inside if() would then need isTRUE()
    ## test for special cases that can be handled by compiled C++ code
    ## PUT ALL THIS INSIDE A tryCatch()
    if (all.equal(qcall[1], quote(lm()))) { # qcall is a call to lm()
      mc <- match.call(definition = lm, call = qcall, expand.dots = TRUE)
      ## only proceed if all arguments are ones we know how to handle:
      if (all(names(mc)[-1] %in% c("formula", "subset"))) {
        mc[1] <- quote(model.frame())
        mf <- eval(mc, envir = data) #rownames contain indices after subset and NA omission
        ## call my C++ f(data[as.integer(rownames(mf)), usubjid], mf)
      }
      ## fall through
    }
    ## fall through
    ## otherwise fall through to interpreted code
  }
  ## fit the null model once; the per-variant models update() it below
  m0 <- eval(qcall, envir = data)
  if (!missing(minimac)) {
    ## should loop over multiple files if supplied
    info <- read.table(gzfile(paste(minimac, "info.gz", sep = ".")),
                       comment.char = "", quote = "", header = TRUE, as.is = TRUE)
    dose <- read.table(gzfile(paste(minimac, "dose.gz", sep = ".")),
                       comment.char = "", quote = "", header = FALSE,
                       colClasses = c(rep("character", 2), rep("numeric", nrow(info))))
    stopifnot(all(dose[ , 2] == "DOSE"))
  } else {
    stop("Missing minimac argument; other genotype formats not supported")
  }
  ## subject ids must be unique so dosages can be matched one-to-one;
  ## dose rows appear keyed by "id->id" pairs, rebuilt here from usubjid
  stopifnot(length(unique(data[[usubjid]])) == nrow(data))
  data.machid <- paste(data[[usubjid]], data[[usubjid]], sep = "->")
  data.include <- apply(!is.na(data), 1, all) & data.machid %in% dose[ , 1]
  cat(message.begin, ": Analysing ", sum(data.include), "/", nrow(data),
      " subjects with non-missing phenotypes, covariates, and genotypes\n", sep = "")
  data <- data[data.include, , drop = FALSE]
  data.machid <- paste(data[[usubjid]], data[[usubjid]], sep = "->") # update
  cat(message.begin, ": Using ", length(data.machid), "/", nrow(dose),
      " genotyped subjects\n", sep = "")
  ## reorder dose rows to match data rows; columns 1:2 are id and "DOSE"
  dose <- dose[match(data.machid, dose[ , 1]), 3:ncol(dose)]
  stopifnot(nrow(dose) == nrow(data))
  stopifnot(ncol(dose) == nrow(info))
  stopifnot(!any(is.na(dose)))
  ## this will not match the Rsq calculated by minimac, because minimac calculates the *ALLELIC* r2hat
  xbar <- colMeans(dose)
  x2bar <- colMeans(dose^2)
  info2 <- data.frame(analysed.Freq1 = signif(0.5*xbar, out.signif),
                      analysed.Rsq = signif((x2bar - xbar^2)/(xbar*(1-0.5*xbar)), out.signif),
                      SNP = info$SNP,
                      stringsAsFactors = FALSE)
  ## which to analyse?
  ## should there be an option to choose whether filtering on calculated or minimac-reported statistics?
  #cat(basename(minimac), ": Analysing n candidate variants\n", sep = "")
  ww <- which((pmin(info2$analysed.Freq1, 1 - info2$analysed.Freq1) >= threshold.MAF &
                 info2$analysed.Rsq >= threshold.Rsq) |
                info2$SNP %in% threshold.pass)
  cat(message.begin, ": Analysing ", length(ww), "/", nrow(info2), " variants\n", sep = "")
  ## refit per variant with the dosage as GENOTYPE; any failure yields an
  ## all-NA row for that variant instead of aborting the whole block
  assoc <- as.data.frame(t(vapply(ww, function(idx) {
    return(tryCatch({
      data$GENOTYPE <- dose[ , idx]
      m1 <- suppressWarnings(update(m0, formula = . ~ . + GENOTYPE, data = data))
      blockstats(m1, m0, coefname = "GENOTYPE")},
      error = function(e) return(c(NA, NA, NA, NA, NA))))},
    c(analysed.N = 0L, beta = 0., SE = 0., LRT = 0., pvalue = 0.))))
  ## methods that report an LRT leave pvalue NA; derive it from chi-squared, df = 1
  if (all(is.na(assoc$pvalue))) {
    assoc$pvalue <- signif(pchisq(assoc$LRT, df = 1, lower.tail = FALSE), out.signif)
  }
  ## add a Wald p-value (normal approximation) and round everything for output
  assoc <- within(assoc, {
    pwald <- signif(pnorm(-abs(beta)/SE)*2, out.signif)
    beta <- signif(beta, out.signif)
    SE <- signif(SE, out.signif)
    analysed.N <- as.integer(analysed.N)
    LRT <- signif(LRT, out.signif)
    pvalue <- signif(pvalue, out.signif)
  })
  return(cbind(info[ww, ], info2[ww, ], assoc))
}
|
#' Make a magrader submission object
#'
#' A magrader submission object contains six parts:
#' 1. `answer` - The student's original answer (stored as a string)
#' 2. `focus` - An optional portion of the student's code parsed as an expression tree
#' 3. `status` - The current status (pass, fail, warn)
#' 4. `warnings` - A set of warnings
#' 5. `praise` - A set of praise messages
#' 6. `failure` - A set of failure messages
#'
#' @param answer - The complete, original student submission as a character string
#' @param focus - A portion of the student answer parsed as an expression tree
#' @param status - The current status of the submission: "pass", "fail", or "warn"
#' @param warnings - Warning messages as a vector of character strings, to be displayed if final status is warn
#' @param praise - Praise messages as a vector of character strings, to be displayed if final status is warn or pass
#' @param failure - Failure messages as a vector of character strings, to be displayed if status switches to fail
#'
#' @return A magrader submission class object, which inherits the list class
#' @export
make_submission <- function(answer,
                            focus = NULL,
                            status = "pass",
                            warnings = NULL,
                            praise = NULL,
                            failure = NULL) {
  # Also reject a vector-valued status: the original unbraced test either
  # errored obscurely (R >= 4.2) or silently used only the first element.
  if (length(status) != 1 || !status %in% c("pass", "fail", "warn")) {
    stop("Submission status must be one of 'pass', 'fail' or 'warn'")
  }
  structure(list(answer = answer,
                 focus = focus,
                 status = status,
                 warnings = warnings,
                 praise = praise,
                 failure = failure),
            class = c("submission", "list"))
}
| /R/make_submission.R | no_license | garrettgman/magrader | R | false | false | 1,728 | r | #' Make a magrader submission object
#'
#' A magrader submission object contains six parts:
#' 1. `answer` - The student's original answer (stored as a string)
#' 2. `focus` - An optional portion of the student's code parsed as an expression tree
#' 3. `status` - The current status (pass, fail, warn)
#' 4. `warnings` - A set of warnings
#' 5. `praise` - A set of praise messages
#' 6. `failure` - A set of failure messages
#'
#' @param answer - The complete, original student submission as a character string
#' @param focus - A portion of the student answer parsed as an expression tree
#' @param status - The current status of the submission: "pass", "fail", or "warn"
#' @param warnings - Warning messages as a vector of character strings, to be displayed if final status is warn
#' @param praise - Praise messages as a vector of character strings, to be displayed if final status is warn or pass
#' @param failure - Failure messages as a vector of character strings, to be displayed if status switches to fail
#'
#' @return A magrader submission class object, which inherits the list class
#' @export
make_submission <- function(answer,
                            focus = NULL,
                            status = "pass",
                            warnings = NULL,
                            praise = NULL,
                            failure = NULL) {
  # Also reject a vector-valued status: the original unbraced test either
  # errored obscurely (R >= 4.2) or silently used only the first element.
  if (length(status) != 1 || !status %in% c("pass", "fail", "warn")) {
    stop("Submission status must be one of 'pass', 'fail' or 'warn'")
  }
  structure(list(answer = answer,
                 focus = focus,
                 status = status,
                 warnings = warnings,
                 praise = praise,
                 failure = failure),
            class = c("submission", "list"))
}
|
# Let d(n) be defined as the sum of proper divisors of n
# (numbers less than n which divide evenly into n).
# If d(a) <- b and d(b) <- a, where a ≠ b, then a and b are an
# amicable pair and each of a and b are called amicable numbers.
# For example, the proper divisors of 220 are
# 1, 2, 4, 5, 10, 11, 20, 22, 44, 55 and 110; therefore d(220) <- 284.
# The proper divisors of 284 are 1, 2, 4, 71 and 142; so d(284) <- 220.
# Evaluate the sum of all the amicable numbers under 10000.
# Sum of the proper divisors of n (divisors strictly less than n).
#
# Scans candidate divisors x up to floor(sqrt(n)) and adds the paired
# co-divisor n/x when it differs from x.  The original scanned up to
# floor(sqrt(n)) + 1, which double-counted a divisor pair whenever
# floor(sqrt(n)) + 1 happened to divide n (e.g. sum_div(6) returned 9
# instead of 6); it also reported d(1) = 1 instead of 0.
sum_div <- function(n) {
  if (n < 2) {
    return(0)  # 1 (and 0) have no proper divisors
  }
  total <- 1  # 1 divides every n > 1
  limit <- floor(sqrt(n))
  for (x in seq_len(limit)[-1]) {  # 2..limit; empty when limit < 2
    if (n %% x == 0) {
      total <- total + x
      y <- n %/% x
      if (y > x) {  # do not re-add the square root as its own pair
        total <- total + y
      }
    }
  }
  return(total)
}
# Return `a` when (a, d(a)) form an amicable pair -- that is, d(a) != a
# and d(d(a)) == a -- and NULL (the implicit value of the untaken branch)
# otherwise.
amicable_numbers <- function(a){
  partner <- sum_div(a)
  looks_amicable <- partner != a && sum_div(partner) == a
  if (looks_amicable) {
    return(a)
  }
}
## Sum all amicable numbers below 10000 (Project Euler 21).
## The loop result is named `amic` rather than `c` (which shadowed the
## base concatenation function), and `is.null(.) != TRUE` is written as
## the plain negation.
holder <- numeric(0)
for (i in seq_len(10000)) {
  amic <- amicable_numbers(i)
  if (!is.null(amic)) {
    holder <- c(holder, amic)
  }
}
print(paste("Problem 21 <- ", sum(holder))) # 31626
| /021_amicable_numbers.R | no_license | broepke/ProjectEuler | R | false | false | 1,024 | r | # Let d(n) be defined as the sum of proper divisors of n
# (numbers less than n which divide evenly into n).
# If d(a) <- b and d(b) <- a, where a ≠ b, then a and b are an
# amicable pair and each of a and b are called amicable numbers.
# For example, the proper divisors of 220 are
# 1, 2, 4, 5, 10, 11, 20, 22, 44, 55 and 110; therefore d(220) <- 284.
# The proper divisors of 284 are 1, 2, 4, 71 and 142; so d(284) <- 220.
# Evaluate the sum of all the amicable numbers under 10000.
# Sum of the proper divisors of n (divisors strictly less than n).
#
# Scans candidate divisors x up to floor(sqrt(n)) and adds the paired
# co-divisor n/x when it differs from x.  The original scanned up to
# floor(sqrt(n)) + 1, which double-counted a divisor pair whenever
# floor(sqrt(n)) + 1 happened to divide n (e.g. sum_div(6) returned 9
# instead of 6); it also reported d(1) = 1 instead of 0.
sum_div <- function(n) {
  if (n < 2) {
    return(0)  # 1 (and 0) have no proper divisors
  }
  total <- 1  # 1 divides every n > 1
  limit <- floor(sqrt(n))
  for (x in seq_len(limit)[-1]) {  # 2..limit; empty when limit < 2
    if (n %% x == 0) {
      total <- total + x
      y <- n %/% x
      if (y > x) {  # do not re-add the square root as its own pair
        total <- total + y
      }
    }
  }
  return(total)
}
# Return `a` when (a, d(a)) form an amicable pair -- that is, d(a) != a
# and d(d(a)) == a -- and NULL (the implicit value of the untaken branch)
# otherwise.
amicable_numbers <- function(a){
  partner <- sum_div(a)
  looks_amicable <- partner != a && sum_div(partner) == a
  if (looks_amicable) {
    return(a)
  }
}
## Sum all amicable numbers below 10000 (Project Euler 21).
## The loop result is named `amic` rather than `c` (which shadowed the
## base concatenation function), and `is.null(.) != TRUE` is written as
## the plain negation.
holder <- numeric(0)
for (i in seq_len(10000)) {
  amic <- amicable_numbers(i)
  if (!is.null(amic)) {
    holder <- c(holder, amic)
  }
}
print(paste("Problem 21 <- ", sum(holder))) # 31626
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## The function makes a special matrix on which we can perform 4 different functions as shown Below
## Build a cache-aware matrix wrapper: a list of four closures sharing the
## matrix `x` and its cached inverse.  `set` replaces the matrix and drops
## any stale cached inverse; `setinverse`/`getinverse` store and retrieve
## the cache (NULL until a value has been stored).
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL  # a new matrix invalidates the old inverse
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinverse <- function() {
    cached_inverse
  }
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## Write a short comment describing this function
## The function below gets the inverse for the special matrix created above
## Return the inverse of the special "cache matrix" object created by
## makeCacheMatrix.  If an inverse has already been cached it is returned
## without recomputation (with a message); otherwise the inverse is
## computed, stored in the cache, and returned.
## Extra arguments in `...` are forwarded to solve(); the original
## accepted `...` but silently dropped them.
cacheSolve <- function(x, ...) {
  inv <- x$getinverse()
  if (!is.null(inv)) {
    message("getting cached data")
    return(inv)
  }
  inv <- solve(x$get(), ...)
  x$setinverse(inv)
  inv
}
| /cachematrix.R | no_license | vivek3101/ProgrammingAssignment2 | R | false | false | 873 | r | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## The function makes a special matrix on which we can perform 4 different functions as shown Below
## Build a cache-aware matrix wrapper: a list of four closures sharing the
## matrix `x` and its cached inverse.  `set` replaces the matrix and drops
## any stale cached inverse; `setinverse`/`getinverse` store and retrieve
## the cache (NULL until a value has been stored).
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL  # a new matrix invalidates the old inverse
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinverse <- function() {
    cached_inverse
  }
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## Write a short comment describing this function
## The function below gets the inverse for the special matrix created above
## Return the inverse of the special "cache matrix" object created by
## makeCacheMatrix.  If an inverse has already been cached it is returned
## without recomputation (with a message); otherwise the inverse is
## computed, stored in the cache, and returned.
## Extra arguments in `...` are forwarded to solve(); the original
## accepted `...` but silently dropped them.
cacheSolve <- function(x, ...) {
  inv <- x$getinverse()
  if (!is.null(inv)) {
    message("getting cached data")
    return(inv)
  }
  inv <- solve(x$get(), ...)
  x$setinverse(inv)
  inv
}
|
#'
#'
#'Samineh Bagheri
#'11.03.2016
#'Cologne University of Applied Sciences
#'
#'
#call all the necessary functions and libraries
#If you receive an error when executing the following command,
#please consider installing the missing packages
## Driver script: run the SORC optimizer on the "sphere4angle" test problem
## for a sequence of angles pi/k, k in KS; the collected runs of each
## configuration are saved to Results/<problem>-<testName>.RData.
source("sourceSOCR.R")
## number of repeated runs (seeds) per configuration
nrun=1
KS<-c(40,30,20,10,5,4,3,2,1)
for(k in KS){
  problem<-"sphere4angle"
  angle<-pi/k
  d<-dimension<-2
  ## testName is comp1..compN following the position of k in KS
  testName<-paste("comp",which(k==KS),sep="")
  ## the problem file is expected to define fn, lower, upper, m, solu
  ## used in the initializeSORC() call below -- TODO confirm
  source(paste("testProblem/",problem,".R",sep=""))
  #initialize the SORC
  saveName<-paste(problem,"-",testName,sep="")
  startSeed<-3103543
  bigSORC<-list()
  for(mySeed in c(startSeed:(startSeed+nrun-1))){
    cat(testName,"configuration is running for the seed",mySeed,"\n")
    mySORC<-initializeSORC(fn=fn,dLower=-1,dUpper=1,name=saveName,
                           lower=lower,upper=upper,dimension=dimension
                           ,tol=1e-7
                           ,nConstraints=m,kType="UK"
                           ,initSize=5*dimension,initMethod="LHS"
                           ,vis=F
                           ,visZoom=F
                           ,solu=solu
                           ,budget=50
                           ,seed=mySeed
                           ,fCalc="beta" #alpha, beta, mix only relevant for mix
                           ,wtType="random"
                           ,noiseVar = 0.001
                           ,handleCrash=T
                           ,PLUGINTYPE="adaptive" #"fixed", "adaptive", original
                           ,maxTime=10
                           ,repairInfeas = T
                           ,defGamma=list(gammaScheme="adaptive",gammaThreshold=2)
                           ,constantSigma=NULL
                           ,ri=list(eps3=0.0,q=1,mmax=1000,eps2=0.0,eps1=1e-3) )
    mySORC<-mainSORC(mySORC)
    ## collect this seed's run, indexed 1..nrun
    bigSORC[[mySeed-startSeed+1]]<-mySORC
  }
  save(bigSORC,file=paste("Results/",saveName,".RData",sep=""))
}
| /callSOCRAng.R | no_license | saminehbagheri/SOCU | R | false | false | 2,006 | r | #'
#'
#'Samineh Bagheri
#'11.03.2016
#'Cologne University of Applied Sciences
#'
#'
#call all the necessary functions and libraries
#If you receive an error when executing the following command,
#please consider installing the missing packages
## Driver script: run the SORC optimizer on the "sphere4angle" test problem
## for a sequence of angles pi/k, k in KS; the collected runs of each
## configuration are saved to Results/<problem>-<testName>.RData.
source("sourceSOCR.R")
## number of repeated runs (seeds) per configuration
nrun=1
KS<-c(40,30,20,10,5,4,3,2,1)
for(k in KS){
  problem<-"sphere4angle"
  angle<-pi/k
  d<-dimension<-2
  ## testName is comp1..compN following the position of k in KS
  testName<-paste("comp",which(k==KS),sep="")
  ## the problem file is expected to define fn, lower, upper, m, solu
  ## used in the initializeSORC() call below -- TODO confirm
  source(paste("testProblem/",problem,".R",sep=""))
  #initialize the SORC
  saveName<-paste(problem,"-",testName,sep="")
  startSeed<-3103543
  bigSORC<-list()
  for(mySeed in c(startSeed:(startSeed+nrun-1))){
    cat(testName,"configuration is running for the seed",mySeed,"\n")
    mySORC<-initializeSORC(fn=fn,dLower=-1,dUpper=1,name=saveName,
                           lower=lower,upper=upper,dimension=dimension
                           ,tol=1e-7
                           ,nConstraints=m,kType="UK"
                           ,initSize=5*dimension,initMethod="LHS"
                           ,vis=F
                           ,visZoom=F
                           ,solu=solu
                           ,budget=50
                           ,seed=mySeed
                           ,fCalc="beta" #alpha, beta, mix only relevant for mix
                           ,wtType="random"
                           ,noiseVar = 0.001
                           ,handleCrash=T
                           ,PLUGINTYPE="adaptive" #"fixed", "adaptive", original
                           ,maxTime=10
                           ,repairInfeas = T
                           ,defGamma=list(gammaScheme="adaptive",gammaThreshold=2)
                           ,constantSigma=NULL
                           ,ri=list(eps3=0.0,q=1,mmax=1000,eps2=0.0,eps1=1e-3) )
    mySORC<-mainSORC(mySORC)
    ## collect this seed's run, indexed 1..nrun
    bigSORC[[mySeed-startSeed+1]]<-mySORC
  }
  save(bigSORC,file=paste("Results/",saveName,".RData",sep=""))
}
|
#
## Reactive: the table chosen in the "datasetParam" selector -- either the
## uploaded species table or the environmental table.
datasetInputPar <- reactive({
  switch(input$datasetParam,
         "Species" = df_products_upload(),
         "Environmental" = df_env())
})
## Observer: when the dataset choice changes, repopulate the
## "SetsVarsTests" selector with that table's variable names (row names
## for the species table, column names for the environmental table) and
## pre-select all of them.
observe({
  if(input$datasetParam == "Species"){
    var_suggest <- rownames(df_products_upload())
    updateSelectInput(session, "SetsVarsTests",
                      choices = rownames(df_products_upload()),
                      selected = var_suggest)
  }
  if(input$datasetParam == "Environmental"){
    var_suggest <- colnames(df_env())
    updateSelectInput(session, "SetsVarsTests",
                      choices = colnames(df_env()),
                      selected = var_suggest)
  }
})
## Reactive: the data actually handed to the tests -- the selected dataset
## restricted to the variables picked in "SetsVarsTests".  Species tables
## are transposed first so that variables become columns.
## NOTE(review): the transpose is gated on input$datasetBx while the
## dataset itself is chosen via input$datasetParam -- confirm these two
## inputs are really meant to differ.
DatbleToTest <- reactive({
  if(input$datasetBx == "Species"){
    dtr <- t(datasetInputPar())
    dtr <- as.data.frame(dtr)
  } else{dtr <- datasetInputPar()}
  newdf <- dtr %>% dplyr::select(input$SetsVarsTests) %>% as.data.frame()
  return(newdf)
})
## Format a parametric-test result as a banner-framed text report.
##
## test: the test's descriptive name (the htest $method field upstream).
## pval: its p-value, compared against a fixed alpha of 0.05.
## Returns the report as a character vector of lines (capture.output of
## the cat() calls), ready for renderPrint().
testResParametric <- function(test, pval){
  messPrin <- capture.output({
    cat('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n')
    cat('          Analysis of Parametric Tests           \n')
    cat('                                                  \n')
    cat('   ', test, '                                \n\n')
    cat('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n\n')
    cat('p-Value: ', pval,' \n\n')
    ## NOTE(review): pval exactly equal to 0.05 falls into the "reject" branch
    if(pval > 0.05){
      cat('As the computed p-value is greater than the significance level alpha=0.05,
          one cannot reject the null hypothesis H0, and accept the null hypothesis.','\n\n')}else {
            cat('As the computed p-value is lower than the significance level alpha=0.05,
          one should reject the null hypothesis H0, and accept the alternative hypothesis Ha. ','\n\n')}
    cat('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n\n')
  })
  return(messPrin)
}
## eventReactive: on a "runParMeans" click, run the selected mean test on
## the chosen variables -- a one-sample t-test against input$muTestOne, or
## a two-sample t-test on the first two selected columns -- and format the
## result with testResParametric().
## NOTE(review): the local `t` shadows base::t(), and if
## input$test_MeanParam matches neither option `t` is undefined and the
## reactive errors.
Exp_NormTesMeans <- eventReactive(input$runParMeans, {
  ev <- DatbleToTest()
  if(input$test_MeanParam=='ttestOne') {
    t <- t.test(ev, mu = input$muTestOne, alternative = "two.sided")
  }
  if(input$test_MeanParam=='ttestTwo') {
    t <- t.test(as.numeric(ev[ ,1]), as.numeric(ev[ ,2]))
  }
  mt <- t$method
  pv <- t$p.value
  return(testResParametric(test=mt, pval=pv))
})
output$renderTestParametric <- renderPrint({Exp_NormTesMeans()})
## eventReactive: on a "runParVariance" click, run the selected variance
## test -- one-sample test via DescTools::VarTest against
## input$muSigTestOneVar, a two-sample VarTest on the first two selected
## columns, or Bartlett's k-sample test on the long-format (gathered)
## data -- and format the result with testResParametric().
Exp_TestVariance <- eventReactive(input$runParVariance, {
  ev <- DatbleToTest()
  if(input$test_VarParam=='oneSampleVar') {
    t <- DescTools::VarTest(ev, sigma.squared = input$muSigTestOneVar)
  }
  if(input$test_VarParam=='twoSampleVar') {
    t <- DescTools::VarTest(as.numeric(ev[,1]), as.numeric(ev[,2]))
  }
  if(input$test_VarParam=='kSampleVar') {
    colNE <- colnames(ev)
    newDF <- tidyr::gather(ev, key='key', value='value', colNE)
    t <- bartlett.test(as.numeric(newDF[,2]), as.factor(newDF[,1]))
  }
  mt <- t$method
  pv <- t$p.value
  return(testResParametric(test=mt, pval=pv))
})
## NOTE(review): the next line re-binds output$renderTestParametric with
## the same expression already bound above -- it is redundant.
output$renderTestParametric <- renderPrint({Exp_NormTesMeans()})
output$renderVarianceParametric <- renderPrint({Exp_TestVariance()})
| /inst/shinyApp/asset/serv/fun_parametric.R | no_license | Lucel2448/rstructBio | R | false | false | 3,198 | r |
#
## Reactive: the table chosen in the "datasetParam" selector -- either the
## uploaded species table or the environmental table.
datasetInputPar <- reactive({
  switch(input$datasetParam,
         "Species" = df_products_upload(),
         "Environmental" = df_env())
})
## Observer: when the dataset choice changes, repopulate the
## "SetsVarsTests" selector with that table's variable names (row names
## for the species table, column names for the environmental table) and
## pre-select all of them.
observe({
  if(input$datasetParam == "Species"){
    var_suggest <- rownames(df_products_upload())
    updateSelectInput(session, "SetsVarsTests",
                      choices = rownames(df_products_upload()),
                      selected = var_suggest)
  }
  if(input$datasetParam == "Environmental"){
    var_suggest <- colnames(df_env())
    updateSelectInput(session, "SetsVarsTests",
                      choices = colnames(df_env()),
                      selected = var_suggest)
  }
})
## Reactive: the data actually handed to the tests -- the selected dataset
## restricted to the variables picked in "SetsVarsTests".  Species tables
## are transposed first so that variables become columns.
## NOTE(review): the transpose is gated on input$datasetBx while the
## dataset itself is chosen via input$datasetParam -- confirm these two
## inputs are really meant to differ.
DatbleToTest <- reactive({
  if(input$datasetBx == "Species"){
    dtr <- t(datasetInputPar())
    dtr <- as.data.frame(dtr)
  } else{dtr <- datasetInputPar()}
  newdf <- dtr %>% dplyr::select(input$SetsVarsTests) %>% as.data.frame()
  return(newdf)
})
## Format a parametric-test result as a banner-framed text report.
##
## test: the test's descriptive name (the htest $method field upstream).
## pval: its p-value, compared against a fixed alpha of 0.05.
## Returns the report as a character vector of lines (capture.output of
## the cat() calls), ready for renderPrint().
testResParametric <- function(test, pval){
  messPrin <- capture.output({
    cat('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n')
    cat('          Analysis of Parametric Tests           \n')
    cat('                                                  \n')
    cat('   ', test, '                                \n\n')
    cat('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n\n')
    cat('p-Value: ', pval,' \n\n')
    ## NOTE(review): pval exactly equal to 0.05 falls into the "reject" branch
    if(pval > 0.05){
      cat('As the computed p-value is greater than the significance level alpha=0.05,
          one cannot reject the null hypothesis H0, and accept the null hypothesis.','\n\n')}else {
            cat('As the computed p-value is lower than the significance level alpha=0.05,
          one should reject the null hypothesis H0, and accept the alternative hypothesis Ha. ','\n\n')}
    cat('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n\n')
  })
  return(messPrin)
}
## eventReactive: on a "runParMeans" click, run the selected mean test on
## the chosen variables -- a one-sample t-test against input$muTestOne, or
## a two-sample t-test on the first two selected columns -- and format the
## result with testResParametric().
## NOTE(review): the local `t` shadows base::t(), and if
## input$test_MeanParam matches neither option `t` is undefined and the
## reactive errors.
Exp_NormTesMeans <- eventReactive(input$runParMeans, {
  ev <- DatbleToTest()
  if(input$test_MeanParam=='ttestOne') {
    t <- t.test(ev, mu = input$muTestOne, alternative = "two.sided")
  }
  if(input$test_MeanParam=='ttestTwo') {
    t <- t.test(as.numeric(ev[ ,1]), as.numeric(ev[ ,2]))
  }
  mt <- t$method
  pv <- t$p.value
  return(testResParametric(test=mt, pval=pv))
})
output$renderTestParametric <- renderPrint({Exp_NormTesMeans()})
## eventReactive: on a "runParVariance" click, run the selected variance
## test -- one-sample test via DescTools::VarTest against
## input$muSigTestOneVar, a two-sample VarTest on the first two selected
## columns, or Bartlett's k-sample test on the long-format (gathered)
## data -- and format the result with testResParametric().
Exp_TestVariance <- eventReactive(input$runParVariance, {
  ev <- DatbleToTest()
  if(input$test_VarParam=='oneSampleVar') {
    t <- DescTools::VarTest(ev, sigma.squared = input$muSigTestOneVar)
  }
  if(input$test_VarParam=='twoSampleVar') {
    t <- DescTools::VarTest(as.numeric(ev[,1]), as.numeric(ev[,2]))
  }
  if(input$test_VarParam=='kSampleVar') {
    colNE <- colnames(ev)
    newDF <- tidyr::gather(ev, key='key', value='value', colNE)
    t <- bartlett.test(as.numeric(newDF[,2]), as.factor(newDF[,1]))
  }
  mt <- t$method
  pv <- t$p.value
  return(testResParametric(test=mt, pval=pv))
})
## NOTE(review): the next line re-binds output$renderTestParametric with
## the same expression already bound above -- it is redundant.
output$renderTestParametric <- renderPrint({Exp_NormTesMeans()})
output$renderVarianceParametric <- renderPrint({Exp_TestVariance()})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/yupana_analysis.R
\name{yupana_analysis}
\alias{yupana_analysis}
\title{Fieldbook analysis report}
\usage{
yupana_analysis(
data,
response,
model_factors,
comparison,
test_comp = "SNK",
sig_level = 0.05,
plot_dist = "boxplot",
plot_diag = FALSE,
digits = 2
)
}
\arguments{
\item{data}{Field book data.}
\item{response}{Response variable.}
\item{model_factors}{Model used for the experimental design.}
\item{comparison}{Factors to compare}
\item{test_comp}{Comparison test c("SNK", "TUKEY", "DUNCAN")}

\item{sig_level}{Significance level (default: 0.05)}
\item{plot_dist}{Plot data distribution (default = "boxplot")}
\item{plot_diag}{Diagnostic plots for model (default = FALSE).}
\item{digits}{Digits number in the table exported.}
}
\value{
list
}
\description{
Function to create a complete report of the fieldbook
}
\examples{
\dontrun{
library(inti)
library(gsheet)
url <- paste0("https://docs.google.com/spreadsheets/d/"
, "15r7ZwcZZHbEgltlF6gSFvCTFA-CFzVBWwg3mFlRyKPs/edit#gid=946957922")
# browseURL(url)
fb <- gsheet2tbl(url)
yrs <- yupana_analysis(data = fb
, response = "spad_83"
, model_factors = "geno + treat"
, comparison = c("geno", "treat")
)
yrs$meancomp
}
}
| /man/yupana_analysis.Rd | no_license | PoquisAngie/inti | R | false | true | 1,421 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/yupana_analysis.R
\name{yupana_analysis}
\alias{yupana_analysis}
\title{Fieldbook analysis report}
\usage{
yupana_analysis(
data,
response,
model_factors,
comparison,
test_comp = "SNK",
sig_level = 0.05,
plot_dist = "boxplot",
plot_diag = FALSE,
digits = 2
)
}
\arguments{
\item{data}{Field book data.}
\item{response}{Response variable.}
\item{model_factors}{Model used for the experimental design.}
\item{comparison}{Factors to compare}
\item{test_comp}{Comparison test c("SNK", "TUKEY", "DUNCAN")}

\item{sig_level}{Significance level (default: 0.05)}
\item{plot_dist}{Plot data distribution (default = "boxplot")}
\item{plot_diag}{Diagnostic plots for model (default = FALSE).}
\item{digits}{Digits number in the table exported.}
}
\value{
list
}
\description{
Function to create a complete report of the fieldbook
}
\examples{
\dontrun{
library(inti)
library(gsheet)
url <- paste0("https://docs.google.com/spreadsheets/d/"
, "15r7ZwcZZHbEgltlF6gSFvCTFA-CFzVBWwg3mFlRyKPs/edit#gid=946957922")
# browseURL(url)
fb <- gsheet2tbl(url)
yrs <- yupana_analysis(data = fb
, response = "spad_83"
, model_factors = "geno + treat"
, comparison = c("geno", "treat")
)
yrs$meancomp
}
}
|
## Build and export a SavedModel with multiple named inputs and outputs;
## used as a test fixture (two string placeholders, two string_join outputs).
library(tensorflow)
# define output folder
## NOTE(review): model_dir is assigned but never used below -- the model is
## exported to the literal path "tensorflow-multiple" instead.
model_dir <- tempfile()
# define simple string-based tensor operations
sess <- tf$Session()
input1 <- tf$placeholder(tf$string)
input2 <- tf$placeholder(tf$string)
output1 <- tf$string_join(inputs = c("Input1: ", input1, "!"))
output2 <- tf$string_join(inputs = c("Input2: ", input2, "!"))
## remove any SavedModel left over from a previous run -- presumably the
## export requires a fresh target directory; TODO confirm
unlink("tensorflow-multiple", recursive = TRUE)
export_savedmodel(
  sess,
  "tensorflow-multiple",
  inputs = list(i1 = input1, i2 = input2),
  outputs = list(o1 = output1, o2 = output2))
| /tests/testthat/models/tensorflow-multiples.R | no_license | zamorarr/tfdeploy | R | false | false | 525 | r | library(tensorflow)
# define output folder
## NOTE(review): model_dir is assigned but never used below -- the model is
## exported to the literal path "tensorflow-multiple" instead.
model_dir <- tempfile()
# define simple string-based tensor operations
sess <- tf$Session()
input1 <- tf$placeholder(tf$string)
input2 <- tf$placeholder(tf$string)
output1 <- tf$string_join(inputs = c("Input1: ", input1, "!"))
output2 <- tf$string_join(inputs = c("Input2: ", input2, "!"))
## remove any SavedModel left over from a previous run -- presumably the
## export requires a fresh target directory; TODO confirm
unlink("tensorflow-multiple", recursive = TRUE)
export_savedmodel(
  sess,
  "tensorflow-multiple",
  inputs = list(i1 = input1, i2 = input2),
  outputs = list(o1 = output1, o2 = output2))
|
## Write the JAGS model definition for a random-intercept logistic
## regression to SingleSlope.jags: Bernoulli response with a quadratic
## effect of distance (shared slopes beta/gamma across species) and
## species-level intercepts drawn from a common normal distribution.
## NOTE(review): the hard-coded Windows working directory makes this
## script non-portable.
setwd("C:/Users/Ben/Documents/Pred_Obs/Bayesian")
## redirect cat() output into the model file, then restore with sink()
sink("SingleSlope.jags")
cat("model{
    for(i in 1:length(y)){
    y[i] ~ dbern(p[i])
    logit(p[i]) <- alpha[Species[i]] + beta * dist[i] + gamma * pow(dist[i],2)
    }
    for (j in 1:s){
    alpha[j] ~ dnorm(intercept,tauIntercept)
    }
    beta ~ dnorm(0,0.001)
    gamma ~ dnorm(0,0.001)
    intercept ~ dnorm(0,0.001)
    tauIntercept ~ dgamma(0.001,0.001)
    sigmaIntercept<- pow(1/tauIntercept,.5)
    }",fill = TRUE)
sink()
| /Bayesian/SingleSlope.R | no_license | bw4sz/Pred_Obs | R | false | false | 511 | r | setwd("C:/Users/Ben/Documents/Pred_Obs/Bayesian")
## Write the JAGS model definition for a random-intercept logistic
## regression to SingleSlope.jags: Bernoulli response with a quadratic
## effect of distance (shared slopes beta/gamma across species) and
## species-level intercepts drawn from a common normal distribution.
## cat() output is redirected into the model file, then restored by sink().
sink("SingleSlope.jags")
cat("model{
    for(i in 1:length(y)){
    y[i] ~ dbern(p[i])
    logit(p[i]) <- alpha[Species[i]] + beta * dist[i] + gamma * pow(dist[i],2)
    }
    for (j in 1:s){
    alpha[j] ~ dnorm(intercept,tauIntercept)
    }
    beta ~ dnorm(0,0.001)
    gamma ~ dnorm(0,0.001)
    intercept ~ dnorm(0,0.001)
    tauIntercept ~ dgamma(0.001,0.001)
    sigmaIntercept<- pow(1/tauIntercept,.5)
    }",fill = TRUE)
sink()
|
##############################################################################/
##############################################################################/
#Basic code for plotting map of France
##############################################################################/
##############################################################################/
#loading the packages necessary for the analysis
library(rgdal)
library(rgeos)
library(plotrix)
library(mapplots)
library(RColorBrewer)
##############################################################################/
#loading the data####
##############################################################################/
#load geographical data
## departe: spatial polygons object (sp) with one polygon per French
## department -- loaded from the RData file, not defined here
load("data/departe.RData")
#load the resistance results
botfraise<-read.table("data/botryfraise.txt",header=TRUE,sep="\t",
                      colClasses=c("factor","character","character"))
bottomate<-read.table("data/botrytomate.txt",header=TRUE,sep="\t",
                      colClasses=c("factor","character","character"))
#extract the department coordinates
## one label point (@labpt) per department polygon; the frame is grown row
## by row with rbind, which is fine for ~100 departments
## NOTE(review): assumes at least two departments (2:length counts down
## when length(ind_list) < 2)
ind_list<-departe$INSEE_DEP
coorddep<-data.frame("longitude"=departe@polygons[1][[1]]@labpt[1],
                     "latitude"=departe@polygons[1][[1]]@labpt[2])
for (i in 2:length(ind_list)){
  coorddep<-rbind(coorddep,
                  cbind("longitude"=departe@polygons[i][[1]]@labpt[1],
                       "latitude"=departe@polygons[i][[1]]@labpt[2]))
}
coorddep<-cbind("dep_ID"=ind_list,coorddep)
##############################################################################/
#Fraise Iprodione figure####
##############################################################################/
#producing the map
ImiR1data<-cbind("dep_ID"=row.names(table(botfraise$dptmt,
botfraise$ImiR1,exclude="")),
"Resistant"=table(botfraise$dptmt,
botfraise$ImiR1,exclude="")[,1],
"Sensible"=table(botfraise$dptmt,
botfraise$ImiR1,exclude="")[,2],
"Total"=rowSums(table(botfraise$dptmt,
botfraise$ImiR1,exclude="")))
data2map<-merge(ImiR1data,coorddep,by="dep_ID")
colovec<-c(brewer.pal(9,"Reds")[7],brewer.pal(9,"Blues")[8])
png(file=paste("output/","ImiR1_fraise_spa",".png",sep=""),
width=4,height=4,units="in",res=300)
op<-par(mar=c(0,0,0,0))
plot(departe)
draw.pie(x=data2map$longitude,y=data2map$latitude,
z=cbind((as.numeric(as.character(data2map$Resistant))),
(as.numeric(as.character(data2map$Sensible)))),
col=colovec,lty=0,
radius=(sqrt(as.numeric(as.character(data2map$Total)))*15000),
labels=NA)
text(x=data2map$longitude,y=data2map$latitude,
labels=as.character(data2map$Total),cex=0.7)
par(op)
dev.off()
#producing the barplot
datXyear<-t(prop.table(table(botfraise$year,botfraise$ImiR1,exclude=""),
margin=1)*100)
totalyear<-rowSums(table(botfraise$year,botfraise$ImiR1,exclude=""))
png(file=paste("output/","ImiR1_fraise_temp",".png",sep=""),
width=8,height=7,units="in",res=300)
temp<-barplot(datXyear,col=colovec,las=1,font=2,main="Dicarboximide",
cex.main=2)
text(temp[1],103,paste("n=",totalyear[1],sep=""),font=3,cex=1,xpd=TRUE)
text(temp[2],103,paste("n=",totalyear[2],sep=""),font=3,cex=1,xpd=TRUE)
text(temp[3],103,paste("n=",totalyear[3],sep=""),font=3,cex=1,xpd=TRUE)
text(temp[4],103,paste("n=",totalyear[4],sep=""),font=3,cex=1,xpd=TRUE)
text(temp[5],103,paste("n=",totalyear[5],sep=""),font=3,cex=1,xpd=TRUE)
dev.off()
##############################################################################/
#Fraise QoI figure####
##############################################################################/
#producing the map
QoI.Rdata<-cbind("dep_ID"=row.names(table(botfraise$dptmt,
botfraise$QoI.R,exclude="")),
"Resistant"=table(botfraise$dptmt,
botfraise$QoI.R,exclude="")[,1],
"Sensible"=table(botfraise$dptmt,
botfraise$QoI.R,exclude="")[,2],
"Total"=rowSums(table(botfraise$dptmt,
botfraise$QoI.R,exclude="")))
data2map<-merge(QoI.Rdata,coorddep,by="dep_ID")
colovec<-c(brewer.pal(9,"Reds")[7],brewer.pal(9,"Blues")[8])
png(file=paste("output/","QoI.R_fraise_spa",".png",sep=""),
width=4,height=4,units="in",res=300)
op<-par(mar=c(0,0,0,0))
plot(departe)
draw.pie(x=data2map$longitude,y=data2map$latitude,
z=cbind((as.numeric(as.character(data2map$Resistant))),
(as.numeric(as.character(data2map$Sensible)))),
col=colovec,lty=0,
radius=(sqrt(as.numeric(as.character(data2map$Total)))*15000),
labels=NA)
text(x=data2map$longitude,y=data2map$latitude,
labels=as.character(data2map$Total),cex=0.7)
par(op)
dev.off()
#producing the barplot
datXyear<-t(prop.table(table(botfraise$year,botfraise$QoI.R,exclude=""),
margin=1)*100)
totalyear<-rowSums(table(botfraise$year,botfraise$QoI.R,exclude=""))
png(file=paste("output/","QoI.R_fraise_temp",".png",sep=""),
width=8,height=7,units="in",res=300)
temp<-barplot(datXyear,col=colovec,las=1,font=2,main="QoI",
cex.main=2)
text(temp[1],103,paste("n=",totalyear[1],sep=""),font=3,cex=1,xpd=TRUE)
text(temp[2],103,paste("n=",totalyear[2],sep=""),font=3,cex=1,xpd=TRUE)
text(temp[3],103,paste("n=",totalyear[3],sep=""),font=3,cex=1,xpd=TRUE)
text(temp[4],103,paste("n=",totalyear[4],sep=""),font=3,cex=1,xpd=TRUE)
text(temp[5],103,paste("n=",totalyear[5],sep=""),font=3,cex=1,xpd=TRUE)
dev.off()
##############################################################################/
#Fraise SDHI figure####
##############################################################################/
#producing the map
CarRdata<-cbind("dep_ID"=row.names(table(botfraise$dptmt,
botfraise$CarR,exclude="")),
"Resistant"=table(botfraise$dptmt,
botfraise$CarR,exclude="")[,1],
"Sensible"=table(botfraise$dptmt,
botfraise$CarR,exclude="")[,2],
"Total"=rowSums(table(botfraise$dptmt,
botfraise$CarR,exclude="")))
data2map<-merge(CarRdata,coorddep,by="dep_ID")
colovec<-c(brewer.pal(9,"Reds")[7],brewer.pal(9,"Blues")[8])
png(file=paste("output/","CarR_fraise_spa",".png",sep=""),
width=4,height=4,units="in",res=300)
op<-par(mar=c(0,0,0,0))
plot(departe)
draw.pie(x=data2map$longitude,y=data2map$latitude,
z=cbind((as.numeric(as.character(data2map$Resistant))),
(as.numeric(as.character(data2map$Sensible)))),
col=colovec,lty=0,
radius=(sqrt(as.numeric(as.character(data2map$Total)))*15000),
labels=NA)
text(x=data2map$longitude,y=data2map$latitude,
labels=as.character(data2map$Total),cex=0.7)
par(op)
dev.off()
#producing the barplot
#yearly percentage of each SDHI resistance status (rows sum to 100);
#the cross-table is built once and reused (it was computed twice before)
CarRyearTab<-table(botfraise$year,botfraise$CarR,exclude="")
datXyear<-t(prop.table(CarRyearTab,margin=1)*100)
totalyear<-rowSums(CarRyearTab)
png(filename=paste0("output/","CarR_fraise_temp",".png"),
    width=8,height=7,units="in",res=300)
temp<-barplot(datXyear,col=colovec,las=1,font=2,main="SDHI",
              cex.main=2)
#annotate every bar with its sample size; text() is vectorized, so one call
#replaces the five hard-coded calls and adapts to any number of years
text(temp,103,paste0("n=",totalyear),font=3,cex=1,xpd=TRUE)
dev.off()
##############################################################################/
#Tomate Iprodione figure####
##############################################################################/
#producing the map
#cross-tabulate department x dicarboximide resistance status once and reuse it
#(the original recomputed the same table four times inside cbind)
ImiR1tab<-table(bottomate$dptmt,bottomate$ImiR1,exclude="")
ImiR1data<-cbind("dep_ID"=row.names(ImiR1tab),
                 "Resistant"=ImiR1tab[,1],
                 "Sensible"=ImiR1tab[,2],
                 "Total"=rowSums(ImiR1tab))
#attach the department centroid coordinates
data2map<-merge(ImiR1data,coorddep,by="dep_ID")
colovec<-c(brewer.pal(9,"Reds")[7],brewer.pal(9,"Blues")[8])
png(filename=paste0("output/","ImiR1_tomate_spa",".png"),
    width=4,height=4,units="in",res=300)
op<-par(mar=c(0,0,0,0))
plot(departe)
#one pie per department: resistant vs sensible counts,
#radius scaled by the square root of the sample size
draw.pie(x=data2map$longitude,y=data2map$latitude,
         z=cbind((as.numeric(as.character(data2map$Resistant))),
                 (as.numeric(as.character(data2map$Sensible)))),
         col=colovec,lty=0,
         radius=(sqrt(as.numeric(as.character(data2map$Total)))*15000),
         labels=NA)
#overlay the sample size at each centroid
text(x=data2map$longitude,y=data2map$latitude,
     labels=as.character(data2map$Total),cex=0.7)
par(op)
dev.off()
#producing the barplot
#yearly percentage of each dicarboximide resistance status (rows sum to 100);
#the cross-table is built once and reused (it was computed twice before)
ImiR1yearTab<-table(bottomate$year,bottomate$ImiR1,exclude="")
datXyear<-t(prop.table(ImiR1yearTab,margin=1)*100)
totalyear<-rowSums(ImiR1yearTab)
png(filename=paste0("output/","ImiR1_tomate_temp",".png"),
    width=8,height=7,units="in",res=300)
temp<-barplot(datXyear,col=colovec,las=1,font=2,main="Dicarboximide",
              cex.main=2)
#annotate every bar with its sample size; text() is vectorized, so one call
#replaces the five hard-coded calls and adapts to any number of years
text(temp,103,paste0("n=",totalyear),font=3,cex=1,xpd=TRUE)
dev.off()
##############################################################################/
#Tomate Anilino figure####
##############################################################################/
#producing the map
#cross-tabulate department x anilino-pyrimidine resistance status once and
#reuse it (the original recomputed the same table four times inside cbind);
#object renamed AniR1data to match the column name, as in the other sections
AniR1tab<-table(bottomate$dptmt,bottomate$AniR1,exclude="")
AniR1data<-cbind("dep_ID"=row.names(AniR1tab),
                 "Resistant"=AniR1tab[,1],
                 "Sensible"=AniR1tab[,2],
                 "Total"=rowSums(AniR1tab))
#attach the department centroid coordinates
data2map<-merge(AniR1data,coorddep,by="dep_ID")
colovec<-c(brewer.pal(9,"Reds")[7],brewer.pal(9,"Blues")[8])
png(filename=paste0("output/","AniR1_tomate_spa",".png"),
    width=4,height=4,units="in",res=300)
op<-par(mar=c(0,0,0,0))
plot(departe)
#one pie per department: resistant vs sensible counts,
#radius scaled by the square root of the sample size
draw.pie(x=data2map$longitude,y=data2map$latitude,
         z=cbind((as.numeric(as.character(data2map$Resistant))),
                 (as.numeric(as.character(data2map$Sensible)))),
         col=colovec,lty=0,
         radius=(sqrt(as.numeric(as.character(data2map$Total)))*15000),
         labels=NA)
#overlay the sample size at each centroid
text(x=data2map$longitude,y=data2map$latitude,
     labels=as.character(data2map$Total),cex=0.7)
par(op)
dev.off()
#producing the barplot
#yearly percentage of each anilino-pyrimidine resistance status (rows sum to
#100); the cross-table is built once and reused (it was computed twice before)
AniR1yearTab<-table(bottomate$year,bottomate$AniR1,exclude="")
datXyear<-t(prop.table(AniR1yearTab,margin=1)*100)
totalyear<-rowSums(AniR1yearTab)
png(filename=paste0("output/","AniR1_tomate_temp",".png"),
    width=8,height=7,units="in",res=300)
temp<-barplot(datXyear,col=colovec,las=1,font=2,main="Anilino-pyrimidine",
              cex.main=2)
#annotate every bar with its sample size; text() is vectorized, so one call
#replaces the five hard-coded calls and adapts to any number of years
text(temp,103,paste0("n=",totalyear),font=3,cex=1,xpd=TRUE)
dev.off()
##############################################################################/
#Tomate hydroxyanilide figure####
##############################################################################/
#producing the map
#cross-tabulate department x hydroxyanilide resistance status once and reuse
#it (the original recomputed the same table four times inside cbind)
HydR3tab<-table(bottomate$dptmt,bottomate$HydR3,exclude="")
HydR3data<-cbind("dep_ID"=row.names(HydR3tab),
                 "Resistant"=HydR3tab[,1],
                 "Sensible"=HydR3tab[,2],
                 "Total"=rowSums(HydR3tab))
#attach the department centroid coordinates
data2map<-merge(HydR3data,coorddep,by="dep_ID")
colovec<-c(brewer.pal(9,"Reds")[7],brewer.pal(9,"Blues")[8])
png(filename=paste0("output/","HydR3_tomate_spa",".png"),
    width=4,height=4,units="in",res=300)
op<-par(mar=c(0,0,0,0))
plot(departe)
#one pie per department: resistant vs sensible counts,
#radius scaled by the square root of the sample size
draw.pie(x=data2map$longitude,y=data2map$latitude,
         z=cbind((as.numeric(as.character(data2map$Resistant))),
                 (as.numeric(as.character(data2map$Sensible)))),
         col=colovec,lty=0,
         radius=(sqrt(as.numeric(as.character(data2map$Total)))*15000),
         labels=NA)
#overlay the sample size at each centroid
text(x=data2map$longitude,y=data2map$latitude,
     labels=as.character(data2map$Total),cex=0.7)
par(op)
dev.off()
#producing the barplot
#yearly percentage of each hydroxyanilide resistance status (rows sum to 100);
#the cross-table is built once and reused (it was computed twice before)
HydR3yearTab<-table(bottomate$year,bottomate$HydR3,exclude="")
datXyear<-t(prop.table(HydR3yearTab,margin=1)*100)
totalyear<-rowSums(HydR3yearTab)
png(filename=paste0("output/","HydR3_tomate_temp",".png"),
    width=8,height=7,units="in",res=300)
temp<-barplot(datXyear,col=colovec,las=1,font=2,main="Hydroxyanilide",
              cex.main=2)
#annotate every bar with its sample size; text() is vectorized, so one call
#replaces the five hard-coded calls and adapts to any number of years
text(temp,103,paste0("n=",totalyear),font=3,cex=1,xpd=TRUE)
dev.off()
##############################################################################/
#Tomate SDHI figure####
##############################################################################/
#producing the map
#cross-tabulate department x SDHI resistance status once and reuse it
#(the original recomputed the same table four times inside cbind)
CarRtab<-table(bottomate$dptmt,bottomate$CarR,exclude="")
CarRdata<-cbind("dep_ID"=row.names(CarRtab),
                "Resistant"=CarRtab[,1],
                "Sensible"=CarRtab[,2],
                "Total"=rowSums(CarRtab))
#attach the department centroid coordinates
data2map<-merge(CarRdata,coorddep,by="dep_ID")
colovec<-c(brewer.pal(9,"Reds")[7],brewer.pal(9,"Blues")[8])
png(filename=paste0("output/","CarR_tomate_spa",".png"),
    width=4,height=4,units="in",res=300)
op<-par(mar=c(0,0,0,0))
plot(departe)
#one pie per department: resistant vs sensible counts,
#radius scaled by the square root of the sample size
draw.pie(x=data2map$longitude,y=data2map$latitude,
         z=cbind((as.numeric(as.character(data2map$Resistant))),
                 (as.numeric(as.character(data2map$Sensible)))),
         col=colovec,lty=0,
         radius=(sqrt(as.numeric(as.character(data2map$Total)))*15000),
         labels=NA)
#overlay the sample size at each centroid
text(x=data2map$longitude,y=data2map$latitude,
     labels=as.character(data2map$Total),cex=0.7)
par(op)
dev.off()
#producing the barplot
#yearly percentage of each SDHI resistance status (rows sum to 100);
#the cross-table is built once and reused (it was computed twice before)
CarRyearTab<-table(bottomate$year,bottomate$CarR,exclude="")
datXyear<-t(prop.table(CarRyearTab,margin=1)*100)
totalyear<-rowSums(CarRyearTab)
png(filename=paste0("output/","CarR_tomate_temp",".png"),
    width=8,height=7,units="in",res=300)
temp<-barplot(datXyear,col=colovec,las=1,font=2,main="SDHI",
              cex.main=2)
#annotate every bar with its sample size; text() is vectorized, so one call
#replaces the five hard-coded calls and adapts to any number of years
text(temp,103,paste0("n=",totalyear),font=3,cex=1,xpd=TRUE)
dev.off()
##############################################################################/
#END
##############################################################################/ | /botrymap.R | no_license | bbarres/VentuPlan | R | false | false | 16,589 | r | ##############################################################################/
##############################################################################/
#Basic code for plotting map of France
##############################################################################/
##############################################################################/
#loading the packages necessary for the analysis
library(rgdal)
library(rgeos)
library(plotrix)
library(mapplots)
library(RColorBrewer)
##############################################################################/
#loading the data####
##############################################################################/
#load geographical data
load("data/departe.RData")
#load the resistance results
botfraise<-read.table("data/botryfraise.txt",header=TRUE,sep="\t",
colClasses=c("factor","character","character"))
bottomate<-read.table("data/botrytomate.txt",header=TRUE,sep="\t",
colClasses=c("factor","character","character"))
#extract the department coordinates
ind_list<-departe$INSEE_DEP
coorddep<-data.frame("longitude"=departe@polygons[1][[1]]@labpt[1],
"latitude"=departe@polygons[1][[1]]@labpt[2])
for (i in 2:length(ind_list)){
coorddep<-rbind(coorddep,
cbind("longitude"=departe@polygons[i][[1]]@labpt[1],
"latitude"=departe@polygons[i][[1]]@labpt[2]))
}
coorddep<-cbind("dep_ID"=ind_list,coorddep)
##############################################################################/
#Fraise Iprodione figure####
##############################################################################/
#producing the map
ImiR1data<-cbind("dep_ID"=row.names(table(botfraise$dptmt,
botfraise$ImiR1,exclude="")),
"Resistant"=table(botfraise$dptmt,
botfraise$ImiR1,exclude="")[,1],
"Sensible"=table(botfraise$dptmt,
botfraise$ImiR1,exclude="")[,2],
"Total"=rowSums(table(botfraise$dptmt,
botfraise$ImiR1,exclude="")))
data2map<-merge(ImiR1data,coorddep,by="dep_ID")
colovec<-c(brewer.pal(9,"Reds")[7],brewer.pal(9,"Blues")[8])
png(file=paste("output/","ImiR1_fraise_spa",".png",sep=""),
width=4,height=4,units="in",res=300)
op<-par(mar=c(0,0,0,0))
plot(departe)
draw.pie(x=data2map$longitude,y=data2map$latitude,
z=cbind((as.numeric(as.character(data2map$Resistant))),
(as.numeric(as.character(data2map$Sensible)))),
col=colovec,lty=0,
radius=(sqrt(as.numeric(as.character(data2map$Total)))*15000),
labels=NA)
text(x=data2map$longitude,y=data2map$latitude,
labels=as.character(data2map$Total),cex=0.7)
par(op)
dev.off()
#producing the barplot
datXyear<-t(prop.table(table(botfraise$year,botfraise$ImiR1,exclude=""),
margin=1)*100)
totalyear<-rowSums(table(botfraise$year,botfraise$ImiR1,exclude=""))
png(file=paste("output/","ImiR1_fraise_temp",".png",sep=""),
width=8,height=7,units="in",res=300)
temp<-barplot(datXyear,col=colovec,las=1,font=2,main="Dicarboximide",
cex.main=2)
text(temp[1],103,paste("n=",totalyear[1],sep=""),font=3,cex=1,xpd=TRUE)
text(temp[2],103,paste("n=",totalyear[2],sep=""),font=3,cex=1,xpd=TRUE)
text(temp[3],103,paste("n=",totalyear[3],sep=""),font=3,cex=1,xpd=TRUE)
text(temp[4],103,paste("n=",totalyear[4],sep=""),font=3,cex=1,xpd=TRUE)
text(temp[5],103,paste("n=",totalyear[5],sep=""),font=3,cex=1,xpd=TRUE)
dev.off()
##############################################################################/
#Fraise QoI figure####
##############################################################################/
#producing the map
QoI.Rdata<-cbind("dep_ID"=row.names(table(botfraise$dptmt,
botfraise$QoI.R,exclude="")),
"Resistant"=table(botfraise$dptmt,
botfraise$QoI.R,exclude="")[,1],
"Sensible"=table(botfraise$dptmt,
botfraise$QoI.R,exclude="")[,2],
"Total"=rowSums(table(botfraise$dptmt,
botfraise$QoI.R,exclude="")))
data2map<-merge(QoI.Rdata,coorddep,by="dep_ID")
colovec<-c(brewer.pal(9,"Reds")[7],brewer.pal(9,"Blues")[8])
png(file=paste("output/","QoI.R_fraise_spa",".png",sep=""),
width=4,height=4,units="in",res=300)
op<-par(mar=c(0,0,0,0))
plot(departe)
draw.pie(x=data2map$longitude,y=data2map$latitude,
z=cbind((as.numeric(as.character(data2map$Resistant))),
(as.numeric(as.character(data2map$Sensible)))),
col=colovec,lty=0,
radius=(sqrt(as.numeric(as.character(data2map$Total)))*15000),
labels=NA)
text(x=data2map$longitude,y=data2map$latitude,
labels=as.character(data2map$Total),cex=0.7)
par(op)
dev.off()
#producing the barplot
datXyear<-t(prop.table(table(botfraise$year,botfraise$QoI.R,exclude=""),
margin=1)*100)
totalyear<-rowSums(table(botfraise$year,botfraise$QoI.R,exclude=""))
png(file=paste("output/","QoI.R_fraise_temp",".png",sep=""),
width=8,height=7,units="in",res=300)
temp<-barplot(datXyear,col=colovec,las=1,font=2,main="QoI",
cex.main=2)
text(temp[1],103,paste("n=",totalyear[1],sep=""),font=3,cex=1,xpd=TRUE)
text(temp[2],103,paste("n=",totalyear[2],sep=""),font=3,cex=1,xpd=TRUE)
text(temp[3],103,paste("n=",totalyear[3],sep=""),font=3,cex=1,xpd=TRUE)
text(temp[4],103,paste("n=",totalyear[4],sep=""),font=3,cex=1,xpd=TRUE)
text(temp[5],103,paste("n=",totalyear[5],sep=""),font=3,cex=1,xpd=TRUE)
dev.off()
##############################################################################/
#Fraise SDHI figure####
##############################################################################/
#producing the map
CarRdata<-cbind("dep_ID"=row.names(table(botfraise$dptmt,
botfraise$CarR,exclude="")),
"Resistant"=table(botfraise$dptmt,
botfraise$CarR,exclude="")[,1],
"Sensible"=table(botfraise$dptmt,
botfraise$CarR,exclude="")[,2],
"Total"=rowSums(table(botfraise$dptmt,
botfraise$CarR,exclude="")))
data2map<-merge(CarRdata,coorddep,by="dep_ID")
colovec<-c(brewer.pal(9,"Reds")[7],brewer.pal(9,"Blues")[8])
png(file=paste("output/","CarR_fraise_spa",".png",sep=""),
width=4,height=4,units="in",res=300)
op<-par(mar=c(0,0,0,0))
plot(departe)
draw.pie(x=data2map$longitude,y=data2map$latitude,
z=cbind((as.numeric(as.character(data2map$Resistant))),
(as.numeric(as.character(data2map$Sensible)))),
col=colovec,lty=0,
radius=(sqrt(as.numeric(as.character(data2map$Total)))*15000),
labels=NA)
text(x=data2map$longitude,y=data2map$latitude,
labels=as.character(data2map$Total),cex=0.7)
par(op)
dev.off()
#producing the barplot
datXyear<-t(prop.table(table(botfraise$year,botfraise$CarR,exclude=""),
margin=1)*100)
totalyear<-rowSums(table(botfraise$year,botfraise$CarR,exclude=""))
png(file=paste("output/","CarR_fraise_temp",".png",sep=""),
width=8,height=7,units="in",res=300)
temp<-barplot(datXyear,col=colovec,las=1,font=2,main="SDHI",
cex.main=2)
text(temp[1],103,paste("n=",totalyear[1],sep=""),font=3,cex=1,xpd=TRUE)
text(temp[2],103,paste("n=",totalyear[2],sep=""),font=3,cex=1,xpd=TRUE)
text(temp[3],103,paste("n=",totalyear[3],sep=""),font=3,cex=1,xpd=TRUE)
text(temp[4],103,paste("n=",totalyear[4],sep=""),font=3,cex=1,xpd=TRUE)
text(temp[5],103,paste("n=",totalyear[5],sep=""),font=3,cex=1,xpd=TRUE)
dev.off()
##############################################################################/
#Tomate Iprodione figure####
##############################################################################/
#producing the map
ImiR1data<-cbind("dep_ID"=row.names(table(bottomate$dptmt,
bottomate$ImiR1,exclude="")),
"Resistant"=table(bottomate$dptmt,
bottomate$ImiR1,exclude="")[,1],
"Sensible"=table(bottomate$dptmt,
bottomate$ImiR1,exclude="")[,2],
"Total"=rowSums(table(bottomate$dptmt,
bottomate$ImiR1,exclude="")))
data2map<-merge(ImiR1data,coorddep,by="dep_ID")
colovec<-c(brewer.pal(9,"Reds")[7],brewer.pal(9,"Blues")[8])
png(file=paste("output/","ImiR1_tomate_spa",".png",sep=""),
width=4,height=4,units="in",res=300)
op<-par(mar=c(0,0,0,0))
plot(departe)
draw.pie(x=data2map$longitude,y=data2map$latitude,
z=cbind((as.numeric(as.character(data2map$Resistant))),
(as.numeric(as.character(data2map$Sensible)))),
col=colovec,lty=0,
radius=(sqrt(as.numeric(as.character(data2map$Total)))*15000),
labels=NA)
text(x=data2map$longitude,y=data2map$latitude,
labels=as.character(data2map$Total),cex=0.7)
par(op)
dev.off()
#producing the barplot
datXyear<-t(prop.table(table(bottomate$year,bottomate$ImiR1,exclude=""),
margin=1)*100)
totalyear<-rowSums(table(bottomate$year,bottomate$ImiR1,exclude=""))
png(file=paste("output/","ImiR1_tomate_temp",".png",sep=""),
width=8,height=7,units="in",res=300)
temp<-barplot(datXyear,col=colovec,las=1,font=2,main="Dicarboximide",
cex.main=2)
text(temp[1],103,paste("n=",totalyear[1],sep=""),font=3,cex=1,xpd=TRUE)
text(temp[2],103,paste("n=",totalyear[2],sep=""),font=3,cex=1,xpd=TRUE)
text(temp[3],103,paste("n=",totalyear[3],sep=""),font=3,cex=1,xpd=TRUE)
text(temp[4],103,paste("n=",totalyear[4],sep=""),font=3,cex=1,xpd=TRUE)
text(temp[5],103,paste("n=",totalyear[5],sep=""),font=3,cex=1,xpd=TRUE)
dev.off()
##############################################################################/
#Tomate Anilino figure####
##############################################################################/
#producing the map
AniRdata<-cbind("dep_ID"=row.names(table(bottomate$dptmt,
bottomate$AniR1,exclude="")),
"Resistant"=table(bottomate$dptmt,
bottomate$AniR1,exclude="")[,1],
"Sensible"=table(bottomate$dptmt,
bottomate$AniR1,exclude="")[,2],
"Total"=rowSums(table(bottomate$dptmt,
bottomate$AniR1,exclude="")))
data2map<-merge(AniRdata,coorddep,by="dep_ID")
colovec<-c(brewer.pal(9,"Reds")[7],brewer.pal(9,"Blues")[8])
png(file=paste("output/","AniR1_tomate_spa",".png",sep=""),
width=4,height=4,units="in",res=300)
op<-par(mar=c(0,0,0,0))
plot(departe)
draw.pie(x=data2map$longitude,y=data2map$latitude,
z=cbind((as.numeric(as.character(data2map$Resistant))),
(as.numeric(as.character(data2map$Sensible)))),
col=colovec,lty=0,
radius=(sqrt(as.numeric(as.character(data2map$Total)))*15000),
labels=NA)
text(x=data2map$longitude,y=data2map$latitude,
labels=as.character(data2map$Total),cex=0.7)
par(op)
dev.off()
#producing the barplot
datXyear<-t(prop.table(table(bottomate$year,bottomate$AniR1,exclude=""),
margin=1)*100)
totalyear<-rowSums(table(bottomate$year,bottomate$AniR1,exclude=""))
png(file=paste("output/","AniR1_tomate_temp",".png",sep=""),
width=8,height=7,units="in",res=300)
temp<-barplot(datXyear,col=colovec,las=1,font=2,main="Anilino-pyrimidine",
cex.main=2)
text(temp[1],103,paste("n=",totalyear[1],sep=""),font=3,cex=1,xpd=TRUE)
text(temp[2],103,paste("n=",totalyear[2],sep=""),font=3,cex=1,xpd=TRUE)
text(temp[3],103,paste("n=",totalyear[3],sep=""),font=3,cex=1,xpd=TRUE)
text(temp[4],103,paste("n=",totalyear[4],sep=""),font=3,cex=1,xpd=TRUE)
text(temp[5],103,paste("n=",totalyear[5],sep=""),font=3,cex=1,xpd=TRUE)
dev.off()
##############################################################################/
#Tomate hydroxyanilide figure####
##############################################################################/
#producing the map
HydR3data<-cbind("dep_ID"=row.names(table(bottomate$dptmt,
bottomate$HydR3,exclude="")),
"Resistant"=table(bottomate$dptmt,
bottomate$HydR3,exclude="")[,1],
"Sensible"=table(bottomate$dptmt,
bottomate$HydR3,exclude="")[,2],
"Total"=rowSums(table(bottomate$dptmt,
bottomate$HydR3,exclude="")))
data2map<-merge(HydR3data,coorddep,by="dep_ID")
colovec<-c(brewer.pal(9,"Reds")[7],brewer.pal(9,"Blues")[8])
png(file=paste("output/","HydR3_tomate_spa",".png",sep=""),
width=4,height=4,units="in",res=300)
op<-par(mar=c(0,0,0,0))
plot(departe)
draw.pie(x=data2map$longitude,y=data2map$latitude,
z=cbind((as.numeric(as.character(data2map$Resistant))),
(as.numeric(as.character(data2map$Sensible)))),
col=colovec,lty=0,
radius=(sqrt(as.numeric(as.character(data2map$Total)))*15000),
labels=NA)
text(x=data2map$longitude,y=data2map$latitude,
labels=as.character(data2map$Total),cex=0.7)
par(op)
dev.off()
#producing the barplot
datXyear<-t(prop.table(table(bottomate$year,bottomate$HydR3,exclude=""),
margin=1)*100)
totalyear<-rowSums(table(bottomate$year,bottomate$HydR3,exclude=""))
png(file=paste("output/","HydR3_tomate_temp",".png",sep=""),
width=8,height=7,units="in",res=300)
temp<-barplot(datXyear,col=colovec,las=1,font=2,main="Hydroxyanilide",
cex.main=2)
text(temp[1],103,paste("n=",totalyear[1],sep=""),font=3,cex=1,xpd=TRUE)
text(temp[2],103,paste("n=",totalyear[2],sep=""),font=3,cex=1,xpd=TRUE)
text(temp[3],103,paste("n=",totalyear[3],sep=""),font=3,cex=1,xpd=TRUE)
text(temp[4],103,paste("n=",totalyear[4],sep=""),font=3,cex=1,xpd=TRUE)
text(temp[5],103,paste("n=",totalyear[5],sep=""),font=3,cex=1,xpd=TRUE)
dev.off()
##############################################################################/
#Tomate SDHI figure####
##############################################################################/
#producing the map
CarRdata<-cbind("dep_ID"=row.names(table(bottomate$dptmt,
bottomate$CarR,exclude="")),
"Resistant"=table(bottomate$dptmt,
bottomate$CarR,exclude="")[,1],
"Sensible"=table(bottomate$dptmt,
bottomate$CarR,exclude="")[,2],
"Total"=rowSums(table(bottomate$dptmt,
bottomate$CarR,exclude="")))
data2map<-merge(CarRdata,coorddep,by="dep_ID")
colovec<-c(brewer.pal(9,"Reds")[7],brewer.pal(9,"Blues")[8])
png(file=paste("output/","CarR_tomate_spa",".png",sep=""),
width=4,height=4,units="in",res=300)
op<-par(mar=c(0,0,0,0))
plot(departe)
draw.pie(x=data2map$longitude,y=data2map$latitude,
z=cbind((as.numeric(as.character(data2map$Resistant))),
(as.numeric(as.character(data2map$Sensible)))),
col=colovec,lty=0,
radius=(sqrt(as.numeric(as.character(data2map$Total)))*15000),
labels=NA)
text(x=data2map$longitude,y=data2map$latitude,
labels=as.character(data2map$Total),cex=0.7)
par(op)
dev.off()
#producing the barplot
datXyear<-t(prop.table(table(bottomate$year,bottomate$CarR,exclude=""),
margin=1)*100)
totalyear<-rowSums(table(bottomate$year,bottomate$CarR,exclude=""))
png(file=paste("output/","CarR_tomate_temp",".png",sep=""),
width=8,height=7,units="in",res=300)
temp<-barplot(datXyear,col=colovec,las=1,font=2,main="SDHI",
cex.main=2)
text(temp[1],103,paste("n=",totalyear[1],sep=""),font=3,cex=1,xpd=TRUE)
text(temp[2],103,paste("n=",totalyear[2],sep=""),font=3,cex=1,xpd=TRUE)
text(temp[3],103,paste("n=",totalyear[3],sep=""),font=3,cex=1,xpd=TRUE)
text(temp[4],103,paste("n=",totalyear[4],sep=""),font=3,cex=1,xpd=TRUE)
text(temp[5],103,paste("n=",totalyear[5],sep=""),font=3,cex=1,xpd=TRUE)
dev.off()
##############################################################################/
#END
##############################################################################/ |
library(HIBPwned)
### Name: HIBP_headers
### Title: Construct headers for a HIBP request. [Optional] Change the
### agent to make the request bespoke to you.
### Aliases: HIBP_headers
### ** Examples
HIBP_headers()
| /data/genthat_extracted_code/HIBPwned/examples/HIBP_headers.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 224 | r | library(HIBPwned)
### Name: HIBP_headers
### Title: Construct headers for a HIBP request. [Optional] Change the
### agent to make the request bespoke to you.
### Aliases: HIBP_headers
### ** Examples
HIBP_headers()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/constructor.R
\name{ballgown-constructor}
\alias{ballgown-constructor}
\alias{ballgown}
\title{constructor function for ballgown objects}
\usage{
ballgown(
samples = NULL,
dataDir = NULL,
samplePattern = NULL,
bamfiles = NULL,
pData = NULL,
verbose = TRUE,
meas = "all"
)
}
\arguments{
\item{samples}{vector of file paths to folders containing sample-specific
ballgown data (generated by \code{tablemaker}). If \code{samples} is
provided, \code{dataDir} and \code{samplePattern} are not used.}
\item{dataDir}{file path to top-level directory containing sample-specific
folders with ballgown data in them. Only used if \code{samples} is NULL.}
\item{samplePattern}{regular expression identifying the subdirectories of\
\code{dataDir} containing data to be loaded into the ballgown object (and
only those subdirectories). Only used if \code{samples} is NULL.}
\item{bamfiles}{optional vector of file paths to read alignment files for
each sample. If provided, make sure to sort properly (e.g., in the same
order as \code{samples}). Default NULL.}
\item{pData}{optional \code{data.frame} with rows corresponding to samples and
columns corresponding to phenotypic variables.}
\item{verbose}{if \code{TRUE}, print status messages and timing information
as the object is constructed.}
\item{meas}{character vector containing either "all" or one or more of:
"rcount", "ucount", "mrcount", "cov", "cov_sd", "mcov", "mcov_sd", or
"FPKM". The resulting ballgown object will only contain the specified
expression measurements, for the appropriate features. See vignette for
which expression measurements are available for which features. "all"
creates the full object.}
}
\value{
an object of class \code{ballgown}
}
\description{
constructor function for ballgown objects
}
\details{
Because experimental data is recorded so variably, it is the user's
responsibility to format \code{pData} correctly. In particular, it's
really important that the rows of \code{pData} (corresponding to samples)
are ordered the same way as \code{samples} or the
\code{dataDir}/\code{samplePattern} combo. You can run
\code{list.files(path = dataDir, pattern = samplePattern)} to see the sample
order if \code{samples} was not used.
If you are creating a ballgown object for a large experiment, this function
may run slowly and use a large amount of RAM. We recommend running this
constructor as a batch job and saving the resulting ballgown object as an rda
file. The rda file usually has reasonable size on disk, and the object in it
shouldn't take up too much RAM when loaded, so the time and memory use in
creating the object is a one-time cost.
}
\examples{
bg = ballgown(dataDir=system.file('extdata', package='ballgown'),
samplePattern='sample')
pData(bg) = data.frame(id=sampleNames(bg), group=rep(c(1,0), each=10))
}
\seealso{
\code{\link{ballgownrsem}}, for loading RSEM output into a ballgown
object
}
\author{
Leonardo Collado-Torres, Alyssa Frazee
}
| /man/ballgown-constructor.Rd | no_license | alyssafrazee/ballgown | R | false | true | 3,059 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/constructor.R
\name{ballgown-constructor}
\alias{ballgown-constructor}
\alias{ballgown}
\title{constructor function for ballgown objects}
\usage{
ballgown(
samples = NULL,
dataDir = NULL,
samplePattern = NULL,
bamfiles = NULL,
pData = NULL,
verbose = TRUE,
meas = "all"
)
}
\arguments{
\item{samples}{vector of file paths to folders containing sample-specific
ballgown data (generated by \code{tablemaker}). If \code{samples} is
provided, \code{dataDir} and \code{samplePattern} are not used.}
\item{dataDir}{file path to top-level directory containing sample-specific
folders with ballgown data in them. Only used if \code{samples} is NULL.}
\item{samplePattern}{regular expression identifying the subdirectories of\
\code{dataDir} containing data to be loaded into the ballgown object (and
only those subdirectories). Only used if \code{samples} is NULL.}
\item{bamfiles}{optional vector of file paths to read alignment files for
each sample. If provided, make sure to sort properly (e.g., in the same
order as \code{samples}). Default NULL.}
\item{pData}{optional \code{data.frame} with rows corresponding to samples and
columns corresponding to phenotypic variables.}
\item{verbose}{if \code{TRUE}, print status messages and timing information
as the object is constructed.}
\item{meas}{character vector containing either "all" or one or more of:
"rcount", "ucount", "mrcount", "cov", "cov_sd", "mcov", "mcov_sd", or
"FPKM". The resulting ballgown object will only contain the specified
expression measurements, for the appropriate features. See vignette for
which expression measurements are available for which features. "all"
creates the full object.}
}
\value{
an object of class \code{ballgown}
}
\description{
constructor function for ballgown objects
}
\details{
Because experimental data is recorded so variably, it is the user's
responsibility to format \code{pData} correctly. In particular, it's
really important that the rows of \code{pData} (corresponding to samples)
are ordered the same way as \code{samples} or the
\code{dataDir}/\code{samplePattern} combo. You can run
\code{list.files(path = dataDir, pattern = samplePattern)} to see the sample
order if \code{samples} was not used.
If you are creating a ballgown object for a large experiment, this function
may run slowly and use a large amount of RAM. We recommend running this
constructor as a batch job and saving the resulting ballgown object as an rda
file. The rda file usually has reasonable size on disk, and the object in it
shouldn't take up too much RAM when loaded, so the time and memory use in
creating the object is a one-time cost.
}
\examples{
bg = ballgown(dataDir=system.file('extdata', package='ballgown'),
samplePattern='sample')
pData(bg) = data.frame(id=sampleNames(bg), group=rep(c(1,0), each=10))
}
\seealso{
\code{\link{ballgownrsem}}, for loading RSEM output into a ballgown
object
}
\author{
Leonardo Collado-Torres, Alyssa Frazee
}
|
## Programmer: Jacques du Plessis
## Date: 24 September 2017
## Course: Exploratory Data Analysis - Week 1
## Assignment Notes:
## 1. We will only be using data from the dates 2007-02-01 and 2007-02-02.
## 2. Read the data from just those dates rather than reading in the entire dataset
## 3. Convert the Date and Time variables to Date/Time classes using strptime
## 4. Note that in this dataset missing values are coded as ?
## 5. Create a separate R code file per plot
## 6. Your code should include code for reading the data so that the plot can be fully reproduced
## 7. This file re-create plot 2
## set the working directory
setwd("c:\\Development\\R\\Coursera\\04 - Exploratory data analysis\\Week 1")
## specify the source file name and location
sourcefilename <- "./data/household_power_consumption.txt"
## search for the specific row in the dataset
startrow <- grep("1/2/2007", readLines(sourcefilename))[1]-1
## search for the last row of the subset
endrow <- grep("3/2/2007", readLines(sourcefilename))[1]-1
## read the header seperately
myheader <-read.table(sourcefilename,header = FALSE, sep = ";",nrows= 1,stringsAsFactors = FALSE)
## read the subset of data using skip & nrows. Set na.strings = "?"
subsetofdata <- read.table(sourcefilename,header = FALSE, sep = ";",na.strings = "?",skip=startrow,nrows= (endrow - startrow))
## Remove NA rows with complete.cases function
subsetofdata <- subsetofdata[which(complete.cases(subsetofdata)),]
## add the header as colnames for the dataframe
colnames(subsetofdata) <- unlist(myheader)
## create a new column by converting the concatenated V1 & V2 to datetime
subsetofdata$mydatetime <- strptime(paste(subsetofdata$Date,subsetofdata$Time),"%d/%m/%Y %H:%M:%S")
## Plot 2 to png
png(filename = "plot2.png",width = 480, height = 480, units = "px", pointsize = 12)
## create plot 2
plot(subsetofdata$mydatetime,subsetofdata$Global_active_power,type="l",lty = 1,xlab="",ylab="Global Active Power (killowatts)")
## close the graphical device
dev.off()
| /plot2.R | no_license | J-DP/ExData_Plotting1 | R | false | false | 2,034 | r |
## Programmer: Jacques du Plessis
## Date: 24 September 2017
## Course: Exploratory Data Analysis - Week 1
## Assignment Notes:
## 1. We will only be using data from the dates 2007-02-01 and 2007-02-02.
## 2. Read the data from just those dates rather than reading in the entire dataset
## 3. Convert the Date and Time variables to Date/Time classes using strptime
## 4. Note that in this dataset missing values are coded as ?
## 5. Create a separate R code file per plot
## 6. Your code should include code for reading the data so that the plot can be fully reproduced
## 7. This file re-create plot 2
## set the working directory
setwd("c:\\Development\\R\\Coursera\\04 - Exploratory data analysis\\Week 1")
## specify the source file name and location
sourcefilename <- "./data/household_power_consumption.txt"
## search for the specific row in the dataset
startrow <- grep("1/2/2007", readLines(sourcefilename))[1]-1
## search for the last row of the subset
endrow <- grep("3/2/2007", readLines(sourcefilename))[1]-1
## read the header seperately
myheader <-read.table(sourcefilename,header = FALSE, sep = ";",nrows= 1,stringsAsFactors = FALSE)
## read the subset of data using skip & nrows. Set na.strings = "?"
subsetofdata <- read.table(sourcefilename,header = FALSE, sep = ";",na.strings = "?",skip=startrow,nrows= (endrow - startrow))
## Remove NA rows with complete.cases function
subsetofdata <- subsetofdata[which(complete.cases(subsetofdata)),]
## add the header as colnames for the dataframe
colnames(subsetofdata) <- unlist(myheader)
## create a new column by converting the concatenated V1 & V2 to datetime
subsetofdata$mydatetime <- strptime(paste(subsetofdata$Date,subsetofdata$Time),"%d/%m/%Y %H:%M:%S")
## Plot 2 to png
png(filename = "plot2.png",width = 480, height = 480, units = "px", pointsize = 12)
## create plot 2
plot(subsetofdata$mydatetime,subsetofdata$Global_active_power,type="l",lty = 1,xlab="",ylab="Global Active Power (killowatts)")
## close the graphical device
dev.off()
|
library(tidyverse)
library(ggplot2)
library(formattable)
library(sf)
library(readxl)
library(readr)
## getting the yield difference and GM difference between GSP and Approx
## Step 1: read the 2019 strip polygons; keep only the paddock ID + geometry.
all_strips2019 <- st_read("W:/value_soil_testing_prj/Yield_data/finished/GIS_Results/All_Strips_2019_wgs84.shp")
all_strips2019 <- all_strips2019 %>%
dplyr::select(Paddock_ID, geometry)
#2.turn polygons into points - centriod
all_strips2019_centroid = st_centroid(all_strips2019)
all_strips2019_centroid <- all_strips2019_centroid %>% filter(!is.na(Paddock_ID))
#3. read the average-rainfall raster
av_rain <- raster::raster("W:/value_soil_testing_prj/Yield_data/2020/processing/rain_grid")
##3a. extract strips coordinates points from the raster (eg shapefile points and average rainfall grid)
all_strips2019_centroid$av_rain <- raster::extract(av_rain, all_strips2019_centroid)
## Convert to a plain data frame, dropping the sf geometry column.
all_strips2019_centroid_df <- as.data.frame(all_strips2019_centroid ) %>% dplyr::select(-geometry)
rm(all_strips2019, all_strips2019_centroid, av_rain)
## add in GRDC zone
zone2019_GRDC_bound <- st_read("W:/value_soil_testing_prj/Yield_data/finished/GIS_Results/All_Zones_2019_GRDC_wgs84.shp")
zone2019_GRDC_bound <- zone2019_GRDC_bound %>%
dplyr::select(Zone_ID,
AGROECOLOG)
zone2019_GRDC_bound <- as.data.frame(zone2019_GRDC_bound) %>%
dplyr::select(- geometry)
##make a paddock_ID clm: Zone_ID is the Paddock_ID plus one trailing zone
## digit, so strip the last character (6-digit -> first 5, 7-digit -> first 6).
zone2019_GRDC_bound$length_zoneID <- nchar(zone2019_GRDC_bound$Zone_ID)
zone2019_GRDC_bound <- zone2019_GRDC_bound %>%
mutate(Paddock_ID =
case_when(length_zoneID == 6 ~ substr(Zone_ID, start = 1, stop = 5),
length_zoneID == 7 ~ substr(Zone_ID, start = 1, stop = 6)))
zone2019_GRDC_bound$Paddock_ID <- as.double(zone2019_GRDC_bound$Paddock_ID)
str(zone2019_GRDC_bound)
str(all_strips2019_centroid_df)
## Join the GRDC zone info onto the centroids (common column: Paddock_ID).
all_strips2019_centroid_df <- left_join(all_strips2019_centroid_df, zone2019_GRDC_bound)
## Classify average rainfall: <= 350 low, > 500 high, otherwise medium.
## NOTE(review): rows with NA av_rain also fall into "medium" via the TRUE
## branch -- confirm that is intended.
all_strips2019_centroid_df <- all_strips2019_centroid_df %>%
dplyr::mutate(
rainfall_class = case_when(
av_rain<=350 ~ "low",
av_rain >500 ~ "high",
TRUE ~ "medium"))
all_strips2019_centroid_df <- all_strips2019_centroid_df %>%
dplyr::select( Paddock_ID,
AGROECOLOG,
rainfall_class)
### oops I need the rates from the fert df
fertiliser_applied2019 <- read.csv("W:/value_soil_testing_prj/Yield_data/2020/processing/processing_files/step2_fert_app_all_step_2019.csv")
str(fertiliser_applied2019)
fert_2019 <- fertiliser_applied2019 %>%
dplyr::select(Paddock_ID, Rate,GSP,Strip_Type,
Total_sum_N_content,
Total_sum_P_content)
rm(fertiliser_applied2019, zone2019_GRDC_bound)
str(all_strips2019_centroid_df)
str(fert_2019)
## add it to the other data (left join; common column: Paddock_ID)
details_2019 <- left_join(fert_2019, all_strips2019_centroid_df)
rm(fert_2019,all_strips2019_centroid_df )
names(details_2019)
## The GSP column holds either a GPS string or a number; recode every
## non-missing value to the literal "GSP" (missing values stay NA).
## Fixed: the original read `GSP =!is.na(GSP) ~ "GSP"`, which passed the
## formula to case_when() as a *named* argument (an accidental `GSP =`);
## newer dplyr versions reject named case_when() arguments.
details_2019 <- details_2019 %>%
dplyr::mutate(GSP = case_when(
!is.na(GSP) ~ "GSP"
))
str(details_2019)
## Remove a couple of problem strips: for these paddocks, drop only the rows
## where Rate is missing (the three filters are combined into one pipeline).
details_2019 <- details_2019 %>%
filter(Paddock_ID != 31712 | !is.na(Rate)) %>%
filter(Paddock_ID != 51521 | !is.na(Rate)) %>%
filter(Paddock_ID != 51522 | !is.na(Rate))
#######################################################################################
## bring in the t.test data results (two sources: Landmark and non-Landmark)
set1_2019 <- read.csv("W:/value_soil_testing_prj/Yield_data/finished/complied/use/Landmark_with_soil2020-06-24_For_TM.csv")
set2_2019 <- read.csv("W:/value_soil_testing_prj/Yield_data/finished/complied/use/Non_landmark_results_soil_and_pair_rates2020-06-25_For_TM.csv")
str(set1_2019)
names(set1_2019)
## Keep only the columns needed downstream; rename Paddock.code to Zone_ID.
set1_2019 <- set1_2019 %>%
dplyr::select(Zone_ID = Paddock.code,
Organisation,
Contact,
Farmer,
Paddock_tested,
Rates,
Yld,
P_value,Mean_diff,rounded,Significant)
str(set2_2019)
names(set2_2019)
set2_2019 <- set2_2019 %>%
dplyr::select(Zone_ID = Paddock.code,
Organisation,
Contact,
Farmer,
Paddock_tested,
Rates,
Yld,
P_value,Mean_diff,rounded,Significant)
# Pole has rates in shapefile that are different to this, its that same but expressed in different units
# (convert the "Georges" paddock rates to the shapefile units).
set2_2019 <- set2_2019 %>%
dplyr::mutate(Rates = case_when(
Paddock_tested == "Georges" & Rates == 0 ~ 0,
Paddock_tested == "Georges" & Rates == 3 ~ 23,
Paddock_tested == "Georges" & Rates == 6 ~ 46,
Paddock_tested == "Georges" & Rates == 9 ~ 69,
Paddock_tested == "Georges" & Rates == 12 ~ 92,
Paddock_tested == "Georges" & Rates == 15 ~ 115,
TRUE ~ as.double(Rates)
))
## Stack the two sources into a single t-test results table.
t.test_2019 <- rbind(set2_2019, set1_2019)
rm(set1_2019, set2_2019)
##make a paddock_ID clm (strip the trailing zone digit from Zone_ID)
t.test_2019$length_zoneID <- nchar(t.test_2019$Zone_ID)
t.test_2019 <- t.test_2019 %>%
mutate(Paddock_ID =
case_when(length_zoneID == 6 ~ substr(Zone_ID, start = 1, stop = 5),
length_zoneID == 7 ~ substr(Zone_ID, start = 1, stop = 6)))
t.test_2019$Paddock_ID <- as.double(t.test_2019$Paddock_ID)
## The t test data does not have N or P Strips and there is duplication
## Assign a Strip_Type per paddock; anything not listed defaults to "P Strip".
str(t.test_2019)
t.test_2019 <- t.test_2019 %>%
dplyr::mutate(Strip_Type = case_when(
Paddock_ID == 31132 ~ "N Strip",
Paddock_ID == 31231 ~ "N Strip",
Paddock_ID == 31233 ~ "N Strip",
Paddock_ID == 31711 ~ "N Strip",
Paddock_ID == 31712 ~ "N Strip",
Paddock_ID == 31726 ~ "N Strip",
Paddock_ID == 51512 ~ "N Strip",
Paddock_ID == 51713 ~ "N Strip",
Paddock_ID == 51911 ~ "N Strip",
Paddock_ID == 51914 ~ "N Strip",
Paddock_ID == 51915 ~ "N Strip",
Paddock_ID == 52413 ~ "N Strip",
Paddock_ID == 52444 ~ "N Strip",
Paddock_ID == 52453 ~ "N Strip",
Paddock_ID == 51511 ~ "N Strip",
Paddock_ID == 33311 ~ "N and P Strip",
Paddock_ID == 33321 ~ "N and P Strip",
Paddock_ID == 33331 ~ "N and P Strip",
TRUE ~ "P Strip"
))
## Ann paddocks are a bit of a mess some results are for P and some are for N I have used TB analysis to work out which is which
## case_when() is first-match-wins, so the ordering of the conditions below matters.
t.test_2019 <- t.test_2019 %>%
dplyr::mutate(Strip_Type = case_when(
#Schlitz_M2
Zone_ID == 333110 & Rates == 0 & Yld < 3 ~ "N Strip",
Zone_ID == 333110 & Rates == 60 ~ "N Strip",
Zone_ID == 333110 & Rates == 120 ~ "N Strip",
Zone_ID == 333110 & Rates == 0 & Yld > 3 ~ "P Strip",
Zone_ID == 333110 & Rates == 20 ~ "P Strip",
Zone_ID == 333110 & Rates == 40 ~ "P Strip",
Zone_ID == 333110 & Rates == 80 ~ "P Strip",
#Schlitz_M2
Zone_ID == 333111 & Rates == 0 & Yld > 2.69 ~ "N Strip",
Zone_ID == 333111 & Rates == 60 ~ "N Strip",
Zone_ID == 333111 & Rates == 120 ~ "N Strip",
Zone_ID == 333111 & Rates == 0 & Yld < 2.69 ~ "P Strip",
Zone_ID == 333111 & Rates == 20 ~ "P Strip",
Zone_ID == 333111 & Rates == 40 ~ "P Strip",
Zone_ID == 333111 & Rates == 80 ~ "P Strip",
#Chamberlain_2
Zone_ID == 333211 & Rates == 25 ~ "P Strip",
Zone_ID == 333211 & Rates == 50 ~ "P Strip",
Zone_ID == 333211 & Rates == 75 ~ "P Strip",
Zone_ID == 333210 & Rates == 25 ~ "P Strip",
Zone_ID == 333210 & Rates == 50 ~ "P Strip",
Zone_ID == 333210 & Rates == 75 ~ "P Strip",
Zone_ID == 333211 & Rates == 0 ~ "N Strip",
Zone_ID == 333211 & Rates == 60 ~ "N Strip",
Zone_ID == 333211 & Rates == 120 ~ "N Strip",
Zone_ID == 333210 & Rates == 0 ~ "N Strip",
Zone_ID == 333210 & Rates == 60 ~ "N Strip",
Zone_ID == 333210 & Rates == 120 ~ "N Strip",
#Nelson_3
Zone_ID == 333311 & Rates == 0 & Yld < 1.5 ~ "P Strip",
Zone_ID == 333311 & Rates == 25 & Yld > 1.5 ~ "P Strip",
Zone_ID == 333311 & Rates == 50 & Yld < 1.5 ~ "P Strip",
Zone_ID == 333311 & Rates == 75 ~ "P Strip",
Zone_ID == 333310 & Rates == 0 & Yld > 1.8 ~ "P Strip",
Zone_ID == 333310 & Rates == 25 & Yld < 1.7 ~ "P Strip",
Zone_ID == 333310 & Rates == 50 & Yld > 1.7 ~ "P Strip",
Zone_ID == 333310 & Rates == 75 ~ "P Strip",
# NOTE(review): the next three conditions repeat the first three Nelson_3
# "P Strip" conditions above verbatim, so with first-match-wins they can
# never fire -- confirm which label was intended for those rows.
Zone_ID == 333311 & Rates == 0 & Yld < 1.5 ~ "N Strip",
Zone_ID == 333311 & Rates == 25 & Yld > 1.5 ~ "N Strip",
Zone_ID == 333311 & Rates == 50 & Yld < 1.5 ~ "N Strip",
Zone_ID == 333311 & Rates == 0 & Yld > 1.5 ~ "N Strip",
Zone_ID == 333311 & Rates == 25 & Yld < 1.5 ~ "N Strip",
Zone_ID == 333311 & Rates == 50 & Yld > 1.5 ~ "N Strip",
Zone_ID == 333310 & Rates == 0 & Yld < 1.8 ~ "N Strip",
Zone_ID == 333310 & Rates == 25 & Yld > 1.7 ~ "N Strip",
Zone_ID == 333310 & Rates == 50 & Yld < 1.7 ~ "N Strip",
TRUE ~ Strip_Type
))
# is paddock is missing code "Landmark James_Falvey_2 Tim_McClelland_4 Mervyns" but I am not sure it should be included??
names(t.test_2019)
## Give the "Mervyns" paddock an explicit Zone_ID; everything else keeps its own.
t.test_2019 <- t.test_2019 %>%
dplyr::mutate(Zone_ID = case_when(
Paddock_tested == "Mervyns" ~ 312431,
TRUE ~ as.double(Zone_ID)
) )
## I want to join details_2019 to t.test
str(t.test_2019)
str(details_2019)
## Build a composite key (Paddock_ID + Strip_Type + rate) on both tables;
## full_join() below joins on all common columns, including for_join.
t.test_2019 <- t.test_2019 %>%
dplyr::mutate(for_join = paste0(Paddock_ID, Strip_Type, Rates))
details_2019 <- details_2019 %>%
dplyr::mutate(for_join = paste0(Paddock_ID, Strip_Type, Rate))
t.test2019_details <- full_join(details_2019, t.test_2019)
names(t.test2019_details)
# I have two problem paddocks Jeff (53621) no rates no analysis and Mervyns non zone 312431
# NOTE(review): filter() also drops rows where the condition evaluates to NA,
# so any row with a missing Paddock_tested / Paddock_ID is removed here too.
t.test2019_details <- t.test2019_details %>%
filter(Paddock_tested != "Mervyns")
t.test2019_details <- t.test2019_details %>%
filter(Paddock_ID != 53621)
## Final column selection (Strip_Type is listed twice; dplyr::select keeps it once).
t.test2019_details <- t.test2019_details %>%
dplyr::select(Zone_ID,
Paddock_ID,
Strip_Type,
Rate,
GSP,
Strip_Type,
Total_sum_N_content,
Total_sum_P_content,
Yld,
Organisation,
Contact,
Farmer,
Paddock_tested,
rainfall_class,
AGROECOLOG,
P_value,
Mean_diff,
rounded,
Significant)
#### This is ready for more the recomm rates
####################################################################################################################################################
## bring in rec rates ##
recom_rateDB2019 <- read_excel( "W:/value_soil_testing_prj/data_base/downloaded_sep2021/GRDC 2019 Paddock Database_SA_VIC_June11 2021.xlsx")
str(recom_rateDB2019)
# select only a few clms with recommedation
recom_rateDB2019 <- recom_rateDB2019 %>%
dplyr::select(Zone_ID = `Paddock code` ,
Total_N = `Total N`,
p_rec = `P rec`,
n_rec_yld_low = `N Rec (< 3 t/ha)` ,
n_rec_yld_med = `N Rec (3-5 t/ha)` ,
n_rec_yld_high = `N Rec (> 5 t/ha)`,
Colwell,
DGT,
PBI
)
## Coerce the recommendation / soil-test columns to numeric
## (non-numeric spreadsheet entries become NA).
recom_rateDB2019$n_rec_yld_low <- as.double(recom_rateDB2019$n_rec_yld_low)
recom_rateDB2019$n_rec_yld_med <- as.double(recom_rateDB2019$n_rec_yld_med)
recom_rateDB2019$n_rec_yld_high <- as.double(recom_rateDB2019$n_rec_yld_high)
recom_rateDB2019$Colwell <- as.double(recom_rateDB2019$Colwell)
recom_rateDB2019$DGT <- as.double(recom_rateDB2019$DGT)
recom_rateDB2019$PBI <- as.double(recom_rateDB2019$PBI)
## maxN = row-wise max of the three yield-band N recommendations
## (columns 4:6 after the select above = n_rec_yld_low/med/high).
recom_rateDB2019 <- dplyr::mutate(recom_rateDB2019, maxN = apply(recom_rateDB2019[4:6], 1, max, na.rm = TRUE))
# remove redunant clm and replace inf
## max(..., na.rm = TRUE) over an all-NA row returns -Inf; the case_when
## below keeps non-negative maxN and maps everything else (-Inf, NA) to NA.
recom_rateDB2019 <- recom_rateDB2019 %>%
mutate(
maxN = case_when(
maxN >= 0 ~ maxN,
TRUE ~ NA_real_
)
)
recom_rateDB2019 <- recom_rateDB2019 %>%
dplyr::select(Zone_ID ,
p_rec ,
maxN,
Total_N,
Colwell,
DGT,
PBI)
##make a paddock_ID clm (strip the trailing zone digit from Zone_ID)
recom_rateDB2019$length_zoneID <- nchar(recom_rateDB2019$Zone_ID)
recom_rateDB2019 <- recom_rateDB2019 %>%
mutate(Paddock_ID =
case_when(length_zoneID == 6 ~ substr(Zone_ID, start = 1, stop = 5),
length_zoneID == 7 ~ substr(Zone_ID, start = 1, stop = 6)))
recom_rateDB2019$Paddock_ID <- as.double(recom_rateDB2019$Paddock_ID)
str(recom_rateDB2019) #this has all the zones
## Fixed: the original called str(fert_2019_rain_GSP) here, but no object of
## that name is created anywhere in this script, so the call errored at run
## time and halted the script. The paddock details now live in t.test2019_details.
rm(details_2019, t.test_2019)
### join the t.test data to the recom rates
str(recom_rateDB2019)
str(t.test2019_details)
t.test_details_rec_rates <- left_join(t.test2019_details, recom_rateDB2019)
rm(recom_rateDB2019, t.test2019_details)
#############################################################################################################
### Redo the rec rates for N with my rainfall zone
## Rec_N_jax: recommended N = (rainfall-class N target - measured Total_N) / 0.5,
## with targets of 80 / 160 / 240 for low / medium / high rainfall.
## NOTE(review): rows where Total_N exceeds the target, or where Total_N is NA,
## fall through to 0 via the TRUE branch -- confirm 0 (rather than NA) is intended.
t.test_details_rec_rates <- t.test_details_rec_rates %>%
mutate(Rec_N_jax = case_when(
rainfall_class == "low" & Total_N <= 80 ~ ((80 -Total_N)/0.5),
rainfall_class == "medium" & Total_N <= 160 ~ ((160 -Total_N)/0.5),
rainfall_class == "high" & Total_N <= 240 ~ ((240 -Total_N)/0.5),
TRUE ~ 0 ))
str(t.test_details_rec_rates)
## The commented-out block below is a disabled critical-Colwell P-recommendation
## calculation, kept for reference.
# t.test_details_rec_rates <- t.test_details_rec_rates %>%
# mutate(critical_colwell = 4.6*( PBI^ (0.393)))
# ## is colwell greater than critical colwell?
# t.test_details_rec_rates <- t.test_details_rec_rates %>%
# mutate(colwell_thershold = case_when(
# Colwell > critical_colwell ~ "adequate",
# Colwell < critical_colwell ~ "p_required") )
## if p is required how much extra colwell p is needed to get to critical thershold?
# t.test_details_rec_rates <- t.test_details_rec_rates %>%
# mutate(to_reach_col_thershold = case_when(
# colwell_thershold == "p_required" ~ critical_colwell - Colwell))
#
# ## what is the recomm P rate?
# t.test_details_rec_rates <- t.test_details_rec_rates %>%
# mutate(p_rec_jax = case_when(
# colwell_thershold == "p_required" ~ ((0.0019*PBI+2.146)*to_reach_col_thershold),
# colwell_thershold == "adequate" ~ 5
# ))
# ## clean up extra clms
#
# names(t.test_details_rec_rates)
#
# t.test_details_rec_rates <- t.test_details_rec_rates %>%
# dplyr::select(-"length_zoneID",
# - critical_colwell,
# - colwell_thershold,
# - to_reach_col_thershold)
###################################################################################################################################
#write this out
write.csv(t.test_details_rec_rates, "W:/value_soil_testing_prj/Yield_data/2020/processing/processing_files/for_econmics/t.test_details_rec_rates_2019.csv")
| /scrap_notes4_2019.R | no_license | JackieOuzman/soil_testing_final_report | R | false | false | 14,913 | r |
library(tidyverse)
library(ggplot2)
library(formattable)
library(sf)
library(readxl)
library(readr)
## getting the yield difference and GM difference between GSP and Approx
## Step 1: read the 2019 strip polygons; keep only the paddock ID + geometry.
all_strips2019 <- st_read("W:/value_soil_testing_prj/Yield_data/finished/GIS_Results/All_Strips_2019_wgs84.shp")
all_strips2019 <- all_strips2019 %>%
dplyr::select(Paddock_ID, geometry)
#2.turn polygons into points - centriod
all_strips2019_centroid = st_centroid(all_strips2019)
all_strips2019_centroid <- all_strips2019_centroid %>% filter(!is.na(Paddock_ID))
#3. read the average-rainfall raster
av_rain <- raster::raster("W:/value_soil_testing_prj/Yield_data/2020/processing/rain_grid")
##3a. extract strips coordinates points from the raster (eg shapefile points and average rainfall grid)
all_strips2019_centroid$av_rain <- raster::extract(av_rain, all_strips2019_centroid)
## Convert to a plain data frame, dropping the sf geometry column.
all_strips2019_centroid_df <- as.data.frame(all_strips2019_centroid ) %>% dplyr::select(-geometry)
rm(all_strips2019, all_strips2019_centroid, av_rain)
## add in GRDC zone
zone2019_GRDC_bound <- st_read("W:/value_soil_testing_prj/Yield_data/finished/GIS_Results/All_Zones_2019_GRDC_wgs84.shp")
zone2019_GRDC_bound <- zone2019_GRDC_bound %>%
dplyr::select(Zone_ID,
AGROECOLOG)
zone2019_GRDC_bound <- as.data.frame(zone2019_GRDC_bound) %>%
dplyr::select(- geometry)
##make a paddock_ID clm: Zone_ID is the Paddock_ID plus one trailing zone
## digit, so strip the last character (6-digit -> first 5, 7-digit -> first 6).
zone2019_GRDC_bound$length_zoneID <- nchar(zone2019_GRDC_bound$Zone_ID)
zone2019_GRDC_bound <- zone2019_GRDC_bound %>%
mutate(Paddock_ID =
case_when(length_zoneID == 6 ~ substr(Zone_ID, start = 1, stop = 5),
length_zoneID == 7 ~ substr(Zone_ID, start = 1, stop = 6)))
zone2019_GRDC_bound$Paddock_ID <- as.double(zone2019_GRDC_bound$Paddock_ID)
str(zone2019_GRDC_bound)
str(all_strips2019_centroid_df)
## Join the GRDC zone info onto the centroids (common column: Paddock_ID).
all_strips2019_centroid_df <- left_join(all_strips2019_centroid_df, zone2019_GRDC_bound)
## Classify average rainfall: <= 350 low, > 500 high, otherwise medium.
## NOTE(review): rows with NA av_rain also fall into "medium" via the TRUE
## branch -- confirm that is intended.
all_strips2019_centroid_df <- all_strips2019_centroid_df %>%
dplyr::mutate(
rainfall_class = case_when(
av_rain<=350 ~ "low",
av_rain >500 ~ "high",
TRUE ~ "medium"))
all_strips2019_centroid_df <- all_strips2019_centroid_df %>%
dplyr::select( Paddock_ID,
AGROECOLOG,
rainfall_class)
### oops I need the rates from the fert df
fertiliser_applied2019 <- read.csv("W:/value_soil_testing_prj/Yield_data/2020/processing/processing_files/step2_fert_app_all_step_2019.csv")
str(fertiliser_applied2019)
fert_2019 <- fertiliser_applied2019 %>%
dplyr::select(Paddock_ID, Rate,GSP,Strip_Type,
Total_sum_N_content,
Total_sum_P_content)
rm(fertiliser_applied2019, zone2019_GRDC_bound)
str(all_strips2019_centroid_df)
str(fert_2019)
## add it to the other data (left join; common column: Paddock_ID)
details_2019 <- left_join(fert_2019, all_strips2019_centroid_df)
rm(fert_2019,all_strips2019_centroid_df )
names(details_2019)
## The GSP column holds either a GPS string or a number; recode every
## non-missing value to the literal "GSP" (missing values stay NA).
## Fixed: the original read `GSP =!is.na(GSP) ~ "GSP"`, which passed the
## formula to case_when() as a *named* argument (an accidental `GSP =`);
## newer dplyr versions reject named case_when() arguments.
details_2019 <- details_2019 %>%
dplyr::mutate(GSP = case_when(
!is.na(GSP) ~ "GSP"
))
str(details_2019)
## Remove a couple of problem strips: for these paddocks, drop only the rows
## where Rate is missing (the three filters are combined into one pipeline).
details_2019 <- details_2019 %>%
filter(Paddock_ID != 31712 | !is.na(Rate)) %>%
filter(Paddock_ID != 51521 | !is.na(Rate)) %>%
filter(Paddock_ID != 51522 | !is.na(Rate))
#######################################################################################
## bring in the t.test data results (two sources: Landmark and non-Landmark)
set1_2019 <- read.csv("W:/value_soil_testing_prj/Yield_data/finished/complied/use/Landmark_with_soil2020-06-24_For_TM.csv")
set2_2019 <- read.csv("W:/value_soil_testing_prj/Yield_data/finished/complied/use/Non_landmark_results_soil_and_pair_rates2020-06-25_For_TM.csv")
str(set1_2019)
names(set1_2019)
## Keep only the columns needed downstream; rename Paddock.code to Zone_ID.
set1_2019 <- set1_2019 %>%
dplyr::select(Zone_ID = Paddock.code,
Organisation,
Contact,
Farmer,
Paddock_tested,
Rates,
Yld,
P_value,Mean_diff,rounded,Significant)
str(set2_2019)
names(set2_2019)
set2_2019 <- set2_2019 %>%
dplyr::select(Zone_ID = Paddock.code,
Organisation,
Contact,
Farmer,
Paddock_tested,
Rates,
Yld,
P_value,Mean_diff,rounded,Significant)
# Pole has rates in shapefile that are different to this, its that same but expressed in different units
# (convert the "Georges" paddock rates to the shapefile units).
set2_2019 <- set2_2019 %>%
dplyr::mutate(Rates = case_when(
Paddock_tested == "Georges" & Rates == 0 ~ 0,
Paddock_tested == "Georges" & Rates == 3 ~ 23,
Paddock_tested == "Georges" & Rates == 6 ~ 46,
Paddock_tested == "Georges" & Rates == 9 ~ 69,
Paddock_tested == "Georges" & Rates == 12 ~ 92,
Paddock_tested == "Georges" & Rates == 15 ~ 115,
TRUE ~ as.double(Rates)
))
## Stack the two sources into a single t-test results table.
t.test_2019 <- rbind(set2_2019, set1_2019)
rm(set1_2019, set2_2019)
##make a paddock_ID clm (strip the trailing zone digit from Zone_ID)
t.test_2019$length_zoneID <- nchar(t.test_2019$Zone_ID)
t.test_2019 <- t.test_2019 %>%
mutate(Paddock_ID =
case_when(length_zoneID == 6 ~ substr(Zone_ID, start = 1, stop = 5),
length_zoneID == 7 ~ substr(Zone_ID, start = 1, stop = 6)))
t.test_2019$Paddock_ID <- as.double(t.test_2019$Paddock_ID)
## The t test data does not have N or P Strips and there is duplication
## Assign a Strip_Type per paddock; anything not listed defaults to "P Strip".
str(t.test_2019)
t.test_2019 <- t.test_2019 %>%
dplyr::mutate(Strip_Type = case_when(
Paddock_ID == 31132 ~ "N Strip",
Paddock_ID == 31231 ~ "N Strip",
Paddock_ID == 31233 ~ "N Strip",
Paddock_ID == 31711 ~ "N Strip",
Paddock_ID == 31712 ~ "N Strip",
Paddock_ID == 31726 ~ "N Strip",
Paddock_ID == 51512 ~ "N Strip",
Paddock_ID == 51713 ~ "N Strip",
Paddock_ID == 51911 ~ "N Strip",
Paddock_ID == 51914 ~ "N Strip",
Paddock_ID == 51915 ~ "N Strip",
Paddock_ID == 52413 ~ "N Strip",
Paddock_ID == 52444 ~ "N Strip",
Paddock_ID == 52453 ~ "N Strip",
Paddock_ID == 51511 ~ "N Strip",
Paddock_ID == 33311 ~ "N and P Strip",
Paddock_ID == 33321 ~ "N and P Strip",
Paddock_ID == 33331 ~ "N and P Strip",
TRUE ~ "P Strip"
))
## Ann paddocks are a bit of a mess some results are for P and some are for N I have used TB analysis to work out which is which
## case_when() is first-match-wins, so the ordering of the conditions below matters.
t.test_2019 <- t.test_2019 %>%
dplyr::mutate(Strip_Type = case_when(
#Schlitz_M2
Zone_ID == 333110 & Rates == 0 & Yld < 3 ~ "N Strip",
Zone_ID == 333110 & Rates == 60 ~ "N Strip",
Zone_ID == 333110 & Rates == 120 ~ "N Strip",
Zone_ID == 333110 & Rates == 0 & Yld > 3 ~ "P Strip",
Zone_ID == 333110 & Rates == 20 ~ "P Strip",
Zone_ID == 333110 & Rates == 40 ~ "P Strip",
Zone_ID == 333110 & Rates == 80 ~ "P Strip",
#Schlitz_M2
Zone_ID == 333111 & Rates == 0 & Yld > 2.69 ~ "N Strip",
Zone_ID == 333111 & Rates == 60 ~ "N Strip",
Zone_ID == 333111 & Rates == 120 ~ "N Strip",
Zone_ID == 333111 & Rates == 0 & Yld < 2.69 ~ "P Strip",
Zone_ID == 333111 & Rates == 20 ~ "P Strip",
Zone_ID == 333111 & Rates == 40 ~ "P Strip",
Zone_ID == 333111 & Rates == 80 ~ "P Strip",
#Chamberlain_2
Zone_ID == 333211 & Rates == 25 ~ "P Strip",
Zone_ID == 333211 & Rates == 50 ~ "P Strip",
Zone_ID == 333211 & Rates == 75 ~ "P Strip",
Zone_ID == 333210 & Rates == 25 ~ "P Strip",
Zone_ID == 333210 & Rates == 50 ~ "P Strip",
Zone_ID == 333210 & Rates == 75 ~ "P Strip",
Zone_ID == 333211 & Rates == 0 ~ "N Strip",
Zone_ID == 333211 & Rates == 60 ~ "N Strip",
Zone_ID == 333211 & Rates == 120 ~ "N Strip",
Zone_ID == 333210 & Rates == 0 ~ "N Strip",
Zone_ID == 333210 & Rates == 60 ~ "N Strip",
Zone_ID == 333210 & Rates == 120 ~ "N Strip",
#Nelson_3
Zone_ID == 333311 & Rates == 0 & Yld < 1.5 ~ "P Strip",
Zone_ID == 333311 & Rates == 25 & Yld > 1.5 ~ "P Strip",
Zone_ID == 333311 & Rates == 50 & Yld < 1.5 ~ "P Strip",
Zone_ID == 333311 & Rates == 75 ~ "P Strip",
Zone_ID == 333310 & Rates == 0 & Yld > 1.8 ~ "P Strip",
Zone_ID == 333310 & Rates == 25 & Yld < 1.7 ~ "P Strip",
Zone_ID == 333310 & Rates == 50 & Yld > 1.7 ~ "P Strip",
Zone_ID == 333310 & Rates == 75 ~ "P Strip",
# NOTE(review): the next three conditions repeat the first three Nelson_3
# "P Strip" conditions above verbatim, so with first-match-wins they can
# never fire -- confirm which label was intended for those rows.
Zone_ID == 333311 & Rates == 0 & Yld < 1.5 ~ "N Strip",
Zone_ID == 333311 & Rates == 25 & Yld > 1.5 ~ "N Strip",
Zone_ID == 333311 & Rates == 50 & Yld < 1.5 ~ "N Strip",
Zone_ID == 333311 & Rates == 0 & Yld > 1.5 ~ "N Strip",
Zone_ID == 333311 & Rates == 25 & Yld < 1.5 ~ "N Strip",
Zone_ID == 333311 & Rates == 50 & Yld > 1.5 ~ "N Strip",
Zone_ID == 333310 & Rates == 0 & Yld < 1.8 ~ "N Strip",
Zone_ID == 333310 & Rates == 25 & Yld > 1.7 ~ "N Strip",
Zone_ID == 333310 & Rates == 50 & Yld < 1.7 ~ "N Strip",
TRUE ~ Strip_Type
))
# is paddock is missing code "Landmark James_Falvey_2 Tim_McClelland_4 Mervyns" but I am not sure it should be included??
names(t.test_2019)
## Give the "Mervyns" paddock an explicit Zone_ID; everything else keeps its own.
t.test_2019 <- t.test_2019 %>%
dplyr::mutate(Zone_ID = case_when(
Paddock_tested == "Mervyns" ~ 312431,
TRUE ~ as.double(Zone_ID)
) )
## I want to join details_2019 to t.test
str(t.test_2019)
str(details_2019)
## Build a composite key (Paddock_ID + Strip_Type + rate) on both tables;
## full_join() below joins on all common columns, including for_join.
t.test_2019 <- t.test_2019 %>%
dplyr::mutate(for_join = paste0(Paddock_ID, Strip_Type, Rates))
details_2019 <- details_2019 %>%
dplyr::mutate(for_join = paste0(Paddock_ID, Strip_Type, Rate))
t.test2019_details <- full_join(details_2019, t.test_2019)
names(t.test2019_details)
# I have two problem paddocks Jeff (53621) no rates no analysis and Mervyns non zone 312431
# NOTE(review): filter() also drops rows where the condition evaluates to NA,
# so any row with a missing Paddock_tested / Paddock_ID is removed here too.
t.test2019_details <- t.test2019_details %>%
filter(Paddock_tested != "Mervyns")
t.test2019_details <- t.test2019_details %>%
filter(Paddock_ID != 53621)
## Final column selection (Strip_Type is listed twice; dplyr::select keeps it once).
t.test2019_details <- t.test2019_details %>%
dplyr::select(Zone_ID,
Paddock_ID,
Strip_Type,
Rate,
GSP,
Strip_Type,
Total_sum_N_content,
Total_sum_P_content,
Yld,
Organisation,
Contact,
Farmer,
Paddock_tested,
rainfall_class,
AGROECOLOG,
P_value,
Mean_diff,
rounded,
Significant)
#### This is ready for more the recomm rates
####################################################################################################################################################
## bring in rec rates ##
recom_rateDB2019 <- read_excel( "W:/value_soil_testing_prj/data_base/downloaded_sep2021/GRDC 2019 Paddock Database_SA_VIC_June11 2021.xlsx")
str(recom_rateDB2019)
# select only a few clms with recommedation
recom_rateDB2019 <- recom_rateDB2019 %>%
dplyr::select(Zone_ID = `Paddock code` ,
Total_N = `Total N`,
p_rec = `P rec`,
n_rec_yld_low = `N Rec (< 3 t/ha)` ,
n_rec_yld_med = `N Rec (3-5 t/ha)` ,
n_rec_yld_high = `N Rec (> 5 t/ha)`,
Colwell,
DGT,
PBI
)
## Coerce the recommendation / soil-test columns to numeric
## (non-numeric spreadsheet entries become NA).
recom_rateDB2019$n_rec_yld_low <- as.double(recom_rateDB2019$n_rec_yld_low)
recom_rateDB2019$n_rec_yld_med <- as.double(recom_rateDB2019$n_rec_yld_med)
recom_rateDB2019$n_rec_yld_high <- as.double(recom_rateDB2019$n_rec_yld_high)
recom_rateDB2019$Colwell <- as.double(recom_rateDB2019$Colwell)
recom_rateDB2019$DGT <- as.double(recom_rateDB2019$DGT)
recom_rateDB2019$PBI <- as.double(recom_rateDB2019$PBI)
## maxN = row-wise max of the three yield-band N recommendations
## (columns 4:6 after the select above = n_rec_yld_low/med/high).
recom_rateDB2019 <- dplyr::mutate(recom_rateDB2019, maxN = apply(recom_rateDB2019[4:6], 1, max, na.rm = TRUE))
# remove redunant clm and replace inf
## max(..., na.rm = TRUE) over an all-NA row returns -Inf; the case_when
## below keeps non-negative maxN and maps everything else (-Inf, NA) to NA.
recom_rateDB2019 <- recom_rateDB2019 %>%
mutate(
maxN = case_when(
maxN >= 0 ~ maxN,
TRUE ~ NA_real_
)
)
recom_rateDB2019 <- recom_rateDB2019 %>%
dplyr::select(Zone_ID ,
p_rec ,
maxN,
Total_N,
Colwell,
DGT,
PBI)
##make a paddock_ID clm (strip the trailing zone digit from Zone_ID)
recom_rateDB2019$length_zoneID <- nchar(recom_rateDB2019$Zone_ID)
recom_rateDB2019 <- recom_rateDB2019 %>%
mutate(Paddock_ID =
case_when(length_zoneID == 6 ~ substr(Zone_ID, start = 1, stop = 5),
length_zoneID == 7 ~ substr(Zone_ID, start = 1, stop = 6)))
recom_rateDB2019$Paddock_ID <- as.double(recom_rateDB2019$Paddock_ID)
str(recom_rateDB2019) #this has all the zones
## Fixed: the original called str(fert_2019_rain_GSP) here, but no object of
## that name is created anywhere in this script, so the call errored at run
## time and halted the script. The paddock details now live in t.test2019_details.
rm(details_2019, t.test_2019)
### join the t.test data to the recom rates
str(recom_rateDB2019)
str(t.test2019_details)
t.test_details_rec_rates <- left_join(t.test2019_details, recom_rateDB2019)
rm(recom_rateDB2019, t.test2019_details)
#############################################################################################################
### Redo the rec rates for N with my rainfall zone
## Rec_N_jax: recommended N = (rainfall-class N target - measured Total_N) / 0.5,
## with targets of 80 / 160 / 240 for low / medium / high rainfall.
## NOTE(review): rows where Total_N exceeds the target, or where Total_N is NA,
## fall through to 0 via the TRUE branch -- confirm 0 (rather than NA) is intended.
t.test_details_rec_rates <- t.test_details_rec_rates %>%
mutate(Rec_N_jax = case_when(
rainfall_class == "low" & Total_N <= 80 ~ ((80 -Total_N)/0.5),
rainfall_class == "medium" & Total_N <= 160 ~ ((160 -Total_N)/0.5),
rainfall_class == "high" & Total_N <= 240 ~ ((240 -Total_N)/0.5),
TRUE ~ 0 ))
str(t.test_details_rec_rates)
## The commented-out block below is a disabled critical-Colwell P-recommendation
## calculation, kept for reference.
# t.test_details_rec_rates <- t.test_details_rec_rates %>%
# mutate(critical_colwell = 4.6*( PBI^ (0.393)))
# ## is colwell greater than critical colwell?
# t.test_details_rec_rates <- t.test_details_rec_rates %>%
# mutate(colwell_thershold = case_when(
# Colwell > critical_colwell ~ "adequate",
# Colwell < critical_colwell ~ "p_required") )
## if p is required how much extra colwell p is needed to get to critical thershold?
# t.test_details_rec_rates <- t.test_details_rec_rates %>%
# mutate(to_reach_col_thershold = case_when(
# colwell_thershold == "p_required" ~ critical_colwell - Colwell))
#
# ## what is the recomm P rate?
# t.test_details_rec_rates <- t.test_details_rec_rates %>%
# mutate(p_rec_jax = case_when(
# colwell_thershold == "p_required" ~ ((0.0019*PBI+2.146)*to_reach_col_thershold),
# colwell_thershold == "adequate" ~ 5
# ))
# ## clean up extra clms
#
# names(t.test_details_rec_rates)
#
# t.test_details_rec_rates <- t.test_details_rec_rates %>%
# dplyr::select(-"length_zoneID",
# - critical_colwell,
# - colwell_thershold,
# - to_reach_col_thershold)
###################################################################################################################################
#write this out
write.csv(t.test_details_rec_rates, "W:/value_soil_testing_prj/Yield_data/2020/processing/processing_files/for_econmics/t.test_details_rec_rates_2019.csv")
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/find_sets.R
\name{find_sets_biggest}
\alias{find_sets_biggest}
\title{Find the biggest sets within a sample_selection object}
\usage{
find_sets_biggest(sample_selection_object)
}
\arguments{
\item{sample_selection_object}{A list of admissible samples, created by a create_set() function (create_set_spatial_buffer() etc.)}
}
\value{
A list of the biggest admissible sets.
}
\description{
Find the biggest sets within a sample_selection object
}
| /man/find_sets_biggest.Rd | no_license | jaspercooper/sampleselector | R | false | false | 532 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/find_sets.R
\name{find_sets_biggest}
\alias{find_sets_biggest}
\title{Find the biggest sets within a sample_selection object}
\usage{
find_sets_biggest(sample_selection_object)
}
\arguments{
\item{sample_selection_object}{A list of admissible samples, created by a create_set() function (create_set_spatial_buffer() etc.)}
}
\value{
A list of the biggest admissible sets.
}
\description{
Find the biggest sets within a sample_selection object
}
|
156770957ab2f5f43aca2c877f5af191 query33_query57_1344n.qdimacs 1157 2673 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Database/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query33_query57_1344n/query33_query57_1344n.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 72 | r | 156770957ab2f5f43aca2c877f5af191 query33_query57_1344n.qdimacs 1157 2673 |
\encoding{UTF-8}
\name{roc.test}
\alias{roc.test}
\alias{roc.test.default}
\alias{roc.test.roc}
\alias{roc.test.formula}
\alias{roc.test.auc}
\alias{roc.test.smooth.roc}
\title{
Compare the AUC of two ROC curves
}
\description{
This function compares the AUC or partial AUC of two correlated (or
paired) or uncorrelated (unpaired) ROC curves. Several syntaxes are
available: two object of class roc (which can be AUC or smoothed ROC),
or either three vectors (response, predictor1, predictor2) or a
response vector and a matrix or data.frame with two columns
(predictors).
}
\usage{
# roc.test(...)
\S3method{roc.test}{roc}(roc1, roc2, method=c("delong", "bootstrap",
"venkatraman", "sensitivity", "specificity"), sensitivity = NULL,
specificity = NULL, alternative = c("two.sided", "less", "greater"),
paired=NULL, reuse.auc=TRUE, boot.n=2000, boot.stratified=TRUE,
ties.method="first", progress=getOption("pROCProgress")$name,
parallel=FALSE, ...)
\S3method{roc.test}{auc}(roc1, roc2, ...)
\S3method{roc.test}{smooth.roc}(roc1, roc2, ...)
\S3method{roc.test}{formula}(formula, data, ...)
\S3method{roc.test}{default}(response, predictor1, predictor2=NULL,
na.rm=TRUE, method=NULL, ...)
}
\arguments{
\item{roc1, roc2}{the two ROC curves to compare. Either
\dQuote{\link{roc}}, \dQuote{\link{auc}} or
\dQuote{\link{smooth.roc}} objects (types can be mixed).
}
\item{response}{a vector or factor, as for the \link{roc} function.}
\item{predictor1}{a numeric or ordered vector as for the \link{roc} function, or
    a matrix or data.frame with the two predictors as columns.}
\item{predictor2}{only if predictor1 was a vector, the second
predictor as a numeric vector.
}
\item{formula}{a formula of the type response~predictor1+predictor2.}
\item{data}{a matrix or data.frame containing the variables in the
formula. See \code{\link{model.frame}} for more details.}
\item{na.rm}{if \code{TRUE}, the observations with \code{NA} values
will be removed.
}
\item{method}{the method to use, either \dQuote{delong},
\dQuote{bootstrap} or \dQuote{venkatraman}. The first letter is sufficient. If omitted, the
appropriate method is selected as explained in details.
}
\item{sensitivity, specificity}{if \code{method="sensitivity"} or
\code{method="specificity"}, the respective level where the test
must be assessed as a numeric of length 1. }
\item{alternative}{specifies the alternative hypothesis. Either of
\dQuote{two.sided}, \dQuote{less} or \dQuote{greater}. The first letter is
sufficient. Default: \dQuote{two.sided}. Only \dQuote{two.sided} is available
with \code{method="venkatraman"}.
}
\item{paired}{a logical indicating whether you want a paired roc.test.
If \code{NULL}, the paired status will be auto-detected by \code{\link{are.paired}}.
If \code{TRUE} but the paired status cannot be assessed by \code{\link{are.paired}}
will produce an error.
}
\item{reuse.auc}{if \code{TRUE} (default) and the \dQuote{roc} objects
contain an \dQuote{auc} field, re-use these specifications for the
test. See the \emph{AUC specification} section for more details.
}
\item{boot.n}{for \code{method="bootstrap"} and \code{method="venkatraman"} only: the number of
bootstrap replicates or permutations. Default: \var{2000}.
}
\item{boot.stratified}{for \code{method="bootstrap"} only:
should the bootstrap be stratified (same number
    of cases/controls in each replicate as in the original sample) or
not. Ignored with \code{method="venkatraman"}. Default: \var{TRUE}.
}
\item{ties.method}{for \code{method="venkatraman"} only: argument for
\code{\link{rank}} specifying how ties are handled. Defaults to
\dQuote{first} as described in the paper.
}
\item{progress}{the name of progress bar to display. Typically
\dQuote{none}, \dQuote{win}, \dQuote{tk} or \dQuote{text} (see the
\code{name} argument to \code{\link[plyr]{create_progress_bar}} for
more information), but a list as returned by \code{\link[plyr]{create_progress_bar}}
is also accepted. See also the \dQuote{Progress bars} section of
\link[=pROC-package]{this package's documentation}.
}
\item{parallel}{if TRUE, the bootstrap is processed in parallel, using
parallel backend provided by plyr (foreach).
}
\item{\dots}{further arguments passed to or from other methods,
especially arguments for \code{\link{roc}} and \code{roc.test.roc}
when calling \code{roc.test.default} or \code{roc.test.formula}.
Arguments for \code{\link{auc}},
and \code{\link{txtProgressBar}} (only \code{char} and \code{style})
if applicable.
}
}
\details{
This function compares two ROC curves. It is typically called with the two \link{roc} objects to
compare. \code{roc.test.default} is provided as a convenience
method and creates two \link{roc} objects before calling
\code{roc.test.roc}.
Three methods are available: \dQuote{delong}, \dQuote{bootstrap} and \dQuote{venkatraman} (see
\dQuote{Computational details} section below). \dQuote{delong} and
\dQuote{bootstrap} are tests over the AUC whereas \dQuote{venkatraman}
  compares the ROC curves themselves.
Default is to use \dQuote{delong} method except for comparison of partial AUC, smoothed
curves and curves with different \code{direction}, where \code{bootstrap}
is used. Using \dQuote{delong} for partial AUC and smoothed ROCs is not
supported in pROC (a warning is produced and \dQuote{bootstrap} is employed instead).
It is spurious to use \dQuote{delong} for \code{\link{roc}} with different
\code{direction} (a warning is issued but the spurious comparison is
enforced). \dQuote{venkatraman}'s test cannot be employed to compare smoothed
ROC curves. Additionally, partial AUC specifications are ignored (with
a warning), and comparison of ROC curves with different
\code{direction} should be used with care (a warning is produced as well).
If \code{alternative="two.sided"}, a two-sided test for difference in AUC is performed. If
\code{alternative="less"}, the alternative is that the AUC of roc1 is
smaller than the AUC of roc2. For \code{method="venkatraman"}, only
\dQuote{two.sided} test is available.
If the \code{paired} argument is not provided, the \code{\link{are.paired}} function is
employed to detect the paired status of the ROC curves. It will test if the original \code{response} is
identical between the two ROC curves (this is always the case if the call is made with
\code{roc.test.default}). This detection is unlikely to raise false positives, but
  this possibility cannot be excluded entirely. It would require equal sample sizes
and \code{response} values and order in both ROC curves. If it happens to you, use \code{paired=FALSE}.
If you know the ROC curves are paired you can pass \code{paired=TRUE}. However this is
useless as it will be tested anyway.
For \link[=smooth.roc]{smoothed ROC curves}, smoothing is performed again at each
bootstrap replicate with the parameters originally provided.
If a density smoothing was performed with user-provided
\code{density.cases} or \code{density.controls} the bootstrap cannot
be performed and an error is issued.
}
\section{AUC specification}{
The comparison of the AUC of the ROC curves needs a specification of the
AUC. The specification is defined by:
\enumerate{
\item the \dQuote{auc} field in the \dQuote{\link{roc}} objects if
\code{reuse.auc} is set to \code{TRUE} (default)
\item passing the specification to \code{\link{auc}} with \dots
(arguments \code{partial.auc}, \code{partial.auc.correct} and
\code{partial.auc.focus}). In this case, you must ensure either that
the \code{\link{roc}} object do not contain an \code{auc} field (if
you called \code{\link{roc}} with \code{auc=FALSE}), or set
\code{reuse.auc=FALSE}.
}
If \code{reuse.auc=FALSE} the \code{\link{auc}} function will always
be called with \code{\dots} to determine the specification, even if
the \dQuote{\link{roc}} objects do contain an \code{auc} field.
As well if the \dQuote{\link{roc}} objects do not contain an \code{auc}
field, the \code{\link{auc}} function will always be called with
\code{\dots} to determine the specification.
The AUC specification is ignored in the Venkatraman test.
Warning: if the roc object passed to roc.test contains an \code{auc}
field and \code{reuse.auc=TRUE}, \link{auc} is not called and
arguments such as \code{partial.auc} are silently ignored.
}
\section{Computation details}{
With \code{method="bootstrap"}, the processing is done as follow:
\enumerate{
\item \code{boot.n} bootstrap replicates are drawn from the
data. If \code{boot.stratified} is \var{TRUE}, each replicate contains
      exactly the same number of controls and cases as the original
sample, otherwise if \var{FALSE} the numbers can vary.
\item for each bootstrap replicate, the AUC of the two ROC curves
are computed and the difference is stored.
\item The following formula is used:
\deqn{D=\frac{AUC1-AUC2}{s}}{D=(AUC1-AUC2)/s}
where s is the standard deviation of
the bootstrap differences and AUC1 and AUC2 the AUC of the two
(original) ROC curves.
\item \var{D} is then compared to the normal distribution,
according to the value of \code{alternative}.
}
See also the Bootstrap section in
\link[=pROC-package]{this package's documentation}.
With \code{method="delong"}, the processing is done as described in
DeLong \emph{et al.} (1988) for paired ROC curves. Only comparison of
two ROC curves is implemented. The method has been extended for
unpaired ROC curves where the p-value is computed with an unpaired
t-test with unequal sample size and unequal variance.
With \code{method="venkatraman"}, the processing is done as described
in Venkatraman and Begg (1996) (for paired ROC curves) and Venkatraman
(2000) (for unpaired ROC curves) with \code{boot.n} permutation of
sample ranks (with ties breaking). For consistency reasons, the same argument \code{boot.n} as
in bootstrap defines the number of permutations to execute,
even though no bootstrap is performed.
For \code{method="specificity"}, the test assesses if the sensitivity of
the ROC curves are different at the level of specificity given by the
\code{specificity} argument, which must be a numeric of length 1. Bootstrap is employed as with \code{method="bootstrap"}
and \code{boot.n} and \code{boot.stratified} are available. This is
identical to the test proposed by Pepe \emph{et al.} (2009).
The \code{method="sensitivity"} is very similar, but assesses if the specificity of
the ROC curves are different at the level of sensitivity given by the
\code{sensitivity} argument.
}
\section{Warnings}{
If \dQuote{auc} specifications are different in both roc objects, the
warning \dQuote{Different AUC specifications in the ROC
curves. Enforcing the inconsistency, but unexpected results may be
produced.} is issued. Unexpected results may be produced.
If one or both ROC curves are \dQuote{smooth.roc} objects with
different smoothing specifications, the warning
\dQuote{Different smoothing parameters in the ROC curves. Enforcing
the inconsistency, but unexpected results may be produced.} is issued.
This warning can be benign, especially if ROC curves were generated
with \code{roc(\ldots, smooth=TRUE)} with different arguments to other
functions (such as plot), or if you really want to compare two ROC
curves smoothed differently.
If \code{method="delong"} and the AUC specification specifies a
partial AUC, the warning \dQuote{Using DeLong's test for partial AUC is
not supported. Using bootstrap test instead.} is issued. The
\code{method} argument is ignored and \dQuote{bootstrap} is used instead.
If \code{method="delong"} and the ROC curve is smoothed, the warning
\dQuote{Using DeLong's test for smoothed ROCs is not supported. Using
bootstrap test instead.} is issued. The \code{method} argument is
ignored and \dQuote{bootstrap} is used instead.
If \code{method="venkatraman"}, and the AUC specification specifies a
partial AUC, the AUC specification is ignored with the warning
\dQuote{Partial AUC is ignored in Venkatraman's test.}.
If \code{method="venkatraman"}, and \code{alternative} is
\dQuote{less} or \dQuote{greater}, the warning \dQuote{Only two-sided
tests are available for Venkatraman. Performing two-sided test instead.}
is produced and a two tailed test is performed.
Both DeLong and Venkatraman's test ignores the direction of the ROC curve so that if two
  ROC curves differ in the value of
\code{direction}, the warning \dQuote{(DeLong|Venkatraman)'s test should not be
applied to ROC curves with different directions.} is
printed. However, the spurious test is enforced.
If \code{boot.stratified=FALSE} and the sample has a large imbalance between
cases and controls, it could happen that one or more of the replicates
contains no case or control observation, or that there are not enough
points for smoothing, producing a \code{NA} area.
The warning \dQuote{NA value(s) produced during bootstrap were ignored.}
will be issued and the observation will be ignored. If you have a large
imbalance in your sample, it could be safer to keep
\code{boot.stratified=TRUE}.
When both ROC curves have an \code{\link{auc}} of 1 (or 100\%), their variances and covariance will always be null,
  and therefore the p-value will always be 1. This is true for the \dQuote{delong}, \dQuote{bootstrap} and
\dQuote{venkatraman} methods. This result is misleading, as the variances and covariance are of course not null.
A \code{\link{warning}} will be displayed to inform of this condition, and of the misleading output.
}
\section{Errors}{
An error will also occur if you give a \code{predictor2} when
\code{predictor1} is a \code{\link{matrix}} or a
\code{\link{data.frame}}, if \code{predictor1} has more than two
columns, or if you do not give a \code{predictor2} when
\code{predictor1} is a vector.
If \code{density.cases} and \code{density.controls} were provided
for smoothing, the error \dQuote{Cannot compute the statistic on ROC
curves smoothed with density.controls and density.cases.} is
issued.
If \code{method="venkatraman"} and one of the ROC curves is smoothed,
the error \dQuote{Using Venkatraman's test for smoothed ROCs is not
supported.} is produced.
With \code{method="specificity"}, the error \dQuote{Argument
'specificity' must be numeric of length 1 for a specificity test.}
is given unless the specificity argument is specified as a numeric of
length 1. The \dQuote{Argument 'sensitivity' must be numeric of length
1 for a sensitivity test.} message is given for
\code{method="sensitivity"} under similar conditions.
}
\value{
A list of class "htest" with following content:
\item{p.value}{the p-value of the test.}
\item{statistic}{the value of the Z (\code{method="delong"}) or D
(\code{method="bootstrap"}) statistics.
}
\item{alternative}{the alternative hypothesis.}
\item{method}{the character string \dQuote{DeLong's test for two
correlated ROC curves} (if \code{method="delong"}) or
\dQuote{Bootstrap test for two correlated ROC curves} (if
\code{method="bootstrap"}).
}
\item{null.value}{the expected value of the statistic under the null
hypothesis, that is 0.}
\item{estimate}{the AUC in the two ROC curves.}
\item{data.name}{the names of the data that was used.}
\item{parameter}{for \code{method="bootstrap"} only: the values of the
\code{boot.n} and \code{boot.stratified} arguments.
}
}
\section{Acknowledgements}{
We would like to thank E. S. Venkatraman and Colin B. Begg for their
support in the implementation of their test.
}
\references{
Elisabeth R. DeLong, David M. DeLong and Daniel L. Clarke-Pearson
(1988) ``Comparing the areas under two or more correlated receiver
operating characteristic curves: a nonparametric
approach''. \emph{Biometrics} \bold{44}, 837--845.
James A. Hanley and Barbara J. McNeil (1982) ``The meaning and use of
the area under a receiver operating characteristic (ROC)
curve''. \emph{Radiology} \bold{143}, 29--36.
Margaret Pepe, Gary Longton and Holly Janes (2009) ``Estimation and
Comparison of Receiver Operating Characteristic Curves''. \emph{The
Stata journal} \bold{9}, 1.
Xavier Robin, Natacha Turck, Jean-Charles Sanchez and Markus Müller
(2009) ``Combination of protein biomarkers''. \emph{useR! 2009}, Rennes.
\url{http://www.r-project.org/nosvn/conferences/useR-2009/abstracts/user_author.html}
Xavier Robin, Natacha Turck, Alexandre Hainard, \emph{et al.}
(2011) ``pROC: an open-source package for R and S+ to analyze and
compare ROC curves''. \emph{BMC Bioinformatics}, \bold{7}, 77.
DOI: \href{http://dx.doi.org/10.1186/1471-2105-12-77}{10.1186/1471-2105-12-77}.
E. S. Venkatraman and Colin B. Begg (1996) ``A distribution-free
procedure for comparing receiver operating characteristic curves from
a paired experiment''. \emph{Biometrika} \bold{83}, 835--848.
DOI: \href{http://dx.doi.org/10.1093/biomet/83.4.835}{10.1093/biomet/83.4.835}.
E. S. Venkatraman (2000) ``A Permutation Test to Compare Receiver
Operating Characteristic Curves''. \emph{Biometrics} \bold{56},
1134--1138. DOI: \href{http://dx.doi.org/10.1111/j.0006-341X.2000.01134.x}{10.1111/j.0006-341X.2000.01134.x}.
Hadley Wickham (2011) ``The Split-Apply-Combine Strategy for Data Analysis''. \emph{Journal of Statistical Software}, \bold{40}, 1--29.
URL: \href{http://www.jstatsoft.org/v40/i01}{www.jstatsoft.org/v40/i01}.
}
\seealso{
\code{\link{roc}}, \code{\link{power.roc.test}}
CRAN package \pkg{plyr}, employed in this function.
}
\examples{
data(aSAH)
# Basic example with 2 roc objects
roc1 <- roc(aSAH$outcome, aSAH$s100b)
roc2 <- roc(aSAH$outcome, aSAH$wfns)
roc.test(roc1, roc2)
\dontrun{
# The latter used Delong's test. To use bootstrap test:
roc.test(roc1, roc2, method="bootstrap")
# Increase boot.n for a more precise p-value:
roc.test(roc1, roc2, method="bootstrap", boot.n=10000)
}
# Alternative syntaxes
roc.test(aSAH$outcome, aSAH$s100b, aSAH$wfns)
roc.test(aSAH$outcome, data.frame(aSAH$s100b, aSAH$wfns))
# If we had a good a priori reason to think that wfns gives a
# better classification than s100b (in other words, AUC of roc1
# should be lower than AUC of roc2):
roc.test(roc1, roc2, alternative="less")
\dontrun{
# Comparison can be done on smoothed ROCs
# Smoothing is re-done at each iteration, and execution is slow
roc.test(smooth(roc1), smooth(roc2))
# or:
roc.test(aSAH$outcome, aSAH$s100b, aSAH$wfns, smooth=TRUE, boot.n=100)
}
# or from an AUC (no smoothing)
roc.test(auc(roc1), roc2)
\dontrun{
# Comparison of partial AUC:
roc3 <- roc(aSAH$outcome, aSAH$s100b, partial.auc=c(1, 0.8), partial.auc.focus="se")
roc4 <- roc(aSAH$outcome, aSAH$wfns, partial.auc=c(1, 0.8), partial.auc.focus="se")
roc.test(roc3, roc4)
# This is strictly equivalent to:
roc.test(roc3, roc4, method="bootstrap")
# Alternatively, we could re-use roc1 and roc2 to get the same result:
roc.test(roc1, roc2, reuse.auc=FALSE, partial.auc=c(1, 0.8), partial.auc.focus="se")
# Comparison on specificity and sensitivity
roc.test(roc1, roc2, method="specificity", specificity=0.9)
roc.test(roc1, roc2, method="sensitivity", sensitivity=0.9)
}
# Spurious use of DeLong's test with different direction:
roc5 <- roc(aSAH$outcome, aSAH$s100b, direction="<")
roc6 <- roc(aSAH$outcome, aSAH$s100b, direction=">")
roc.test(roc5, roc6, method="delong")
\dontrun{
# Comparisons of the ROC curves
roc.test(roc1, roc2, method="venkatraman")
}
# Unpaired tests
roc7 <- roc(aSAH$outcome, aSAH$s100b)
# artificially create an roc8 unpaired with roc7
roc8 <- roc(aSAH$outcome[1:100], aSAH$s100b[1:100])
\dontrun{
roc.test(roc7, roc8, paired=FALSE, method="delong")
roc.test(roc7, roc8, paired=FALSE, method="bootstrap")
roc.test(roc7, roc8, paired=FALSE, method="venkatraman")
roc.test(roc7, roc8, paired=FALSE, method="specificity", specificity=0.9)
}
}
\keyword{multivariate}
\keyword{nonparametric}
\keyword{utilities}
\keyword{htest}
\keyword{roc}
| /man/roc.test.Rd | no_license | dcd2/pROC | R | false | false | 20,413 | rd | \encoding{UTF-8}
\name{roc.test}
\alias{roc.test}
\alias{roc.test.default}
\alias{roc.test.roc}
\alias{roc.test.formula}
\alias{roc.test.auc}
\alias{roc.test.smooth.roc}
\title{
Compare the AUC of two ROC curves
}
\description{
This function compares the AUC or partial AUC of two correlated (or
paired) or uncorrelated (unpaired) ROC curves. Several syntaxes are
available: two object of class roc (which can be AUC or smoothed ROC),
or either three vectors (response, predictor1, predictor2) or a
response vector and a matrix or data.frame with two columns
(predictors).
}
\usage{
# roc.test(...)
\S3method{roc.test}{roc}(roc1, roc2, method=c("delong", "bootstrap",
"venkatraman", "sensitivity", "specificity"), sensitivity = NULL,
specificity = NULL, alternative = c("two.sided", "less", "greater"),
paired=NULL, reuse.auc=TRUE, boot.n=2000, boot.stratified=TRUE,
ties.method="first", progress=getOption("pROCProgress")$name,
parallel=FALSE, ...)
\S3method{roc.test}{auc}(roc1, roc2, ...)
\S3method{roc.test}{smooth.roc}(roc1, roc2, ...)
\S3method{roc.test}{formula}(formula, data, ...)
\S3method{roc.test}{default}(response, predictor1, predictor2=NULL,
na.rm=TRUE, method=NULL, ...)
}
\arguments{
\item{roc1, roc2}{the two ROC curves to compare. Either
\dQuote{\link{roc}}, \dQuote{\link{auc}} or
\dQuote{\link{smooth.roc}} objects (types can be mixed).
}
\item{response}{a vector or factor, as for the \link{roc} function.}
\item{predictor1}{a numeric or ordered vector as for the \link{roc} function, or
    a matrix or data.frame with the two predictors as columns.}
\item{predictor2}{only if predictor1 was a vector, the second
predictor as a numeric vector.
}
\item{formula}{a formula of the type response~predictor1+predictor2.}
\item{data}{a matrix or data.frame containing the variables in the
formula. See \code{\link{model.frame}} for more details.}
\item{na.rm}{if \code{TRUE}, the observations with \code{NA} values
will be removed.
}
\item{method}{the method to use, either \dQuote{delong},
\dQuote{bootstrap} or \dQuote{venkatraman}. The first letter is sufficient. If omitted, the
appropriate method is selected as explained in details.
}
\item{sensitivity, specificity}{if \code{method="sensitivity"} or
\code{method="specificity"}, the respective level where the test
must be assessed as a numeric of length 1. }
\item{alternative}{specifies the alternative hypothesis. Either of
\dQuote{two.sided}, \dQuote{less} or \dQuote{greater}. The first letter is
sufficient. Default: \dQuote{two.sided}. Only \dQuote{two.sided} is available
with \code{method="venkatraman"}.
}
\item{paired}{a logical indicating whether you want a paired roc.test.
If \code{NULL}, the paired status will be auto-detected by \code{\link{are.paired}}.
If \code{TRUE} but the paired status cannot be assessed by \code{\link{are.paired}}
will produce an error.
}
\item{reuse.auc}{if \code{TRUE} (default) and the \dQuote{roc} objects
contain an \dQuote{auc} field, re-use these specifications for the
test. See the \emph{AUC specification} section for more details.
}
\item{boot.n}{for \code{method="bootstrap"} and \code{method="venkatraman"} only: the number of
bootstrap replicates or permutations. Default: \var{2000}.
}
\item{boot.stratified}{for \code{method="bootstrap"} only:
should the bootstrap be stratified (same number
    of cases/controls in each replicate as in the original sample) or
not. Ignored with \code{method="venkatraman"}. Default: \var{TRUE}.
}
\item{ties.method}{for \code{method="venkatraman"} only: argument for
\code{\link{rank}} specifying how ties are handled. Defaults to
\dQuote{first} as described in the paper.
}
\item{progress}{the name of progress bar to display. Typically
\dQuote{none}, \dQuote{win}, \dQuote{tk} or \dQuote{text} (see the
\code{name} argument to \code{\link[plyr]{create_progress_bar}} for
more information), but a list as returned by \code{\link[plyr]{create_progress_bar}}
is also accepted. See also the \dQuote{Progress bars} section of
\link[=pROC-package]{this package's documentation}.
}
\item{parallel}{if TRUE, the bootstrap is processed in parallel, using
parallel backend provided by plyr (foreach).
}
\item{\dots}{further arguments passed to or from other methods,
especially arguments for \code{\link{roc}} and \code{roc.test.roc}
when calling \code{roc.test.default} or \code{roc.test.formula}.
Arguments for \code{\link{auc}},
and \code{\link{txtProgressBar}} (only \code{char} and \code{style})
if applicable.
}
}
\details{
This function compares two ROC curves. It is typically called with the two \link{roc} objects to
compare. \code{roc.test.default} is provided as a convenience
method and creates two \link{roc} objects before calling
\code{roc.test.roc}.
Three methods are available: \dQuote{delong}, \dQuote{bootstrap} and \dQuote{venkatraman} (see
\dQuote{Computational details} section below). \dQuote{delong} and
\dQuote{bootstrap} are tests over the AUC whereas \dQuote{venkatraman}
  compares the ROC curves themselves.
Default is to use \dQuote{delong} method except for comparison of partial AUC, smoothed
curves and curves with different \code{direction}, where \code{bootstrap}
is used. Using \dQuote{delong} for partial AUC and smoothed ROCs is not
supported in pROC (a warning is produced and \dQuote{bootstrap} is employed instead).
It is spurious to use \dQuote{delong} for \code{\link{roc}} with different
\code{direction} (a warning is issued but the spurious comparison is
enforced). \dQuote{venkatraman}'s test cannot be employed to compare smoothed
ROC curves. Additionally, partial AUC specifications are ignored (with
a warning), and comparison of ROC curves with different
\code{direction} should be used with care (a warning is produced as well).
If \code{alternative="two.sided"}, a two-sided test for difference in AUC is performed. If
\code{alternative="less"}, the alternative is that the AUC of roc1 is
smaller than the AUC of roc2. For \code{method="venkatraman"}, only
\dQuote{two.sided} test is available.
If the \code{paired} argument is not provided, the \code{\link{are.paired}} function is
employed to detect the paired status of the ROC curves. It will test if the original \code{response} is
identical between the two ROC curves (this is always the case if the call is made with
\code{roc.test.default}). This detection is unlikely to raise false positives, but
  this possibility cannot be excluded entirely. It would require equal sample sizes
and \code{response} values and order in both ROC curves. If it happens to you, use \code{paired=FALSE}.
If you know the ROC curves are paired you can pass \code{paired=TRUE}. However this is
useless as it will be tested anyway.
For \link[=smooth.roc]{smoothed ROC curves}, smoothing is performed again at each
bootstrap replicate with the parameters originally provided.
If a density smoothing was performed with user-provided
\code{density.cases} or \code{density.controls} the bootstrap cannot
be performed and an error is issued.
}
\section{AUC specification}{
The comparison of the AUC of the ROC curves needs a specification of the
AUC. The specification is defined by:
\enumerate{
\item the \dQuote{auc} field in the \dQuote{\link{roc}} objects if
\code{reuse.auc} is set to \code{TRUE} (default)
\item passing the specification to \code{\link{auc}} with \dots
(arguments \code{partial.auc}, \code{partial.auc.correct} and
\code{partial.auc.focus}). In this case, you must ensure either that
the \code{\link{roc}} object do not contain an \code{auc} field (if
you called \code{\link{roc}} with \code{auc=FALSE}), or set
\code{reuse.auc=FALSE}.
}
If \code{reuse.auc=FALSE} the \code{\link{auc}} function will always
be called with \code{\dots} to determine the specification, even if
the \dQuote{\link{roc}} objects do contain an \code{auc} field.
As well if the \dQuote{\link{roc}} objects do not contain an \code{auc}
field, the \code{\link{auc}} function will always be called with
\code{\dots} to determine the specification.
The AUC specification is ignored in the Venkatraman test.
Warning: if the roc object passed to roc.test contains an \code{auc}
field and \code{reuse.auc=TRUE}, \link{auc} is not called and
arguments such as \code{partial.auc} are silently ignored.
}
\section{Computation details}{
With \code{method="bootstrap"}, the processing is done as follow:
\enumerate{
\item \code{boot.n} bootstrap replicates are drawn from the
data. If \code{boot.stratified} is \var{TRUE}, each replicate contains
      exactly the same number of controls and cases as the original
sample, otherwise if \var{FALSE} the numbers can vary.
\item for each bootstrap replicate, the AUC of the two ROC curves
are computed and the difference is stored.
\item The following formula is used:
\deqn{D=\frac{AUC1-AUC2}{s}}{D=(AUC1-AUC2)/s}
where s is the standard deviation of
the bootstrap differences and AUC1 and AUC2 the AUC of the two
(original) ROC curves.
\item \var{D} is then compared to the normal distribution,
according to the value of \code{alternative}.
}
See also the Bootstrap section in
\link[=pROC-package]{this package's documentation}.
With \code{method="delong"}, the processing is done as described in
DeLong \emph{et al.} (1988) for paired ROC curves. Only comparison of
two ROC curves is implemented. The method has been extended for
unpaired ROC curves where the p-value is computed with an unpaired
t-test with unequal sample size and unequal variance.
With \code{method="venkatraman"}, the processing is done as described
in Venkatraman and Begg (1996) (for paired ROC curves) and Venkatraman
(2000) (for unpaired ROC curves) with \code{boot.n} permutation of
sample ranks (with ties breaking). For consistency reasons, the same argument \code{boot.n} as
in bootstrap defines the number of permutations to execute,
even though no bootstrap is performed.
For \code{method="specificity"}, the test assesses if the sensitivity of
the ROC curves are different at the level of specificity given by the
\code{specificity} argument, which must be a numeric of length 1. Bootstrap is employed as with \code{method="bootstrap"}
and \code{boot.n} and \code{boot.stratified} are available. This is
identical to the test proposed by Pepe \emph{et al.} (2009).
The \code{method="sensitivity"} is very similar, but assesses if the specificity of
the ROC curves are different at the level of sensitivity given by the
\code{sensitivity} argument.
}
\section{Warnings}{
If \dQuote{auc} specifications are different in both roc objects, the
warning \dQuote{Different AUC specifications in the ROC
curves. Enforcing the inconsistency, but unexpected results may be
produced.} is issued. Unexpected results may be produced.
If one or both ROC curves are \dQuote{smooth.roc} objects with
different smoothing specifications, the warning
\dQuote{Different smoothing parameters in the ROC curves. Enforcing
the inconsistency, but unexpected results may be produced.} is issued.
This warning can be benign, especially if ROC curves were generated
with \code{roc(\ldots, smooth=TRUE)} with different arguments to other
functions (such as plot), or if you really want to compare two ROC
curves smoothed differently.
If \code{method="delong"} and the AUC specification specifies a
partial AUC, the warning \dQuote{Using DeLong's test for partial AUC is
not supported. Using bootstrap test instead.} is issued. The
\code{method} argument is ignored and \dQuote{bootstrap} is used instead.
If \code{method="delong"} and the ROC curve is smoothed, the warning
\dQuote{Using DeLong's test for smoothed ROCs is not supported. Using
bootstrap test instead.} is issued. The \code{method} argument is
ignored and \dQuote{bootstrap} is used instead.
If \code{method="venkatraman"}, and the AUC specification specifies a
partial AUC, the AUC specification is ignored with the warning
\dQuote{Partial AUC is ignored in Venkatraman's test.}.
If \code{method="venkatraman"}, and \code{alternative} is
\dQuote{less} or \dQuote{greater}, the warning \dQuote{Only two-sided
tests are available for Venkatraman. Performing two-sided test instead.}
is produced and a two tailed test is performed.
Both DeLong's and Venkatraman's tests ignore the direction of the ROC curve, so that if two
ROC curves differ in the value of
\code{direction}, the warning \dQuote{(DeLong|Venkatraman)'s test should not be
applied to ROC curves with different directions.} is
printed. However, the spurious test is enforced.
If \code{boot.stratified=FALSE} and the sample has a large imbalance between
cases and controls, it could happen that one or more of the replicates
contains no case or control observation, or that there are not enough
points for smoothing, producing a \code{NA} area.
The warning \dQuote{NA value(s) produced during bootstrap were ignored.}
will be issued and the observation will be ignored. If you have a large
imbalance in your sample, it could be safer to keep
\code{boot.stratified=TRUE}.
When both ROC curves have an \code{\link{auc}} of 1 (or 100\%), their variances and covariance will always be null,
and therefore the p-value will always be 1. This is true for both \dQuote{delong}, \dQuote{bootstrap} and
\dQuote{venkatraman} methods. This result is misleading, as the variances and covariance are of course not null.
A \code{\link{warning}} will be displayed to inform of this condition, and of the misleading output.
}
\section{Errors}{
An error will also occur if you give a \code{predictor2} when
\code{predictor1} is a \code{\link{matrix}} or a
\code{\link{data.frame}}, if \code{predictor1} has more than two
columns, or if you do not give a \code{predictor2} when
\code{predictor1} is a vector.
If \code{density.cases} and \code{density.controls} were provided
for smoothing, the error \dQuote{Cannot compute the statistic on ROC
curves smoothed with density.controls and density.cases.} is
issued.
If \code{method="venkatraman"} and one of the ROC curves is smoothed,
the error \dQuote{Using Venkatraman's test for smoothed ROCs is not
supported.} is produced.
With \code{method="specificity"}, the error \dQuote{Argument
'specificity' must be numeric of length 1 for a specificity test.}
is given unless the specificity argument is specified as a numeric of
length 1. The \dQuote{Argument 'sensitivity' must be numeric of length
1 for a sensitivity test.} message is given for
\code{method="sensitivity"} under similar conditions.
}
\value{
A list of class "htest" with following content:
\item{p.value}{the p-value of the test.}
\item{statistic}{the value of the Z (\code{method="delong"}) or D
(\code{method="bootstrap"}) statistics.
}
\item{alternative}{the alternative hypothesis.}
\item{method}{the character string \dQuote{DeLong's test for two
correlated ROC curves} (if \code{method="delong"}) or
\dQuote{Bootstrap test for two correlated ROC curves} (if
\code{method="bootstrap"}).
}
\item{null.value}{the expected value of the statistic under the null
hypothesis, that is 0.}
\item{estimate}{the AUC in the two ROC curves.}
\item{data.name}{the names of the data that was used.}
\item{parameter}{for \code{method="bootstrap"} only: the values of the
\code{boot.n} and \code{boot.stratified} arguments.
}
}
\section{Acknowledgements}{
We would like to thank E. S. Venkatraman and Colin B. Begg for their
support in the implementation of their test.
}
\references{
Elisabeth R. DeLong, David M. DeLong and Daniel L. Clarke-Pearson
(1988) ``Comparing the areas under two or more correlated receiver
operating characteristic curves: a nonparametric
approach''. \emph{Biometrics} \bold{44}, 837--845.
James A. Hanley and Barbara J. McNeil (1982) ``The meaning and use of
the area under a receiver operating characteristic (ROC)
curve''. \emph{Radiology} \bold{143}, 29--36.
Margaret Pepe, Gary Longton and Holly Janes (2009) ``Estimation and
Comparison of Receiver Operating Characteristic Curves''. \emph{The
Stata journal} \bold{9}, 1.
Xavier Robin, Natacha Turck, Jean-Charles Sanchez and Markus Müller
(2009) ``Combination of protein biomarkers''. \emph{useR! 2009}, Rennes.
\url{http://www.r-project.org/nosvn/conferences/useR-2009/abstracts/user_author.html}
Xavier Robin, Natacha Turck, Alexandre Hainard, \emph{et al.}
(2011) ``pROC: an open-source package for R and S+ to analyze and
compare ROC curves''. \emph{BMC Bioinformatics}, \bold{12}, 77.
DOI: \href{http://dx.doi.org/10.1186/1471-2105-12-77}{10.1186/1471-2105-12-77}.
E. S. Venkatraman and Colin B. Begg (1996) ``A distribution-free
procedure for comparing receiver operating characteristic curves from
a paired experiment''. \emph{Biometrika} \bold{83}, 835--848.
DOI: \href{http://dx.doi.org/10.1093/biomet/83.4.835}{10.1093/biomet/83.4.835}.
E. S. Venkatraman (2000) ``A Permutation Test to Compare Receiver
Operating Characteristic Curves''. \emph{Biometrics} \bold{56},
1134--1138. DOI: \href{http://dx.doi.org/10.1111/j.0006-341X.2000.01134.x}{10.1111/j.0006-341X.2000.01134.x}.
Hadley Wickham (2011) ``The Split-Apply-Combine Strategy for Data Analysis''. \emph{Journal of Statistical Software}, \bold{40}, 1--29.
URL: \href{http://www.jstatsoft.org/v40/i01}{www.jstatsoft.org/v40/i01}.
}
\seealso{
\code{\link{roc}}, \code{\link{power.roc.test}}
CRAN package \pkg{plyr}, employed in this function.
}
\examples{
data(aSAH)
# Basic example with 2 roc objects
roc1 <- roc(aSAH$outcome, aSAH$s100b)
roc2 <- roc(aSAH$outcome, aSAH$wfns)
roc.test(roc1, roc2)
\dontrun{
# The latter used Delong's test. To use bootstrap test:
roc.test(roc1, roc2, method="bootstrap")
# Increase boot.n for a more precise p-value:
roc.test(roc1, roc2, method="bootstrap", boot.n=10000)
}
# Alternative syntaxes
roc.test(aSAH$outcome, aSAH$s100b, aSAH$wfns)
roc.test(aSAH$outcome, data.frame(aSAH$s100b, aSAH$wfns))
# If we had a good a priori reason to think that wfns gives a
# better classification than s100b (in other words, AUC of roc1
# should be lower than AUC of roc2):
roc.test(roc1, roc2, alternative="less")
\dontrun{
# Comparison can be done on smoothed ROCs
# Smoothing is re-done at each iteration, and execution is slow
roc.test(smooth(roc1), smooth(roc2))
# or:
roc.test(aSAH$outcome, aSAH$s100b, aSAH$wfns, smooth=TRUE, boot.n=100)
}
# or from an AUC (no smoothing)
roc.test(auc(roc1), roc2)
\dontrun{
# Comparison of partial AUC:
roc3 <- roc(aSAH$outcome, aSAH$s100b, partial.auc=c(1, 0.8), partial.auc.focus="se")
roc4 <- roc(aSAH$outcome, aSAH$wfns, partial.auc=c(1, 0.8), partial.auc.focus="se")
roc.test(roc3, roc4)
# This is strictly equivalent to:
roc.test(roc3, roc4, method="bootstrap")
# Alternatively, we could re-use roc1 and roc2 to get the same result:
roc.test(roc1, roc2, reuse.auc=FALSE, partial.auc=c(1, 0.8), partial.auc.focus="se")
# Comparison on specificity and sensitivity
roc.test(roc1, roc2, method="specificity", specificity=0.9)
roc.test(roc1, roc2, method="sensitivity", sensitivity=0.9)
}
# Spurious use of DeLong's test with different direction:
roc5 <- roc(aSAH$outcome, aSAH$s100b, direction="<")
roc6 <- roc(aSAH$outcome, aSAH$s100b, direction=">")
roc.test(roc5, roc6, method="delong")
\dontrun{
# Comparisons of the ROC curves
roc.test(roc1, roc2, method="venkatraman")
}
# Unpaired tests
roc7 <- roc(aSAH$outcome, aSAH$s100b)
# artificially create an roc8 unpaired with roc7
roc8 <- roc(aSAH$outcome[1:100], aSAH$s100b[1:100])
\dontrun{
roc.test(roc7, roc8, paired=FALSE, method="delong")
roc.test(roc7, roc8, paired=FALSE, method="bootstrap")
roc.test(roc7, roc8, paired=FALSE, method="venkatraman")
roc.test(roc7, roc8, paired=FALSE, method="specificity", specificity=0.9)
}
}
\keyword{multivariate}
\keyword{nonparametric}
\keyword{utilities}
\keyword{htest}
\keyword{roc}
|
########################
# Clean AIS/VMS (tacsat) position data for the FishPi2 WP5 small-scale
# fisheries Danish case study: load raw pings, remove bad records, flag
# harbour/land points, and produce exploratory summaries and plots.
library(vmstools)
library(plyr)
library (lubridate)
library(rgdal)
library(raster)
library(rgeos)
library(ggplot2)
library("gridExtra")
library(scales)
library(rlang)
# Year of data to process
year <- 2017
##### -- set directories and load data -- #####
################################################
# NOTE(review): hard-coded Windows network-drive paths; adjust when running elsewhere.
sysPa <-"Q:\\dfad\\users\\joeg\\home\\VMS\\180924_FishPi_WP5_SSF\\R_workflow\\"
dataPath <- paste(sysPa,"Rdata\\",sep="")
inputpath<- "Q:\\dfad\\data\\Data\\eflalo\\"
res.path<- "Q:\\dfad\\users\\joeg\\home\\VMS\\180924_FishPi_WP5_SSF\\R_workflow\\Results"
map.path<- "Q:\\dfad\\users\\joeg\\home\\VMS\\180924_FishPi_WP5_SSF\\R_workflow\\basemaps"
# Read the raw combined AIS/VMS position file for the chosen year.
# NOTE(review): fread()/data.table() are used but data.table is never attached
# above -- presumably it comes in as a dependency of vmstools; confirm.
tacsat <- fread(paste(inputpath,"tacsat2_ais_u_vms_total_", year,".csv",sep=''))
tacsat <- data.frame(tacsat)
tacsat <- formatTacsat(tacsat) # format each of the columns to the specified class
tacsat <- data.table(tacsat)
######### -- subset -- ###########
############################################
# subset eflalo to take only the vessels we have AIS data for
tacsat_0 <- tacsat
# NOTE(review): `eflalo` is never loaded in this script -- presumably it is
# expected in the workspace from a previous step; confirm.
eflalo_0 <- subset(eflalo, tolower(VE_REF) %in% tolower(unique(tacsat_0$VE_REF)))
######### -- explore data -- ###########
############################################
# Quick interactive sanity checks on both data sets.
# NOTE(review): VE_REF1 is used here while VE_REF was used for matching above
# -- confirm which column carries the canonical vessel id.
sort(unique(tacsat_0$VE_REF1))
sort(unique(eflalo_0$VE_REF1))
str(tacsat_0)
str(eflalo_0)
head(tacsat_0)
head(eflalo_0)
summary(tacsat_0)
summary(eflalo_0)
############## - cleaning spatial data - ################
##########################################################
# 1. Wrong VMS positions
#####################################################
# VMS positions that are not actually on planet Earth
idx <- which(abs(tacsat_0$SI_LATI) > 90 | abs(tacsat_0$SI_LONG) > 180) #points not on the globe
idx <- unique(c(idx,which(tacsat_0$SI_HE < 0 | tacsat_0$SI_HE > 360))) #adding points with heading outside compass range
# NOTE(review): these impossible positions are only counted, never removed
# from tacsat_0 -- confirm whether that is intentional.
length(idx)
# 2. Duplicates
#####################################################
tacsat_0 <- sortTacsat(tacsat_0)
tacsat_0$YEAR <- year(tacsat_0$SI_DATIM)
# we define duplicates as the combination of vessel name + lat + long + DATIM
uniqueTacsat <- paste(tacsat_0$VE_REF,tacsat_0$SI_LATI,tacsat_0$SI_LONG,tacsat_0$SI_DATIM)
print(nrow(tacsat_0)) # n rows total
print(nrow(tacsat_0[duplicated(uniqueTacsat),])) # n rows duplicated
head(tacsat_0[duplicated(uniqueTacsat),]) # duplicated rows, one trip selling in two ports
# get rid of the duplicates
tacsat_0 <- tacsat_0[!duplicated(uniqueTacsat),]
print(nrow(tacsat_0))
#
#3. Points on land
#####################################################
setwd(map.path)
## Read high-resolution base map (EEA European coastline, WGS84).
country.poly <- readOGR('Europe_costline_poly_wgs84_EEA.shp')
extent(country.poly)
## Create the clipping polygon for our fleet (lon 6-17 E, lat 53-59 N)
CP <- as(extent(6,17,53,59), "SpatialPolygons")
proj4string(CP) <- CRS(proj4string(country.poly))
summary(CP)
## Clip the map
coastline_pol <- gIntersection(country.poly, CP, byid=TRUE)
## Convert to Spatial Polygon
country.poly <- SpatialPolygons(coastline_pol@polygons,proj4string=coastline_pol@proj4string)
summary(country.poly)
#Identify points on land (idx: 1 = on land, 0 = at sea)
idx <- pointOnLand(tacsat_0,country.poly,proj4string=coastline_pol@proj4string)
table (idx)
tacsat_land <- tacsat_0[which(idx==1),] #points on land
tacsat_sea <- tacsat_0[which(idx==0),] #points at sea (not on land)
#4. points in a harbour
#####################################################
data(euharbours)
#points in harbour
idx <- pointInHarbour(lon=tacsat_sea$SI_LONG,lat=tacsat_sea$SI_LATI,harbours=harbours,saveHarbourList=F)
table(idx)
tacsat_har <- tacsat_sea[which(idx==1),] #points in harbour
# SI_HARB flag: "0" = ping inside a harbour, "1" = ping at sea.
# NOTE(review): flag is inverted relative to its name -- confirm downstream use.
tacsat_sea$SI_HARB[which(idx==1)] <- "0"
tacsat_sea$SI_HARB[which(idx==0)] <- "1"
tacsat <- tacsat_sea
save(tacsat,file=file.path(dataPath, paste0('tacsat_SI_HARB_', year, '.RData')))
#3. AIS records associated with very high speeds
#####################################################
# Speeds above 20 knots are treated as sensor errors and removed.
windows()  # Windows-only graphics device; inspect speed distribution first
hist(tacsat$SI_SP,breaks=40)
spThres <- 20
idx <- which(tacsat$SI_SP > spThres)
# BUG FIX: `tacsat[-idx,]` with an empty idx (i.e. no speeding records)
# evaluates to `tacsat[integer(0),]` and silently drops EVERY row.
# Only subset when at least one record exceeded the threshold.
if (length(idx) > 0) {
  tacsat <- tacsat[-idx,]
}
#4. Anomalous intervals
#####################################################
# define trip (one-day trips: trip id = vessel + date of the following day)
tacsat$FT_REF <- paste(tacsat$VE_REF1, format(as.Date(tacsat$SI_DATE, format = "%d/%m/%Y")+1, format="%d/%m/%Y"), sep="_")
#sort the data by vessel and time
tacsat <- sortTacsat(tacsat)
#Already have the intervals in seconds, but need to get them in minutes
tacsat$INTV <- tacsat$INTV/60
## Replace missing and negative intervals with the nominal 5-minute ping rate
tacsat$INTV[is.na(tacsat$INTV)]<-5
tacsat$INTV[which(tacsat$INTV<0)] <- 5
##Identify intervals between one day and the following and set them to 5
tacsat$INTV_0 <- tacsat$INTV
# NOTE(review): diff() runs across vessel boundaries too, so the first ping of
# one vessel is compared with the last ping of the previous vessel -- confirm.
daydif <- diff(as.Date(tacsat$SI_DATE))
daydif <- c(0, daydif)
test <- tacsat$INTV[daydif>1]
tacsat$INTV[daydif>1]<- 5
# INTV between 10 and 30 minutes are reset to the nominal 5-minute interval
intlimit <- 30
test <- tacsat[tacsat$INTV>intlimit]
length(tacsat$INTV[tacsat$INTV>10 & tacsat$INTV<intlimit])
length(tacsat$INTV[tacsat$INTV>10 & tacsat$INTV<intlimit])/length(tacsat$INTV) #1.8% #0%
tacsat$INTV[tacsat$INTV>10 & tacsat$INTV<intlimit] <- 5
# INTV higher than 30 are identified as holes and are set to 5 so that they do not interfere with the catch allocation
length(tacsat$INTV[tacsat$INTV>intlimit])
length(tacsat$INTV[tacsat$INTV>intlimit])/length(tacsat$INTV)
tacsat$hole <- "no"
tacsat$hole[tacsat$INTV>intlimit] <- "yes"
tacsat$INTV[tacsat$INTV>intlimit] <- 5
# Keep only pings at sea (SI_HARB == "1"); keep the full set in tacsat_tot
tacsat_tot <- tacsat
tacsat <- tacsat[tacsat$SI_HARB==1,]
# Calculate the number of pings per trip
nping_trip<- table(tacsat$FT_REF)
tacsat$Nping_trip <- nping_trip[match(tacsat$FT_REF, names(nping_trip))]
#5. Identify preliminary fishing activity
#####################################################
# Pings with speed in [0.1, 4) knots are provisionally flagged as fishing ("1")
speedmin<- 0.1
speedmax<- 4
tacsat$SI_STATE <- "0"
tacsat$SI_STATE[tacsat$SI_SP>=speedmin & tacsat$SI_SP<speedmax] <- "1"
############## - Save data - ################
############################################
save(tacsat,file=file.path(dataPath, paste0('tacsatClean_', year, '.RData')))
setwd(dataPath)
# Bundle everything needed by the plotting sections into a single RData file
save(tacsat, tacsat_land, tacsat_har, harbours, coastline_pol, file="Cleantacsat.RData")
load("Cleantacsat.RData")
############## - Summary tables - ################
##################################################
setwd(res.path)
# Per-trip summary: ping counts, fishing pings, holes, and times (in hours)
sumtab<- ddply(tacsat, .(VE_REF1,FT_REF), summarise,
Nping=length(VE_REF1),
NpingFishing=length(SI_STATE[SI_STATE=="1"]),
Nhole=length(hole[hole=="yes"]),
Timetrack=round(sum(INTV/60)),
Timefishing=round(sum((INTV[SI_STATE=="1"])/60)),
Timehole=round(sum(INTV_0[hole=="yes"]/60)))
write.table(sumtab, "Tacsat trip summary.csv", row.names = FALSE, sep=",", dec=".")
# Per-vessel summary: trip counts, and trips with >5 pings / fishing activity
sumtab_vessel<- ddply(tacsat, .(VE_REF1), summarise,
Ntrips=length(unique(FT_REF)),
Ntrips_more5ping=length(unique(FT_REF[Nping_trip>5])),
Ntrips_fishing=length(unique(FT_REF[SI_STATE=="1"])),
Ntrips_fishing_more5ping=length(unique(FT_REF[Nping_trip>5 & SI_STATE=="1"])))
write.table(sumtab_vessel, "Tacsat vessel summary.csv", row.names = FALSE, sep=",", dec=".")
############## - Exploratory Plots - ################
#########################################
#plot harbour positions over the clipped coastline
#######
png(filename="0_Port_Position.png", width=900, height =500)
# NOTE(review): crswgs84 is defined but never used below -- confirm.
crswgs84=CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")
plot(coastline_pol, lwd=1, xlim=c(6,17), ylim=c(53, 59))
points(x=as.numeric(harbours$lon),
y=as.numeric(harbours$lat), col="blue", pch=19)
title("Port Position")
dev.off()
#plot tracks for every vessel: at-sea (black), fishing (green),
#on-land (brown), in-harbour (blue) pings plus harbour positions (red)
####################################
setwd(res.path)
for (vessel in unique(tacsat$VE_REF1)){
tacsat_sea_temp <- subset(tacsat, VE_REF1==vessel )
tacsat_land_temp <- subset(tacsat_land, VE_REF1==vessel )
tacsat_har_temp <- subset(tacsat_har, VE_REF1==vessel )
tacsat_tot_temp <- subset(tacsat_0, VE_REF1==vessel)
# Plot extent covers all of this vessel's raw positions
ves_ylim <- c(min(tacsat_tot_temp$SI_LATI)-0.01, max(tacsat_tot_temp$SI_LATI))
ves_xlim <- c(min(tacsat_tot_temp$SI_LONG), max(tacsat_tot_temp$SI_LONG))
cex.val=0.5
#windows()
map1 <- ggplot() +
ggspatial::geom_spatial(data = coastline_pol, fill = "lightgrey", colour = "black", cex=cex.val) +
geom_point(data = tacsat_sea_temp, mapping = aes(x = SI_LONG, y = SI_LATI), col="black", shape=21, cex=cex.val) +
# NOTE(review): SI_STATE is coded "0"/"1" above, so the "f" filter selects
# nothing -- confirm whether "1" was intended here.
geom_point(data = tacsat_sea_temp[tacsat_sea_temp$SI_STATE=="f",], mapping = aes(x = SI_LONG, y = SI_LATI), shape=21, col="green", cex=cex.val) +
geom_point(data = tacsat_land_temp, mapping = aes(x = SI_LONG, y = SI_LATI), col="brown", shape=21, cex=cex.val) +
geom_point(data = tacsat_har_temp, mapping = aes(x = SI_LONG, y = SI_LATI), col="blue", shape=21, cex=cex.val) +
geom_point(data = harbours, mapping = aes(x = lon, y = lat), shape=16, col="red", cex=cex.val+2) +
#theme_void() +
coord_map(xlim = ves_xlim,ylim = ves_ylim)+
ggtitle(vessel)+
theme(plot.title = element_text(hjust = 0.5))
ggsave(map1, filename = paste(vessel, ".png"), width = 10, height = 10)
}
# plot tracks near ports (0.05-degree window around each harbour)
####################################
# subset harbours to those inside the study area
har <- subset(harbours, lon>(6) & lon<(17) & lat>53 &lat<59 )
# plot
for (port in unique(har$harbour)){
har_temp <- subset(har, harbour==port )
xlim <- har_temp$lon+ c(-0.05,0.05)
ylim <- har_temp$lat+ c(-0.05,0.05)
cex.val=0.5
## subset the tracks to the window around this port
tacsat_sea_port <- subset(tacsat, SI_LONG>xlim[1] & SI_LONG<xlim[2] & SI_LATI>ylim[1] &SI_LATI<ylim[2] )
map2 <- ggplot() +
ggspatial::geom_spatial(data = coastline_pol, fill = "lightgrey", colour = "black") +
geom_point(data = tacsat_sea_port, mapping = aes(x = SI_LONG, y = SI_LATI), shape=21,col="black", cex=cex.val) +
geom_point(data = tacsat_sea_port[tacsat_sea_port$SI_STATE=="f",], mapping = aes(x = SI_LONG, y = SI_LATI), shape=21, col="green", cex=cex.val) +
geom_point(data = tacsat_har, mapping = aes(x = SI_LONG, y = SI_LATI), shape=21, col="blue", cex=cex.val) +
geom_point(data = har, mapping = aes(x = lon, y = lat), shape=16, col="red", cex=cex.val+2) +
# Highlight the current port with a larger red marker
geom_point(data = har_temp, mapping = aes(x = lon, y = lat), shape=16, col="red", cex=cex.val+4) +
#theme_void() +
coord_map(xlim = har_temp$lon+ c(-0.05,0.05),ylim = har_temp$lat+ c(-0.05,0.05)) +
ggtitle(port)+
theme(plot.title = element_text(hjust = 0.5))
ggsave(map2, filename = paste("port_", port, ".png", sep=""), width = 10, height = 10)
}
#plot all trips per vessel: side-by-side map and speed-vs-time panel per trip
####################################
sort(unique(tacsat$VE_REF1))
for (vessel in unique(tacsat$VE_REF1)){
#vessel <- "abelan uno"
temp <- subset(tacsat, VE_REF1==vessel)
cex.val=1
for (trip in unique(temp$FT_REF )){
temp_trip <- subset(temp, FT_REF==trip )
ves_ylim <- c(min(temp_trip$SI_LATI)-0.01, max(temp_trip$SI_LATI)+0.01)
ves_xlim <- c(min(temp_trip$SI_LONG)-0.01, max(temp_trip$SI_LONG)+0.01)
map3 <- ggplot() +
ggspatial::geom_spatial(data = coastline_pol, fill = "lightgrey", colour = "black", cex=cex.val) +
geom_point(data = temp_trip, mapping = aes(x = SI_LONG, y = SI_LATI), col="black", shape=16, cex=cex.val) +
geom_point(data = temp_trip[temp_trip$SI_STATE=="f",], mapping = aes(x = SI_LONG, y = SI_LATI), shape=16, col="green", cex=cex.val) +
geom_point(data = harbours, mapping = aes(x = lon, y = lat), shape=16, col="red", cex=cex.val+2) +
#theme_void() +
coord_map(xlim = ves_xlim,ylim = ves_ylim)+
ggtitle(trip)+
theme(plot.title = element_text(hjust = 0.5))
# Speed profile for the same trip, fishing pings highlighted in green
speed1<- ggplot(data = temp_trip, mapping = aes(x = SI_DATIM, y = SI_SP), col="black") +
geom_line()+
geom_point()+
geom_point(data = temp_trip[temp_trip$SI_STATE=="f",], mapping = aes(x = SI_DATIM, y = SI_SP), col="green")+
ylim(0, max(temp_trip$SI_SP))+
ggtitle(trip)+
theme(axis.text.x = element_text(angle = 90, hjust = 1))+
theme(plot.title = element_text(hjust = 0.5))
p1<- grid.arrange(map3, speed1, ncol=2)
# "/" in the trip id (a date) is not a valid filename character
ggsave(p1, filename = paste(gsub("/","_", trip), ".png", sep=""), width = 20, height = 10)
}
}
#plot speed profile per vessel (speed vs. time, one file per vessel)
####################################
sort(unique(tacsat$VE_REF1))
for (vessel in unique(tacsat$VE_REF1)){
# BUG FIX: the original subsetted `temp`, a stale single-vessel object left
# over from the previous plotting loop, so every iteration plotted data from
# that one vessel (or failed). Subset the full tacsat data instead.
temp_vessel <- subset(tacsat, VE_REF1==vessel )
# NOTE(review): SI_STATE is coded "0"/"1" above, so the "f" filter selects
# nothing -- confirm whether "1" was intended here.
speed1<- ggplot(data = temp_vessel, mapping = aes(x = SI_DATIM, y = SI_SP), col="black") +
geom_line()+
geom_point()+
geom_point(data = temp_vessel[temp_vessel$SI_STATE=="f",], mapping = aes(x = SI_DATIM, y = SI_SP), col="green")+
ylim(0, max(temp_vessel$SI_SP))+
ggtitle(vessel)+
theme(axis.text.x = element_text(angle = 90, hjust = 1))+
theme(plot.title = element_text(hjust = 0.5))
ggsave(speed1, filename = paste("speed_", vessel, ".png", sep=""), width = 20, height = 10)
}
#####
| /WP5/scriptsDanishCaseStudy/2_Clean tacsat_JE.R | no_license | ices-tools-dev/FishPi2 | R | false | false | 13,337 | r | ########################
library(vmstools)
library(plyr)
library (lubridate)
library(rgdal)
library(raster)
library(rgeos)
library(ggplot2)
library("gridExtra")
library(scales)
library(rlang)
year <- 2017
##### -- set directories and load data -- #####
################################################
sysPa <-"Q:\\dfad\\users\\joeg\\home\\VMS\\180924_FishPi_WP5_SSF\\R_workflow\\"
dataPath <- paste(sysPa,"Rdata\\",sep="")
inputpath<- "Q:\\dfad\\data\\Data\\eflalo\\"
res.path<- "Q:\\dfad\\users\\joeg\\home\\VMS\\180924_FishPi_WP5_SSF\\R_workflow\\Results"
map.path<- "Q:\\dfad\\users\\joeg\\home\\VMS\\180924_FishPi_WP5_SSF\\R_workflow\\basemaps"
tacsat <- fread(paste(inputpath,"tacsat2_ais_u_vms_total_", year,".csv",sep=''))
tacsat <- data.frame(tacsat)
tacsat <- formatTacsat(tacsat) # format each of the columns to the specified class
tacsat <- data.table(tacsat)
######### -- subset -- ###########
############################################
# subset eflalo to take only the vessels we have AIS data for
tacsat_0 <- tacsat
# NOTE(review): `eflalo` is never loaded in this script -- presumably it is
# expected in the workspace from a previous step; confirm.
eflalo_0 <- subset(eflalo, tolower(VE_REF) %in% tolower(unique(tacsat_0$VE_REF)))
######### -- explore data -- ###########
############################################
# Quick interactive sanity checks on both data sets.
sort(unique(tacsat_0$VE_REF1))
sort(unique(eflalo_0$VE_REF1))
str(tacsat_0)
str(eflalo_0)
head(tacsat_0)
head(eflalo_0)
summary(tacsat_0)
summary(eflalo_0)
############## - cleaning spatial data - ################
##########################################################
# 1. Wrong VMS positions
#####################################################
# VMS positions that are not actually on planet Earth
idx <- which(abs(tacsat_0$SI_LATI) > 90 | abs(tacsat_0$SI_LONG) > 180) #points not on the globe
idx <- unique(c(idx,which(tacsat_0$SI_HE < 0 | tacsat_0$SI_HE > 360))) #adding points with heading outside compass range
# NOTE(review): these impossible positions are only counted, never removed
# from tacsat_0 -- confirm whether that is intentional.
length(idx)
# 2. Duplicates
#####################################################
tacsat_0 <- sortTacsat(tacsat_0)
tacsat_0$YEAR <- year(tacsat_0$SI_DATIM)
# we define duplicates as the combination of vessel name + lat + long + DATIM
uniqueTacsat <- paste(tacsat_0$VE_REF,tacsat_0$SI_LATI,tacsat_0$SI_LONG,tacsat_0$SI_DATIM)
print(nrow(tacsat_0)) # n rows total
print(nrow(tacsat_0[duplicated(uniqueTacsat),])) # n rows duplicated
head(tacsat_0[duplicated(uniqueTacsat),]) # duplicated rows, one trip selling in two ports
# get rid of the duplicates
tacsat_0 <- tacsat_0[!duplicated(uniqueTacsat),]
print(nrow(tacsat_0))
#
#3. Points on land
#####################################################
setwd(map.path)
## Read high-resolution base map (EEA European coastline, WGS84).
country.poly <- readOGR('Europe_costline_poly_wgs84_EEA.shp')
extent(country.poly)
## Create the clipping polygon for our fleet (lon 6-17 E, lat 53-59 N)
CP <- as(extent(6,17,53,59), "SpatialPolygons")
proj4string(CP) <- CRS(proj4string(country.poly))
summary(CP)
## Clip the map
coastline_pol <- gIntersection(country.poly, CP, byid=TRUE)
## Convert to Spatial Polygon
country.poly <- SpatialPolygons(coastline_pol@polygons,proj4string=coastline_pol@proj4string)
summary(country.poly)
#Identify points on land (idx: 1 = on land, 0 = at sea)
idx <- pointOnLand(tacsat_0,country.poly,proj4string=coastline_pol@proj4string)
table (idx)
tacsat_land <- tacsat_0[which(idx==1),] #points on land
tacsat_sea <- tacsat_0[which(idx==0),] #points at sea (not on land)
#4. points in a harbour
#####################################################
data(euharbours)
#points in harbour
idx <- pointInHarbour(lon=tacsat_sea$SI_LONG,lat=tacsat_sea$SI_LATI,harbours=harbours,saveHarbourList=F)
table(idx)
tacsat_har <- tacsat_sea[which(idx==1),] #points in harbour
# SI_HARB flag: "0" = ping inside a harbour, "1" = ping at sea.
# NOTE(review): flag is inverted relative to its name -- confirm downstream use.
tacsat_sea$SI_HARB[which(idx==1)] <- "0"
tacsat_sea$SI_HARB[which(idx==0)] <- "1"
tacsat <- tacsat_sea
save(tacsat,file=file.path(dataPath, paste0('tacsat_SI_HARB_', year, '.RData')))
#3. AIS records associated with very high speeds
#####################################################
# Speeds above 20 knots are treated as sensor errors and removed.
windows()  # Windows-only graphics device; inspect speed distribution first
hist(tacsat$SI_SP,breaks=40)
spThres <- 20
idx <- which(tacsat$SI_SP > spThres)
# BUG FIX: `tacsat[-idx,]` with an empty idx (i.e. no speeding records)
# evaluates to `tacsat[integer(0),]` and silently drops EVERY row.
# Only subset when at least one record exceeded the threshold.
if (length(idx) > 0) {
  tacsat <- tacsat[-idx,]
}
#4. Anomalous intervals
#####################################################
# define trip (one-day trips: trip id = vessel + date of the following day)
tacsat$FT_REF <- paste(tacsat$VE_REF1, format(as.Date(tacsat$SI_DATE, format = "%d/%m/%Y")+1, format="%d/%m/%Y"), sep="_")
#sort the data by vessel and time
tacsat <- sortTacsat(tacsat)
#Already have the intervals in seconds, but need to get them in minutes
tacsat$INTV <- tacsat$INTV/60
## Replace missing and negative intervals with the nominal 5-minute ping rate
tacsat$INTV[is.na(tacsat$INTV)]<-5
tacsat$INTV[which(tacsat$INTV<0)] <- 5
##Identify intervals between one day and the following and set them to 5
tacsat$INTV_0 <- tacsat$INTV
# NOTE(review): diff() runs across vessel boundaries too, so the first ping of
# one vessel is compared with the last ping of the previous vessel -- confirm.
daydif <- diff(as.Date(tacsat$SI_DATE))
daydif <- c(0, daydif)
test <- tacsat$INTV[daydif>1]
tacsat$INTV[daydif>1]<- 5
# INTV between 10 and 30 minutes are reset to the nominal 5-minute interval
intlimit <- 30
test <- tacsat[tacsat$INTV>intlimit]
length(tacsat$INTV[tacsat$INTV>10 & tacsat$INTV<intlimit])
length(tacsat$INTV[tacsat$INTV>10 & tacsat$INTV<intlimit])/length(tacsat$INTV) #1.8% #0%
tacsat$INTV[tacsat$INTV>10 & tacsat$INTV<intlimit] <- 5
# INTV higher than 30 are identified as holes and are set to 5 so that they do not interfere with the catch allocation
length(tacsat$INTV[tacsat$INTV>intlimit])
length(tacsat$INTV[tacsat$INTV>intlimit])/length(tacsat$INTV)
tacsat$hole <- "no"
tacsat$hole[tacsat$INTV>intlimit] <- "yes"
tacsat$INTV[tacsat$INTV>intlimit] <- 5
# Keep only pings at sea (SI_HARB == "1"); keep the full set in tacsat_tot
tacsat_tot <- tacsat
tacsat <- tacsat[tacsat$SI_HARB==1,]
# Calculate the number of pings per trip
nping_trip<- table(tacsat$FT_REF)
tacsat$Nping_trip <- nping_trip[match(tacsat$FT_REF, names(nping_trip))]
#5. Identify preliminary fishing activity
#####################################################
# Pings with speed in [0.1, 4) knots are provisionally flagged as fishing ("1")
speedmin<- 0.1
speedmax<- 4
tacsat$SI_STATE <- "0"
tacsat$SI_STATE[tacsat$SI_SP>=speedmin & tacsat$SI_SP<speedmax] <- "1"
############## - Save data - ################
############################################
save(tacsat,file=file.path(dataPath, paste0('tacsatClean_', year, '.RData')))
setwd(dataPath)
# Bundle everything needed by the plotting sections into a single RData file
save(tacsat, tacsat_land, tacsat_har, harbours, coastline_pol, file="Cleantacsat.RData")
load("Cleantacsat.RData")
############## - Summary tables - ################
##################################################
setwd(res.path)
# Per-trip summary: ping counts, fishing pings, holes, and times (in hours)
sumtab<- ddply(tacsat, .(VE_REF1,FT_REF), summarise,
Nping=length(VE_REF1),
NpingFishing=length(SI_STATE[SI_STATE=="1"]),
Nhole=length(hole[hole=="yes"]),
Timetrack=round(sum(INTV/60)),
Timefishing=round(sum((INTV[SI_STATE=="1"])/60)),
Timehole=round(sum(INTV_0[hole=="yes"]/60)))
write.table(sumtab, "Tacsat trip summary.csv", row.names = FALSE, sep=",", dec=".")
# Per-vessel summary: trip counts, and trips with >5 pings / fishing activity
sumtab_vessel<- ddply(tacsat, .(VE_REF1), summarise,
Ntrips=length(unique(FT_REF)),
Ntrips_more5ping=length(unique(FT_REF[Nping_trip>5])),
Ntrips_fishing=length(unique(FT_REF[SI_STATE=="1"])),
Ntrips_fishing_more5ping=length(unique(FT_REF[Nping_trip>5 & SI_STATE=="1"])))
write.table(sumtab_vessel, "Tacsat vessel summary.csv", row.names = FALSE, sep=",", dec=".")
############## - Exploratory Plots - ################
#########################################
#plot harbour positions over the clipped coastline
#######
png(filename="0_Port_Position.png", width=900, height =500)
# NOTE(review): crswgs84 is defined but never used below -- confirm.
crswgs84=CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")
plot(coastline_pol, lwd=1, xlim=c(6,17), ylim=c(53, 59))
points(x=as.numeric(harbours$lon),
y=as.numeric(harbours$lat), col="blue", pch=19)
title("Port Position")
dev.off()
#plot tracks for every vessel: at-sea (black), fishing (green),
#on-land (brown), in-harbour (blue) pings plus harbour positions (red)
####################################
setwd(res.path)
for (vessel in unique(tacsat$VE_REF1)){
tacsat_sea_temp <- subset(tacsat, VE_REF1==vessel )
tacsat_land_temp <- subset(tacsat_land, VE_REF1==vessel )
tacsat_har_temp <- subset(tacsat_har, VE_REF1==vessel )
tacsat_tot_temp <- subset(tacsat_0, VE_REF1==vessel)
# Plot extent covers all of this vessel's raw positions
ves_ylim <- c(min(tacsat_tot_temp$SI_LATI)-0.01, max(tacsat_tot_temp$SI_LATI))
ves_xlim <- c(min(tacsat_tot_temp$SI_LONG), max(tacsat_tot_temp$SI_LONG))
cex.val=0.5
#windows()
map1 <- ggplot() +
ggspatial::geom_spatial(data = coastline_pol, fill = "lightgrey", colour = "black", cex=cex.val) +
geom_point(data = tacsat_sea_temp, mapping = aes(x = SI_LONG, y = SI_LATI), col="black", shape=21, cex=cex.val) +
# NOTE(review): SI_STATE is coded "0"/"1" above, so the "f" filter selects
# nothing -- confirm whether "1" was intended here.
geom_point(data = tacsat_sea_temp[tacsat_sea_temp$SI_STATE=="f",], mapping = aes(x = SI_LONG, y = SI_LATI), shape=21, col="green", cex=cex.val) +
geom_point(data = tacsat_land_temp, mapping = aes(x = SI_LONG, y = SI_LATI), col="brown", shape=21, cex=cex.val) +
geom_point(data = tacsat_har_temp, mapping = aes(x = SI_LONG, y = SI_LATI), col="blue", shape=21, cex=cex.val) +
geom_point(data = harbours, mapping = aes(x = lon, y = lat), shape=16, col="red", cex=cex.val+2) +
#theme_void() +
coord_map(xlim = ves_xlim,ylim = ves_ylim)+
ggtitle(vessel)+
theme(plot.title = element_text(hjust = 0.5))
ggsave(map1, filename = paste(vessel, ".png"), width = 10, height = 10)
}
# plot tracks near ports (0.05-degree window around each harbour)
####################################
# subset harbours to those inside the study area
har <- subset(harbours, lon>(6) & lon<(17) & lat>53 &lat<59 )
# plot
for (port in unique(har$harbour)){
har_temp <- subset(har, harbour==port )
xlim <- har_temp$lon+ c(-0.05,0.05)
ylim <- har_temp$lat+ c(-0.05,0.05)
cex.val=0.5
## subset the tracks to the window around this port
tacsat_sea_port <- subset(tacsat, SI_LONG>xlim[1] & SI_LONG<xlim[2] & SI_LATI>ylim[1] &SI_LATI<ylim[2] )
map2 <- ggplot() +
ggspatial::geom_spatial(data = coastline_pol, fill = "lightgrey", colour = "black") +
geom_point(data = tacsat_sea_port, mapping = aes(x = SI_LONG, y = SI_LATI), shape=21,col="black", cex=cex.val) +
geom_point(data = tacsat_sea_port[tacsat_sea_port$SI_STATE=="f",], mapping = aes(x = SI_LONG, y = SI_LATI), shape=21, col="green", cex=cex.val) +
geom_point(data = tacsat_har, mapping = aes(x = SI_LONG, y = SI_LATI), shape=21, col="blue", cex=cex.val) +
geom_point(data = har, mapping = aes(x = lon, y = lat), shape=16, col="red", cex=cex.val+2) +
# Highlight the current port with a larger red marker
geom_point(data = har_temp, mapping = aes(x = lon, y = lat), shape=16, col="red", cex=cex.val+4) +
#theme_void() +
coord_map(xlim = har_temp$lon+ c(-0.05,0.05),ylim = har_temp$lat+ c(-0.05,0.05)) +
ggtitle(port)+
theme(plot.title = element_text(hjust = 0.5))
ggsave(map2, filename = paste("port_", port, ".png", sep=""), width = 10, height = 10)
}
#plot all trips per vessel: side-by-side map and speed-vs-time panel per trip
####################################
sort(unique(tacsat$VE_REF1))
for (vessel in unique(tacsat$VE_REF1)){
#vessel <- "abelan uno"
temp <- subset(tacsat, VE_REF1==vessel)
cex.val=1
for (trip in unique(temp$FT_REF )){
temp_trip <- subset(temp, FT_REF==trip )
ves_ylim <- c(min(temp_trip$SI_LATI)-0.01, max(temp_trip$SI_LATI)+0.01)
ves_xlim <- c(min(temp_trip$SI_LONG)-0.01, max(temp_trip$SI_LONG)+0.01)
map3 <- ggplot() +
ggspatial::geom_spatial(data = coastline_pol, fill = "lightgrey", colour = "black", cex=cex.val) +
geom_point(data = temp_trip, mapping = aes(x = SI_LONG, y = SI_LATI), col="black", shape=16, cex=cex.val) +
geom_point(data = temp_trip[temp_trip$SI_STATE=="f",], mapping = aes(x = SI_LONG, y = SI_LATI), shape=16, col="green", cex=cex.val) +
geom_point(data = harbours, mapping = aes(x = lon, y = lat), shape=16, col="red", cex=cex.val+2) +
#theme_void() +
coord_map(xlim = ves_xlim,ylim = ves_ylim)+
ggtitle(trip)+
theme(plot.title = element_text(hjust = 0.5))
# Speed profile for the same trip, fishing pings highlighted in green
speed1<- ggplot(data = temp_trip, mapping = aes(x = SI_DATIM, y = SI_SP), col="black") +
geom_line()+
geom_point()+
geom_point(data = temp_trip[temp_trip$SI_STATE=="f",], mapping = aes(x = SI_DATIM, y = SI_SP), col="green")+
ylim(0, max(temp_trip$SI_SP))+
ggtitle(trip)+
theme(axis.text.x = element_text(angle = 90, hjust = 1))+
theme(plot.title = element_text(hjust = 0.5))
p1<- grid.arrange(map3, speed1, ncol=2)
# "/" in the trip id (a date) is not a valid filename character
ggsave(p1, filename = paste(gsub("/","_", trip), ".png", sep=""), width = 20, height = 10)
}
}
#plot speed profile per vessel
####################################
sort(unique(tacsat$VE_REF1))
# One speed-vs-time plot per vessel, fishing positions highlighted in green.
for (vessel in unique(tacsat$VE_REF1)) {
  # BUG FIX: subset the full tacsat data set, not `temp` -- `temp` was a
  # leftover from the previous loop and only contained the last vessel's
  # records, so every other vessel produced an empty plot.
  temp_vessel <- subset(tacsat, VE_REF1 == vessel)
  speed1 <- ggplot(data = temp_vessel, mapping = aes(x = SI_DATIM, y = SI_SP), col = "black") +
    geom_line() +
    geom_point() +
    geom_point(data = temp_vessel[temp_vessel$SI_STATE == "f", ], mapping = aes(x = SI_DATIM, y = SI_SP), col = "green") +
    ylim(0, max(temp_vessel$SI_SP)) +
    ggtitle(vessel) +
    theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
    theme(plot.title = element_text(hjust = 0.5))
  ggsave(speed1, filename = paste0("speed_", vessel, ".png"), width = 20, height = 10)
}
#####
|
library(ape)  # 'ape' provides read.tree()/unroot()/write.tree() for Newick trees
# Read a phylogenetic tree from a Newick file and remove its root; the
# unrooted tree is written back out by the write.tree() call that follows.
testtree <- read.tree("12335_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="12335_0_unrooted.txt") | /codeml_files/newick_trees_processed_and_cleaned/12335_0/rinput.R | no_license | DaniBoo/cyanobacteria_project | R | false | false | 137 | r | library(ape)
testtree <- read.tree("12335_0.txt")  # parse the Newick tree with ape
unrooted_tr <- unroot(testtree)       # drop the root node before re-export
write.tree(unrooted_tr, file="12335_0_unrooted.txt") |
## Plot 4: 2x2 panel of household power consumption for 2007-02-01/02.
## Reads the raw "household_power_consumption.txt" data, subsets two days,
## coerces the measurement columns to numeric, and writes plot4.png
## (device closed by the dev.off() call that follows this block).
power <- read.table("household_power_consumption.txt", header = TRUE, sep = ";")
## subset out the required dates, i.e. 1st and 2nd February 2007
power2 <- subset(power, Date %in% c("1/2/2007", "2/2/2007"))
## convert factor/character columns to numeric (non-numeric entries, if any,
## become NA with a coercion warning)
power2$Global_active_power <- as.numeric(as.character(power2$Global_active_power))
power2$Global_reactive_power <- as.numeric(as.character(power2$Global_reactive_power))
power2$Voltage <- as.numeric(as.character(power2$Voltage))
## paste the Date and Time variables together with 'paste'
library(stringr)
power2$date_time <- paste(power2$Date, power2$Time)
## convert date_time to POSIXct with the dmy_hms function from lubridate
library(lubridate)
power2$date_time <- dmy_hms(power2$date_time)
## convert Sub_metering_x to numeric; Sub_metering_3 is now handled the same
## way as 1 and 2 for consistency (a no-op when the column is already numeric)
power2$sub1 <- as.numeric(as.character(power2$Sub_metering_1))
power2$sub2 <- as.numeric(as.character(power2$Sub_metering_2))
power2$sub3 <- as.numeric(as.character(power2$Sub_metering_3))
## open the PNG device
png("plot4.png", width = 480, height = 480)
## 2 rows x 2 columns of panels, filled row-wise
par(mfrow = c(2, 2))
## draw the four panels
with(power2, {
  ## row 1, col 1: global active power over time
  plot(date_time, Global_active_power, pch = NA_integer_, ylab = "Global Active Power (kilowatts)", xlab = "")
  lines(date_time, Global_active_power)
  ## row 1, col 2: voltage over time
  plot(date_time, Voltage, pch = NA_integer_, ylab = "Voltage", xlab = "datetime")
  lines(date_time, Voltage)
  ## row 2, col 1: the three sub-metering series
  plot(date_time, sub1, pch = NA_integer_, ylab = "Energy Sub Metering", xlab = "")
  lines(date_time, sub1)
  lines(date_time, sub2, col = "red")
  lines(date_time, sub3, col = "blue")
  legend("topright", lty = "solid", col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
  ## row 2, col 2: global reactive power over time
  plot(date_time, Global_reactive_power, pch = NA_integer_, ylab = "Global_reactive_power", xlab = "datetime")
  lines(date_time, Global_reactive_power)
})
dev.off() | /plot4.R | no_license | vijay7979/ExData_Plotting1 | R | false | false | 2,016 | r | ##read in the data from the "household_power_consumption.txt" file
## Plot 4: four-panel figure of household power usage on 1-2 February 2007.
## The PNG device opened here is closed by the dev.off() that follows.
power <- read.table("household_power_consumption.txt", header = TRUE, sep = ";")
## keep only the two target days
power2 <- subset(power, Date %in% c("1/2/2007", "2/2/2007"))
## coerce the three measurement columns to numeric in one pass
for (num_col in c("Global_active_power", "Global_reactive_power", "Voltage")) {
  power2[[num_col]] <- as.numeric(as.character(power2[[num_col]]))
}
## build a POSIXct timestamp from the Date and Time columns
library(stringr)
library(lubridate)
power2$date_time <- dmy_hms(paste(power2$Date, power2$Time))
## sub-metering columns: 1 and 2 coerced to numeric, 3 copied as-is
power2$sub1 <- as.numeric(as.character(power2$Sub_metering_1))
power2$sub2 <- as.numeric(as.character(power2$Sub_metering_2))
power2$sub3 <- power2$Sub_metering_3
## open the output device and lay out a 2x2 grid of panels
png("plot4.png", width = 480, height = 480)
par(mfrow = c(2, 2))
## render all four panels against the shared time axis
with(power2, {
  ## top-left: global active power
  plot(date_time, Global_active_power, pch = NA_integer_, ylab = "Global Active Power (kilowatts)", xlab = "")
  lines(date_time, Global_active_power)
  ## top-right: voltage
  plot(date_time, Voltage, pch = NA_integer_, ylab = "Voltage", xlab = "datetime")
  lines(date_time, Voltage)
  ## bottom-left: the three sub-metering series plus a legend
  plot(date_time, sub1, pch = NA_integer_, ylab = "Energy Sub Metering", xlab = "")
  lines(date_time, sub1)
  lines(date_time, sub2, col = "red")
  lines(date_time, sub3, col = "blue")
  legend("topright", lty = "solid", col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
  ## bottom-right: global reactive power
  plot(date_time, Global_reactive_power, pch = NA_integer_, ylab = "Global_reactive_power", xlab = "datetime")
  lines(date_time, Global_reactive_power)
})
dev.off() |
# Build a phyloseq object from pathway abundances, sample metadata and
# pathway annotations (tab-separated files; first column holds row names).
otus<-read.table("human_pathway.tsv",sep = "\t",header = T,row.names = 1)
map<-read.table("mappingfile_all.txt",sep = "\t",header=T,row.names=1)
# quote = "" so annotation text containing quote characters is read verbatim
tax<-read.table(file= "annotation_fish.tsv",header = T,row.names = 1,sep = "\t",quote = "")
# phyloseq constructors expect matrices, not data frames
otus_m<-as.matrix(otus)
tax_m<-as.matrix(tax)
otus_p = otu_table(otus_m, taxa_are_rows = TRUE)
tax_p = tax_table(tax_m)
map_p<-sample_data(map)
# combine the three components and keep a raw copy on disk
unfiltered <- merge_phyloseq(otus_p, map_p, tax_p)
save(unfiltered,file= "pathway_physeq_raw.data")
core_VSL<-unfiltered
# shift every count by 1 -- presumably a pseudocount so the DESeq2 steps
# below never see all-zero rows; TODO confirm
otu_table(core_VSL) <- otu_table(core_VSL) + 1
# Differential-abundance test for one sample grouping variable.
#
# Args:
#   core_VSL:            phyloseq object (counts pseudocounted upstream)
#   Type:                name of the sample variable to test (design ~Type)
#   core_VSL_out:        output path for the full, annotated results table
#   core_VSL_filter_out: output path for the table filtered at padj < 0.05
#
# Side effects: writes the two tab-separated tables above.
# Unused intermediates from the original version (two `type_group`
# assignments -- one of which hard-coded "sub_Treat" and could error on
# objects lacking that variable -- and `pwy_rank_phy`) have been removed.
myfunction <- function(core_VSL, Type, core_VSL_out, core_VSL_filter_out) {
  # design formula built from the supplied variable name, e.g. ~sub_Treat
  type <- as.formula(paste("~", Type, sep = ""))
  # --- variance-stabilised counts, used only to rank taxa by abundance ---
  core_VS_ds <- phyloseq_to_deseq2(core_VSL, type)
  core_VS_ds <- estimateSizeFactors(core_VS_ds)
  core_VS_ds <- estimateDispersions(core_VS_ds, fitType = "local")
  core_VS_ds <- varianceStabilizingTransformation(core_VS_ds, fitType = "local")
  # assay() accesses the transformed data matrix from the DESeq2 object
  counts_core_VSL_tt <- as.matrix(assay(core_VS_ds))
  core_VSL_tt <- core_VSL
  otu_table(core_VSL_tt) <- otu_table(counts_core_VSL_tt, taxa_are_rows = TRUE)
  # --- Wald test on a fresh DESeq2 object built from the raw counts ---
  core_VS_ds <- phyloseq_to_deseq2(core_VSL, type)
  dds_mtx <- DESeq(core_VS_ds, fitType = "local", test = "Wald", betaPrior = FALSE)
  res_mtx <- results(dds_mtx, cooksCutoff = FALSE)
  # rank pathways by total (variance-stabilised) abundance, largest first
  pwy_rank <- names(sort(taxa_sums(core_VSL_tt), decreasing = TRUE))
  pwy_rkmx <- cbind(pwy_rank, seq_along(pwy_rank))
  colnames(pwy_rkmx) <- c("pathway", "rank")
  # attach annotation and abundance rank to the DESeq2 results
  taxon <- as.data.frame(tax_table(core_VSL_tt))
  taxon$pwy_name <- row.names(taxon)
  taxon_rank <- merge(taxon, pwy_rkmx, by.x = "pwy_name", by.y = "pathway", all.x = TRUE)
  res_mtx_df <- data.frame(res_mtx)
  res_mtx_df$pathway <- row.names(res_mtx_df)
  res_mtx_ant <- merge(res_mtx_df, taxon_rank, by.x = "pathway", by.y = "pwy_name")
  write.table(res_mtx_ant, file = core_VSL_out, sep = "\t", row.names = FALSE, quote = FALSE)
  # keep only pathways significant at an adjusted p-value below 0.05
  alpha <- 0.05
  sigtab_mtx <- res_mtx_ant[which(res_mtx_ant$padj < alpha), ]
  write.table(sigtab_mtx, file = core_VSL_filter_out, sep = "\t", row.names = FALSE, quote = FALSE)
}
# Human samples only: test the sub_Treat groups against each other
hu0_hu7 <- subset_samples(core_VSL, Treat == "Human")
# BUG FIX: the filtered-results file name now carries the ".txt" extension,
# so the read.table() below finds the file the function just wrote
# (previously the function wrote "hu0_hu7_log2FC_filtered" without extension
# while the read expected "hu0_hu7_log2FC_filtered.txt").
myfunction(hu0_hu7, "sub_Treat", "hu0_hu7_log2FC.txt", "hu0_hu7_log2FC_filtered.txt")
pathway <- read.table("hu0_hu7_log2FC_filtered.txt", header = TRUE, sep = "\t")
# bubble plot of log2 fold change per pathway
# NOTE(review): this expects columns Pathway/Abundance/Qvalue in the filtered
# table -- confirm they exist in the written output
pp <- ggplot(pathway, aes(log2FoldChange, Pathway))
pp + geom_point()
pp + geom_point(aes(size = Abundance))
pbubble <- pp + geom_point(aes(size = Abundance, color = Qvalue))
pbubble + scale_colour_gradient(low = "darkseagreen1", high = "deepskyblue2")
| /Supp_Figures/FIG_S4G.r | no_license | Dayueban/Host-dependent-co-evolution-of-supplemented-probiotic | R | false | false | 2,606 | r | otus<-read.table("human_pathway.tsv",sep = "\t",header = T,row.names = 1)
# Sample metadata and pathway annotations (the pathway count table `otus`
# is read just above); tab-separated, first column = row names.
map<-read.table("mappingfile_all.txt",sep = "\t",header=T,row.names=1)
# quote = "" so annotation text containing quote characters is read verbatim
tax<-read.table(file= "annotation_fish.tsv",header = T,row.names = 1,sep = "\t",quote = "")
# phyloseq constructors expect matrices, not data frames
otus_m<-as.matrix(otus)
tax_m<-as.matrix(tax)
otus_p = otu_table(otus_m, taxa_are_rows = TRUE)
tax_p = tax_table(tax_m)
map_p<-sample_data(map)
# combine the three components and keep a raw copy on disk
unfiltered <- merge_phyloseq(otus_p, map_p, tax_p)
save(unfiltered,file= "pathway_physeq_raw.data")
core_VSL<-unfiltered
# shift every count by 1 -- presumably a pseudocount so the DESeq2 steps
# below never see all-zero rows; TODO confirm
otu_table(core_VSL) <- otu_table(core_VSL) + 1
# Differential-abundance test for one sample grouping variable.
#
# Args:
#   core_VSL:            phyloseq object (counts pseudocounted upstream)
#   Type:                name of the sample variable to test (design ~Type)
#   core_VSL_out:        output path for the full, annotated results table
#   core_VSL_filter_out: output path for the table filtered at padj < 0.05
#
# Side effects: writes the two tab-separated tables above.
# Unused intermediates from the original version (two `type_group`
# assignments -- one of which hard-coded "sub_Treat" and could error on
# objects lacking that variable -- and `pwy_rank_phy`) have been removed.
myfunction <- function(core_VSL, Type, core_VSL_out, core_VSL_filter_out) {
  # design formula built from the supplied variable name, e.g. ~sub_Treat
  type <- as.formula(paste("~", Type, sep = ""))
  # --- variance-stabilised counts, used only to rank taxa by abundance ---
  core_VS_ds <- phyloseq_to_deseq2(core_VSL, type)
  core_VS_ds <- estimateSizeFactors(core_VS_ds)
  core_VS_ds <- estimateDispersions(core_VS_ds, fitType = "local")
  core_VS_ds <- varianceStabilizingTransformation(core_VS_ds, fitType = "local")
  # assay() accesses the transformed data matrix from the DESeq2 object
  counts_core_VSL_tt <- as.matrix(assay(core_VS_ds))
  core_VSL_tt <- core_VSL
  otu_table(core_VSL_tt) <- otu_table(counts_core_VSL_tt, taxa_are_rows = TRUE)
  # --- Wald test on a fresh DESeq2 object built from the raw counts ---
  core_VS_ds <- phyloseq_to_deseq2(core_VSL, type)
  dds_mtx <- DESeq(core_VS_ds, fitType = "local", test = "Wald", betaPrior = FALSE)
  res_mtx <- results(dds_mtx, cooksCutoff = FALSE)
  # rank pathways by total (variance-stabilised) abundance, largest first
  pwy_rank <- names(sort(taxa_sums(core_VSL_tt), decreasing = TRUE))
  pwy_rkmx <- cbind(pwy_rank, seq_along(pwy_rank))
  colnames(pwy_rkmx) <- c("pathway", "rank")
  # attach annotation and abundance rank to the DESeq2 results
  taxon <- as.data.frame(tax_table(core_VSL_tt))
  taxon$pwy_name <- row.names(taxon)
  taxon_rank <- merge(taxon, pwy_rkmx, by.x = "pwy_name", by.y = "pathway", all.x = TRUE)
  res_mtx_df <- data.frame(res_mtx)
  res_mtx_df$pathway <- row.names(res_mtx_df)
  res_mtx_ant <- merge(res_mtx_df, taxon_rank, by.x = "pathway", by.y = "pwy_name")
  write.table(res_mtx_ant, file = core_VSL_out, sep = "\t", row.names = FALSE, quote = FALSE)
  # keep only pathways significant at an adjusted p-value below 0.05
  alpha <- 0.05
  sigtab_mtx <- res_mtx_ant[which(res_mtx_ant$padj < alpha), ]
  write.table(sigtab_mtx, file = core_VSL_filter_out, sep = "\t", row.names = FALSE, quote = FALSE)
}
# Human samples only: test the sub_Treat groups against each other
hu0_hu7 <- subset_samples(core_VSL, Treat == "Human")
# BUG FIX: the filtered-results file name now carries the ".txt" extension,
# so the read.table() below finds the file the function just wrote
# (previously the function wrote "hu0_hu7_log2FC_filtered" without extension
# while the read expected "hu0_hu7_log2FC_filtered.txt").
myfunction(hu0_hu7, "sub_Treat", "hu0_hu7_log2FC.txt", "hu0_hu7_log2FC_filtered.txt")
pathway <- read.table("hu0_hu7_log2FC_filtered.txt", header = TRUE, sep = "\t")
# bubble plot of log2 fold change per pathway
# NOTE(review): this expects columns Pathway/Abundance/Qvalue in the filtered
# table -- confirm they exist in the written output
pp <- ggplot(pathway, aes(log2FoldChange, Pathway))
pp + geom_point()
pp + geom_point(aes(size = Abundance))
pbubble <- pp + geom_point(aes(size = Abundance, color = Qvalue))
pbubble + scale_colour_gradient(low = "darkseagreen1", high = "deepskyblue2")
|
# R script to map interaction eQTL for eGenes using adjusted expression data
# /apps/well/R/3.3.0/bin/Rscript --vanilla R.eQTL.r "Disease"
# Sample variable whose interaction with genotype is tested first
mod <- "Disease"
# mod <- "SCORE.1.Emirati.0.Non"
# load R packages
# suppressMessages(library(lme4))
# load necessary data: the .RData file provides the objects listed below
# (exprs = expression matrix, geno = genotype matrix, info = sample metadata)
load("/well/jknight/AbuDhabiRNA/eQTL/eQTL.25PCs.RData")
# exprs
# pairs
# geno
# info
# Peak eQTL per gene. (A superseded read.delim() of Peak_sig_eQTL_empP.txt,
# whose result was immediately overwritten by this readRDS(), was removed.)
sig.eQTL <- readRDS("/well/jknight/AbuDhabiRNA/eQTL/Results/cis/Results_peak_final.rds")
# factors -> character so Gene/SNP can index the matrices directly
sig.eQTL$Gene <- as.character(sig.eQTL$Gene)
sig.eQTL$SNP <- as.character(sig.eQTL$SNP)
# output directory
outputdir <- "/well/jknight/AbuDhabiRNA/eQTL/Results/cis/Interactions/"
# Test each gene: fit expression ~ genotype * Disease and collect the
# genotype (eQTL), Disease (DE) and interaction coefficients per peak pair.
irange <- seq_len(nrow(sig.eQTL))
results <- do.call(rbind, lapply(irange, function(x) {
  print(x)
  # define the gene being tested
  genename <- sig.eQTL$Gene[x]
  geneexprs <- exprs[match(genename, rownames(exprs)), ]
  # snp for this gene
  snp <- sig.eQTL$SNP[x]
  # model: genotype and group main effects plus their interaction
  model <- lm(geneexprs ~ geno[, snp] + info[, mod] + geno[, snp]*info[, mod])
  # require more than one homozygous-alternate carrier in both groups --
  # presumably so the interaction coefficient is estimable (TODO confirm);
  # otherwise a row of NAs is recorded
  if (length(which(geno[, snp] == 2 & info[, mod] == "Control")) > 1 &&
      length(which(geno[, snp] == 2 & info[, mod] == "T2DM")) > 1) {
    c(summary(model)$coefficients["geno[, snp]", ],
      summary(model)$coefficients["info[, mod]T2DM", ],
      summary(model)$coefficients["geno[, snp]:info[, mod]T2DM", ])
  } else {
    rep(NA, 12)
  }
}))
# four statistics (beta/SE/t/p) for each of the three coefficients
colnames(results) <- c("eQTL_beta", "eQTL_SE", "eQTL_t", "eQTL_pval",
                       "DE_beta", "DE_SE", "DE_t", "DE_pval",
                       "Interaction_beta", "Interaction_SE", "Interaction_t", "Interaction_pval")
results <- data.frame(Gene = sig.eQTL$Gene, SNP = sig.eQTL$SNP, results)
saveRDS(results, paste0(outputdir, "results_", mod, "_interactions.rds"))
################################################################################
################################################################################
################################################################################
# Repeat the interaction scan with HbA1c (a continuous covariate) as modifier.
mod <- "HbA1c"
results <- do.call(rbind, lapply(irange, function(x){
print(x)
# define the gene being tested
genename <- sig.eQTL$Gene[x]
geneexprs <- exprs[match(genename, rownames(exprs)), ]
# snp for this gene
snp <- sig.eQTL$SNP[x]
# model: expression ~ genotype * modifier (main effects + interaction)
model <- lm(geneexprs ~ geno[, snp] + info[, mod] + geno[, snp]*info[, mod])
# keep coefficients only if every genotype class (0/1/2) has more than one
# sample with a non-missing modifier value; otherwise record NAs
if ((length(which(geno[, snp]==0 & !(is.na(info[, mod])))) > 1) &
(length(which(geno[, snp]==1 & !(is.na(info[, mod])))) > 1) &
(length(which(geno[, snp]==2 & !(is.na(info[, mod])))) > 1)) {
# genotype main effect, modifier main effect, interaction term
c(summary(model)$coefficients["geno[, snp]", ],
summary(model)$coefficients["info[, mod]", ],
summary(model)$coefficients["geno[, snp]:info[, mod]", ])
} else {
rep(NA, 12)
}
}))
# four statistics (beta/SE/t/p) for each of the three coefficients
colnames(results)<-c("eQTL_beta", "eQTL_SE", "eQTL_t", "eQTL_pval",
"DE_beta", "DE_SE", "DE_t", "DE_pval",
"Interaction_beta", "Interaction_SE", "Interaction_t", "Interaction_pval")
results <- data.frame(Gene=sig.eQTL$Gene, SNP=sig.eQTL$SNP, results)
saveRDS(results, paste0(outputdir, "results_", mod, "_interactions.rds"))
################################################################################
################################################################################
################################################################################
# Repeat the interaction scan with Glucose (a continuous covariate) as modifier.
mod <- "Glucose"
results <- do.call(rbind, lapply(irange, function(x){
print(x)
# define the gene being tested
genename <- sig.eQTL$Gene[x]
geneexprs <- exprs[match(genename, rownames(exprs)), ]
# snp for this gene
snp <- sig.eQTL$SNP[x]
# model: expression ~ genotype * modifier (main effects + interaction)
model <- lm(geneexprs ~ geno[, snp] + info[, mod] + geno[, snp]*info[, mod])
# keep coefficients only if every genotype class (0/1/2) has more than one
# sample with a non-missing modifier value; otherwise record NAs
if ((length(which(geno[, snp]==0 & !(is.na(info[, mod])))) > 1) &
(length(which(geno[, snp]==1 & !(is.na(info[, mod])))) > 1) &
(length(which(geno[, snp]==2 & !(is.na(info[, mod])))) > 1)) {
# genotype main effect, modifier main effect, interaction term
c(summary(model)$coefficients["geno[, snp]", ],
summary(model)$coefficients["info[, mod]", ],
summary(model)$coefficients["geno[, snp]:info[, mod]", ])
} else {
rep(NA, 12)
}
}))
# four statistics (beta/SE/t/p) for each of the three coefficients
colnames(results)<-c("eQTL_beta", "eQTL_SE", "eQTL_t", "eQTL_pval",
"DE_beta", "DE_SE", "DE_t", "DE_pval",
"Interaction_beta", "Interaction_SE", "Interaction_t", "Interaction_pval")
results <- data.frame(Gene=sig.eQTL$Gene, SNP=sig.eQTL$SNP, results)
saveRDS(results, paste0(outputdir, "results_", mod, "_interactions.rds"))
| /eQTL/Interaction_QTL.R | no_license | jknightlab/Abu-Dhabi | R | false | false | 4,823 | r | # R script to map interaction eQTL for eGenes using adjusted expression data
# /apps/well/R/3.3.0/bin/Rscript --vanilla R.eQTL.r "Disease"
# Sample variable whose interaction with genotype is tested first
mod <- "Disease"
# mod <- "SCORE.1.Emirati.0.Non"
# load R packages
# suppressMessages(library(lme4))
# load necessary data: the .RData file provides the objects listed below
# (exprs = expression matrix, geno = genotype matrix, info = sample metadata)
load("/well/jknight/AbuDhabiRNA/eQTL/eQTL.25PCs.RData")
# exprs
# pairs
# geno
# info
# NOTE(review): the read.delim() result below is immediately overwritten by
# the readRDS() call -- dead code; confirm and remove
sig.eQTL <- read.delim("/well/jknight/AbuDhabiRNA/eQTL/Results/cis/Peak_sig_eQTL_empP.txt",
sep="\t", stringsAsFactors=F)
sig.eQTL <- readRDS("/well/jknight/AbuDhabiRNA/eQTL/Results/cis/Results_peak_final.rds")
# factors -> character so Gene/SNP can index the matrices directly
sig.eQTL$Gene <- as.character(sig.eQTL$Gene)
sig.eQTL$SNP <- as.character(sig.eQTL$SNP)
# output directory
outputdir <- "/well/jknight/AbuDhabiRNA/eQTL/Results/cis/Interactions/"
# Test each gene: fit expression ~ genotype * Disease and collect the
# genotype (eQTL), Disease (DE) and interaction coefficients per peak pair.
irange <- seq_len(nrow(sig.eQTL))
results <- do.call(rbind, lapply(irange, function(x) {
  print(x)
  # define the gene being tested
  genename <- sig.eQTL$Gene[x]
  geneexprs <- exprs[match(genename, rownames(exprs)), ]
  # snp for this gene
  snp <- sig.eQTL$SNP[x]
  # model: genotype and group main effects plus their interaction
  model <- lm(geneexprs ~ geno[, snp] + info[, mod] + geno[, snp]*info[, mod])
  # require more than one homozygous-alternate carrier in both groups --
  # presumably so the interaction coefficient is estimable (TODO confirm);
  # otherwise a row of NAs is recorded
  if (length(which(geno[, snp] == 2 & info[, mod] == "Control")) > 1 &&
      length(which(geno[, snp] == 2 & info[, mod] == "T2DM")) > 1) {
    c(summary(model)$coefficients["geno[, snp]", ],
      summary(model)$coefficients["info[, mod]T2DM", ],
      summary(model)$coefficients["geno[, snp]:info[, mod]T2DM", ])
  } else {
    rep(NA, 12)
  }
}))
# four statistics (beta/SE/t/p) for each of the three coefficients
colnames(results) <- c("eQTL_beta", "eQTL_SE", "eQTL_t", "eQTL_pval",
                       "DE_beta", "DE_SE", "DE_t", "DE_pval",
                       "Interaction_beta", "Interaction_SE", "Interaction_t", "Interaction_pval")
results <- data.frame(Gene = sig.eQTL$Gene, SNP = sig.eQTL$SNP, results)
saveRDS(results, paste0(outputdir, "results_", mod, "_interactions.rds"))
################################################################################
################################################################################
################################################################################
# Repeat the interaction scan with HbA1c (a continuous covariate) as modifier.
mod <- "HbA1c"
results <- do.call(rbind, lapply(irange, function(x){
print(x)
# define the gene being tested
genename <- sig.eQTL$Gene[x]
geneexprs <- exprs[match(genename, rownames(exprs)), ]
# snp for this gene
snp <- sig.eQTL$SNP[x]
# model: expression ~ genotype * modifier (main effects + interaction)
model <- lm(geneexprs ~ geno[, snp] + info[, mod] + geno[, snp]*info[, mod])
# keep coefficients only if every genotype class (0/1/2) has more than one
# sample with a non-missing modifier value; otherwise record NAs
if ((length(which(geno[, snp]==0 & !(is.na(info[, mod])))) > 1) &
(length(which(geno[, snp]==1 & !(is.na(info[, mod])))) > 1) &
(length(which(geno[, snp]==2 & !(is.na(info[, mod])))) > 1)) {
# genotype main effect, modifier main effect, interaction term
c(summary(model)$coefficients["geno[, snp]", ],
summary(model)$coefficients["info[, mod]", ],
summary(model)$coefficients["geno[, snp]:info[, mod]", ])
} else {
rep(NA, 12)
}
}))
# four statistics (beta/SE/t/p) for each of the three coefficients
colnames(results)<-c("eQTL_beta", "eQTL_SE", "eQTL_t", "eQTL_pval",
"DE_beta", "DE_SE", "DE_t", "DE_pval",
"Interaction_beta", "Interaction_SE", "Interaction_t", "Interaction_pval")
results <- data.frame(Gene=sig.eQTL$Gene, SNP=sig.eQTL$SNP, results)
saveRDS(results, paste0(outputdir, "results_", mod, "_interactions.rds"))
################################################################################
################################################################################
################################################################################
# Repeat the interaction scan with Glucose (a continuous covariate) as modifier.
mod <- "Glucose"
results <- do.call(rbind, lapply(irange, function(x){
print(x)
# define the gene being tested
genename <- sig.eQTL$Gene[x]
geneexprs <- exprs[match(genename, rownames(exprs)), ]
# snp for this gene
snp <- sig.eQTL$SNP[x]
# model: expression ~ genotype * modifier (main effects + interaction)
model <- lm(geneexprs ~ geno[, snp] + info[, mod] + geno[, snp]*info[, mod])
# keep coefficients only if every genotype class (0/1/2) has more than one
# sample with a non-missing modifier value; otherwise record NAs
if ((length(which(geno[, snp]==0 & !(is.na(info[, mod])))) > 1) &
(length(which(geno[, snp]==1 & !(is.na(info[, mod])))) > 1) &
(length(which(geno[, snp]==2 & !(is.na(info[, mod])))) > 1)) {
# genotype main effect, modifier main effect, interaction term
c(summary(model)$coefficients["geno[, snp]", ],
summary(model)$coefficients["info[, mod]", ],
summary(model)$coefficients["geno[, snp]:info[, mod]", ])
} else {
rep(NA, 12)
}
}))
# four statistics (beta/SE/t/p) for each of the three coefficients
colnames(results)<-c("eQTL_beta", "eQTL_SE", "eQTL_t", "eQTL_pval",
"DE_beta", "DE_SE", "DE_t", "DE_pval",
"Interaction_beta", "Interaction_SE", "Interaction_t", "Interaction_pval")
results <- data.frame(Gene=sig.eQTL$Gene, SNP=sig.eQTL$SNP, results)
saveRDS(results, paste0(outputdir, "results_", mod, "_interactions.rds"))
|
\name{findStartEnd}
\alias{findStartEnd}
\title{Detects the first January and the last December in a time series
}
\description{Clipping or windowing time series data often requires
detecting which year is the first full year in a dataset and which
year is the last full year. Some datasources "pad" out their data
to full years, while others do not. This function quickly
returns the first full year ( with no missing data) and the
last year with no missing data
}
\usage{
findStartEnd(Data)
}
\arguments{
\item{Data}{ An Array object, Mts object or Zoo object with
multiple time series
}
}
\details{ The function uses \code{allNA} to determine the first
year in the dataset where every month has at least one station
report. In addition, the last year in which every month has
at least one station report is returned.
}
\value{ Returns a list of Start and End
\item{Start}{the first full year where every month has at least one station}
\item{End}{the last full year where every month has at least one station}
}
\author{Steven Mosher
}
\note{ If by chance some month in the middle of a collection
has a missing month, or a month with no stations, this function
will not detect that.
}
\keyword{ Timeseries }
| /man/findStartEnd.Rd | no_license | cran/RghcnV3 | R | false | false | 1,318 | rd | \name{findStartEnd}
\alias{findStartEnd}
\title{Detects the first January and the last December in a time series
}
\description{Clipping or windowing time series data often requires
detecting which year is the first full year in a dataset and which
year is the last full year. Some datasources "pad" out their data
to full years, while others do not. This function quickly
returns the first full year ( with no missing data) and the
last year with no missing data
}
\usage{
findStartEnd(Data)
}
\arguments{
\item{Data}{ An Array object, Mts object or Zoo object with
multiple time series
}
}
\details{ The function uses \code{allNA} to determine the first
year in the dataset where every month has at least one station
report. In addition, the last year in which every month has
at least one station report is returned.
}
\value{ Returns a list of Start and End
\item{Start}{the first full year where every month has at least one station}
\item{End}{the last full year where every month has at least one station}
}
\author{Steven Mosher
}
\note{ If by chance some month in the middle of a collection
has a missing month, or a month with no stations, this function
will not detect that.
}
\keyword{ Timeseries }
|
#' @title MODIStsp main function
#' @description Main function for the MODIS Time Series Processing Tool
#' (MODIStsp)
#' @details The function is used to:
#' - initialize the processing (folder names, packages, etc.);
#' - launch the GUI ([MODIStsp_GUI()]) on interactive
#' execution, or load an options file to set processing arguments and/or
#' retrieve CLI inputs and run processing on non-interactive execution;
#' - launch the routines for downloading and processing the requested datasets.
#' ([MODIStsp_process()])
#' - launching the function with GUI = FALSE and without specifying a opts_file
#' initializes arguments with default values. This allows making a test run.
#' @param gui `logical` if TRUE: the GUI is opened before processing. If FALSE:
#' processing parameters are retrieved from the provided `opts_file`
#' argument), Default: TRUE
#' @param out_folder `character` Main output folder, default: NULL.
#' @param out_folder_mod `character` Output folder for original HDF storage.
#' If `"$tempdir"` (default), a temporary directory is used.
#' @param opts_file `character` full path to a JSON file
#' containing MODIStsp processing options saved from the GUI, Default: NULL
#' @param selprod `character` Name of selected MODIS product (e.g.,
#' Vegetation Indexes_16Days_250m (M*D13Q1)). You can get
#' a list of available product names using function `MODIStsp_get_prodnames`,
#' Default: NULL
#' @param prod_version Version of the selected MODIS product.
#' Currently versions `"006"` and/or `"061"` can be chosen.
#' Default value is `"006"` until decommission of this version will be
#' announced by USGS.
#' Products with version `"061` are experimental: in case users would encounter
#' an error in the encoding of bands or quality flags they are encouraged
#' to report it by opening a new issue on GitHub at
#' \url{https://github.com/ropensci/MODIStsp/issues}.
#' @param bandsel `character array` Original MODIS layers to be processed.
#' You can get a list of available layers for a given product
#' using function `MODIStsp_get_prodlayers` (e.g., MODIStsp_get_prodlayers("M*D13Q1")$bandnames),
#' Default: NULL
#' @param quality_bandsel `character array` Quality Indicators to be computed starting from
#' bit fields of original MODIS layers. You can get a list of available quality layers for a given product
#' using function `MODIStsp_get_prodlayers` (e.g., MODIStsp_get_prodlayers("M*D13Q1")$quality_bandnames),
#' Default: NULL
#' @param indexes_bandsel `character array`Spectral Indexes to be computed starting from reflectance bands.
#' You can get a list of available quality layers for a given product
#' using function `MODIStsp_get_prodlayers` (e.g., MODIStsp_get_prodlayers("M*D13Q1")$indexes_bandnames),
#' Default: NULL
#' @param sensor `character ["Terra"| "Aqua" | "Both"]` MODIS platform to be considered.
#' (Ignored for MCD* products). Default: "Both"
#' @param download_server `character ["http" | "offline"]` service to be used for
#' download. Default: "http"
#' @param downloader `character ["http" | "aria2"]` downloader to be used,
#' Default: "http"
#' @param user `character` Username for NASA http server.
#' ([urs.earthdata.nasa.gov/home](https://urs.earthdata.nasa.gov/home)).
#' @param password `character` Password for NASA http server
#' ([urs.earthdata.nasa.gov/home](https://urs.earthdata.nasa.gov/home)).
#' @param download_range `character ["Full" | "Seasonal"]` If "full", all the
#' available images between the starting and the ending dates are downloaded;
#' If "seasonal", only the images included in the season are downloaded
#' (e.g.: if the starting date is 2005-12-01 and the ending is 2010-02-28, only
#' the images of December, January and February from 2005 to 2010 - excluding
#' 2005-01, 2005-02 and 2010-12 - are downloaded), Default: Full
#' @param start_date `character` Start date for images download and preprocessing
#' (yyyy.mm.dd), Default: NULL
#' @param end_date `character` End date for images download and preprocessing
#' (yyyy.mm.dd), Default: NULL
#' @param spatmeth `character ["tiles" | "bbox" | "file"]`, indicates how the processing
#' extent is retrieves. if "tiles", use the specified tiles (start_x....).
#' If "file", retrieve extent from spatial file specifies in `spafile`. If
#' "bbox", use the specified bounding box, Default: "tiles"
#' @param start_x `integer [0-35]` Start MODIS horizontal tile defining spatial extent.
#' Ignored if spatmeth != "tiles", Default: 18
#' @param start_y `integer [0-17]` Start MODIS vertical tile defining spatial extent.
#' Ignored if spatmeth != "tiles", Default: 4
#' @param end_x `integer [0-35]` End MODIS horizontal tile defining spatial extent.
#' Ignored if spatmeth != "tiles", Default: 18
#' @param end_y `integer [0-17]` End MODIS vertical tile defining spatial extent.
#' Ignored if spatmeth != "tiles", Default: 4
#' @param bbox `numeric(4)` Output bounding box (xmin, ymin, xmax, ymax) in
#' out_proj coordinate system. Ignored if spatmeth == "tiles", Default: NULL
#' @param spafile `character` (optional) full path of a spatial file
#' to use to derive the processing extent. If not NULL, the processing options
#' which define the extent, the selected tiles and the "Full Tile / Custom"
#' in the JSON options file are overwritten and new files are created on the
#' extent of the provided spatial file. Ignored if spatmeth != "file", Default: NULL
#' @param out_projsel `character ["Native", "User Defined`] If "Native", the
#' outputs keep the original resolution of MODIS HDF images. Otherwise, the value
#' set in "out_res" is used, Default:Native
#' @param output_proj `character` either equal to "MODIS Sinusoidal",
#' or to the code of a valid EPSG or to a WKT projection string.
#' Ignored if out_projsel == "Native", Default: NULL
#' @param out_res_sel `character ["Native", "User Defined`]. If "Native", the
#' outputs keep the original resolution of MODIS HDF images. Otherwise, the value
#' set in "out_res" is used.
#' @param out_res `float` Output resolution (in output projection measurement
#' unit). Ignored if out_res_sel == "Native".
#' @param resampling `character ["near" | "bilinear" | "cubic" | "cubicspline",
#' |lanczos"|, "average"|, "mode", |"max"|, |"min"|, |"q1"|, |"q3"|, |"sum"|]`
#' Resampling method to be used by `gdalwarp`.
#' @param reprocess `logical` If TRUE, reprocess data for already existing dates.
#' @param delete_hdf `logical` If TRUE, delete downloaded HDF files after completion.
#' @param nodata_change `logical` if TRUE, NoData values are set to the max value
#' of the datatype of the layer on the MODIStsp output rasters. NOTE: If multiple
#' nodata values are reported for a layer, all are reset to the new value.
#' @param scale_val `logical` If TRUE, scale and offset are applied to
#' original MODIS layers, and Spectral Indexes are saved as floating point. If
#' FALSE, no rescaling is done and Spectral Indexes are saved as integer, with a
#' 10000 scaling factor.
#' @param ts_format `character array including ["R RasterStack" | "ENVI Meta Files" | "GDAL VRT" |
#' "ENVI and GDAL"]` Selected virtual time series format.
#' @param out_format `character ["ENVI" | "GTiff"]` Desired output format.
#' @param compress `character ["None" | "PACKBITS" | "LZW" | "DEFLATE"]`
#' Compression method for GTiff outputs (Ignored if `out_format == ENVI`)
#' @param test `integer | character (e.g., "01a")` if set, MODIStsp is executed in
#' "test mode", using a preset Options File instead than opening the GUI or accepting the
#' `opts_file` parameter. This allows both to check correct installation on
#' user's machines, and to implement unit testing.
#' @param n_retries `numeric` maximum number of retries on download functions.
#' In case any download function fails more than `n_retries` times consecutively,
#' MODIStsp_process will abort, Default: 20
#' @param verbose `logical` If FALSE, suppress processing messages,
#' Default: TRUE
#' @param parallel `logical` If TRUE (default), the function is run using parallel
#' processing, to speed-up the computation for large rasters (with a maximum
#' of 8 cores).
#' The number of cores is automatically determined; specifying it is also
#' possible (e.g. `parallel = 4`). In this case, more than 8 cores can be
#' specified. If FALSE (default), single core processing is used.
#' @param ... not used for values, forces later arguments to bind by name
#' @return NULL
#'
#' @author Lorenzo Busetto, phD (2014-2017)
#' @author Luigi Ranghetti, phD (2015-2017)
#' @note License: GPL 3.0
#' @export
#' @seealso [MODIStsp_GUI()], [MODIStsp_process()]
#' @rdname MODIStsp
#' @importFrom raster rasterOptions
#' @importFrom sf sf_extSoftVersion
#' @importFrom jsonlite read_json write_json
#' @importFrom tools file_path_sans_ext
#' @importFrom utils unzip
#' @examples
#' \donttest{
#'
#' #' # - Running the tool using the GUI
#' # Running the tool without any option will start the GUI with the default or
#' # last used settings, in interactive mode (i.e., with gui = TRUE).
#' if (interactive()) {
#' MODIStsp()
#' }
#'
#'
#' #' # - Running the tool specifying processing arguments in the call
#'
#' # **NOTE** Output files of examples are saved to file.path(tempdir(), "MODIStsp").
#'
#' # Here we process layers __NDVI__ and __EVI__ and quality indicator __usefulness__
#' # of product __M*D13Q1__, considering both Terra and Aqua platforms, for dates
#' # comprised between 2020-06-01 and 2020-06-15 and saves output to R tempdir
#' # --> See name and available layers for product M*D13Q1.
#' # Note that this example (as well as the following ones) is run in single
#' # core to follow CRAN policies, by setting parallel = FALSE.
#' # Users can exploit multicore functionalities skipping to set this argument.
#'
#' # The following check is performed in order not to provide errors
#' # running the examples if HDF4 is not supported.
#' is_hdf4_supported <- "HDF4" %in% sf::st_drivers("raster")$name
#'
#' MODIStsp_get_prodlayers("M*D13A2")
#' if (is_hdf4_supported) {
#' MODIStsp(
#' gui = FALSE,
#' out_folder = "$tempdir",
#' selprod = "Vegetation_Indexes_16Days_1Km (M*D13A2)",
#' bandsel = c("EVI", "NDVI"),
#' quality_bandsel = "QA_usef",
#' indexes_bandsel = "SR",
#' user = "mstp_test" ,
#' password = "MSTP_test_01",
#' start_date = "2020.06.01",
#' end_date = "2020.06.15",
#' verbose = FALSE,
#' parallel = FALSE
#' )
#' }
#'
#'
#' #' # - Running the tool using the settings previously saved in a specific options file
#'
#' # **NOTE** Output files of examples are saved to file.path(tempdir(), "MODIStsp").
#' # You can run the examples with `gui = TRUE` to set a different output folder!
#'
#' # Here we use a test json file saved in MODIStsp installation folder which
#' # downloads and processed 3 MOD13A2 images over the Como Lake (Lombardy, Italy)
#' # and retrieves NDVI and EVI data, plus the Usefulness Index Quality Indicator.
#'
#' opts_file <- system.file("testdata/test_MOD13A2.json", package = "MODIStsp")
#'
#' if (is_hdf4_supported) {
#' MODIStsp(gui = FALSE, opts_file = opts_file, verbose = TRUE, parallel = FALSE)
#' }
#'
#'
#' # Running the tool using the settings previously saved in a specific option file
#' # and specifying the extent from a spatial file allows to re-use the same
#' # processing settings to perform download and reprocessing on a different area
#'
#' opts_file <- system.file("testdata/test_MOD13A2.json", package = "MODIStsp")
#' spatial_file <- system.file("testdata/lakeshapes/garda_lake.shp", package = "MODIStsp")
#' if (is_hdf4_supported) {
#' MODIStsp(
#' gui = FALSE,
#' opts_file = opts_file,
#' spatmeth = "file",
#' spafile = spatial_file,
#' verbose = TRUE,
#' parallel = FALSE
#' )
#' }
#'
#'
#' # Running the tool using the settings previously saved in a
#' # specific options file and specifying each time the extent from a different
#' # spatial file (e.g., to perform the same processing on several extents)
#' # Note that you can also put all your extent files in a specific folder and
#' # create the extent list using for example.
#'
#' extent_list = list.files(
#' system.file("testdata/lakeshapes/", package = "MODIStsp"),
#' "\\.shp$",
#' full.names = TRUE
#' )
#' extent_list
#' opts_file <- system.file("testdata/test_MOD13A2.json", package = "MODIStsp")
#'
#' if (is_hdf4_supported) {
#' for (single_shape in extent_list) {
#' MODIStsp(
#' gui = FALSE,
#' opts_file = opts_file,
#' spatmeth = "file",
#' spafile = single_shape,
#' verbose = TRUE,
#' parallel = FALSE
#' )
#' }
#' }
#'
#' # output files are placed in separate folders:
#' outfiles_garda <- list.files(
#' file.path(tempdir(), "MODIStsp/garda_lake/VI_16Days_1Km_v6/NDVI"),
#' full.names = TRUE
#' )
#' outfiles_garda
#' require(raster)
#' if (length(outfiles_garda) > 0) {
#' plot(raster(outfiles_garda[1] ))
#' }
#'
#' outfiles_iseo <- list.files(
#' file.path(tempdir(), "MODIStsp/iseo_lake/VI_16Days_1Km_v6/NDVI"),
#' full.names = TRUE
#' )
#' outfiles_iseo
#' if (length(outfiles_garda) > 0) {
#' plot(raster(outfiles_iseo[1]))
#' }
#'
#' # See also https://docs.ropensci.org/MODIStsp/articles/noninteractive_execution.html
#' }
MODIStsp <- function(...,
                     gui = TRUE,
                     out_folder = NULL,
                     out_folder_mod = NULL,
                     opts_file = NULL,
                     selprod = NULL,
                     prod_version = NULL,
                     bandsel = NULL,
                     quality_bandsel = NULL,
                     indexes_bandsel = NULL,
                     sensor = NULL,
                     download_server = NULL,
                     downloader = NULL,
                     user = NULL,
                     password = NULL,
                     download_range = NULL,
                     start_date = NULL,
                     end_date = NULL,
                     spatmeth = NULL,
                     start_x = NULL,
                     end_x = NULL,
                     start_y = NULL,
                     end_y = NULL,
                     bbox = NULL,
                     spafile = NULL,
                     out_projsel = NULL,
                     output_proj = NULL,
                     out_res_sel = NULL,
                     out_res = NULL,
                     resampling = NULL,
                     reprocess = NULL,
                     delete_hdf = NULL,
                     nodata_change = NULL,
                     scale_val = NULL,
                     ts_format = NULL,
                     out_format = NULL,
                     compress = NULL,
                     test = NULL,
                     n_retries = 5,
                     verbose = TRUE,
                     parallel = TRUE) {
  # Make so that "raster" functions do not automatically add extensions on
  # output files. This is automatically reset to TRUE at the end of the session
  raster::rasterOptions(setfileext = FALSE)
  proc_opts <- NULL
  # _________________________________________________________________________
  # check arguments ####
  # if (is.null(opts_file) & gui == FALSE) {
  #   stop("You need to provide a valid `.json` options file to run MODIStsp",
  #        " in non-interactive mode. \n",
  #        "Please provide a valid \"opts_file\" path or run ",
  #        "with gui=TRUE to create and save one.")
  # }
  if (!is.null(opts_file)) {
    if (!file.exists(opts_file))
      stop("The specified `.json` options file was not found. \n",
           "Please provide a valid \"opts_file\" path or run ",
           "without specifying one to create and save one.")
  }
  # __________________________________________________________________________
  # Initialize processing ####
  # __________________________________________________________________________
  # If test mode is selected, select the options file for testing ####
  # and set other parameters
  if (!is.null(test)) {
    gui <- FALSE
    message("MODIStsp is running in test mode.")
    # read names of available json test files
    test_files <- sort(list.files(
      path = system.file("testdata", package = "MODIStsp"),
      pattern = "^test[0-9]{2}[a-zA-Z]?\\.json$",
      full.names = TRUE))
    # numeric test ids are zero-padded to match the "testNN.json" naming scheme
    if (is.numeric(test)) {
      test <- sprintf("%02d", test)
    }
    cur_test <- paste0("test", test, ".json")
    avail_tests <- basename(test_files)
    #nocov start
    if (!cur_test %in% avail_tests) {
      stop(paste0("The test is not available. These are the currently available tests: ", #nolint
                  paste(avail_tests, collapse = ", ")))
    }
    #nocov end
    # check that the offline HDF files were unzipped - unzip them if not
    tests_hdf_zipped <- list.files(
      path = system.file("testdata", package = "MODIStsp"),
      pattern = "\\.hdf\\.zip$",
      full.names = TRUE
    )
    for (test_hdf in gsub("\\.zip$", "", tests_hdf_zipped)) {
      if (!file.exists(file.path(tempdir(), "MODIStsp/HDFs", basename(test_hdf)))) {
        unzip(zipfile = paste0(test_hdf, ".zip"),
              files = basename(test_hdf),
              exdir = file.path(tempdir(), "MODIStsp/HDFs"),
              unzip = "internal")
      }
    }
    # Assign the selected test Option File
    opts_file <- list.files(
      path = system.file("testdata", package = "MODIStsp"),
      pattern = cur_test,
      full.names = TRUE)
  }
  # __________________________________________________________________________
  # On GUI execution, ensure that shiny libraries suggested are available ####
  if (gui) {
    #nocov start
    gui_deps <- c("leaflet", "shiny",
                  "shinydashboard","shinyFiles",
                  "shinyalert","rappdirs", "shinyjs",
                  "leafem", "mapedit",
                  "magrittr")
    # requireNamespace(quietly) returns FALSE for missing packages: abort with
    # an actionable message rather than failing later inside the GUI code
    gui_deps_missing <- !sapply(gui_deps, requireNamespace, quietly = TRUE)
    if (sum(gui_deps_missing) > 0) {
      stop("You need to install the following Suggested packages to use the MODIStsp GUI.
           Please install them with:
           install.packages(c(\"leaflet\", \"shiny\",\"shinydashboard\",\"shinyFiles\",
                              \"shinyalert\", \"rappdirs\",\"shinyjs\",
                              \"leafem\", \"mapedit\", \"magrittr\"))")
    } else {
      requireNamespace("leaflet")
      requireNamespace("shiny")
      requireNamespace("shinydashboard")
      requireNamespace("shinyFiles")
      requireNamespace("shinyalert")
      requireNamespace("shinyjs")
      requireNamespace("leafem")
      requireNamespace("rappdirs")
      requireNamespace("mapedit")
      requireNamespace("magrittr")
    }
    #nocov end
  }
  gdal_version <- sf::sf_extSoftVersion()[["GDAL"]]
  if (verbose) message("GDAL version in use: ", as.character(gdal_version))
  # __________________________________________________________________________
  # On interactive execution, launch the GUI and wait for user selection. ####
  # On non-interactive immediately start processing using the processing
  # options contained in the provided `opts_file`
  if (gui) {
    #nocov start
    MODIStsp_GUI()
    #nocov end
  } else {
    if (!is.null(opts_file)) {
      proc_opts <- try(jsonlite::read_json(opts_file))
      if(inherits(proc_opts, "try-error")) {
        stop("Unable to read the provided options file. Please check your ",
             "inputs!")
      }
      if(proc_opts$MODIStspVersion < 2.0) {
        stop("Options files saved with MODIStsp versions lower than 2.0
             are no longer supported. Please save a new file using MODIStsp
             GUI.")
      }
      #TODO: function to check if file is OK
    } else {
      # Load default values - to avoid overwriting always the loaded
      # parameters with default, we set the defaults here rather than
      # in the initialization of the function!
      proc_opts <- jsonlite::read_json(system.file("ExtData",
                                                   "mstp_defaults.json",
                                                   package = "MODIStsp"))
    }
    # update proc_opts based on arguments passed to the function ----
    # Each non-NULL argument overrides the corresponding entry loaded from
    # the options file (or from the defaults).
    if(!is.null(selprod)) {proc_opts$selprod <- selprod}
    if(!is.null(prod_version)) {proc_opts$prod_version <- prod_version}
    # Guard against a missing `prod_version` entry: `NULL == "6"` yields
    # logical(0), which would make the bare `if` abort with
    # "argument is of length zero".
    if(!is.null(proc_opts$prod_version) && proc_opts$prod_version=="6") {proc_opts$prod_version <- "006"} # for retrocompatibility
    if(!is.null(bandsel)) {proc_opts$bandsel <- bandsel}
    if(!is.null(quality_bandsel)) {proc_opts$quality_bandsel <- quality_bandsel}
    if(!is.null(indexes_bandsel)) {proc_opts$indexes_bandsel <- indexes_bandsel}
    if(!is.null(sensor)) {proc_opts$sensor <- sensor}
    #TODO check correctness of platform wrt product
    if(!is.null(download_server)) {proc_opts$download_server <- download_server}
    if(!is.null(downloader)) {proc_opts$downloader <- downloader}
    # TODO replace use_aria use
    if(!is.null(user)) {proc_opts$user <- user}
    if(!is.null(password)) {proc_opts$password <- password}
    if(!is.null(download_range)) {proc_opts$download_range <- download_range}
    if(!is.null(start_date)) {proc_opts$start_date <- start_date}
    if(!is.null(end_date)) {proc_opts$end_date <- end_date}
    if(!is.null(spatmeth)) {proc_opts$spatmeth <- spatmeth}
    if(!is.null(out_projsel)) {proc_opts$out_projsel <- out_projsel}
    if(!is.null(output_proj)) {proc_opts$output_proj <- output_proj}
    if(!is.null(out_res_sel)) {proc_opts$out_res_sel <- out_res_sel}
    if(!is.null(out_res)) {proc_opts$out_res <- out_res}
    if(!is.null(resampling)) {proc_opts$resampling <- resampling}
    if(!is.null(reprocess)) {proc_opts$reprocess <- reprocess}
    if(!is.null(delete_hdf)) {proc_opts$delete_hdf <- delete_hdf}
    if(!is.null(nodata_change)) {proc_opts$nodata_change <- nodata_change}
    if(!is.null(scale_val)) {proc_opts$scale_val <- scale_val}
    if(!is.null(out_format)) {proc_opts$out_format <- out_format}
    if(!is.null(ts_format)) {proc_opts$ts_format <- ts_format}
    if(!is.null(compress)) {proc_opts$compress <- compress}
    if(!is.null(out_folder)) {proc_opts$out_folder <- out_folder}
    if(!is.null(out_folder_mod)) {proc_opts$out_folder_mod <- out_folder_mod}
    # Derive the processing extent according to the selected spatial method:
    # "tiles" uses explicit tile indexes, "file" derives bbox/tiles from a
    # spatial file, "bbox" from user coordinates, "map" only via the GUI.
    if (proc_opts$spatmeth == "tiles") {
      if(!is.null(start_x)) {proc_opts$start_x <- start_x}
      if(!is.null(end_x)) {proc_opts$end_x <- end_x}
      if(!is.null(start_y)) {proc_opts$start_y <- start_y}
      if(!is.null(end_y)) {proc_opts$end_y <- end_y}
    } else {
      if (proc_opts$spatmeth == "file") {
        if (!is.null(spafile)) {
          bbox <- bbox_from_file(spafile, crs_out = proc_opts$output_proj)
          proc_opts$bbox <- as.numeric(bbox)
          tiles <- tiles_from_bbox(bbox, proc_opts$output_proj)
          proc_opts$start_x <- tiles[1]
          proc_opts$start_y <- tiles[3]
          proc_opts$end_x <- tiles[2]
          proc_opts$end_y <- tiles[4]
        }
        #TODO update examples and website
      } else {
        if (proc_opts$spatmeth == "map") {
          # TODO how to deal with this?????
        } else {
          if (proc_opts$spatmeth == "bbox") {
            #TODO retrieve tiles from selection
            if(!is.null(bbox)) {proc_opts$bbox <- as.numeric(bbox)}
            tiles <- tiles_from_bbox(proc_opts$bbox, proc_opts$output_proj)
            proc_opts$start_x <- tiles[1]
            proc_opts$start_y <- tiles[3]
            proc_opts$end_x <- tiles[2]
            proc_opts$end_y <- tiles[4]
          }
        }
      }
    }
    # proc_opts$bbox <- as.numeric(proc_opts$bbox)
    # Resolve the "$modistest" / "$tempdir" placeholder output folders to
    # concrete paths under tempdir() (required by CRAN policy).
    if (proc_opts$out_folder == "$modistest") {
      warning(paste0(
        "Argument out_folder = '$modistest' can no longer be used ",
        "due to CRAN policy; using '$tempdir'."
      ))
      proc_opts$out_folder <- "$tempdir"
    }
    if (proc_opts$out_folder == "$tempdir") {
      proc_opts$out_folder <- file.path(tempdir(), "MODIStsp")
    }
    if (proc_opts$out_folder_mod == "$modistest") {
      warning(paste0(
        "Argument out_folder_mod = '$modistest' can no longer be used ",
        "due to CRAN policy; using '$tempdir'."
      ))
      proc_opts$out_folder_mod <- "$tempdir"
    }
    if (proc_opts$out_folder_mod == "$tempdir") {
      proc_opts$out_folder_mod <- file.path(tempdir(), "MODIStsp/HDFs")
      dir.create(dirname(proc_opts$out_folder_mod), showWarnings = FALSE)
      dir.create(proc_opts$out_folder_mod, showWarnings = FALSE)
    }
    # When the extent comes from a spatial file, outputs are nested in a
    # subfolder named after that file (scalar condition: use `&&`, not `&`)
    if (proc_opts$spatmeth == "file" && !missing(spafile)) {
      proc_opts$out_folder <- file.path(
        proc_opts$out_folder,
        tools::file_path_sans_ext(basename(spafile))
      )
    }
    # jsonlite::read_json returns list columns: flatten bbox to a vector
    if (inherits(proc_opts$bbox, "list")) {
      proc_opts$bbox <- unlist(proc_opts$bbox)
    }
    # Create output folders if needed. No recursive to avoid creating big
    # folder trees by mistake
    dir.create(proc_opts$out_folder,
               recursive = FALSE,
               showWarnings = FALSE)
    dir.create(proc_opts$out_folder_mod,
               recursive = FALSE,
               showWarnings = FALSE)
    check_opts <- check_proc_opts(proc_opts)
    MODIStsp_process(proc_opts,
                     n_retries = n_retries,
                     verbose = verbose,
                     parallel = parallel)
  }
}
| /R/MODIStsp.R | no_license | cran/MODIStsp | R | false | false | 26,579 | r | #' @title MODIStsp main function
#' @description Main function for the MODIS Time Series Processing Tool
#' (MODIStsp)
#' @details The function is used to:
#' - initialize the processing (folder names, packages, etc.);
#' - launch the GUI ([MODIStsp_GUI()]) on interactive
#' execution, or load an options file to set processing arguments and/or
#' retrieve CLI inputs and run processing on non-interactive execution;
#' - launch the routines for downloading and processing the requested datasets.
#' ([MODIStsp_process()])
#' - launching the function with GUI = FALSE and without specifying a opts_file
#' initializes arguments with default values. This allows making a test run.
#' @param gui `logical` if TRUE: the GUI is opened before processing. If FALSE:
#' processing parameters are retrieved from the provided `opts_file`
#' argument), Default: TRUE
#' @param out_folder `character` Main output folder, default: NULL.
#' @param out_folder_mod `character` Output folder for original HDF storage.
#' If `"$tempdir"` (default), a temporary directory is used.
#' @param opts_file `character` full path to a JSON file
#' containing MODIStsp processing options saved from the GUI, Default: NULL
#' @param selprod `character` Name of selected MODIS product (e.g.,
#' Vegetation Indexes_16Days_250m (M*D13Q1)). You can get
#' a list of available product names using function `MODIStsp_get_prodnames`,
#' Default: NULL
#' @param prod_version Version of the selected MODIS product.
#' Currently versions `"006"` and/or `"061"` can be chosen.
#' Default value is `"006"` until decommission of this version will be
#' announced by USGS.
#' Products with version `"061` are experimental: in case users would encounter
#' an error in the encoding of bands or quality flags they are encouraged
#' to report it by opening a new issue on GitHub at
#' \url{https://github.com/ropensci/MODIStsp/issues}.
#' @param bandsel `character array` Original MODIS layers to be processed.
#' You can get a list of available layers for a given product
#' using function `MODIStsp_get_prodlayers` (e.g., MODIStsp_get_prodlayers("M*D13Q1")$bandnames),
#' Default: NULL
#' @param quality_bandsel `character array` Quality Indicators to be computed starting from
#' bit fields of original MODIS layers. You can get a list of available quality layers for a given product
#' using function `MODIStsp_get_prodlayers` (e.g., MODIStsp_get_prodlayers("M*D13Q1")$quality_bandnames),
#' Default: NULL
#' @param indexes_bandsel `character array`Spectral Indexes to be computed starting from reflectance bands.
#' You can get a list of available quality layers for a given product
#' using function `MODIStsp_get_prodlayers` (e.g., MODIStsp_get_prodlayers("M*D13Q1")$indexes_bandnames),
#' Default: NULL
#' @param sensor `character ["Terra"| "Aqua" | "Both"]` MODIS platform to be considered.
#' (Ignored for MCD* products). Default: "Both"
#' @param download_server `character ["http" | "offline"]` service to be used for
#' download. Default: "http"
#' @param downloader `character ["http" | "aria2"]` downloader to be used,
#' Default: "http"
#' @param user `character` Username for NASA http server.
#' ([urs.earthdata.nasa.gov/home](https://urs.earthdata.nasa.gov/home)).
#' @param password `character` Password for NASA http server
#' ([urs.earthdata.nasa.gov/home](https://urs.earthdata.nasa.gov/home)).
#' @param download_range `character ["Full" | "Seasonal"]` If "full", all the
#' available images between the starting and the ending dates are downloaded;
#' If "seasonal", only the images included in the season are downloaded
#' (e.g: if the starting date is 2005-12-01 and the ending is 2010-02-31, only
#' the images of December, January and February from 2005 to 2010 - excluding
#' 2005-01, 2005-02 and 2010-12 - are downloaded), Default: Full
#' @param start_date `character` Start date for images download and preprocessing
#' (yyyy.mm.dd), Default: NULL
#' @param end_date `character` End date for images download and preprocessing
#' (yyyy.mm.dd), Default: NULL
#' @param spatmeth `character ["tiles" | "bbox" | "file"]`, indicates how the processing
#' extent is retrieves. if "tiles", use the specified tiles (start_x....).
#' If "file", retrieve extent from spatial file specifies in `spafile`. If
#' "bbox", use the specified bounding box, Default: "tiles"
#' @param start_x `integer [0-35]` Start MODIS horizontal tile defining spatial extent.
#' Ignored if spatmeth != "tiles", Default: 18
#' @param start_y `integer [0-17]` Start MODIS vertical tile defining spatial extent.
#' Ignored if spatmeth != "tiles", Default: 4
#' @param end_x `integer [0-35]` End MODIS horizontal tile defining spatial extent.
#' Ignored if spatmeth != "tiles", Default: 18
#' @param end_y `integer [0-17]` End MODIS vertical tile defining spatial extent.
#' Ignored if spatmeth != "tiles", Default: 4
#' @param bbox `numeric(4)` Output bounding box (xmin, ymin, xmax, ymax) in
#' out_proj coordinate system. Ignored if spatmeth == "tiles", Default: NULL
#' @param spafile `character` (optional) full path of a spatial file
#' to use to derive the processing extent. If not NULL, the processing options
#' which define the extent, the selected tiles and the "Full Tile / Custom"
#' in the JSON options file are overwritten and new files are created on the
#' extent of the provided spatial file. Ignored if spatmeth != "file", Default: NULL
#' @param out_projsel `character ["Native", "User Defined`] If "Native", the
#' outputs keep the original resolution of MODIS HDF images. Otherwise, the value
#' set in "out_res" is used, Default:Native
#' @param output_proj `character` either equal to "MODIS Sinusoidal",
#' or to the code of a valid EPSG or to a WKT projection string.
#' Ignored if outproj_sel == "Native", Default: NULL
#' @param out_res_sel `character ["Native", "User Defined`]. If "Native", the
#' outputs keep the original resolution of MODIS HDF images. Otherwise, the value
#' set in "out_res" is used.
#' @param out_res `float` Output resolution (in output projection measurement
#' unit). Ignored if out_res_sel == "Native".
#' @param resampling `character ["near" | "bilinear" | "cubic" | "cubicspline",
#' |lanczos"|, "average"|, "mode", |"max"|, |"min"|, |"q1"|, |"q3"|, |"sum"|]`
#' Resampling method to be used by `gdalwarp`.
#' @param reprocess `logical` If TRUE, reprocess data for already existing dates.
#' @param delete_hdf `logical` If TRUE, delete downloaded HDF files after completion.
#' @param nodata_change `logical` if TRUE, NoData values are set to the max value
#' of the datatype of the layer on the MODIStsp output rasters. NOTE: If multiple
#' nodata values are reported for a layer, all are reset to the new value.
#' @param scale_val `logical` If TRUE, scale and offset are applied to
#' original MODIS layers, and Spectral Indexes are saved as floating point. If
#' FALSE, no rescaling is done and Spectral Indexes are saved as integer, with a
#' 10000 scaling factor.
#' @param ts_format `character array including ["R RasterStack" | "ENVI Meta Files" | "GDAL VRT" |
#' "ENVI and GDAL"]` Selected virtual time series format.
#' @param out_format `character ["ENVI" | "GTiff"]` Desired output format.
#' @param compress `character ["None" | "PACKBITS" | "LZW" | "DEFLATE"]`
#' Compression method for GTiff outputs (Ignored if `out_format == ENVI`)
#' @param test `integer | character (e.g., "01a")` if set, MODIStsp is executed in
#' "test mode", using a preset Options File instead of opening the GUI or accepting the
#' `opts_file` parameter. This allows both to check correct installation on
#' user's machines, and to implement unit testing.
#' @param n_retries `numeric` maximum number of retries on download functions.
#' In case any download function fails more than `n_retries` times consecutively,
#' MODIStsp_process will abort, Default: 5
#' @param verbose `logical` If FALSE, suppress processing messages,
#' Default: TRUE
#' @param parallel `logical` If TRUE (default), the function is run using parallel
#' processing, to speed-up the computation for large rasters (with a maximum
#' of 8 cores).
#' The number of cores is automatically determined; specifying it is also
#' possible (e.g. `parallel = 4`). In this case, more than 8 cores can be
#' specified. If FALSE, single core processing is used.
#' @param ... not used for values, forces later arguments to bind by name
#' @return NULL
#'
#' @author Lorenzo Busetto, phD (2014-2017)
#' @author Luigi Ranghetti, phD (2015-2017)
#' @note License: GPL 3.0
#' @export
#' @seealso [MODIStsp_GUI()], [MODIStsp_process()]
#' @rdname MODIStsp
#' @importFrom raster rasterOptions
#' @importFrom sf sf_extSoftVersion
#' @importFrom jsonlite read_json write_json
#' @importFrom tools file_path_sans_ext
#' @importFrom utils unzip
#' @examples
#' \donttest{
#'
#' #' # - Running the tool using the GUI
#' # Running the tool without any option will start the GUI with the default or
#' # last used settings, in interactive mode (i.e., with gui = TRUE).
#' if (interactive()) {
#' MODIStsp()
#' }
#'
#'
#' #' # - Running the tool specifying processing arguments in the call
#'
#' # **NOTE** Output files of examples are saved to file.path(tempdir(), "MODIStsp").
#'
#' # Here we process layers __NDVI__ and __EVI__ and quality indicator __usefulness__
#' # of product __M*D13Q1__, considering both Terra and Aqua platforms, for dates
#' # comprised between 2020-06-01 and 2020-06-15 and saves output to R tempdir
#' # --> See name and available layers for product M*D13Q1.
#' # Note that this example (as well as the following ones) is run in single
#' # core to follow CRAN policies, by setting parallel = FALSE.
#' # Users can exploit multicore functionalities skipping to set this argument.
#'
#' # The following check is performed in order not to provide errors
#' # running the examples if HDF4 is not supported.
#' is_hdf4_supported <- "HDF4" %in% sf::st_drivers("raster")$name
#'
#' MODIStsp_get_prodlayers("M*D13A2")
#' if (is_hdf4_supported) {
#' MODIStsp(
#' gui = FALSE,
#' out_folder = "$tempdir",
#' selprod = "Vegetation_Indexes_16Days_1Km (M*D13A2)",
#' bandsel = c("EVI", "NDVI"),
#' quality_bandsel = "QA_usef",
#' indexes_bandsel = "SR",
#' user = "mstp_test" ,
#' password = "MSTP_test_01",
#' start_date = "2020.06.01",
#' end_date = "2020.06.15",
#' verbose = FALSE,
#' parallel = FALSE
#' )
#' }
#'
#'
#' #' # - Running the tool using the settings previously saved in a specific options file
#'
#' # **NOTE** Output files of examples are saved to file.path(tempdir(), "MODIStsp").
#' # You can run the examples with `gui = TRUE` to set a different output folder!
#'
#' # Here we use a test json file saved in MODIStsp installation folder which
#' # downloads and processed 3 MOD13A2 images over the Como Lake (Lombardy, Italy)
#' # and retrieves NDVI and EVI data, plus the Usefulness Index Quality Indicator.
#'
#' opts_file <- system.file("testdata/test_MOD13A2.json", package = "MODIStsp")
#'
#' if (is_hdf4_supported) {
#' MODIStsp(gui = FALSE, opts_file = opts_file, verbose = TRUE, parallel = FALSE)
#' }
#'
#'
#' # Running the tool using the settings previously saved in a specific option file
#' # and specifying the extent from a spatial file allows to re-use the same
#' # processing settings to perform download and reprocessing on a different area
#'
#' opts_file <- system.file("testdata/test_MOD13A2.json", package = "MODIStsp")
#' spatial_file <- system.file("testdata/lakeshapes/garda_lake.shp", package = "MODIStsp")
#' if (is_hdf4_supported) {
#' MODIStsp(
#' gui = FALSE,
#' opts_file = opts_file,
#' spatmeth = "file",
#' spafile = spatial_file,
#' verbose = TRUE,
#' parallel = FALSE
#' )
#' }
#'
#'
#' # Running the tool using the settings previously saved in a
#' # specific options file and specifying each time the extent from a different
#' # spatial file (e.g., to perform the same processing on several extents)
#' # Note that you can also put all your extent files in a specific folder and
#' # create the extent list using for example.
#'
#' extent_list = list.files(
#' system.file("testdata/lakeshapes/", package = "MODIStsp"),
#' "\\.shp$",
#' full.names = TRUE
#' )
#' extent_list
#' opts_file <- system.file("testdata/test_MOD13A2.json", package = "MODIStsp")
#'
#' if (is_hdf4_supported) {
#' for (single_shape in extent_list) {
#' MODIStsp(
#' gui = FALSE,
#' opts_file = opts_file,
#' spatmeth = "file",
#' spafile = single_shape,
#' verbose = TRUE,
#' parallel = FALSE
#' )
#' }
#' }
#'
#' # output files are placed in separate folders:
#' outfiles_garda <- list.files(
#' file.path(tempdir(), "MODIStsp/garda_lake/VI_16Days_1Km_v6/NDVI"),
#' full.names = TRUE
#' )
#' outfiles_garda
#' require(raster)
#' if (length(outfiles_garda) > 0) {
#' plot(raster(outfiles_garda[1] ))
#' }
#'
#' outfiles_iseo <- list.files(
#' file.path(tempdir(), "MODIStsp/iseo_lake/VI_16Days_1Km_v6/NDVI"),
#' full.names = TRUE
#' )
#' outfiles_iseo
#' if (length(outfiles_garda) > 0) {
#' plot(raster(outfiles_iseo[1]))
#' }
#'
#' # See also https://docs.ropensci.org/MODIStsp/articles/noninteractive_execution.html
#' }
MODIStsp <- function(...,
gui = TRUE,
out_folder = NULL,
out_folder_mod = NULL,
opts_file = NULL,
selprod = NULL,
prod_version = NULL,
bandsel = NULL,
quality_bandsel = NULL,
indexes_bandsel = NULL,
sensor = NULL,
download_server = NULL,
downloader = NULL,
user = NULL,
password = NULL,
download_range = NULL,
start_date = NULL,
end_date = NULL,
spatmeth = NULL,
start_x = NULL,
end_x = NULL,
start_y = NULL,
end_y = NULL,
bbox = NULL,
spafile = NULL,
out_projsel = NULL,
output_proj = NULL,
out_res_sel = NULL,
out_res = NULL,
resampling = NULL,
reprocess = NULL,
delete_hdf = NULL,
nodata_change = NULL,
scale_val = NULL,
ts_format = NULL,
out_format = NULL,
compress = NULL,
test = NULL,
n_retries = 5,
verbose = TRUE,
parallel = TRUE) {
# Make so that "raster" functions does not automatically add extensions on
# output files. This is automatically reset to TRUE at the end of the session
raster::rasterOptions(setfileext = FALSE)
proc_opts <- NULL
# _________________________________________________________________________
# check arguments ####
# if (is.null(opts_file) & gui == FALSE) {
# stop("You need to provide a valid `.json` options file to run MODIStsp",
# " in non-interactive mode. \n",
# "Please provide a valid \"opts_file\" path or run ",
# "with gui=TRUE to create and save one.")
# }
if (!is.null(opts_file)) {
if (!file.exists(opts_file))
stop("The specified `.json` options file was not found. \n",
"Please provide a valid \"opts_file\" path or run ",
"without specifying one to create and save one.")
}
# __________________________________________________________________________
# Initialize processing ####
# __________________________________________________________________________
# If test mode is selected, select the options file for testing ####
# and set other parameters
if (!is.null(test)) {
gui <- FALSE
message("MODIStsp is running in test mode.")
# read names of available json test files
test_files <- sort(list.files(
path = system.file("testdata", package = "MODIStsp"),
pattern = "^test[0-9]{2}[a-zA-Z]?\\.json$",
full.names = TRUE))
if (is.numeric(test)) {
test <- sprintf("%02d", test)
}
cur_test <- paste0("test", test, ".json")
avail_tests <- basename(test_files)
#nocov start
if (!cur_test %in% avail_tests) {
stop(paste0("The test is not available. These are the currently available tests: ", #nolint
paste(avail_tests, collapse = ", ")))
}
#nocov end
# check that the offline HDF files were unzipped - unzip them if not
tests_hdf_zipped <- list.files(
path = system.file("testdata", package = "MODIStsp"),
pattern = "\\.hdf\\.zip$",
full.names = TRUE
)
for (test_hdf in gsub("\\.zip$", "", tests_hdf_zipped)) {
if (!file.exists(file.path(tempdir(), "MODIStsp/HDFs", basename(test_hdf)))) {
unzip(zipfile = paste0(test_hdf, ".zip"),
files = basename(test_hdf),
exdir = file.path(tempdir(), "MODIStsp/HDFs"),
unzip = "internal")
}
}
# Assign the selected test Option File
opts_file <- list.files(
path = system.file("testdata", package = "MODIStsp"),
pattern = cur_test,
full.names = TRUE)
}
# __________________________________________________________________________
  # On GUI execution, ensure that shiny libraries suggested are available ####
if (gui) {
#nocov start
gui_deps <- c("leaflet", "shiny",
"shinydashboard","shinyFiles",
"shinyalert","rappdirs", "shinyjs",
"leafem", "mapedit",
"magrittr")
gui_deps_missing <- !sapply(gui_deps, requireNamespace, quietly = TRUE)
if (sum(gui_deps_missing) > 0) {
stop("You need to install the following Suggested packages to use the MODIStsp GUI.
Please install them with:
install.packages(c(\"leaflet\", \"shiny\",\"shinydashboard\",\"shinyFiles\",
\"shinyalert\", \"rappdirs\",\"shinyjs\",
\"leafem\", \"mapedit\", \"magrittr\"))")
} else {
requireNamespace("leaflet")
requireNamespace("shiny")
requireNamespace("shinydashboard")
requireNamespace("shinyFiles")
requireNamespace("shinyalert")
requireNamespace("shinyjs")
requireNamespace("leafem")
requireNamespace("rappdirs")
requireNamespace("mapedit")
requireNamespace("magrittr")
}
#nocov end
}
gdal_version <- sf::sf_extSoftVersion()[["GDAL"]]
if (verbose) message("GDAL version in use: ", as.character(gdal_version))
# __________________________________________________________________________
# On interactive execution, launch the GUI and wait for user selection. ####
# On non-interactive immediately start processing using the processing
# options contained in the provided `opts_file`
if (gui) {
#nocov start
MODIStsp_GUI()
#nocov end
} else {
if (!is.null(opts_file)) {
proc_opts <- try(jsonlite::read_json(opts_file))
if(inherits(proc_opts, "try-error")) {
stop("Unable to read the provided options file. Please check your ",
"inputs!")
}
if(proc_opts$MODIStspVersion < 2.0) {
stop("Options files saved with MODIStsp versions lower than 2.0
are no longer supported. Please save a new file using MODIStsp
GUI.")
}
#TODO: function to check if file is OK
} else {
# Load default values - to avoid overwriting always the loaded
# parameters with default, we set the defaults here rather than
# in the initialization of the function!
proc_opts <- jsonlite::read_json(system.file("ExtData",
"mstp_defaults.json",
package = "MODIStsp"))
}
# update proc_opts based on arguments passed to the function ----
if(!is.null(selprod)) {proc_opts$selprod <- selprod}
if(!is.null(prod_version)) {proc_opts$prod_version <- prod_version}
if(proc_opts$prod_version=="6") {proc_opts$prod_version <- "006"} # for retrocompatibility
if(!is.null(bandsel)) {proc_opts$bandsel <- bandsel}
if(!is.null(quality_bandsel)) {proc_opts$quality_bandsel <- quality_bandsel}
if(!is.null(indexes_bandsel)) {proc_opts$indexes_bandsel <- indexes_bandsel}
if(!is.null(sensor)) {proc_opts$sensor <- sensor}
#TODO check correctness of platform wrt product
if(!is.null(download_server)) {proc_opts$download_server <- download_server}
if(!is.null(downloader)) {proc_opts$downloader <- downloader}
# TODO replace use_aria use
if(!is.null(user)) {proc_opts$user <- user}
if(!is.null(password)) {proc_opts$password <- password}
if(!is.null(download_range)) {proc_opts$download_range <- download_range}
if(!is.null(start_date)) {proc_opts$start_date <- start_date}
if(!is.null(end_date)) {proc_opts$end_date <- end_date}
if(!is.null(spatmeth)) {proc_opts$spatmeth <- spatmeth}
if(!is.null(out_projsel)) {proc_opts$out_projsel <- out_projsel}
if(!is.null(output_proj)) {proc_opts$output_proj <- output_proj}
if(!is.null(out_res_sel)) {proc_opts$out_res_sel <- out_res_sel}
if(!is.null(out_res)) {proc_opts$out_res <- out_res}
if(!is.null(resampling)) {proc_opts$resampling <- resampling}
if(!is.null(reprocess)) {proc_opts$reprocess <- reprocess}
if(!is.null(delete_hdf)) {proc_opts$delete_hdf <- delete_hdf}
if(!is.null(nodata_change)) {proc_opts$nodata_change <- nodata_change}
if(!is.null(scale_val)) {proc_opts$scale_val <- scale_val}
if(!is.null(out_format)) {proc_opts$out_format <- out_format}
if(!is.null(ts_format)) {proc_opts$ts_format <- ts_format}
if(!is.null(compress)) {proc_opts$compress <- compress}
if(!is.null(out_folder)) {proc_opts$out_folder <- out_folder}
if(!is.null(out_folder_mod)) {proc_opts$out_folder_mod <- out_folder_mod}
if (proc_opts$spatmeth == "tiles") {
if(!is.null(start_x)) {proc_opts$start_x <- start_x}
if(!is.null(end_x)) {proc_opts$end_x <- end_x}
if(!is.null(start_y)) {proc_opts$start_y <- start_y}
if(!is.null(end_y)) {proc_opts$end_y <- end_y}
} else {
if (proc_opts$spatmeth == "file") {
if (!is.null(spafile)) {
bbox <- bbox_from_file(spafile, crs_out = proc_opts$output_proj)
proc_opts$bbox <- as.numeric(bbox)
tiles <- tiles_from_bbox(bbox, proc_opts$output_proj)
proc_opts$start_x <- tiles[1]
proc_opts$start_y <- tiles[3]
proc_opts$end_x <- tiles[2]
proc_opts$end_y <- tiles[4]
}
#TODO update examples and website
} else {
if (proc_opts$spatmeth == "map") {
# TODO how to deal with this?????
} else {
if (proc_opts$spatmeth == "bbox") {
#TODO retrieve tiles from selection
if(!is.null(bbox)) {proc_opts$bbox <- as.numeric(bbox)}
tiles <- tiles_from_bbox(proc_opts$bbox, proc_opts$output_proj)
proc_opts$start_x <- tiles[1]
proc_opts$start_y <- tiles[3]
proc_opts$end_x <- tiles[2]
proc_opts$end_y <- tiles[4]
}
}
}
}
# proc_opts$bbox <- as.numeric(proc_opts$bbox)
if (proc_opts$out_folder == "$modistest") {
warning(paste0(
"Argument out_folder = '$modistest' can no longer be used ",
"due to CRAN policy; using '$tempdir'."
))
proc_opts$out_folder <- "$tempdir"
}
if (proc_opts$out_folder == "$tempdir") {
proc_opts$out_folder <- file.path(tempdir(), "MODIStsp")
}
if (proc_opts$out_folder_mod == "$modistest") {
warning(paste0(
"Argument out_folder_mod = '$modistest' can no longer be used ",
"due to CRAN policy; using '$tempdir'."
))
proc_opts$out_folder_mod <- "$tempdir"
}
if (proc_opts$out_folder_mod == "$tempdir") {
proc_opts$out_folder_mod <- file.path(tempdir(), "MODIStsp/HDFs")
dir.create(dirname(proc_opts$out_folder_mod), showWarnings = FALSE)
dir.create(proc_opts$out_folder_mod, showWarnings = FALSE)
}
if (proc_opts$spatmeth == "file" & !missing(spafile)) {
proc_opts$out_folder <- file.path(
proc_opts$out_folder,
tools::file_path_sans_ext(basename(spafile))
)
}
if (inherits(proc_opts$bbox, "list")) {
proc_opts$bbox <- unlist(proc_opts$bbox)
}
# Create output folders if needed. No recursive to avoid creating big
# folder trees by mistake
dir.create(proc_opts$out_folder,
recursive = FALSE,
showWarnings = FALSE)
dir.create(proc_opts$out_folder_mod,
recursive = FALSE,
showWarnings = FALSE)
check_opts <- check_proc_opts(proc_opts)
MODIStsp_process(proc_opts,
n_retries = n_retries,
verbose = verbose,
parallel = parallel)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dbscan.R
\name{optics}
\alias{optics}
\title{OPTICS}
\usage{
optics(data = NULL, neighbors = NULL, edges = NULL, eps,
minPts = nrow(data) + 1, eps_cl, xi, verbose = getOption("verbose", TRUE))
}
\arguments{
\item{data}{Input data, where examples are columns.}
\item{neighbors}{An adjacency matrix of the type produced by \code{\link{randomProjectionTreeSearch}}}
\item{edges}{A weighted graph of the type produced by \code{\link{buildEdgeMatrix}}.}
\item{eps}{See \code{\link[dbscan]{optics}}.}
\item{minPts}{See \code{\link[dbscan]{optics}}.}
\item{eps_cl}{See \code{\link[dbscan]{optics}}.}
\item{xi}{See \code{\link[dbscan]{optics}}.}
\item{verbose}{Verbosity level.}
}
\value{
An \code{\link[dbscan]{optics}} object.
}
\description{
An implementation of the OPTICS algorithm.
}
\details{
This is a preliminary implementation of a variant of the OPTICS algorithm that attempts
to leverage the \code{largeVis} nearest-neighbor search.
One of \code{neighbors} or \code{edges} must be specified. If \code{edges} is missing,
\code{data} must also be given. If \code{data} is given along with either \code{edges}
or \code{neighbors}, the algorithm will attempt a more thorough search.
}
\note{
Support for dbscan and optics are preliminary, and not fully tested for
correctness.
This is not the original OPTICS algorithm. In particular, the neighbor-search strategy in
OPTICS is not used, in favor of using a pre-calculated neighbor matrix produced incidentally by
`largeVis`.
}
| /man/optics.Rd | no_license | Laurae2/largeVis | R | false | true | 1,566 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dbscan.R
\name{optics}
\alias{optics}
\title{OPTICS}
\usage{
optics(data = NULL, neighbors = NULL, edges = NULL, eps,
minPts = nrow(data) + 1, eps_cl, xi, verbose = getOption("verbose", TRUE))
}
\arguments{
\item{data}{Input data, where examples are columns.}
\item{neighbors}{An adjacency matrix of the type produced by \code{\link{randomProjectionTreeSearch}}}
\item{edges}{A weighted graph of the type produced by \code{\link{buildEdgeMatrix}}.}
\item{eps}{See \code{\link[dbscan]{optics}}.}
\item{minPts}{See \code{\link[dbscan]{optics}}.}
\item{eps_cl}{See \code{\link[dbscan]{optics}}.}
\item{xi}{See \code{\link[dbscan]{optics}}.}
\item{verbose}{Verbosity level.}
}
\value{
An \code{\link[dbscan]{optics}} object.
}
\description{
An implementation of the OPTICS algorithm.
}
\details{
This is a preliminary implementation of a variant of the OPTICS algorithm that attempts
to leverage the \code{largeVis} nearest-neighbor search.
One of \code{neighbors} or \code{edges} must be specified. If \code{edges} is missing,
\code{data} must also be given. If \code{data} is given along with either \code{edges}
or \code{neighbors}, the algorithm will attempt a more thorough search.
}
\note{
Support for dbscan and optics are preliminary, and not fully tested for
correctness.
This is not the original OPTICS algorithm. In particular, the neighbor-search strategy in
OPTICS is not used, in favor of using a pre-calculated neighbor matrix produced incidentally by
`largeVis`.
}
|
##' Validate raw PSM input data for the saas GUI.
##'
##' Parses `rawinput` as delimited text and verifies that it contains the
##' columns and value ranges required by the subset FDR calculation.
##' Problems are reported as a human-readable status string (instead of a
##' signalled error) so the message can be shown directly in the UI.
##' @param rawinput the raw input: a character string of delimited text
##' @param sep the separator used in `rawinput` (default: comma)
##' @return A character string: 'Data format correct' when the data can be
##'   used, otherwise a message describing the first problem found.
##' @keywords internal
##' @author
check_input = function(rawinput, sep = ','){
  ## Parse quietly; a malformed file should yield a status message, not an error.
  df = try({read.csv(text = rawinput, sep = sep)}, silent = TRUE)
  ## inherits() is the robust test for try() failures: class(df) can have
  ## length > 1, which makes `class(df) == 'try-error'` unreliable in `if`.
  if (inherits(df, 'try-error')) {return('cannot read input')}
  if (any(!(c('id','score','decoy','subset') %in% colnames(df))))
    {return('missing columns or wrong column names')}
  if (!is.numeric(df$score)) {return('Score should be numeric')}
  ## decoy/subset must be indicator variables; 0/1 required (TRUE/FALSE also
  ## passes because %in% coerces logicals to numeric).
  if (any(!(df$decoy) %in% c(0,1))) {return('Decoy variable should be 0 or 1')}
  if (any(!(df$subset) %in% c(0,1))) {return('Subset variable should be 0 or 1')}
  ## The FDR procedure needs at least one PSM of each category.
  if (!any((df$decoy == 1))) {return('No decoys available')}
  if (!any((df$decoy == 0))) {return('No targets available')}
  if (!any((df$subset == 1))) {return('No subset PSMs indicated')}
  if (!any((df$decoy[df$subset == 1] == 1))) {return('No subset decoys available')}
  return('Data format correct')
}
#' plots the theoretical distribution of all components in the PSM distribution.
#'
#' Evaluates the normal densities of the four PSM components (correct and
#' incorrect subset targets, subset decoys, extra decoys) on a fixed score
#' grid and draws them as a single ggplot.
#' @param H0_mean mean of the incorrect subset target score distribution
#' @param H1_mean mean of the correct subset target score distribution
#' @param H0_sd standard deviation of the incorrect subset target distribution
#' @param H1_sd standard deviation of the correct subset target distribution
#' @param decoy_mean mean of the subset decoy distribution (defaults to H0_mean)
#' @param decoy_sd standard deviation of the subset decoy distribution (defaults to H0_sd)
#' @param decoy_extra_mean mean of the extra (non-subset) decoy distribution
#' @param decoy_extra_sd standard deviation of the extra decoy distribution
#' @return A list with two elements: `data`, the long-format data frame of
#'   theoretical densities, and `plot`, the ggplot object. The plot is also
#'   printed as a side effect.
##' @keywords internal
#' @import dplyr
#' @import ggplot2
#' @examples 1
plot_theo_dist = function(H0_mean=2.75, H1_mean=3.31,H0_sd=.13,H1_sd=.28,
decoy_mean = H0_mean, decoy_sd = H0_sd,
decoy_extra_mean = H0_mean, decoy_extra_sd = H0_sd){
## make grid of scores
d = data_frame(score = seq(1,5,.01))
## calculate theoretical density for each distribution, stacked long-format
## so ggplot can map one line per component
d = bind_rows(
mutate(d,dens = dnorm(score,H1_mean,H1_sd),
PSMs = 'correct subset')
,
mutate(d,dens = dnorm(score,H0_mean,H0_sd),
PSMs = 'incorrect subset')
,
mutate(d,dens = dnorm(score,decoy_mean,decoy_sd),
PSMs = 'subset decoy')
,
mutate(d,dens = dnorm(score,decoy_extra_mean,decoy_extra_sd),
PSMs = 'extra decoy')
)
## fix factor levels so the legend keeps the order of appearance above
d = mutate(d,PSMs = factor(PSMs,levels = unique(d$PSMs)))
p1 = ggplot(d,aes(score,dens,col = PSMs,size = PSMs,linetype = PSMs)) + geom_line() +
labs(x = 'Score',y = 'Density') +
theme(axis.title = element_text(size = rel(1.2)), axis.title.y = element_text(angle = 0),
legend.position = 'top') +
scale_size_manual(values=c(4,4,1,1))+
scale_linetype_manual(values=c(1,1,1,2)) +
scale_color_manual(values = c('green','red','orange','blue'))
print(p1)
list(data = d,plot = p1)
}
##' server
##'
##' Shiny server function for the saas GUI. Wires up the reactive logic of
##' the four tabs: data input, FDR calculation, diagnostic plots and
##' simulation.
##' @param input shiny input object
##' @param output shiny output object
##' @param session shiny session object
##' @return Called for its side effects on `output`; the return value is not used.
##' @author
##' @keywords internal
server = function(input, output, session) {
observe({cat(input$tabs, '\n')})
## logic data input tab
#######################
## read data from given path
rawfiledata = reactive({readLines(req(input$file1$datapath))})
# ## or read example data
rawexamplefiledata = reactive({
req(input$action)
readLines(system.file("extdata", "cytoplasm.csv", package = "saas"))})
## Whichever source fires last (file upload or example button) fills the
## raw-input text area.
observe({updateTextInput(session,inputId = 'rawinput',value = paste0(rawfiledata(),collapse = '\n'))})
observe({updateTextInput(session,inputId = 'rawinput',value = paste0(rawexamplefiledata(),collapse = '\n'))})
rawdata = reactive({req(input$rawinput)})
## Validation message from check_input(), shown on the data input tab.
generate_errormessage = reactive({check_input(rawdata())})
output$errormessage = reactive({generate_errormessage()})
## Parsed data frame; only available once the input validates.
df = reactive({req(generate_errormessage() == 'Data format correct')
read.csv(text = rawdata(),sep = ',')})
## logic calculation tab
########################
df_calc = reactive({calculate_fdr(df(), score_higher = input$scorehigher)})
## Result table with FDR columns rounded to 3 decimals for display.
output$contents <- renderDataTable({
mutate_at(req(df_calc()),
vars(score,pi_0_cons, FDR, FDR_BH, FDR_stable),
funs(round(.*1000)/1000))
}, options = list(
lengthMenu = c(10, 20, 100, 1000),
pageLength = 20
))
output$downloadData <- downloadHandler(
filename = function() {
paste(input$file1$datapath, '_fdr.csv', sep = '')
},
content = function(file) {
write.csv(df_calc(), file)
})
## logic diagnostic tab
#######################
output$plot1 = renderPlot({
plot_diag(df())$all
}, width = 940,height = 410)
## logic simulation tab
#######################
plotinput = reactiveValues()
## Mirror a numeric input into `plotinput`, resetting it to `val` when the
## user enters an empty or non-positive value.
check_min_val = function(par,val){
observe({
plotinput[[par]] = input[[par]]
if (is.na(input[[par]]) |!(input[[par]] > 0)) {
updateTextInput(session,inputId = par,value = val)
plotinput[[par]] = val
}})
}
## check for impossible values in input
check_min_val("subset_n",1)
check_min_val("decoy_n",0)
check_min_val("decoy_extra_n",0)
check_min_val("H1_sd",0)
check_min_val("H0_sd",0)
check_min_val("decoy_sd",0)
check_min_val("decoy_extra_sd",0)
observeEvent((input$make_subset_action),{
par = simulate_subset(input$subset_n,input$subset_pi0)
updateTextInput(session,inputId = 'H1_n',value = par$H1_n)
updateTextInput(session,inputId = 'H0_n',value = par$H0_n)
updateTextInput(session,inputId = 'decoy_n',value = par$decoy_n)
})
## check if decoy dist are the same and change value of respective boxes
observe({
if(input$check_subset_decoys_same){
updateTextInput(session,inputId = 'decoy_mean',value = input$H0_mean)
plotinput$decoy_mean = input$H0_mean
updateTextInput(session,inputId = 'decoy_sd',value = input$H0_sd)
}
if(input$check_extra_decoys_same){
updateTextInput(session,inputId = 'decoy_extra_mean',value = input$decoy_mean)
plotinput$decoy_extra_mean = input$decoy_mean
updateTextInput(session,inputId = 'decoy_extra_sd',value = input$decoy_sd)
}
})
## check if there are enough PSMs for diagnostics
#enough_targets_decoys = reactive({input$H1_n + input$H0_n) > 0) & (input$decoy_n > 0)})
#output$not_enough_decoys_targets = reactive({ifelse(enough_targets_decoys,'',
# 'You need at least 1 decoy or target to visualize the diagnostic plots!')})
## Theoretical component densities for the current simulation settings.
output$plot_theo = renderPlot({
plot_theo_dist(
H0_mean = input$H0_mean,
H1_mean = input$H1_mean,
H0_sd = plotinput$H0_sd,
H1_sd = plotinput$H1_sd,
decoy_mean = plotinput$decoy_mean,
decoy_sd = plotinput$decoy_sd,
decoy_extra_mean = plotinput$decoy_extra_mean,
decoy_extra_sd = plotinput$decoy_extra_sd)$plot
}, width = 400, height = 300)
## Simulate a data set on button press and show its diagnostic plots.
output$plot_sim_diag = renderPlot({
req(input$action_simulate)
isolate({
decoy_n = input$decoy_n
if (input$check_also_decoys){
decoy_n =simulate_subset(input$subset_n,input$pi0)$decoy_n
updateTextInput(session,inputId = 'decoy_n',value = decoy_n)
}
target_n = input$subset_n - decoy_n
## NOTE(review): rpi0/sample_dataset/plot_diag are defined elsewhere in the
## package; presumably rpi0 draws pi0 from its posterior — confirm there.
pi0 = rpi0(1, target_n, decoy_n)
H0_n = round(pi0 * target_n)
H1_n = target_n - H0_n
plotdata = sample_dataset(
H1_n = H1_n,
decoy_n = decoy_n,
decoy_large_n = input$decoy_extra_n,
H0_n = H0_n,
H0_mean = input$H0_mean,
H1_mean = input$H1_mean,
H0_sd = input$H0_sd,
H1_sd = input$H1_sd,
decoy_mean = input$decoy_mean,
decoy_sd = input$decoy_sd,
decoy_large_mean = input$decoy_extra_mean,
decoy_large_sd = input$decoy_extra_sd)
plot_diag(plotdata)$all
})
}, width = 940,height = 410)
}
##' UI shiny app
##'
##' Builds the saas GUI: a navbar page with four tabs (data input, FDR
##' calculation, diagnostic plots and simulation). Most panel content is
##' static markdown converted to HTML; the widget ids declared here
##' (file1, rawinput, scorehigher, ...) are consumed by `server`.
##' @return A shiny UI object created by `fluidPage()`.
##' @author
##' @keywords internal
ui = function() fluidPage(
navbarPage(
## Links shown above every tab (source code and publication).
header = list('Source code and offline application: ',
tags$a(href = 'https://github.com/compomics/search-all-assess-subset/',
'https://github.com/compomics/search-all-assess-subset/'), tags$br(),
## 'Details on FDR procedure:',
## tags$a(href = 'https://git.io/vDdWq',
## 'https://git.io/vDdWq'),tags$br(),
## 'Bioarxiv preprint: ',
## tags$a(href = 'https://doi.org/10.1101/094581',
## 'https://doi.org/10.1101/094581')),
'Publication: ',
tags$a(href = 'https://doi.org/10.1038/nmeth.4338',
'Sticker et al. Nature Methods 14, 643–644 (2017) doi:10.1038/nmeth.4338'
)),
title = 'Search All, Assess Subset',
## Tab 1: load or paste PSM data; validation message appears next to the
## text area (output$errormessage in server).
tabPanel('Data input',
sidebarLayout(
sidebarPanel(width = 7,
HTML(markdownToHTML(text =
'
This tool demonstrates the search-all-asses-subset method to control
the False Discovery Rate (FDR) when you are only interested in a subset
of the identified PSMs from a shotgun proteomics experiment.
The workflow is as follows:
1. search spectra against all expected proteins
2. Remove PSMs from irrelevant proteins
3. Calculate FDR
You can load the data from step **1.** into this webtool to perform
step **2.** and **3.**'
)), hr(),
fileInput ('file1',
'Choose Text File',
accept = c('text/csv',
'text/comma-separated-values,text/plain',
'.csv')
),
actionButton("action", label = "Load example data"),
checkboxInput("scorehigher", "Are higher scores better?", TRUE),
HTML(markdownToHTML(text =
'
Use the **Browse...** button above to load a CSV file from your computer
or paste the data in the **text area** to the right.
Adhere to the following format:
* Every Row is a unique PSM
* Every row contains:
+ **id**: Can be any text or number
+ **score**: Score given to the PSM, higher scores are better
+ **decoy**: FALSE or TRUE; TRUE indicates that the PSM matches a decoy peptide sequence
+ **subset**: FALSE or TRUE: TRUE indicates that the PSM matches a subset peptide sequence
* The first row are the column names
* All columns should be comma separated
Note that the row with extra set of decoys (**decoy** = TRUE and **subset** = FALSE) are not necessarily the non-subset decoys from the same mass spectrometry run but can be chosen by the user.
Example input:
<pre>
id,score,decoy,subset
1,7.67,TRUE,FALSE
2,10.99,TRUE,TRUE
3,75.10,FALSE,FALSE
4,73.83,FALSE,TRUE
</pre>
**Warning:** use this tool only with PSMs from a competitive target-decoy search!
The minimal required input are PSMs from subset targets and decoys (row 2 and 4 in example input).
Additional decoy PSMs (row 1) are used for a more stable FDR calculation.
Non subset targets (row 3) are ignored in the analysis.
'))
),
mainPanel(width = 5,h3(textOutput('errormessage',container = span)),
tags$textarea(id = 'rawinput',label = '',
rows = '20',cols = 40))
)),
## Tab 2: FDR result table plus download button.
tabPanel('Calculate FDR',
sidebarLayout(
sidebarPanel(width = 12,
fluidRow(column(width = 10, HTML(markdownToHTML(text = '
Download the results to your computer:
'))),
column(width = 2, checkboxInput("res_more_info", label = "More info")),
downloadButton('downloadData', 'Download'),
column(width = 12, conditionalPanel(condition = "input.res_more_info == true",
withMathJax(),
HTML(markdownToHTML(text =
'The following columns were calculated:
* **pi_0_cons**: A conservative estimation of $\\pi_0$. (pi_0_cons = (#decoys+1) / (#targets+1})
* **FDR**: The estimated FDR at this score cutoff for subset PSMs.
Calculated according the classical TDA method. Does not work well small subsets.
Missing for non-subset or decoy PSMs.
* **FDR_stable**: The estimated stable FDR at this score cutoff for subset PSMs.
This FDR is estimated from the complete decoy set and *pi_0_cons*.
This method is more stable for smaller subsets and will for large subset be close to the **FDR** estimates.
Missing for non-subset or decoy PSMs.
Please check the diagnostics on the next tab.
* **FDR_BH**: The estimated stable FDR at this score cutoff for subset PSMs.
This FDR is estimated from the complete decoy set and according the Benjamini-Hochberg FDR procedure ($\\pi_0$ set to 1).
This FDR estimate is more conservative then **FDR** and **FDR_BH**.
Use this method when you have a large decoy set but no decoy information on the subset PSMs (eg. when the search engine does not return the decoy protein ids).
Please check the diagnostics on the next tab.
Missing for non-subset or decoy PSMs.
')))))),
mainPanel(width = 12,dataTableOutput('contents'))
)),
## Tab 3: diagnostic plots with an expandable explanation.
tabPanel('Check diagnostic plots',
sidebarLayout(
sidebarPanel(width = 12,
fluidRow(column(width = 10, HTML(markdownToHTML(text = '
These are diagnostic plots to evaluate the quality of the decoy set and the uncertainty in the estimated fraction of incorrect target PSMs (pi0).
This allows an informed choice on the use of the stable all-sub FDR estimator and the large decoy set.'))),
column(width = 2, checkboxInput("diag_more_info", label = "More info")),
column(width = 12, conditionalPanel(condition = "input.diag_more_info == true",
HTML(markdownToHTML(text = '
**Panel a** shows the posterior distribution of pi_0 given the observed number of target and decoy PSMs in the subset.
The vertical line indicates the conservative pi_0 estimate used in the calculations.
At very high pi_0 uncertainty (broad peak), you can opt to use the BH procedure to minimize sample to sample variability (see **FDR_BH** in the output).
However, this will come at the expense of too conservative PSM lists.
Our improved TDA for subsets relies on the assumption that incorrect subset PSMs and the complete set of decoys have the same distribution.
This distributional assumption can be verified through a PP-plot where the empirical Cumulative Distribution Function (eCDF) of the decoys is plotted against the eCDF of the subset target PSMs.
The PP-plots in **panel b - d** display the target subset PSMs plotted against all decoy PSMs from the complete search, the decoy subset PSMs plotted against all decoy PSMs from the complete search, and the target subset PSMs plotted against the decoy PSMs from the complete search, respectively.
The full line in panel **b** and **d** indicates a line with a slope of pi_0.
The full line in panel **c** indicates the identity line.
The first part of the plot in **b** and **d** should be linear with a slope that equals pi_0.
The second part of the plot will deviate from this line towards higher percentiles and will ultimately become vertical (decoy percentile = 1).
If we see this profile in panel **b**, we have a good indication that the set of decoys from the complete search is representative for the mixture component for incorrect PSMs of the target mixture distribution.
When there is high uncertainty on pi_0 as indicated by **a**, then the linear pattern in the data points might deviate from the drawn solid line, but should still be more or less linear.
Deviations from this pattern might be subtle, therefore we provide the PP plots in **c** and **d** to support the conclusion drawn from panel **b**.
The PP-plot in panel **c** shows the subset decoy PSMs plotted against all decoy PSMs.
The whole plot should follow the identity line, indicating that the complete set of decoys is a good representation of the subset decoys.
To verify that the subset decoys (and thus also the complete set of decoys) are representative for the mixture component for incorrect PSMs of the target mixture distribution, we look at the PP-plot of the subset decoys against the subset targets in panel **d**.
The profile should look as described for panel **b**.
If the profile matches in panel **d** but does not for panel **b**, then we suggest to not use the extra decoy set and use only the subset decoys for FDR estimation.
When the profile does not match in panel **d**, the subset decoys might not be representative for incorrect PSMs. This can indicate that pi_0 is estimated incorrectly, since this is based on the subset PSM scores.
In this case, the first part of the plot in panel **d** can deviate from the (incorrect) pi_0 slope line.
But if this first part is linear, it still indicates that the extra set of decoys is representative for the mixture component of incorrect target PSMs. Since pi_0 is unknown we set it to 1 (see **FDR_BH** in the output).
When you are not sure how the diagnostic plots should look like, you can simulate your own data under various (erratic) settings in the simulation tab.
'))))))
,
mainPanel(width = 12,plotOutput('plot1')
)))
,
## Tab 4: simulate data sets and inspect the resulting diagnostics.
tabPanel('simulation',
sidebarLayout(
sidebarPanel(width = 12,
fluidRow(column(width = 10,HTML(markdownToHTML(text = '
In this tab, you can simulate your own data and look at examples of diagnostic plots.'))),
column(width = 2, checkboxInput("simulation_more_info", label = "More info")),
column(width = 12, conditionalPanel(condition = "input.simulation_more_info == true",
HTML(markdownToHTML(text = '
Random datasets are generated based on a observed number of target and decoy subset PSMs.
Optionally, you can also generate a random number of subset decoy PSMs based on the observed number of subset target PSMs and a theoretic pi0 that you can choose.
In the default setting, the decoy distribution is equal to the incorrect subset target distribution.
This means that the diagnostic plots from the simulated datasets are exemplary for experimental settings where the assumptions are grounded and you can safely use the decoys for FDR estimation.
Optionally, you can change the mean and standard deviation of the subset decoys and/or the large set of extra decoys, violating the assumption that they are representative for the incorrect subset targets.
Plots generated by these simulated datasets are examples of diagnostic plots when the assumptions are violated an you should not use these decoys for FDR estimation.
For more information on how to interpret the generated diagnostic plots, please read the information in the **Check diagnostic plots** tab.'))))))
,
mainPanel(width = 12,
## Counts of simulated subset PSMs / decoys / extra decoys.
column(6,
fluidRow(column(6, '# subset PSMs'),
column(6, tags$input(id = 'subset_n',type = "number",
value = 200,min = 1,step = 10))
),
fluidRow(column(6, '# subset decoy PSMs'),
column(6, tags$input(id = 'decoy_n',type = "number",
value = 20,min = 0,step = 10))),
fluidRow(column(6, '# extra decoys'),
column(6, tags$input(id = 'decoy_extra_n',type = "number",
value = 2000,min = 0,step = 10))))
,
## column(1,actionButton("action_simulate", label = "GO")),
column(6,checkboxInput("check_also_decoys", label = "Simulate also decoys"),
conditionalPanel(
condition = "input.check_also_decoys == true",
column(4, 'with pi0:'),
column(7, tags$input(id = 'pi0',type = "number",
value = .2,min = 0,max = 1,step = .1))
)
)
,
## Parameters of the target mixture components.
column(12,br(),
column(12,
HTML(markdownToHTML(text ='__Subset target mixture distribution__')))
,
column(6, 'incorrect targets',
wellPanel(
fluidRow(column(6, numericInput('H0_mean', 'mean', 2.75,step = .1)),
column(6, numericInput('H0_sd', 'sd', 0.13,step = .1)))))
,
column(6, 'correct targets',
wellPanel(
fluidRow(column(6, numericInput('H1_mean', 'mean', 3.31,step = .1)),
column(6, numericInput('H1_sd', 'sd', 0.28,step = .1))))))
,
## Decoy distributions; hidden while "same as ..." boxes are ticked.
column(6,
HTML(markdownToHTML(text ='__Subset decoys distribution__')),
checkboxInput('check_subset_decoys_same','Same as incorrect target PSMs?',value = TRUE)
,
conditionalPanel(
condition = "input.check_subset_decoys_same == false",
wellPanel(
fluidRow(column(6, numericInput('decoy_mean', 'mean', 0,step = .1)),
column(6, numericInput('decoy_sd', 'sd', 0,step = .1))))
)
,
HTML(markdownToHTML(text ='__Extra decoys distribution__')),
checkboxInput('check_extra_decoys_same','Same as subset decoy PSMs?',value = TRUE)
,
conditionalPanel(condition = "input.check_extra_decoys_same == false",
wellPanel(
fluidRow(column(6, numericInput('decoy_extra_mean', 'mean', 0,step = .1)),
column(6, numericInput('decoy_extra_sd', 'sd', 0,step = .1))))
),
actionButton("action_simulate", label = "SIMULATE",style = 'font-size:150%'))
,
column(6,plotOutput('plot_theo'))
,
fluidRow(column(12,plotOutput('plot_sim_diag')))
)
))
))
#' Launches the GUI version of saas.
#'
#' Starts the Shiny application demonstrating the search-all-assess-subset
#' FDR procedure. To easily launch the GUI outside an R session (eg. on a
#' server), you can run R -e "library(saas);saas_gui()" from the terminal
#' (on linux/mac).
#'
#' @param options See help of shiny::shinyApp for more details on available options
#' @return A shiny app object; printing/running it starts the application.
#' @export
#' @import shiny
#' @import markdown
#' @examples 1
saas_gui = function(options = list(port = 3320, host = "0.0.0.0")) {
  shinyApp(ui = ui(), server = server, options = options)
}
| /R/app.R | no_license | ab604/search-all-assess-subset | R | false | false | 22,950 | r | ##' .. content ..
##'
##' .. content ..
##' @param rawinput the raw input
##' @param sep the separator
##' @return
##' @keywords internal
##' @author
check_input = function(rawinput,sep = ','){
df = try({read.csv(text = rawinput,sep = sep)},silent = TRUE)
if (class(df) == 'try-error'){return('cannot read input')}
if(any(!(c('id','score','decoy','subset') %in% colnames(df))))
{return('missing columns or wrong column names')}
if(!is.numeric(df$score)) {return('Score should be numeric')}
if(any(!(df$decoy ) %in% c(0,1))) {return('Decoy variable should be 0 or 1')}
if(any(!(df$subset ) %in% c(0,1))) {return('Subset variable should be 0 or 1')}
if(!any((df$decoy == 1))) {return('No decoys available')}
if(!any((df$decoy == 0))) {return('No targets available')}
if(!any((df$subset == 1))) {return('No subset PSMs indicated')}
if(!any((df$decoy[df$subset == 1] == 1))) {return('No subset decoys available')}
return('Data format correct')
}
#' plots the theoretical distribution of all components in the PSM distribution.
#'
#' Evaluates the normal densities of the four PSM components (correct and
#' incorrect subset targets, subset decoys, extra decoys) on a fixed score
#' grid and draws them as a single ggplot.
#' @param H0_mean mean of the incorrect subset target score distribution
#' @param H1_mean mean of the correct subset target score distribution
#' @param H0_sd standard deviation of the incorrect subset target distribution
#' @param H1_sd standard deviation of the correct subset target distribution
#' @param decoy_mean mean of the subset decoy distribution (defaults to H0_mean)
#' @param decoy_sd standard deviation of the subset decoy distribution (defaults to H0_sd)
#' @param decoy_extra_mean mean of the extra (non-subset) decoy distribution
#' @param decoy_extra_sd standard deviation of the extra decoy distribution
#' @return A list with two elements: `data`, the long-format data frame of
#'   theoretical densities, and `plot`, the ggplot object. The plot is also
#'   printed as a side effect.
##' @keywords internal
#' @import dplyr
#' @import ggplot2
#' @examples 1
plot_theo_dist = function(H0_mean=2.75, H1_mean=3.31,H0_sd=.13,H1_sd=.28,
decoy_mean = H0_mean, decoy_sd = H0_sd,
decoy_extra_mean = H0_mean, decoy_extra_sd = H0_sd){
## make grid of scores
d = data_frame(score = seq(1,5,.01))
## calculate theoretical density for each distribution, stacked long-format
## so ggplot can map one line per component
d = bind_rows(
mutate(d,dens = dnorm(score,H1_mean,H1_sd),
PSMs = 'correct subset')
,
mutate(d,dens = dnorm(score,H0_mean,H0_sd),
PSMs = 'incorrect subset')
,
mutate(d,dens = dnorm(score,decoy_mean,decoy_sd),
PSMs = 'subset decoy')
,
mutate(d,dens = dnorm(score,decoy_extra_mean,decoy_extra_sd),
PSMs = 'extra decoy')
)
## fix factor levels so the legend keeps the order of appearance above
d = mutate(d,PSMs = factor(PSMs,levels = unique(d$PSMs)))
p1 = ggplot(d,aes(score,dens,col = PSMs,size = PSMs,linetype = PSMs)) + geom_line() +
labs(x = 'Score',y = 'Density') +
theme(axis.title = element_text(size = rel(1.2)), axis.title.y = element_text(angle = 0),
legend.position = 'top') +
scale_size_manual(values=c(4,4,1,1))+
scale_linetype_manual(values=c(1,1,1,2)) +
scale_color_manual(values = c('green','red','orange','blue'))
print(p1)
list(data = d,plot = p1)
}
##' server
##'
##' @param input input
##' @param output output
##' @param session session
##' @return
##' @author
##' @keywords internal
server = function(input, output, session) {
observe({cat(input$tabs, '\n')})
## logic data input tab
#######################
## read data from given path
rawfiledata = reactive({readLines(req(input$file1$datapath))})
# ## or read example data
rawexamplefiledata = reactive({
req(input$action)
readLines(system.file("extdata", "cytoplasm.csv", package = "saas"))})
observe({updateTextInput(session,inputId = 'rawinput',value = paste0(rawfiledata(),collapse = '\n'))})
observe({updateTextInput(session,inputId = 'rawinput',value = paste0(rawexamplefiledata(),collapse = '\n'))})
rawdata = reactive({req(input$rawinput)})
generate_errormessage = reactive({check_input(rawdata())})
output$errormessage = reactive({generate_errormessage()})
df = reactive({req(generate_errormessage() == 'Data format correct')
read.csv(text = rawdata(),sep = ',')})
## logic calculation tab
########################
df_calc = reactive({calculate_fdr(df(), score_higher = input$scorehigher)})
output$contents <- renderDataTable({
mutate_at(req(df_calc()),
vars(score,pi_0_cons, FDR, FDR_BH, FDR_stable),
funs(round(.*1000)/1000))
}, options = list(
lengthMenu = c(10, 20, 100, 1000),
pageLength = 20
))
output$downloadData <- downloadHandler(
filename = function() {
paste(input$file1$datapath, '_fdr.csv', sep = '')
},
content = function(file) {
write.csv(df_calc(), file)
})
## logic diagnostic tab
#######################
output$plot1 = renderPlot({
plot_diag(df())$all
}, width = 940,height = 410)
## logic simulation tab
#######################
plotinput = reactiveValues()
check_min_val = function(par,val){
observe({
plotinput[[par]] = input[[par]]
if (is.na(input[[par]]) |!(input[[par]] > 0)) {
updateTextInput(session,inputId = par,value = val)
plotinput[[par]] = val
}})
}
## check for impossible values in input
check_min_val("subset_n",1)
check_min_val("decoy_n",0)
check_min_val("decoy_extra_n",0)
check_min_val("H1_sd",0)
check_min_val("H0_sd",0)
check_min_val("decoy_sd",0)
check_min_val("decoy_extra_sd",0)
observeEvent((input$make_subset_action),{
par = simulate_subset(input$subset_n,input$subset_pi0)
updateTextInput(session,inputId = 'H1_n',value = par$H1_n)
updateTextInput(session,inputId = 'H0_n',value = par$H0_n)
updateTextInput(session,inputId = 'decoy_n',value = par$decoy_n)
})
## check if decoy dist are the same and change value of respective boxes
# When the "same as ..." checkboxes are ticked, mirror the reference
# distribution's mean/sd into the decoy (resp. extra-decoy) widgets.
# Only the means are copied into plotinput directly; the sd values reach
# plotinput through the check_min_val() observers once the widgets update --
# presumably intentional, but worth confirming.
observe({
if(input$check_subset_decoys_same){
updateTextInput(session,inputId = 'decoy_mean',value = input$H0_mean)
plotinput$decoy_mean = input$H0_mean
updateTextInput(session,inputId = 'decoy_sd',value = input$H0_sd)
}
if(input$check_extra_decoys_same){
updateTextInput(session,inputId = 'decoy_extra_mean',value = input$decoy_mean)
plotinput$decoy_extra_mean = input$decoy_mean
updateTextInput(session,inputId = 'decoy_extra_sd',value = input$decoy_sd)
}
})
## check if there are enough PSMs for diagnostics
# Dead code kept for reference: a validation message for too-small inputs.
# Note its parentheses are unbalanced, so it would not parse if re-enabled.
#enough_targets_decoys = reactive({input$H1_n + input$H0_n) > 0) & (input$decoy_n > 0)})
#output$not_enough_decoys_targets = reactive({ifelse(enough_targets_decoys,'',
# 'You need at least 1 decoy or target to visualize the diagnostic plots!')})
# Theoretical density plot of the four simulated distributions.
# Means are read from the raw inputs while the sd's (and decoy means) come
# from plotinput, i.e. the sanitised copies maintained by check_min_val() and
# the sync observer above -- so invalid sd entries cannot reach
# plot_theo_dist().
output$plot_theo = renderPlot({
plot_theo_dist(
H0_mean = input$H0_mean,
H1_mean = input$H1_mean,
H0_sd = plotinput$H0_sd,
H1_sd = plotinput$H1_sd,
decoy_mean = plotinput$decoy_mean,
decoy_sd = plotinput$decoy_sd,
decoy_extra_mean = plotinput$decoy_extra_mean,
decoy_extra_sd = plotinput$decoy_extra_sd)$plot
}, width = 400, height = 300)
# Diagnostic plots for one simulated dataset.
# req() + isolate(): the plot is (re)drawn only when the SIMULATE button is
# pressed; none of the other inputs trigger a redraw on their own.
output$plot_sim_diag = renderPlot({
req(input$action_simulate)
isolate({
decoy_n = input$decoy_n
# Optionally draw the number of subset decoys at random from the chosen
# theoretical pi0, and reflect the drawn value back into the widget.
if (input$check_also_decoys){
decoy_n =simulate_subset(input$subset_n,input$pi0)$decoy_n
updateTextInput(session,inputId = 'decoy_n',value = decoy_n)
}
# Split the remaining targets into incorrect (H0) and correct (H1) PSMs
# using a pi0 value sampled by rpi0() (defined elsewhere in the package).
target_n = input$subset_n - decoy_n
pi0 = rpi0(1, target_n, decoy_n)
H0_n = round(pi0 * target_n)
H1_n = target_n - H0_n
# sample_dataset() (defined elsewhere) draws the synthetic PSM scores.
plotdata = sample_dataset(
H1_n = H1_n,
decoy_n = decoy_n,
decoy_large_n = input$decoy_extra_n,
H0_n = H0_n,
H0_mean = input$H0_mean,
H1_mean = input$H1_mean,
H0_sd = input$H0_sd,
H1_sd = input$H1_sd,
decoy_mean = input$decoy_mean,
decoy_sd = input$decoy_sd,
decoy_large_mean = input$decoy_extra_mean,
decoy_large_sd = input$decoy_extra_sd)
plot_diag(plotdata)$all
})
}, width = 940,height = 410)
}
##' UI shiny app
##'
##' @return
##' @author
##' @keywords internal
ui = function() fluidPage(
navbarPage(
header = list('Source code and offline application: ',
tags$a(href = 'https://github.com/compomics/search-all-assess-subset/',
'https://github.com/compomics/search-all-assess-subset/'), tags$br(),
## 'Details on FDR procedure:',
## tags$a(href = 'https://git.io/vDdWq',
## 'https://git.io/vDdWq'),tags$br(),
## 'Bioarxiv preprint: ',
## tags$a(href = 'https://doi.org/10.1101/094581',
## 'https://doi.org/10.1101/094581')),
'Publication: ',
tags$a(href = 'https://doi.org/10.1038/nmeth.4338',
'Sticker et al. Nature Methods 14, 643–644 (2017) doi:10.1038/nmeth.4338'
)),
title = 'Search All, Assess Subset',
tabPanel('Data input',
sidebarLayout(
sidebarPanel(width = 7,
HTML(markdownToHTML(text =
'
This tool demonstrates the search-all-assess-subset method to control
the False Discovery Rate (FDR) when you are only interested in a subset
of the identified PSMs from a shotgun proteomics experiment.
The workflow is as follows:
1. Search spectra against all expected proteins
2. Remove PSMs from irrelevant proteins
3. Calculate FDR
You can load the data from step **1.** into this webtool to perform
step **2.** and **3.**'
)), hr(),
fileInput ('file1',
'Choose Text File',
accept = c('text/csv',
'text/comma-separated-values,text/plain',
'.csv')
),
actionButton("action", label = "Load example data"),
checkboxInput("scorehigher", "Are higher scores better?", TRUE),
HTML(markdownToHTML(text =
'
Use the **Browse...** button above to load a CSV file from your computer
or paste the data in the **text area** to the right.
Adhere to the following format:
* Every Row is a unique PSM
* Every row contains:
+ **id**: Can be any text or number
+ **score**: Score given to the PSM, higher scores are better
+ **decoy**: FALSE or TRUE; TRUE indicates that the PSM matches a decoy peptide sequence
+ **subset**: FALSE or TRUE: TRUE indicates that the PSM matches a subset peptide sequence
* The first row are the column names
* All columns should be comma separated
Note that the row with extra set of decoys (**decoy** = TRUE and **subset** = FALSE) are not necessarily the non-subset decoys from the same mass spectrometry run but can be chosen by the user.
Example input:
<pre>
id,score,decoy,subset
1,7.67,TRUE,FALSE
2,10.99,TRUE,TRUE
3,75.10,FALSE,FALSE
4,73.83,FALSE,TRUE
</pre>
**Warning:** use this tool only with PSMs from a competitive target-decoy search!
The minimal required input are PSMs from subset targets and decoys (row 2 and 4 in example input).
Additional decoy PSMs (row 1) are used for a more stable FDR calculation.
Non subset targets (row 3) are ignored in the analysis.
'))
),
mainPanel(width = 5,h3(textOutput('errormessage',container = span)),
tags$textarea(id = 'rawinput',label = '',
rows = '20',cols = 40))
)),
tabPanel('Calculate FDR',
sidebarLayout(
sidebarPanel(width = 12,
fluidRow(column(width = 10, HTML(markdownToHTML(text = '
Download the results to your computer:
'))),
column(width = 2, checkboxInput("res_more_info", label = "More info")),
downloadButton('downloadData', 'Download'),
column(width = 12, conditionalPanel(condition = "input.res_more_info == true",
withMathJax(),
HTML(markdownToHTML(text =
'The following columns were calculated:
* **pi_0_cons**: A conservative estimation of $\\pi_0$. (pi_0_cons = (#decoys+1) / (#targets+1))
* **FDR**: The estimated FDR at this score cutoff for subset PSMs.
Calculated according to the classical TDA method. Does not work well for small subsets.
Missing for non-subset or decoy PSMs.
* **FDR_stable**: The estimated stable FDR at this score cutoff for subset PSMs.
This FDR is estimated from the complete decoy set and *pi_0_cons*.
This method is more stable for smaller subsets and will for large subset be close to the **FDR** estimates.
Missing for non-subset or decoy PSMs.
Please check the diagnostics on the next tab.
* **FDR_BH**: The estimated stable FDR at this score cutoff for subset PSMs.
This FDR is estimated from the complete decoy set and according to the Benjamini-Hochberg FDR procedure ($\\pi_0$ set to 1).
This FDR estimate is more conservative than **FDR** and **FDR_stable**.
Use this method when you have a large decoy set but no decoy information on the subset PSMs (eg. when the search engine does not return the decoy protein ids).
Please check the diagnostics on the next tab.
Missing for non-subset or decoy PSMs.
')))))),
mainPanel(width = 12,dataTableOutput('contents'))
)),
tabPanel('Check diagnostic plots',
sidebarLayout(
sidebarPanel(width = 12,
fluidRow(column(width = 10, HTML(markdownToHTML(text = '
These are diagnostic plots to evaluate the quality of the decoy set and the uncertainty in the estimated fraction of incorrect target PSMs (pi0).
This allows an informed choice on the use of the stable all-sub FDR estimator and the large decoy set.'))),
column(width = 2, checkboxInput("diag_more_info", label = "More info")),
column(width = 12, conditionalPanel(condition = "input.diag_more_info == true",
HTML(markdownToHTML(text = '
**Panel a** shows the posterior distribution of pi_0 given the observed number of target and decoy PSMs in the subset.
The vertical line indicates the conservative pi_0 estimate used in the calculations.
At very high pi_0 uncertainty (broad peak), you can opt to use the BH procedure to minimize sample to sample variability (see **FDR_BH** in the output).
However, this will come at the expense of too conservative PSM lists.
Our improved TDA for subsets relies on the assumption that incorrect subset PSMs and the complete set of decoys have the same distribution.
This distributional assumption can be verified through a PP-plot where the empirical Cumulative Distribution Function (eCDF) of the decoys is plotted against the eCDF of the subset target PSMs.
The PP-plots in **panel b - d** display the target subset PSMs plotted against all decoy PSMs from the complete search, the decoy subset PSMs plotted against all decoy PSMs from the complete search, and the target subset PSMs plotted against the decoy PSMs from the complete search, respectively.
The full line in panel **b** and **d** indicates a line with a slope of pi_0.
The full line in panel **c** indicates the identity line.
The first part of the plot in **b** and **d** should be linear with a slope that equals pi_0.
The second part of the plot will deviate from this line towards higher percentiles and will ultimately become vertical (decoy percentile = 1).
If we see this profile in panel **b**, we have a good indication that the set of decoys from the complete search is representative for the mixture component for incorrect PSMs of the target mixture distribution.
When there is high uncertainty on pi_0 as indicated by **a**, then the linear pattern in the data points might deviate from the drawn solid line, but should still be more or less linear.
Deviations from this pattern might be subtle, therefore we provide the PP plots in **c** and **d** to support the conclusion drawn from panel **b**.
The PP-plot in panel **c** shows the subset decoy PSMs plotted against all decoy PSMs.
The whole plot should follow the identity line, indicating that the complete set of decoys is a good representation of the subset decoys.
To verify that the subset decoys (and thus also the complete set of decoys) are representative for the mixture component for incorrect PSMs of the target mixture distribution, we look at the PP-plot of the subset decoys against the subset targets in panel **d**.
The profile should look as described for panel **b**.
If the profile matches in panel **d** but does not for panel **b**, then we suggest to not use the extra decoy set and use only the subset decoys for FDR estimation.
When the profile does not match in panel **d**, the subset decoys might not be representative for incorrect PSMs. This can indicate that pi_0 is estimated incorrectly, since this is based on the subset PSM scores.
In this case, the first part of the plot in panel **d** can deviate from the (incorrect) pi_0 slope line.
But if this first part is linear, it still indicates that the extra set of decoys is representative for the mixture component of incorrect target PSMs. Since pi_0 is unknown we set it to 1 (see **FDR_BH** in the output).
When you are not sure how the diagnostic plots should look, you can simulate your own data under various (erratic) settings in the simulation tab.
'))))))
,
mainPanel(width = 12,plotOutput('plot1')
)))
,
tabPanel('simulation',
sidebarLayout(
sidebarPanel(width = 12,
fluidRow(column(width = 10,HTML(markdownToHTML(text = '
In this tab, you can simulate your own data and look at examples of diagnostic plots.'))),
column(width = 2, checkboxInput("simulation_more_info", label = "More info")),
column(width = 12, conditionalPanel(condition = "input.simulation_more_info == true",
HTML(markdownToHTML(text = '
Random datasets are generated based on an observed number of target and decoy subset PSMs.
Optionally, you can also generate a random number of subset decoy PSMs based on the observed number of subset target PSMs and a theoretic pi0 that you can choose.
In the default setting, the decoy distribution is equal to the incorrect subset target distribution.
This means that the diagnostic plots from the simulated datasets are exemplary for experimental settings where the assumptions are grounded and you can safely use the decoys for FDR estimation.
Optionally, you can change the mean and standard deviation of the subset decoys and/or the large set of extra decoys, violating the assumption that they are representative for the incorrect subset targets.
Plots generated by these simulated datasets are examples of diagnostic plots when the assumptions are violated and you should not use these decoys for FDR estimation.
For more information on how to interpret the generated diagnostic plots, please read the information in the **Check diagnostic plots** tab.'))))))
,
mainPanel(width = 12,
column(6,
fluidRow(column(6, '# subset PSMs'),
column(6, tags$input(id = 'subset_n',type = "number",
value = 200,min = 1,step = 10))
),
fluidRow(column(6, '# subset decoy PSMs'),
column(6, tags$input(id = 'decoy_n',type = "number",
value = 20,min = 0,step = 10))),
fluidRow(column(6, '# extra decoys'),
column(6, tags$input(id = 'decoy_extra_n',type = "number",
value = 2000,min = 0,step = 10))))
,
## column(1,actionButton("action_simulate", label = "GO")),
column(6,checkboxInput("check_also_decoys", label = "Simulate also decoys"),
conditionalPanel(
condition = "input.check_also_decoys == true",
column(4, 'with pi0:'),
column(7, tags$input(id = 'pi0',type = "number",
value = .2,min = 0,max = 1,step = .1))
)
)
,
column(12,br(),
column(12,
HTML(markdownToHTML(text ='__Subset target mixture distribution__')))
,
column(6, 'incorrect targets',
wellPanel(
fluidRow(column(6, numericInput('H0_mean', 'mean', 2.75,step = .1)),
column(6, numericInput('H0_sd', 'sd', 0.13,step = .1)))))
,
column(6, 'correct targets',
wellPanel(
fluidRow(column(6, numericInput('H1_mean', 'mean', 3.31,step = .1)),
column(6, numericInput('H1_sd', 'sd', 0.28,step = .1))))))
,
column(6,
HTML(markdownToHTML(text ='__Subset decoys distribution__')),
checkboxInput('check_subset_decoys_same','Same as incorrect target PSMs?',value = TRUE)
,
conditionalPanel(
condition = "input.check_subset_decoys_same == false",
wellPanel(
fluidRow(column(6, numericInput('decoy_mean', 'mean', 0,step = .1)),
column(6, numericInput('decoy_sd', 'sd', 0,step = .1))))
)
,
HTML(markdownToHTML(text ='__Extra decoys distribution__')),
checkboxInput('check_extra_decoys_same','Same as subset decoy PSMs?',value = TRUE)
,
conditionalPanel(condition = "input.check_extra_decoys_same == false",
wellPanel(
fluidRow(column(6, numericInput('decoy_extra_mean', 'mean', 0,step = .1)),
column(6, numericInput('decoy_extra_sd', 'sd', 0,step = .1))))
),
actionButton("action_simulate", label = "SIMULATE",style = 'font-size:150%'))
,
column(6,plotOutput('plot_theo'))
,
fluidRow(column(12,plotOutput('plot_sim_diag')))
)
))
))
#' Launches the GUI version of saas.
#'
#' Starts the 'Search All, Assess Subset' shiny application, combining the
#' ui() and server objects defined in this file.
#'
#' To easily launch the GUI outside an R session (eg. on a server),
#' you can run R -e "library(saas);saas_gui()" from the terminal (on linux/mac).
#'
#' @param options List of options passed on to shiny::shinyApp(). By default
#'   the app listens on all interfaces ("0.0.0.0") at port 3320; see the help
#'   of shiny::shinyApp for the available options.
#' @return A shiny app object (as returned by shiny::shinyApp); printing it,
#'   e.g. by calling saas_gui() at the top level, runs the app.
#' @export
#' @import shiny
#' @import markdown
#' @examples
#' \dontrun{
#' saas_gui()
#' }
saas_gui = function(options = list(port = 3320, host = "0.0.0.0"))
shinyApp(ui = ui(), server = server,options = options)
|
# Per-file mean particulate readings (sensor channels P1 and P2) from the
# "sds" files in clean_data/, restricted to a fixed lat/lon bounding box
# (central Sofia, judging by the coordinates -- TODO confirm).
library(ggplot2)

file_names <- list.files(path = "clean_data/")
sds_data <- file_names[grep("sds", file_names)]

# Preallocate instead of growing with c() inside the loop (O(n^2) copying),
# and iterate with seq_along() so zero matching files yields zero iterations
# instead of the backwards 1:0 loop that 1:length() produces.
p1_mean <- numeric(length(sds_data))
p2_mean <- numeric(length(sds_data))
for (ind in seq_along(sds_data)) {
  sds <- read.csv(paste0("clean_data/", sds_data[ind]))
  # Keep only the readings inside the bounding box.
  sub_area <- sds[sds$lat <= 42.720 & sds$lat >= 42.694 & sds$lon <= 23.304 & sds$lon >= 23.244, ]
  p1_mean[ind] <- mean(sub_area$P1)
  p2_mean[ind] <- mean(sub_area$P2)
}
# Derive a per-file time label by stripping everything matching "_.*_.*"
# (keeps the prefix before the first underscore -- presumably a date stamp;
# TODO confirm against the clean_data/ file naming scheme).
time<-gsub("_.*_.*","",sds_data)
name <-factor(time)
t = data.frame(name,as.numeric(p1_mean),as.numeric(p2_mean))
# NOTE(review): aes(x=time, y=p1_mean) does NOT refer to columns of `t` (its
# columns are named "name", "as.numeric.p1_mean.", ...); ggplot silently falls
# back to the global vectors of the same length. Works, but fragile.
# NOTE(review): `t` also masks base::t(); and the colour labels map p1_mean to
# "PM2.5" / p2_mean to "PM10" -- verify against the sensor's channel mapping.
ggplot()+ geom_line(data=t,aes(x=time, y=p1_mean,group=1,colour="PM2.5"))+
geom_line(data=t,aes(x=time, y=p2_mean,group=1,colour="PM10"))+
xlab("Time")+ylab("AQI")+
scale_colour_manual("",values = c("PM2.5"="red","PM10"="blue"))+
theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = .5),plot.title=element_text(hjust=0.5))+
labs(title = "Pollution")
# Per-file mean pressure/temperature/humidity from the "bme" sensor files,
# restricted to the same lat/lon bounding box as the particulate data above.
bme_data <- file_names[grep("bme", file_names)]

# Preallocate the result vectors and iterate with seq_along() (safe when no
# files match) instead of growing with c() inside a 1:length() loop.
pre_mean <- numeric(length(bme_data))
tem_mean <- numeric(length(bme_data))
hum_mean <- numeric(length(bme_data))
for (ind in seq_along(bme_data)) {
  bme <- read.csv(paste0("clean_data/", bme_data[ind]))
  sub_area <- bme[bme$lat <= 42.720 & bme$lat >= 42.694 & bme$lon <= 23.304 & bme$lon >= 23.244, ]
  pre_mean[ind] <- mean(sub_area$pressure)
  tem_mean[ind] <- mean(sub_area$temperature)
  hum_mean[ind] <- mean(sub_area$humidity)
}
# Re-derive the time labels for the bme files (same stripping rule as above).
time<-gsub("_.*_.*","",bme_data)
name <-factor(time)
# One line plot per quantity. As with the pollution plot, aes() resolves time
# and the *_mean vectors from the global environment, not from `t`.
# NOTE(review): when this script is source()d (rather than run line by line),
# bare ggplot expressions are not auto-printed; wrap them in print() if needed.
t = data.frame(name,as.numeric(pre_mean))
ggplot()+ geom_line(data=t,aes(x=time, y=pre_mean,group=1))+
xlab("Time")+ylab("Pressure")+
theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = .5),plot.title=element_text(hjust=0.5))+
labs(title = "Pressure")
t = data.frame(name,as.numeric(tem_mean))
ggplot()+ geom_line(data=t,aes(x=time, y=tem_mean,group=1))+
xlab("Time")+ylab("Temperature")+
theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = .5),plot.title=element_text(hjust=0.5))+
labs(title = "Temperature")
t = data.frame(name,as.numeric(hum_mean))
ggplot()+ geom_line(data=t,aes(x=time, y=hum_mean,group=1))+
xlab("Time")+ylab("Humidity")+
theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = .5),plot.title=element_text(hjust=0.5))+
labs(title = "Humidity")
# NOTE(review): this block looks pasted from a different analysis and cannot
# run as-is: `f` is never defined in this script, and at this point `t` is a
# data.frame (so length(t) returns its number of columns and c(t, f) would
# splice list columns rather than concatenate frequency vectors).
# ggboxplot() and stat_compare_means() come from the ggpubr package, which is
# never attached here. Either define t/f as numeric frequency vectors and add
# library(ggpubr), or remove the block.
mydata <- data.frame(
Group = c(rep("TRUE",length(t)), rep("FALSE",length(f))),
Frequency = c(t, f)
)
ggboxplot(mydata, x="Group", y = "Frequency", color = "Group", palette = "jco",add = "jitter",short.panel.labs = FALSE)+
stat_compare_means(method = "t.test",label.y=100)
| /code/data_shown.R | no_license | y220/Stat_605_Group_Project | R | false | false | 2,580 | r | library(ggplot2)
file_names<-list.files(path = "clean_data/")
sds_data<-file_names[grep("sds", file_names)]
p1_mean=c()
p2_mean=c()
for (ind in 1:length(sds_data)) {
sds<-read.csv(paste("clean_data/", sds_data[ind], sep = ""))
sub_area<-sds[sds$lat <= 42.720 & sds$lat >= 42.694 & sds$lon <= 23.304 & sds$lon >= 23.244, ]
p1.mean<-mean(sub_area$P1)
p2.mean<-mean(sub_area$P2)
p1_mean<-c(p1_mean,p1.mean)
p2_mean<-c(p2_mean,p2.mean)
}
time<-gsub("_.*_.*","",sds_data)
name <-factor(time)
t = data.frame(name,as.numeric(p1_mean),as.numeric(p2_mean))
ggplot()+ geom_line(data=t,aes(x=time, y=p1_mean,group=1,colour="PM2.5"))+
geom_line(data=t,aes(x=time, y=p2_mean,group=1,colour="PM10"))+
xlab("Time")+ylab("AQI")+
scale_colour_manual("",values = c("PM2.5"="red","PM10"="blue"))+
theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = .5),plot.title=element_text(hjust=0.5))+
labs(title = "Pollution")
bme_data<-file_names[grep("bme", file_names)]
pre_mean=c()
tem_mean=c()
hum_mean=c()
for (ind in 1:length(bme_data)) {
bme<-read.csv(paste("clean_data/", bme_data[ind], sep = ""))
sub_area<-bme[bme$lat <= 42.720 & bme$lat >= 42.694 & bme$lon <= 23.304 & bme$lon >= 23.244, ]
pre.mean<-mean(sub_area$pressure)
tem.mean<-mean(sub_area$temperature)
hum.mean<-mean(sub_area$humidity)
pre_mean<-c(pre_mean,pre.mean)
tem_mean<-c(tem_mean,tem.mean)
hum_mean<-c(hum_mean,hum.mean)
}
time<-gsub("_.*_.*","",bme_data)
name <-factor(time)
t = data.frame(name,as.numeric(pre_mean))
ggplot()+ geom_line(data=t,aes(x=time, y=pre_mean,group=1))+
xlab("Time")+ylab("Pressure")+
theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = .5),plot.title=element_text(hjust=0.5))+
labs(title = "Pressure")
t = data.frame(name,as.numeric(tem_mean))
ggplot()+ geom_line(data=t,aes(x=time, y=tem_mean,group=1))+
xlab("Time")+ylab("Temperature")+
theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = .5),plot.title=element_text(hjust=0.5))+
labs(title = "Temperature")
t = data.frame(name,as.numeric(hum_mean))
ggplot()+ geom_line(data=t,aes(x=time, y=hum_mean,group=1))+
xlab("Time")+ylab("Humidity")+
theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = .5),plot.title=element_text(hjust=0.5))+
labs(title = "Humidity")
mydata <- data.frame(
Group = c(rep("TRUE",length(t)), rep("FALSE",length(f))),
Frequency = c(t, f)
)
ggboxplot(mydata, x="Group", y = "Frequency", color = "Group", palette = "jco",add = "jitter",short.panel.labs = FALSE)+
stat_compare_means(method = "t.test",label.y=100)
|
%% File Name: mi_dstat.Rd
%% File Version: 0.08
\name{mi_dstat}
\alias{mi_dstat}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Cohen's d Effect Size for Missingness Indicators
}
\description{
Computes Cohen's d effect size indicating whether missingness
on a variable is related to other variables (covariates).
}
\usage{
mi_dstat(dat)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{dat}{
Data frame
}
}
%\details{
%% ~~ If necessary, more details than the description above ~~
%}
\value{
A matrix. Missingness indicators refer to rows and covariates
to columns.
}
%\references{
%% ~put references to the literature/web site here ~
%}
%\note{
%% ~~further notes~~
%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
%\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
%}
\examples{
#############################################################################
# EXAMPLE 1: d effect size for missingness indicators data.ma01
#############################################################################
data(data.ma01)
dat <- data.ma01
# compute d effect sizes
md <- miceadds::mi_dstat(dat)
round( md, 3 )
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
%\keyword{ ~kwd1 }
%\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
| /man/mi_dstat.Rd | no_license | cran/miceadds | R | false | false | 1,423 | rd | %% File Name: mi_dstat.Rd
%% File Version: 0.08
\name{mi_dstat}
\alias{mi_dstat}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Cohen's d Effect Size for Missingness Indicators
}
\description{
Computes Cohen's d effect size indicating whether missingness
on a variable is related to other variables (covariates).
}
\usage{
mi_dstat(dat)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{dat}{
Data frame
}
}
%\details{
%% ~~ If necessary, more details than the description above ~~
%}
\value{
A matrix. Missingness indicators refer to rows and covariates
to columns.
}
%\references{
%% ~put references to the literature/web site here ~
%}
%\note{
%% ~~further notes~~
%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
%\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
%}
\examples{
#############################################################################
# EXAMPLE 1: d effect size for missingness indicators data.ma01
#############################################################################
data(data.ma01)
dat <- data.ma01
# compute d effect sizes
md <- miceadds::mi_dstat(dat)
round( md, 3 )
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
%\keyword{ ~kwd1 }
%\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
# Initialise the loop counter.
x <- 1
# Print 1 through 5; x ends at 6 once the condition fails.
while (x <= 5) {
print(x)
x <- x + 1
} | /ProgramWhileLoop.R | no_license | pvanderhoeven/Rtutorial | R | false | false | 49 | r | x <- 1
# Print the values of x from its current value up to 5, leaving x at 6
# (or untouched when it already exceeds 5).
repeat {
  if (x > 5) break
  print(x)
  x <- x + 1
}
# For each point along a time series apply random perturbations, evolve each perturbed point
# according to model equations and save results
# cleaning wd, loading functions and packages ------------------------------
# NOTE(review): rm(list = ls()) wipes the caller's workspace when this script
# is source()d from an interactive session; acceptable only because this is a
# standalone analysis script.
rm(list = ls(all = TRUE))
source("code/functions/lotka_volterra.R")
source("code/functions/predator_prey.R")
source("code/functions/food_chain.R")
source("code/functions/consumer_resource.R")
source("code/functions/lotka_volterra_stochastic.R")
source("code/functions/predator_prey_stochastic.R")
source("code/functions/food_chain_stochastic.R")
source("code/functions/consumer_resource_stochastic.R")
# Install-on-demand loading; require() is used (instead of library()) for its
# logical return value inside the if(!...) guards.
if(!require(deSolve)) {install.packages("deSolve"); library(deSolve)}
if(!require(rootSolve)) {install.packages("rootSolve"); library(rootSolve)}
if(!require(plyr)) {install.packages("plyr"); library(plyr)}
if(!require(tidyr)) {install.packages("tidyr"); library(tidyr)}
if(!require(ggplot2)) {install.packages("ggplot2"); library(ggplot2)}
if(!require(plotly)) {install.packages("plotly"); library(plotly)}
if(!require(viridis)) {install.packages("viridis"); library(viridis)}
if(!require(corrplot)) {install.packages("corrplot"); library(corrplot)}
if(!require(latex2exp)) {install.packages("latex2exp"); library(latex2exp)}
if(!require(RColorBrewer)) {install.packages("RColorBrewer"); library(RColorBrewer)}
if(!require(ppcor)) {install.packages("ppcor"); library(ppcor)}
# load synthetic time series ------------------------------
# number of species (2, 3, 4 or 5)
n_sp <- 2
# model to use (predator_prey, food_chain, lotka_volterra or consumer_resource)
func_name <- "predator_prey"
# final time series length
ts_length <- 500
# sampling frequency
sampling_freq <- 20
# load result files; this load() is expected to define `ts` (time x species
# abundances) used throughout the rest of the script.
load(file = paste("data/synthetic_time_series/time_series_", func_name, "_", n_sp, "_sp_",
ts_length, "_points_", sampling_freq, "_sampling_freq_0_noise",
".RData", sep = ""))
# load model settings; expected to define `func`, `parms` and `time_step`.
source("code/scripts/model_settings.R")
# if time series is stochastic, use deterministic model to evolve perturbed abundances
if (grepl("stochastic", func_name)) {
stochastic_name <- func_name
# Strip the "_stochastic" suffix (11 characters) so the deterministic
# counterpart's settings get loaded, then restore the original name.
func_name <- substr(func_name, 1 , nchar(func_name)-11)
source("code/scripts/model_settings.R")
func_name <- stochastic_name
}
# perform perturbation simulations ------------------------------
# distribution of perturbed points (uniform, gaussian, or gaussian_proportional)
pert_dist <- "gaussian"
# perturbation magnitude (percentage of average species standard deviation)
pert_magnitude <- 0.15
scaled_pert_magnitude <- pert_magnitude * mean(apply(ts[ , -1], 2, sd))
# number of perturbations
n_pert <- 300
# time steps to evolve perturbed points (1 or 3)
k <- 1
# whether number of time points to evolve perturbations is fixed or variable
k_type <- "variable"
# time sequence to evolve perturbed points
times <- seq(0, k, by = time_step)
# data frames to store results
# NOTE(review): df_unpert and df_full grow via rbind() on every iteration
# below (O(n^2) copying); collecting rows in a preallocated list and binding
# once at the end would be considerably faster for long time series.
df_unpert <- data.frame()
df_full <- data.frame()
k_vec <- c()
# loop over all points in time series
for (j in 1:(nrow(ts)-1)) {
print(j)
state <- as.numeric(ts[j, -1])
names(state) <- paste("x", 1:n_sp, sep = "")
df_initial <- data.frame()
df_final <- data.frame()
# k inversely proportional to mean abundance rate of change
if (k_type == "variable") {
# NOTE(review): a zero abundance in ts[j, -1] makes this Inf/NaN; only the
# k_vec[j] == 0 case is guarded below -- confirm abundances stay positive.
mean_rate_change <- mean(abs((as.numeric(ts[j+1, -1]) - as.numeric(ts[j, -1])) / as.numeric(ts[j, -1])))
k_vec[j] <- round(k / sqrt(mean_rate_change), digits = 1)
times_variable <- seq(0, k_vec[j], by = time_step)
if (k_vec[j] == 0) {
k_vec[j] <- 2 * time_step
times_variable <- seq(0, k_vec[j], by = time_step)
}
times_pert <- times_variable
}
# k remains the same for all points in time
if (k_type == "fixed") {
times_pert <- times
}
# apply n_pert perturbations (cloud of perturbations)
for (i in 1:n_pert) {
# sampling abundance perturbation
if (pert_dist == "gaussian") {
pert <- rnorm(n_sp, mean = 0, sd = scaled_pert_magnitude)
}
if (pert_dist == "gaussian_proportional") {
pert <- rnorm(n_sp, mean = 0, sd = sqrt(state / sum(state)) * scaled_pert_magnitude)
}
if (pert_dist == "uniform") {
# Uniform sample from an n_sp-dimensional ball of radius
# scaled_pert_magnitude: random direction times a radius drawn with the
# volume-correct u^(1/n) scaling.
x <- rnorm(n_sp, 0, 1)
norm_x <- sqrt(sum(x^2))
x_scaled <- (x / norm_x) * scaled_pert_magnitude
u <- runif(1, 0, 1)^(1/n_sp)
pert <- x_scaled * u
}
# applying perturbation
state_initial <- state + pert
# set perturbed abundance to zero, if negative
state_initial[state_initial < 0] <- 0
# add initial perturbed abundances to data frame
df_initial <- rbind(df_initial, state_initial)
# perform numerical integration
ts_pert <- as.data.frame(ode(y = state_initial, times = times_pert, func = func, parms = parms, method = "ode45"))
# add final perturbed abundances to data frame
state_final <- as.numeric(ts_pert[nrow(ts_pert), -1])
df_final <- rbind(df_final, state_final)
}
# obtain unperturbed final state
ts_unpert <- as.data.frame(ode(y = state, times = times_pert, func = func, parms = parms, method = "ode45"))
state_unpert <- as.numeric(ts_unpert[nrow(ts_unpert), -1])
df_unpert <- rbind(df_unpert, state_unpert)
# merge data frames
names(df_initial) <- paste("x", 1:n_sp, sep = "")
names(df_final) <- paste("x", 1:n_sp, sep = "")
df_pert <- rbind(df_initial, df_final)
df_pert$perturbation <- c(1:nrow(df_initial), 1:nrow(df_final))
df_pert$type <- c(rep("initial", n_pert), rep("final", n_pert))
df_pert$time <- j
df_full <- rbind(df_full, df_pert)
}
# modifying data frames
# Drop the last time point, which has no perturbation results (the main loop
# only runs to nrow(ts)-1), so ts aligns row-for-row with df_unpert.
ts <- ts[-nrow(ts), ]
df_unpert <- cbind(ts$time, df_unpert)
names(df_unpert) <- c("time", paste("x", 1:n_sp, sep = ""))
# saving objects as RData ------------------------------
if (k_type == "variable") {
df_k <- data.frame(time = ts$time, k = k_vec)
save(df_k, file = paste("results/perturbation_analyses/time_step_k_", func_name, "_", n_sp, "_sp_",
k_type, "_time_step_k_", k, ".RData", sep = ""))
}
save(df_unpert, file = paste("results/perturbation_analyses/unperturbed_final_points_", func_name, "_", n_sp, "_sp_",
pert_dist, "_pert_", pert_magnitude, "_pert_magn_",
k_type, "_time_step_k_", k, ".RData", sep = ""))
save(df_full, file = paste("results/perturbation_analyses/perturbed_initial_final_points_", func_name, "_", n_sp, "_sp_",
pert_dist, "_pert_", pert_magnitude, "_pert_magn_",
k_type, "_time_step_k_", k, ".RData", sep = ""))
| /code/scripts/perturbation_analyses_synthetic_time_series.R | permissive | lucaspdmedeiros/ranking-species-sensitivity | R | false | false | 6,563 | r | # For each point along a time series apply random perturbations, evolve each perturbed point
# according to model equations and save results
# cleaning wd, loading functions and packages ------------------------------
rm(list = ls(all = TRUE))
source("code/functions/lotka_volterra.R")
source("code/functions/predator_prey.R")
source("code/functions/food_chain.R")
source("code/functions/consumer_resource.R")
source("code/functions/lotka_volterra_stochastic.R")
source("code/functions/predator_prey_stochastic.R")
source("code/functions/food_chain_stochastic.R")
source("code/functions/consumer_resource_stochastic.R")
if(!require(deSolve)) {install.packages("deSolve"); library(deSolve)}
if(!require(rootSolve)) {install.packages("rootSolve"); library(rootSolve)}
if(!require(plyr)) {install.packages("plyr"); library(plyr)}
if(!require(tidyr)) {install.packages("tidyr"); library(tidyr)}
if(!require(ggplot2)) {install.packages("ggplot2"); library(ggplot2)}
if(!require(plotly)) {install.packages("plotly"); library(plotly)}
if(!require(viridis)) {install.packages("viridis"); library(viridis)}
if(!require(corrplot)) {install.packages("corrplot"); library(corrplot)}
if(!require(latex2exp)) {install.packages("latex2exp"); library(latex2exp)}
if(!require(RColorBrewer)) {install.packages("RColorBrewer"); library(RColorBrewer)}
if(!require(ppcor)) {install.packages("ppcor"); library(ppcor)}
# load synthetic time series ------------------------------
# number of species (2, 3, 4 or 5)
n_sp <- 2
# model to use (predator_prey, food_chain, lotka_volterra or consumer_resource)
func_name <- "predator_prey"
# final time series length
ts_length <- 500
# sampling frequency
sampling_freq <- 20
# load result files
# ---------------------------------------------------------------------------
# Perturbation analysis of a synthetic multi-species time series.
# Assumes the following are defined earlier in this script (outside this
# chunk): func_name, n_sp, ts_length, sampling_freq, time_step, and -- after
# sourcing model_settings.R -- the ODE right-hand side `func` and parameters
# `parms` in deSolve conventions (TODO confirm against model_settings.R).
# For every point of the loaded series `ts` (column 1 = time, remaining n_sp
# columns = species abundances) a cloud of n_pert perturbed states is drawn,
# each is integrated forward in time, and the initial/final perturbed states
# plus the unperturbed final states are saved as .RData files.
# ---------------------------------------------------------------------------
load(file = paste("data/synthetic_time_series/time_series_", func_name, "_", n_sp, "_sp_",
                  ts_length, "_points_", sampling_freq, "_sampling_freq_0_noise",
                  ".RData", sep = ""))
# load model settings
source("code/scripts/model_settings.R")
# if time series is stochastic, use deterministic model to evolve perturbed abundances
# (temporarily strips the trailing suffix -- presumably "_stochastic",
# 11 characters; confirm naming convention -- so model_settings.R sets up the
# deterministic counterpart, then restores the name for output file naming)
if (grepl("stochastic", func_name)) {
  stochastic_name <- func_name
  func_name <- substr(func_name, 1 , nchar(func_name)-11)
  source("code/scripts/model_settings.R")
  func_name <- stochastic_name
}
# perform perturbation simulations ------------------------------
# distribution of perturbed points (uniform, gaussian, or gaussian_proportional)
pert_dist <- "gaussian"
# perturbation magnitude (percentage of average species standard deviation)
pert_magnitude <- 0.15
# absolute perturbation scale: fraction of the mean across-species SD
scaled_pert_magnitude <- pert_magnitude * mean(apply(ts[ , -1], 2, sd))
# number of perturbations
n_pert <- 300
# time steps to evolve perturbed points (1 or 3)
k <- 1
# whether number of time points to evolve perturbations is fixed or variable
k_type <- "variable"
# time sequence to evolve perturbed points
times <- seq(0, k, by = time_step)
# data frames to store results
df_unpert <- data.frame()
df_full <- data.frame()
k_vec <- c()
# loop over all points in time series (last point excluded: it has no
# successor from which to compute the local rate of change)
for (j in 1:(nrow(ts)-1)) {
  print(j)
  # current (unperturbed) state vector, named x1..xn as expected by the solver
  state <- as.numeric(ts[j, -1])
  names(state) <- paste("x", 1:n_sp, sep = "")
  df_initial <- data.frame()
  df_final <- data.frame()
  # k inversely proportional to mean abundance rate of change
  if (k_type == "variable") {
    mean_rate_change <- mean(abs((as.numeric(ts[j+1, -1]) - as.numeric(ts[j, -1])) / as.numeric(ts[j, -1])))
    k_vec[j] <- round(k / sqrt(mean_rate_change), digits = 1)
    times_variable <- seq(0, k_vec[j], by = time_step)
    # guard against a zero-length integration horizon after rounding
    if (k_vec[j] == 0) {
      k_vec[j] <- 2 * time_step
      times_variable <- seq(0, k_vec[j], by = time_step)
    }
    times_pert <- times_variable
  }
  # k remains the same for all points in time
  if (k_type == "fixed") {
    times_pert <- times
  }
  # apply n_pert perturbations (cloud of perturbations)
  for (i in 1:n_pert) {
    # sampling abundance perturbation
    if (pert_dist == "gaussian") {
      pert <- rnorm(n_sp, mean = 0, sd = scaled_pert_magnitude)
    }
    if (pert_dist == "gaussian_proportional") {
      # per-species SD scaled by the square root of its relative abundance
      pert <- rnorm(n_sp, mean = 0, sd = sqrt(state / sum(state)) * scaled_pert_magnitude)
    }
    if (pert_dist == "uniform") {
      # random direction on the unit sphere, then a radius drawn as
      # u^(1/n_sp) -- NOTE(review): this looks like sampling inside an
      # n_sp-ball of radius scaled_pert_magnitude; confirm intended density
      x <- rnorm(n_sp, 0, 1)
      norm_x <- sqrt(sum(x^2))
      x_scaled <- (x / norm_x) * scaled_pert_magnitude
      u <- runif(1, 0, 1)^(1/n_sp)
      pert <- x_scaled * u
    }
    # applying perturbation
    state_initial <- state + pert
    # set perturbed abundance to zero, if negative
    state_initial[state_initial < 0] <- 0
    # add initial perturbed abundances to data frame
    df_initial <- rbind(df_initial, state_initial)
    # perform numerical integration
    ts_pert <- as.data.frame(ode(y = state_initial, times = times_pert, func = func, parms = parms, method = "ode45"))
    # add final perturbed abundances to data frame
    state_final <- as.numeric(ts_pert[nrow(ts_pert), -1])
    df_final <- rbind(df_final, state_final)
  }
  # obtain unperturbed final state (same horizon as the perturbed runs)
  ts_unpert <- as.data.frame(ode(y = state, times = times_pert, func = func, parms = parms, method = "ode45"))
  state_unpert <- as.numeric(ts_unpert[nrow(ts_unpert), -1])
  df_unpert <- rbind(df_unpert, state_unpert)
  # merge data frames: stack initial states on top of final states, tagged
  # by perturbation id, type ("initial"/"final") and time index j
  names(df_initial) <- paste("x", 1:n_sp, sep = "")
  names(df_final) <- paste("x", 1:n_sp, sep = "")
  df_pert <- rbind(df_initial, df_final)
  df_pert$perturbation <- c(1:nrow(df_initial), 1:nrow(df_final))
  df_pert$type <- c(rep("initial", n_pert), rep("final", n_pert))
  df_pert$time <- j
  df_full <- rbind(df_full, df_pert)
}
# modifying data frames: drop the last, unprocessed time point so that rows
# of df_unpert align with ts$time
ts <- ts[-nrow(ts), ]
df_unpert <- cbind(ts$time, df_unpert)
names(df_unpert) <- c("time", paste("x", 1:n_sp, sep = ""))
# saving objects as RData ------------------------------
if (k_type == "variable") {
  df_k <- data.frame(time = ts$time, k = k_vec)
  save(df_k, file = paste("results/perturbation_analyses/time_step_k_", func_name, "_", n_sp, "_sp_",
                          k_type, "_time_step_k_", k, ".RData", sep = ""))
}
save(df_unpert, file = paste("results/perturbation_analyses/unperturbed_final_points_", func_name, "_", n_sp, "_sp_",
                             pert_dist, "_pert_", pert_magnitude, "_pert_magn_",
                             k_type, "_time_step_k_", k, ".RData", sep = ""))
save(df_full, file = paste("results/perturbation_analyses/perturbed_initial_final_points_", func_name, "_", n_sp, "_sp_",
                           pert_dist, "_pert_", pert_magnitude, "_pert_magn_",
                           k_type, "_time_step_k_", k, ".RData", sep = ""))
|
#' @title runCapR
#'
#' @description Runs CapR to compute nucleotide folding probabilities for a
#'     file of fasta sequences.
#'
#' @param in_file An .fa file like from ExtractTranscriptomeSequence that is a
#'     list of fasta sequences to be folded. Required
#' @param out_file Name of the CapR output file of nucleotide folding
#'     probabilities. Default is in_file prefix.out
#' @param max_dist Integer of maximum distance between folded nucleotides in
#'     sequences. Recommended between 50 and 100, with higher values yielding
#'     potentially more accurate results but dramatically increasing run time.
#'     Default 100.
#'
#' @return generates CapR outfile; returns an informative string instead when
#'     CapR is not installed on the PATH.
#'
#' @examples
#'
#' ## make dummy file
#' write_fasta(paste0(sample(c("A", "T", "G", "C"), 50, replace = TRUE),
#'                    collapse = ""),
#'             "test",
#'             "test.fa")
#' ## run CapR
#' runCapR("test.fa")
#'
#' @export
runCapR <- function(in_file,
                    out_file = NA,
                    max_dist = 100) {
    if (is.na(out_file)) {
        # Default: replace the final file extension with ".out" (or append
        # ".out" when there is no extension). The previous
        # substr(in_file, 0, nchar(in_file) - 2) default only worked for
        # two-character extensions such as ".fa"; this also handles ".fasta".
        out_file <- paste0(sub("([^.]+)\\.[[:alnum:]]+$", "\\1", in_file),
                           ".out")
    }
    if (.is_CapR_installed()) {
        .CapR_run(in_file, out_file, max_dist)
    } else {
        return("Please install CapR and place in working PATH")
    }
}
| /R/runCapR.R | no_license | vbusa1/nearBynding | R | false | false | 1,180 | r | #' @title runCapR
#'
#' @description Runs CapR
#'
#' @param in_file An .fa file like from ExtractTranscriptomeSequence that is a
#' list of fasta sequences to be folded. Required
#' @param out_file Name of the CapR output file of nucleotide folding
#' probabilities. Default is in_file prefix.out
#' @param max_dist Integer of maximum distance between folded nucleotides in
#' sequences. Recommeded between 50 and 100, with higher values yielding
#' potentially more accurate results but dramatically increasing run time.
#' Default 100.
#'
#' @return generates CapR outfile
#'
#' @examples
#'
#' ## make dummy file
#' write_fasta(paste0(sample(c("A", "T", "G", "C"), 50, replace = TRUE),
#' collapse = ""),
#' "test",
#' "test.fa")
#' ## run CapR
#' runCapR("test.fa")
#'
#' @export
runCapR <- function(in_file,
                    out_file = NA,
                    max_dist = 100) {
    # Bail out early when the CapR binary cannot be found on the PATH.
    if (!.is_CapR_installed()) {
        return("Please install CapR and place in working PATH")
    }
    # Default output name: the input name with its two-character extension
    # (e.g. "fa") swapped for "out".
    if (is.na(out_file)) {
        out_file <- paste0(substr(in_file, 0, nchar(in_file) - 2), "out")
    }
    .CapR_run(in_file, out_file, max_dist)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/samples.R
\name{tidy_sample_avail}
\alias{tidy_sample_avail}
\title{Tidy PBS samples data for \code{\link[=plot_sample_avail]{plot_sample_avail()}}}
\usage{
tidy_sample_avail(dat, year_range = NULL, ageing_method_codes = NULL)
}
\arguments{
\item{dat}{Input data frame from \code{\link[=get_survey_samples]{get_survey_samples()}}.}
\item{year_range}{Either \code{NULL}, in which case all years are returned,
or a numeric vector of length two giving the lower and upper years to
include.}
\item{ageing_method_codes}{A numeric vector of ageing method codes to filter
on. Defaults to \code{NULL}, which brings in all valid ageing codes.
See \code{\link[=get_age_methods]{get_age_methods()}}.}
}
\description{
Tidy PBS samples data for \code{\link[=plot_sample_avail]{plot_sample_avail()}}
}
\examples{
\dontrun{
d <- get_survey_samples("lingcod")
tidy_sample_avail(d)
d <- get_commercial_samples("lingcod")
tidy_sample_avail(d)
}
}
| /man/tidy_sample_avail.Rd | no_license | chuanboguo/gfplot | R | false | true | 1,009 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/samples.R
\name{tidy_sample_avail}
\alias{tidy_sample_avail}
\title{Tidy PBS samples data for \code{\link[=plot_sample_avail]{plot_sample_avail()}}}
\usage{
tidy_sample_avail(dat, year_range = NULL, ageing_method_codes = NULL)
}
\arguments{
\item{dat}{Input data frame from \code{\link[=get_survey_samples]{get_survey_samples()}}.}
\item{year_range}{Either \code{NULL}, in which case all years are returned,
or a numeric vector of length two giving the lower and upper years to
include.}
\item{ageing_method_codes}{A numeric vector of ageing method codes to filter
on. Defaults to \code{NULL}, which brings in all valid ageing codes.
See \code{\link[=get_age_methods]{get_age_methods()}}.}
}
\description{
Tidy PBS samples data for \code{\link[=plot_sample_avail]{plot_sample_avail()}}
}
\examples{
\dontrun{
d <- get_survey_samples("lingcod")
tidy_sample_avail(d)
d <- get_commercial_samples("lingcod")
tidy_sample_avail(d)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/resilience_functions.R
\docType{data}
\name{oxygen}
\alias{oxygen}
\title{Simulated Dissolved Oxygen in River Spree}
\format{A data frame with 23425 rows and 5 variables}
\usage{
data(oxygen)
}
\description{
The data series "oxygen" is included in kwb.resilience as test data
for the use of the package. The included data is referred to in
the supplied package tutorial (see vignettes), as well as in the supporting
\href{https://www.researchgate.net/publication/326040304_Quantitative_Beschreibung_der_Resilienz_urbaner_Wassersysteme}{research paper}\cr
The data are simulated concentrations of dissolved oxygen (DO) in mg/l for different
management scenarios. Simulations and assumptions are described in detail in \href{https://www.researchgate.net/publication/306025705_Impacts_of_combined_sewer_overflows_on_a_large_urban_river_-_Understanding_the_effect_of_different_management_strategies}{Riechel et al. 2016}.\cr
}
\details{
Columns in data.frame:\cr
\itemize{
\item timestamp (POSIXct).
\item S2_storage_2020 (numeric).
\item S3_storage_increase (numeric).
\item S4_red_Imp_Surface (numeric).
\item S5_increase_in_DO (numeric).
}
}
\keyword{datasets}
| /man/oxygen.Rd | permissive | KWB-R/kwb.resilience | R | false | true | 1,247 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/resilience_functions.R
\docType{data}
\name{oxygen}
\alias{oxygen}
\title{Simulated Dissolved Oxygen in River Spree}
\format{A data frame with 23425 rows and 5 variables}
\usage{
data(oxygen)
}
\description{
The data series "oxygen" is included in kwb.resilience as test data
for the use of the package. The included data is referred to in
the supplied package tutorial (see vignettes), as well as in the supporting
\href{https://www.researchgate.net/publication/326040304_Quantitative_Beschreibung_der_Resilienz_urbaner_Wassersysteme}{research paper}\cr
The data are simulated concentrations of dissolved oxygen (DO) in mg/l for different
management scenarios. Simulations and assumptions are described in detail in \href{https://www.researchgate.net/publication/306025705_Impacts_of_combined_sewer_overflows_on_a_large_urban_river_-_Understanding_the_effect_of_different_management_strategies}{Riechel et al. 2016}.\cr
}
\details{
Columns in data.frame:\cr
\itemize{
\item timestamp (POSIXct).
\item S2_storage_2020 (numeric).
\item S3_storage_increase (numeric).
\item S4_red_Imp_Surface (numeric).
\item S5_increase_in_DO (numeric).
}
}
\keyword{datasets}
|
######################################################################
## Demonstrate APE with EpiEstim package
######################################################################
# Assumptions and modifications
# - compute EpiEstim for influenza and SARS data
# - get APE-regularised estimates for comparison
# Requires apeEstim.R, apePredPost.R, apeSpecific.R and plotAPEWindow.R in
# the same directory; plotting uses quartz() and is therefore macOS-only.
# Clean the workspace and console
closeAllConnections(); rm(list=ls())
cat("\014"); graphics.off()
# Main packages
library("EpiEstim")
library("caTools")
# Key functions
source('apeEstim.R')
source('apePredPost.R')
source('apeSpecific.R')
source('plotAPEWindow.R')
# Set working directory to source
# NOTE(review): parent.frame(2)$ofile is only populated when this file is
# run via source(); it fails under Rscript or interactive paste -- confirm.
this.dir <- dirname(parent.frame(2)$ofile)
setwd(this.dir)
# Boolean for plotting
# (numeric 0/1 flag, used as a boolean by if(wantPlot) below)
wantPlot = 0
# Load data on pandemic flu in Baltimore in 1918
data("Flu1918");
Iflu = Flu1918$incidence
genflu = Flu1918$si_distr
# Total infectiousness
Lflu = overall_infectivity(Iflu, genflu)
# Load data on SARS in Hong Kong in 2003
data("SARS2003")
Isars = SARS2003$incidence
gensars = SARS2003$si_distr
# Total infectiousness
Lsars = overall_infectivity(Isars, gensars)
######################################################################
## Conventional EpiEstim estimates
######################################################################
# Start and end times for weekly window in flu
t_start = seq(2, length(Iflu)-6)
t_end = t_start + 6
# Estimates on a 7 day window - considered best in Cori 2013 for this data
estflu = estimate_R(Iflu, method ="non_parametric_si", config = make_config(list(
  si_distr = genflu, t_start = t_start, t_end = t_end)))
# Extract outputs (posterior mean and 95% quantile band per window)
tflu = estflu$R$t_end # end of window
Rflu = estflu$R$`Mean(R)`
RfluCI = matrix(NA, nrow = 2, ncol = length(Rflu))
RfluCI[1,] = estflu$R$`Quantile.0.025(R)`
RfluCI[2,] = estflu$R$`Quantile.0.975(R)`
# Start and end times for weekly window in SARS
t_start = seq(14, length(Isars)-6)
t_end = t_start + 6
# Estimates on a 7 day window - considered best in Cori 2013 for this data
estsars = estimate_R(Isars, method ="non_parametric_si", config = make_config(list(
  si_distr = gensars, t_start = t_start, t_end = t_end)))
# Extract outputs
tsars = estsars$R$t_end # end of window
Rsars = estsars$R$`Mean(R)`
RsarsCI = matrix(NA, nrow = 2, ncol = length(Rsars))
RsarsCI[1,] = estsars$R$`Quantile.0.025(R)`
RsarsCI[2,] = estsars$R$`Quantile.0.975(R)`
# Estimate R at each day over 7 day window finishing on that day
if(wantPlot){
  quartz()
  plot(estflu)
  dev.copy2eps(file="fluCori.eps")
  quartz()
  plot(estsars)
  dev.copy2eps(file="sarsCori.eps")
}
######################################################################
## APE and PMSE solution
######################################################################
# Outputs take list form [[kbest1, modBest1, kbest2, modBest2]]
# Each modBest is also a list of form [[ape, pmse, prob, Rhat, Rhatci, Inexhat, Inexci, alpha, beta, pr]]
# Priors and settings
# Rprior: prior hyperparameters forwarded to apeEstim/apeSpecific (see
# apeEstim.R); a: presumably the 2.5% tail level for credible intervals --
# TODO confirm against apeEstim.R.
Rprior = c(1, 5); a = 0.025
# Clean Lam vectors of NAs
Lflu[is.na(Lflu)] = 0; Lsars[is.na(Lsars)] = 0 # <------ important
# Flu results
Rmodflu = apeEstim(Iflu, genflu, Lflu, Rprior, a, tflu[1], 'flu')
# Best estimates and prediction
plotAPEWindow(Rmodflu[[2]], 'flu', Rmodflu[[1]], Iplt = Iflu[seq(tflu[1]+1,length(Iflu))])
# Specific 7-day window
Rmodflu7 = apeSpecific(Iflu, genflu, Lflu, Rprior, a, tflu[1], 'flu7', 7)
plotAPEWindow(Rmodflu7[[2]], 'flu7', Rmodflu7[[1]], Iplt = Iflu[seq(tflu[1]+1,length(Iflu))])
# Sars results
Rmodsars = apeEstim(Isars, gensars, Lsars, Rprior, a, tsars[1], 'sars')
# Best estimates and prediction
plotAPEWindow(Rmodsars[[2]], 'sars', Rmodsars[[1]], Iplt = Isars[seq(tsars[1]+1,length(Isars))])
# Specific 7-day window
Rmodsars7 = apeSpecific(Isars, gensars, Lsars, Rprior, a, tsars[1], 'sars7', 7)
plotAPEWindow(Rmodsars7[[2]], 'sars7', Rmodsars7[[1]], Iplt = Isars[seq(tsars[1]+1,length(Isars))])
# Compare against EpiEstim outputs for weekly windows as sanity check
# (list positions 4/5 hold Rhat and its CI -- see output spec comment above)
Rsars2 = Rmodsars7[[2]][[4]]; Rflu2 = Rmodflu7[[2]][[4]]
Rsars2CI = Rmodsars7[[2]][[5]]; Rflu2CI = Rmodflu7[[2]][[5]]
# Compare APE at weekly windows and EpiEstim
quartz(); par(mfrow=c(2,1))
# Time ranges to plot
ran1 = seq(1, length(tflu)-1); ran2 = seq(1, length(tsars)-1)
# Reprod. num estimates and confidence interval for flu
# (red lines = EpiEstim mean and 95% band; grey points = APE weekly-window)
plot(ran1, Rflu[ran1], type = 'l', bty = 'l', lwd = 2, col='red',
     xlab = paste0("time (k = ", 7, ")"), ylab = 'Rhat flu')
lines(ran1, RfluCI[1, ran1], col = 'red', type = "l", lwd = 1)
lines(ran1, RfluCI[2, ran1], col = 'red', type = "l", lwd = 1)
points(ran1, Rflu2, lwd = 2, col = 'lightgrey')
points(ran1, Rflu2CI[1,], lwd = 2, col = 'lightgrey')
points(ran1, Rflu2CI[2,], lwd = 2, col = 'lightgrey')
# Reprod. num estimates and confidence interval for sars
plot(ran2, Rsars[ran2], type = 'l', bty = 'l', lwd = 2, col='red',
     xlab = paste0("time (k = ", 7, ")"), ylab = 'Rhat sars')
lines(ran2, RsarsCI[1, ran2], col = 'red', type = "l", lwd = 1)
lines(ran2, RsarsCI[2, ran2], col = 'red', type = "l", lwd = 1)
points(ran2, Rsars2, lwd = 2, col = 'lightgrey')
points(ran2, Rsars2CI[1,], lwd = 2, col = 'lightgrey')
points(ran2, Rsars2CI[2,], lwd = 2, col = 'lightgrey')
| /APE_R/exampleData.R | no_license | kpzoo/model-selection-for-epidemic-renewal-models | R | false | false | 5,129 | r | ######################################################################
## Demonstrate APE with EpiEstim package
######################################################################
# Assumptions and modifications
# - compute EpiEstim for influenza and SARS data
# - get APE-regularised estimates for comparison
# Requires apeEstim.R, apePredPost.R, apeSpecific.R and plotAPEWindow.R in
# the same directory; plotting uses quartz() and is therefore macOS-only.
# Clean the workspace and console
closeAllConnections(); rm(list=ls())
cat("\014"); graphics.off()
# Main packages
library("EpiEstim")
library("caTools")
# Key functions
source('apeEstim.R')
source('apePredPost.R')
source('apeSpecific.R')
source('plotAPEWindow.R')
# Set working directory to source
# NOTE(review): parent.frame(2)$ofile is only populated when this file is
# run via source(); it fails under Rscript or interactive paste -- confirm.
this.dir <- dirname(parent.frame(2)$ofile)
setwd(this.dir)
# Boolean for plotting
# (numeric 0/1 flag, used as a boolean by if(wantPlot) below)
wantPlot = 0
# Load data on pandemic flu in Baltimore in 1918
data("Flu1918");
Iflu = Flu1918$incidence
genflu = Flu1918$si_distr
# Total infectiousness
Lflu = overall_infectivity(Iflu, genflu)
# Load data on SARS in Hong Kong in 2003
data("SARS2003")
Isars = SARS2003$incidence
gensars = SARS2003$si_distr
# Total infectiousness
Lsars = overall_infectivity(Isars, gensars)
######################################################################
## Conventional EpiEstim estimates
######################################################################
# Start and end times for weekly window in flu
t_start = seq(2, length(Iflu)-6)
t_end = t_start + 6
# Estimates on a 7 day window - considered best in Cori 2013 for this data
estflu = estimate_R(Iflu, method ="non_parametric_si", config = make_config(list(
  si_distr = genflu, t_start = t_start, t_end = t_end)))
# Extract outputs (posterior mean and 95% quantile band per window)
tflu = estflu$R$t_end # end of window
Rflu = estflu$R$`Mean(R)`
RfluCI = matrix(NA, nrow = 2, ncol = length(Rflu))
RfluCI[1,] = estflu$R$`Quantile.0.025(R)`
RfluCI[2,] = estflu$R$`Quantile.0.975(R)`
# Start and end times for weekly window in SARS
t_start = seq(14, length(Isars)-6)
t_end = t_start + 6
# Estimates on a 7 day window - considered best in Cori 2013 for this data
estsars = estimate_R(Isars, method ="non_parametric_si", config = make_config(list(
  si_distr = gensars, t_start = t_start, t_end = t_end)))
# Extract outputs
tsars = estsars$R$t_end # end of window
Rsars = estsars$R$`Mean(R)`
RsarsCI = matrix(NA, nrow = 2, ncol = length(Rsars))
RsarsCI[1,] = estsars$R$`Quantile.0.025(R)`
RsarsCI[2,] = estsars$R$`Quantile.0.975(R)`
# Estimate R at each day over 7 day window finishing on that day
if(wantPlot){
  quartz()
  plot(estflu)
  dev.copy2eps(file="fluCori.eps")
  quartz()
  plot(estsars)
  dev.copy2eps(file="sarsCori.eps")
}
######################################################################
## APE and PMSE solution
######################################################################
# Outputs take list form [[kbest1, modBest1, kbest2, modBest2]]
# Each modBest is also a list of form [[ape, pmse, prob, Rhat, Rhatci, Inexhat, Inexci, alpha, beta, pr]]
# Priors and settings
# Rprior: prior hyperparameters forwarded to apeEstim/apeSpecific (see
# apeEstim.R); a: presumably the 2.5% tail level for credible intervals --
# TODO confirm against apeEstim.R.
Rprior = c(1, 5); a = 0.025
# Clean Lam vectors of NAs
Lflu[is.na(Lflu)] = 0; Lsars[is.na(Lsars)] = 0 # <------ important
# Flu results
Rmodflu = apeEstim(Iflu, genflu, Lflu, Rprior, a, tflu[1], 'flu')
# Best estimates and prediction
plotAPEWindow(Rmodflu[[2]], 'flu', Rmodflu[[1]], Iplt = Iflu[seq(tflu[1]+1,length(Iflu))])
# Specific 7-day window
Rmodflu7 = apeSpecific(Iflu, genflu, Lflu, Rprior, a, tflu[1], 'flu7', 7)
plotAPEWindow(Rmodflu7[[2]], 'flu7', Rmodflu7[[1]], Iplt = Iflu[seq(tflu[1]+1,length(Iflu))])
# Sars results
Rmodsars = apeEstim(Isars, gensars, Lsars, Rprior, a, tsars[1], 'sars')
# Best estimates and prediction
plotAPEWindow(Rmodsars[[2]], 'sars', Rmodsars[[1]], Iplt = Isars[seq(tsars[1]+1,length(Isars))])
# Specific 7-day window
Rmodsars7 = apeSpecific(Isars, gensars, Lsars, Rprior, a, tsars[1], 'sars7', 7)
plotAPEWindow(Rmodsars7[[2]], 'sars7', Rmodsars7[[1]], Iplt = Isars[seq(tsars[1]+1,length(Isars))])
# Compare against EpiEstim outputs for weekly windows as sanity check
# (list positions 4/5 hold Rhat and its CI -- see output spec comment above)
Rsars2 = Rmodsars7[[2]][[4]]; Rflu2 = Rmodflu7[[2]][[4]]
Rsars2CI = Rmodsars7[[2]][[5]]; Rflu2CI = Rmodflu7[[2]][[5]]
# Compare APE at weekly windows and EpiEstim
quartz(); par(mfrow=c(2,1))
# Time ranges to plot
ran1 = seq(1, length(tflu)-1); ran2 = seq(1, length(tsars)-1)
# Reprod. num estimates and confidence interval for flu
# (red lines = EpiEstim mean and 95% band; grey points = APE weekly-window)
plot(ran1, Rflu[ran1], type = 'l', bty = 'l', lwd = 2, col='red',
     xlab = paste0("time (k = ", 7, ")"), ylab = 'Rhat flu')
lines(ran1, RfluCI[1, ran1], col = 'red', type = "l", lwd = 1)
lines(ran1, RfluCI[2, ran1], col = 'red', type = "l", lwd = 1)
points(ran1, Rflu2, lwd = 2, col = 'lightgrey')
points(ran1, Rflu2CI[1,], lwd = 2, col = 'lightgrey')
points(ran1, Rflu2CI[2,], lwd = 2, col = 'lightgrey')
# Reprod. num estimates and confidence interval for sars
plot(ran2, Rsars[ran2], type = 'l', bty = 'l', lwd = 2, col='red',
     xlab = paste0("time (k = ", 7, ")"), ylab = 'Rhat sars')
lines(ran2, RsarsCI[1, ran2], col = 'red', type = "l", lwd = 1)
lines(ran2, RsarsCI[2, ran2], col = 'red', type = "l", lwd = 1)
points(ran2, Rsars2, lwd = 2, col = 'lightgrey')
points(ran2, Rsars2CI[1,], lwd = 2, col = 'lightgrey')
points(ran2, Rsars2CI[2,], lwd = 2, col = 'lightgrey')
|
#Data pull of Mass inshore survey
#-------------------------------------------------------------------------------
#User parameters
# `windows` toggles between a Windows network-drive setup and a Unix-style
# setup that prompts for the Oracle password used by odbcConnect() below.
# TRUE/FALSE literals are used instead of T/F, which are ordinary variables
# and can be silently reassigned.
windows <- FALSE
if (windows) {
  data.dir <- "L:\\EcoAP\\Data\\survey\\"
  out.dir <- "L:\\EcoAP\\Data\\survey\\"
  # NOTE: memory.limit() is Windows-only (and defunct in R >= 4.2)
  memory.limit(4000)
}
if (!windows) {
  data.dir <- "slucey/EcoAP/Data/survey/"
  out.dir <- "slucey/EcoAP/Data/survey/"
  uid <- 'slucey'
  # Prompt for the password so it is never stored in the script
  cat('Oracle Password:')
  pwd <- readLines(n = 1) #If reading from source, need to manually add pwd here
}
#-------------------------------------------------------------------------------
#Required packages
library(RODBC); library(data.table)
#Created functions
#Convert output to text for RODBC query
sqltext<-function(x){
out<-x[1]
if(length(x) > 1){
for(i in 2:length(x)){
out<-paste(out, x[i], sep="','")
}
}
out<-paste("'", out, "'", sep='')
return(out)
}
#-------------------------------------------------------------------------------
#Begin script
# Open the database connection: a DSN-less driver connection on Windows,
# otherwise an ODBC connection to the 'sole' server using the uid/pwd
# collected in the user-parameters section above.
if(windows == T){
channel <- odbcDriverConnect()
}else{
channel <- odbcConnect('sole', uid, pwd)
}
#Select cruises
# purpose_code = 11 restricts to the survey cruises of interest -- TODO
# confirm code meaning against the database documentation
cruise.qry <- "select unique year, cruise6, svvessel, season
from mstr_cruise
where purpose_code = 11
and year >= 1963
order by year, cruise6"
cruise <- as.data.table(sqlQuery(channel, cruise.qry))
#Use cruise codes to select other data
# sqltext() quotes the codes for the SQL "in (...)" clauses below
cruise6 <- sqltext(cruise$CRUISE6)
#Select station data
station.qry <- paste("select unique cruise6, station, stratum, shg, decdeg_beglat as lat,
decdeg_beglon as lon, begin_est_towdate as est_towdate, avgdepth as depth,
surftemp, surfsalin, bottemp, botsalin
from UNION_FSCS_SVSTA
where cruise6 in (", cruise6, ")
order by cruise6, station", sep='')
station <- as.data.table(sqlQuery(channel, station.qry))
#merge cruise and station
mass.survey <- merge(cruise, station, by = 'CRUISE6')
#Grab catch data
catch.qry <- paste("select cruise6, station, stratum, svspp, catchsex, expcatchnum as abundance,
expcatchwt as biomass
from UNION_FSCS_SVCAT
where cruise6 in (", cruise6, ")
and stratum not like 'YT%'
order by cruise6, station, svspp", sep='')
catch <- as.data.table(sqlQuery(channel, catch.qry))
setkey(catch, CRUISE6, STATION, STRATUM)
#merge with mass.survey
setkey(mass.survey, CRUISE6, STATION, STRATUM)
mass.survey <- merge(mass.survey, catch, by = key(mass.survey))
#Length data
length.qry <- paste("select cruise6, station, stratum, svspp, catchsex, length, expnumlen as numlen
from UNION_FSCS_SVLEN
where cruise6 in (", cruise6, ")
and stratum not like 'YT%'
order by cruise6, station, svspp, length", sep='')
len <- as.data.table(sqlQuery(channel, length.qry))
setkey(len, CRUISE6, STATION, STRATUM, SVSPP, CATCHSEX)
#merge with mass.survey
# left join (all.x = T) keeps catch rows without length records;
# allow.cartesian permits the one-to-many catch-to-length expansion
setkey(mass.survey, CRUISE6, STATION, STRATUM, SVSPP, CATCHSEX)
mass.survey <- merge(mass.survey, len, all.x = T, allow.cartesian = T)
odbcClose(channel)
save(mass.survey, file = paste(out.dir, "Mass_survey.RData", sep=''))
| /Mass_survey.R | no_license | slucey/RSurvey | R | false | false | 3,324 | r | #Data pull of Mass inshore survey
#-------------------------------------------------------------------------------
#User parameters
# `windows` toggles between a Windows network-drive setup and a Unix-style
# setup that prompts for the Oracle password used by odbcConnect() below.
# TRUE/FALSE literals are used instead of T/F, which are ordinary variables
# and can be silently reassigned.
windows <- FALSE
if (windows) {
  data.dir <- "L:\\EcoAP\\Data\\survey\\"
  out.dir <- "L:\\EcoAP\\Data\\survey\\"
  # NOTE: memory.limit() is Windows-only (and defunct in R >= 4.2)
  memory.limit(4000)
}
if (!windows) {
  data.dir <- "slucey/EcoAP/Data/survey/"
  out.dir <- "slucey/EcoAP/Data/survey/"
  uid <- 'slucey'
  # Prompt for the password so it is never stored in the script
  cat('Oracle Password:')
  pwd <- readLines(n = 1) #If reading from source, need to manually add pwd here
}
#-------------------------------------------------------------------------------
#Required packages
library(RODBC); library(data.table)
#-------------------------------------------------------------------------------
#Created functions
#Convert a vector into a single-quoted, comma-separated string for use in an
#SQL "in (...)" clause, e.g. c(10, 20) -> "'10','20'". The vectorized
#paste(collapse =) replaces the original element-by-element loop, which grew
#the string in O(n^2) time and returned "'NA'" for zero-length input (now "''").
sqltext <- function(x) {
  paste0("'", paste(x, collapse = "','"), "'")
}
#-------------------------------------------------------------------------------
#Begin script
# Open the database connection: a DSN-less driver connection on Windows,
# otherwise an ODBC connection to the 'sole' server using the uid/pwd
# collected in the user-parameters section above.
if(windows == T){
channel <- odbcDriverConnect()
}else{
channel <- odbcConnect('sole', uid, pwd)
}
#Select cruises
# purpose_code = 11 restricts to the survey cruises of interest -- TODO
# confirm code meaning against the database documentation
cruise.qry <- "select unique year, cruise6, svvessel, season
from mstr_cruise
where purpose_code = 11
and year >= 1963
order by year, cruise6"
cruise <- as.data.table(sqlQuery(channel, cruise.qry))
#Use cruise codes to select other data
# sqltext() quotes the codes for the SQL "in (...)" clauses below
cruise6 <- sqltext(cruise$CRUISE6)
#Select station data
station.qry <- paste("select unique cruise6, station, stratum, shg, decdeg_beglat as lat,
decdeg_beglon as lon, begin_est_towdate as est_towdate, avgdepth as depth,
surftemp, surfsalin, bottemp, botsalin
from UNION_FSCS_SVSTA
where cruise6 in (", cruise6, ")
order by cruise6, station", sep='')
station <- as.data.table(sqlQuery(channel, station.qry))
#merge cruise and station
mass.survey <- merge(cruise, station, by = 'CRUISE6')
#Grab catch data
catch.qry <- paste("select cruise6, station, stratum, svspp, catchsex, expcatchnum as abundance,
expcatchwt as biomass
from UNION_FSCS_SVCAT
where cruise6 in (", cruise6, ")
and stratum not like 'YT%'
order by cruise6, station, svspp", sep='')
catch <- as.data.table(sqlQuery(channel, catch.qry))
setkey(catch, CRUISE6, STATION, STRATUM)
#merge with mass.survey
setkey(mass.survey, CRUISE6, STATION, STRATUM)
mass.survey <- merge(mass.survey, catch, by = key(mass.survey))
#Length data
length.qry <- paste("select cruise6, station, stratum, svspp, catchsex, length, expnumlen as numlen
from UNION_FSCS_SVLEN
where cruise6 in (", cruise6, ")
and stratum not like 'YT%'
order by cruise6, station, svspp, length", sep='')
len <- as.data.table(sqlQuery(channel, length.qry))
setkey(len, CRUISE6, STATION, STRATUM, SVSPP, CATCHSEX)
#merge with mass.survey
# left join (all.x = T) keeps catch rows without length records;
# allow.cartesian permits the one-to-many catch-to-length expansion
setkey(mass.survey, CRUISE6, STATION, STRATUM, SVSPP, CATCHSEX)
mass.survey <- merge(mass.survey, len, all.x = T, allow.cartesian = T)
odbcClose(channel)
save(mass.survey, file = paste(out.dir, "Mass_survey.RData", sep=''))
|
## Return the name of the hospital in `state` holding rank `num` for 30-day
## mortality from `outcome` ("heart attack", "heart failure" or "pneumonia").
## `num` may be an integer rank, "best" (rank 1) or "worst" (last rank);
## ties are broken alphabetically by hospital name. Returns NA when `num`
## exceeds the number of ranked hospitals, and an error-message string for
## an invalid state or outcome. Reads "outcome-of-care-measures.csv" from
## the current working directory.
rankhospital <- function(state, outcome, num = "best") {
  ## Read outcome data
  data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  ## Check that state and outcome are valid
  sdata <- data[data$State == state, ]
  if (nrow(sdata) == 0) {
    return(paste("Error in rankhospital(",'"',state,'",', '"',outcome,'"',") : invalid state"))
  }
  ## Map the human-readable outcome onto its mortality-rate column
  if (outcome == "heart attack") {
    disease <- "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack"
  } else if (outcome == "heart failure") {
    disease <- "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure"
  } else if (outcome == "pneumonia") {
    disease <- "Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia"
  } else {
    return(paste("Error in rankhospital(",'"',state,'",', '"',outcome,'"',") : invalid outcome"))
  }
  ## Return hospital name in that state with the given rank
  subdata <- subset(sdata, , select = c("Hospital.Name", disease))
  colnames(subdata) <- c("name", "disease")
  ## Drop hospitals without a usable numeric rate; the coercion warning for
  ## "Not Available" entries is expected, so silence it.
  dnadata <- subdata[suppressWarnings(complete.cases(as.numeric(subdata$disease))), ]
  ## Order by mortality rate, ties broken by hospital name
  rdata <- dnadata[order(as.numeric(dnadata$disease), dnadata$name), ]
  if (num == "best") {
    num <- 1
  } else if (num == "worst") {
    ## BUG FIX: length(rdata) is the number of COLUMNS of a data frame
    ## (always 2 here), so "worst" previously returned the 2nd-ranked
    ## hospital; the worst hospital is the last ROW.
    num <- nrow(rdata)
  }
  ## 30-day death rate rank
  return(rdata[num, 1])
}
} | /rankhospital.R | no_license | kuoyliu/R-ProgAssignment3 | R | false | false | 1,516 | r | rankhospital <- function(state, outcome, num = "best") {
## Read outcome data
data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
## Check that state and outcome are valid
sdata <- data[data$State == state, ]
if(length(sdata[, 1]) == 0) {
## NOTE(review): message says "best(" inside rankhospital -- looks like a
## copy-paste from the companion best() function; confirm and correct.
return(paste("Error in best(",'"',state,'",', '"',outcome,'"',") : invalid state"))
}
## Map the human-readable outcome onto its mortality-rate column
if(outcome == "heart attack") {
disease <- "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack"
} else if(outcome == "heart failure") {
disease <- "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure"
} else if(outcome == "pneumonia") {
disease <- "Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia"
} else {
return(paste("Error in best(",'"',state,'",', '"',outcome,'"',") : invalid outcome"))
}
## Return hospital name in that state with the given rank
subdata <- subset(sdata, , select = c("Hospital.Name", disease))
colnames(subdata) <- c("name", "disease")
## Keep only rows whose rate coerces to a number ("Not Available" -> NA,
## with an expected coercion warning)
dnadata <- subdata[complete.cases(as.numeric(subdata$disease)), ]
## Order by mortality rate, ties broken alphabetically by hospital name
rdata <- dnadata[order(as.numeric(dnadata$disease), dnadata$name), ]
if(num == "best") {
num <- 1
} else if(num == "worst") {
## NOTE(review): length(rdata) on a data frame is the number of COLUMNS
## (always 2 here), not rows -- "worst" therefore returns the 2nd-ranked
## hospital. This should almost certainly be nrow(rdata); confirm and fix.
num <- as.numeric(length(rdata))
}
## 30-day death rate
return(rdata[num, 1])
}
0163403bc313def49cc9452828b6570c dungeon_i25-m12-u5-v0.pddl_planlen=146.qdimacs 109835 995677 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/A1/Database/Kronegger-Pfandler-Pichler/dungeon/dungeon_i25-m12-u5-v0.pddl_planlen=146/dungeon_i25-m12-u5-v0.pddl_planlen=146.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 93 | r | 0163403bc313def49cc9452828b6570c dungeon_i25-m12-u5-v0.pddl_planlen=146.qdimacs 109835 995677 |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bamlss.R
\name{BAMLSS}
\alias{BAMLSS}
\alias{family.BAMLSS}
\alias{mean.BAMLSS}
\alias{variance.BAMLSS}
\alias{skewness.BAMLSS}
\alias{kurtosis.BAMLSS}
\alias{random.BAMLSS}
\alias{pdf.BAMLSS}
\alias{log_pdf.BAMLSS}
\alias{cdf.BAMLSS}
\alias{quantile.BAMLSS}
\alias{support.BAMLSS}
\alias{is_discrete.BAMLSS}
\alias{is_continuous.BAMLSS}
\alias{format.BAMLSS}
\alias{print.BAMLSS}
\title{Create \pkg{distributions3} Object}
\usage{
BAMLSS(family, ...)
}
\arguments{
\item{family}{object. BAMLSS family specifications recognized by
\code{\link[bamlss]{bamlss.family}}, including \code{family.bamlss} objects,
family-generating functions (e.g., \code{\link[bamlss]{gaussian_bamlss}}),
or characters with family names (e.g., \code{"gaussian"} or \code{"binomial"}).}
\item{\dots}{further arguments passed as parameters to the BAMLSS family.
Can be scalars or vectors.}
}
\value{
A \code{BAMLSS} distribution object.
}
\description{
A single class and corresponding methods encompassing all \code{bamlss.family}
distributions (from the \pkg{bamlss} package) using the workflow from the
\pkg{distributions3} package.
}
\details{
The constructor function \code{BAMLSS} sets up a distribution
object, representing a distribution from the BAMLSS (Bayesian additive
models for location, scale, and shape) framework by the corresponding parameters
plus a \code{family} attribute, e.g., \code{\link[bamlss]{gaussian_bamlss}} for the
normal distribution or \code{\link[bamlss]{binomial_bamlss}} for the binomial
distribution. The parameters employed by the family vary across the families
but typically capture different distributional properties (like location, scale,
shape, etc.).
All parameters can also be vectors, so that it is possible to define a vector
of BAMLSS distributions from the same family with potentially different parameters.
All parameters need to have the same length or must be scalars (i.e.,
of length 1) which are then recycled to the length of the other parameters.
For the \code{BAMLSS} distribution objects there is a wide range
of standard methods available to the generics provided in the \pkg{distributions3}
package: \code{\link[distributions3]{pdf}} and \code{\link[distributions3]{log_pdf}}
for the (log-)density (PDF), \code{\link[distributions3]{cdf}} for the probability
from the cumulative distribution function (CDF), \code{quantile} for quantiles,
\code{\link[distributions3]{random}} for simulating random variables,
and \code{\link[distributions3]{support}} for the support interval
(minimum and maximum). Internally, these methods rely on the usual d/p/q/r
functions provided in \pkg{bamlss}, see the manual pages of the individual
families. The methods \code{\link[distributions3]{is_discrete}} and
\code{\link[distributions3]{is_continuous}} can be used to query whether the
distributions are discrete on the entire support or continuous on the entire
support, respectively.
See the examples below for an illustration of the workflow for the class and methods.
}
\examples{
\dontshow{ if(!requireNamespace("bamlss")) {
if(interactive() || is.na(Sys.getenv("_R_CHECK_PACKAGE_NAME_", NA))) {
stop("not all packages required for the example are installed")
} else q() }
}
## package and random seed
library("distributions3")
set.seed(6020)
## three Weibull distributions
X <- BAMLSS("weibull", lambda = c(1, 1, 2), alpha = c(1, 2, 2))
X
## moments (FIXME: mean and variance not provided by weibull_bamlss)
## mean(X)
## variance(X)
## support interval (minimum and maximum)
support(X)
is_discrete(X)
is_continuous(X)
## simulate random variables
random(X, 5)
## histograms of 1,000 simulated observations
x <- random(X, 1000)
hist(x[1, ], main = "Weibull(1,1)")
hist(x[2, ], main = "Weibull(1,2)")
hist(x[3, ], main = "Weibull(2,2)")
## probability density function (PDF) and log-density (or log-likelihood)
x <- c(2, 2, 1)
pdf(X, x)
pdf(X, x, log = TRUE)
log_pdf(X, x)
## cumulative distribution function (CDF)
cdf(X, x)
## quantiles
quantile(X, 0.5)
## cdf() and quantile() are inverses
cdf(X, quantile(X, 0.5))
quantile(X, cdf(X, 1))
## all methods above can either be applied elementwise or for
## all combinations of X and x, if length(X) = length(x),
## also the result can be assured to be a matrix via drop = FALSE
p <- c(0.05, 0.5, 0.95)
quantile(X, p, elementwise = FALSE)
quantile(X, p, elementwise = TRUE)
quantile(X, p, elementwise = TRUE, drop = FALSE)
## compare theoretical and empirical mean from 1,000 simulated observations
## (FIXME: mean not provided by weibull_bamlss)
## cbind(
## "theoretical" = mean(X),
## "empirical" = rowMeans(random(X, 1000))
## )
}
\seealso{
\code{\link[bamlss]{bamlss.family}}
}
| /man/topmodels.Rd | no_license | cran/bamlss | R | false | true | 4,760 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bamlss.R
\name{BAMLSS}
\alias{BAMLSS}
\alias{family.BAMLSS}
\alias{mean.BAMLSS}
\alias{variance.BAMLSS}
\alias{skewness.BAMLSS}
\alias{kurtosis.BAMLSS}
\alias{random.BAMLSS}
\alias{pdf.BAMLSS}
\alias{log_pdf.BAMLSS}
\alias{cdf.BAMLSS}
\alias{quantile.BAMLSS}
\alias{support.BAMLSS}
\alias{is_discrete.BAMLSS}
\alias{is_continuous.BAMLSS}
\alias{format.BAMLSS}
\alias{print.BAMLSS}
\title{Create \pkg{distributions3} Object}
\usage{
BAMLSS(family, ...)
}
\arguments{
\item{family}{object. BAMLSS family specifications recognized by
\code{\link[bamlss]{bamlss.family}}, including \code{family.bamlss} objects,
family-generating functions (e.g., \code{\link[bamlss]{gaussian_bamlss}}),
or characters with family names (e.g., \code{"gaussian"} or \code{"binomial"}).}
\item{\dots}{further arguments passed as parameters to the BAMLSS family.
Can be scalars or vectors.}
}
\value{
A \code{BAMLSS} distribution object.
}
\description{
A single class and corresponding methods encompassing all \code{bamlss.family}
distributions (from the \pkg{bamlss} package) using the workflow from the
\pkg{distributions3} package.
}
\details{
The constructor function \code{BAMLSS} sets up a distribution
object, representing a distribution from the BAMLSS (Bayesian additive
models for location, scale, and shape) framework by the corresponding parameters
plus a \code{family} attribute, e.g., \code{\link[bamlss]{gaussian_bamlss}} for the
normal distribution or \code{\link[bamlss]{binomial_bamlss}} for the binomial
distribution. The parameters employed by the family vary across the families
but typically capture different distributional properties (like location, scale,
shape, etc.).
All parameters can also be vectors, so that it is possible to define a vector
of BAMLSS distributions from the same family with potentially different parameters.
All parameters need to have the same length or must be scalars (i.e.,
of length 1) which are then recycled to the length of the other parameters.
For the \code{BAMLSS} distribution objects there is a wide range
of standard methods available to the generics provided in the \pkg{distributions3}
package: \code{\link[distributions3]{pdf}} and \code{\link[distributions3]{log_pdf}}
for the (log-)density (PDF), \code{\link[distributions3]{cdf}} for the probability
from the cumulative distribution function (CDF), \code{quantile} for quantiles,
\code{\link[distributions3]{random}} for simulating random variables,
and \code{\link[distributions3]{support}} for the support interval
(minimum and maximum). Internally, these methods rely on the usual d/p/q/r
functions provided in \pkg{bamlss}, see the manual pages of the individual
families. The methods \code{\link[distributions3]{is_discrete}} and
\code{\link[distributions3]{is_continuous}} can be used to query whether the
distributions are discrete on the entire support or continuous on the entire
support, respectively.
See the examples below for an illustration of the workflow for the class and methods.
}
\examples{
\dontshow{ if(!requireNamespace("bamlss")) {
if(interactive() || is.na(Sys.getenv("_R_CHECK_PACKAGE_NAME_", NA))) {
stop("not all packages required for the example are installed")
} else q() }
}
## package and random seed
library("distributions3")
set.seed(6020)
## three Weibull distributions
X <- BAMLSS("weibull", lambda = c(1, 1, 2), alpha = c(1, 2, 2))
X
## moments (FIXME: mean and variance not provided by weibull_bamlss)
## mean(X)
## variance(X)
## support interval (minimum and maximum)
support(X)
is_discrete(X)
is_continuous(X)
## simulate random variables
random(X, 5)
## histograms of 1,000 simulated observations
x <- random(X, 1000)
hist(x[1, ], main = "Weibull(1,1)")
hist(x[2, ], main = "Weibull(1,2)")
hist(x[3, ], main = "Weibull(2,2)")
## probability density function (PDF) and log-density (or log-likelihood)
x <- c(2, 2, 1)
pdf(X, x)
pdf(X, x, log = TRUE)
log_pdf(X, x)
## cumulative distribution function (CDF)
cdf(X, x)
## quantiles
quantile(X, 0.5)
## cdf() and quantile() are inverses
cdf(X, quantile(X, 0.5))
quantile(X, cdf(X, 1))
## all methods above can either be applied elementwise or for
## all combinations of X and x, if length(X) = length(x),
## also the result can be assured to be a matrix via drop = FALSE
p <- c(0.05, 0.5, 0.95)
quantile(X, p, elementwise = FALSE)
quantile(X, p, elementwise = TRUE)
quantile(X, p, elementwise = TRUE, drop = FALSE)
## compare theoretical and empirical mean from 1,000 simulated observations
## (FIXME: mean not provided by weibull_bamlss)
## cbind(
## "theoretical" = mean(X),
## "empirical" = rowMeans(random(X, 1000))
## )
}
\seealso{
\code{\link[bamlss]{bamlss.family}}
}
|
\name{ClusterTest-class}
\alias{ClusterTest}
\alias{ClusterTest-class}
\alias{hist,ClusterTest-method}
\alias{image,ClusterTest-method}
\alias{summary,ClusterTest-method}
\docType{class}
\title{Class "ClusterTest"}
\description{
This is a base class for tests that attempt to determine whether the groups
found by an unsupervised clustering method are statistically significant.
}
\usage{
\S4method{image}{ClusterTest}(x, dendrogram, \dots)
}
\arguments{
\item{x}{An object of the \code{ClusterTest} class.}
\item{dendrogram}{An object with S3 class \code{hclust}, as returned
by the \code{\link{hclust}} function.}
\item{\dots}{Additional graphical parameters to be passed to the
standard \code{heatmap} function that is used to implement the
\code{image} method.}
}
\section{Objects from the Class}{
Objects can be created by calls of the form \code{new("ClusterTest", \dots)}.
Most users, however, will only create objects from one of the derived
classes such as \code{\link{BootstrapClusterTest}} or
\code{\link{PerturbationClusterTest}}.
}
\section{Slots}{
\describe{
\item{\code{call}:}{An object of class \code{call}, which shows
how the object was constructed.}
\item{\code{result}:}{A symmetric \code{matrix} containing the
results of the cluster reproducibility test. The size of the
matrix corresponds to the number of samples (columns) in the data
set on which the test was performed. The \code{result} matrix
should contain "agreement" values between 0 and 1, representing for
each pair of samples the fraction of times that they were
collected into the same cluster.}
}
}
\section{Methods}{
\describe{
\item{hist}{\code{signature(x = "ClusterTest")}: Produces a
histogram of the agreement fractions. When a true group structure
    exists, one expects a multimodal distribution, with one group of
agreements near 0 (for pairs belonging to different clusters) and
one group of agreements near 1 (for pairs belonging to the same
cluster).}
\item{image}{\code{signature(x = "ClusterTest")}: Uses the
\code{heatmap} function to display the agreement matrix. The
optional \code{dendrogram} argument should be used to display the
extent to which the agreement matrix matches the results of
hierarchical clustering using the full data set. This method
invisibly returns the result of a call to \code{heatmap}; thus, you
can use \code{keep.dendro=TRUE} to recover the cluster assignments
from the dendrograms.}
\item{summary}{\code{signature(object = "ClusterTest")}: Write out a
summary of the object.}
}
}
\references{
Kerr MK, Churchill GJ.\cr
\emph{Bootstrapping cluster analysis: Assessing the reliability of
conclusions from microarray experiments.}\cr
PNAS 2001; 98:8961-8965.
}
\author{
  Kevin R. Coombes \email{krc@silicovore.com}
}
\seealso{
\code{\link{BootstrapClusterTest}},
\code{\link{PerturbationClusterTest}},
\code{\link{heatmap}}
}
\examples{
showClass("ClusterTest")
## simulate data from two different classes
d1 <- matrix(rnorm(100*30, rnorm(100, 0.5)), nrow=100, ncol=30, byrow=FALSE)
d2 <- matrix(rnorm(100*20, rnorm(100, 0.5)), nrow=100, ncol=20, byrow=FALSE)
dd <- cbind(d1, d2)
## cluster the data
hc <- hclust(distanceMatrix(dd, 'pearson'), method='average')
## make a fake reproducibility matrix
fraud <- function(x) {
new('ClusterTest', result=abs(cor(x)), call=match.call())
}
fake <- fraud(dd)
summary(fake)
hist(fake)
image(fake) # let heatmap compute a new dendrogram from the agreements
image(fake, dendrogram=hc) # use the actual dendrogram from the data
image(fake, dendrogram=hc, col=blueyellow(64)) # change the colors
## cleanup
rm(fake, fraud, hc, dd, d1, d2)
}
\keyword{classes}
\keyword{cluster}
\keyword{htest}
\keyword{multivariate}
| /man/cd03-0-ClusterTest-class.Rd | no_license | cran/ClassDiscovery | R | false | false | 4,000 | rd | \name{ClusterTest-class}
\alias{ClusterTest}
\alias{ClusterTest-class}
\alias{hist,ClusterTest-method}
\alias{image,ClusterTest-method}
\alias{summary,ClusterTest-method}
\docType{class}
\title{Class "ClusterTest"}
\description{
This is a base class for tests that attempt to determine whether the groups
found by an unsupervised clustering method are statistically significant.
}
\usage{
\S4method{image}{ClusterTest}(x, dendrogram, \dots)
}
\arguments{
\item{x}{An object of the \code{ClusterTest} class.}
\item{dendrogram}{An object with S3 class \code{hclust}, as returned
by the \code{\link{hclust}} function.}
\item{\dots}{Additional graphical parameters to be passed to the
standard \code{heatmap} function that is used to implement the
\code{image} method.}
}
\section{Objects from the Class}{
Objects can be created by calls of the form \code{new("ClusterTest", \dots)}.
Most users, however, will only create objects from one of the derived
classes such as \code{\link{BootstrapClusterTest}} or
\code{\link{PerturbationClusterTest}}.
}
\section{Slots}{
\describe{
\item{\code{call}:}{An object of class \code{call}, which shows
how the object was constructed.}
\item{\code{result}:}{A symmetric \code{matrix} containing the
results of the cluster reproducibility test. The size of the
matrix corresponds to the number of samples (columns) in the data
set on which the test was performed. The \code{result} matrix
should contain "agreement" values between 0 and 1, representing for
each pair of samples the fraction of times that they were
collected into the same cluster.}
}
}
\section{Methods}{
\describe{
\item{hist}{\code{signature(x = "ClusterTest")}: Produces a
histogram of the agreement fractions. When a true group structure
    exists, one expects a multimodal distribution, with one group of
agreements near 0 (for pairs belonging to different clusters) and
one group of agreements near 1 (for pairs belonging to the same
cluster).}
\item{image}{\code{signature(x = "ClusterTest")}: Uses the
\code{heatmap} function to display the agreement matrix. The
optional \code{dendrogram} argument should be used to display the
extent to which the agreement matrix matches the results of
hierarchical clustering using the full data set. This method
invisibly returns the result of a call to \code{heatmap}; thus, you
can use \code{keep.dendro=TRUE} to recover the cluster assignments
from the dendrograms.}
\item{summary}{\code{signature(object = "ClusterTest")}: Write out a
summary of the object.}
}
}
\references{
Kerr MK, Churchill GJ.\cr
\emph{Bootstrapping cluster analysis: Assessing the reliability of
conclusions from microarray experiments.}\cr
PNAS 2001; 98:8961-8965.
}
\author{
  Kevin R. Coombes \email{krc@silicovore.com}
}
\seealso{
\code{\link{BootstrapClusterTest}},
\code{\link{PerturbationClusterTest}},
\code{\link{heatmap}}
}
\examples{
showClass("ClusterTest")
## simulate data from two different classes
d1 <- matrix(rnorm(100*30, rnorm(100, 0.5)), nrow=100, ncol=30, byrow=FALSE)
d2 <- matrix(rnorm(100*20, rnorm(100, 0.5)), nrow=100, ncol=20, byrow=FALSE)
dd <- cbind(d1, d2)
## cluster the data
hc <- hclust(distanceMatrix(dd, 'pearson'), method='average')
## make a fake reproducibility matrix
fraud <- function(x) {
new('ClusterTest', result=abs(cor(x)), call=match.call())
}
fake <- fraud(dd)
summary(fake)
hist(fake)
image(fake) # let heatmap compute a new dendrogram from the agreements
image(fake, dendrogram=hc) # use the actual dendrogram from the data
image(fake, dendrogram=hc, col=blueyellow(64)) # change the colors
## cleanup
rm(fake, fraud, hc, dd, d1, d2)
}
\keyword{classes}
\keyword{cluster}
\keyword{htest}
\keyword{multivariate}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R, R/DataFrame.R
\name{unionByName}
\alias{unionByName}
\alias{unionByName,SparkDataFrame,SparkDataFrame-method}
\title{Return a new SparkDataFrame containing the union of rows, matched by column names}
\usage{
unionByName(x, y, ...)
\S4method{unionByName}{SparkDataFrame,SparkDataFrame}(x, y, allowMissingColumns = FALSE)
}
\arguments{
\item{x}{A SparkDataFrame}
\item{y}{A SparkDataFrame}
\item{...}{further arguments to be passed to or from other methods.}
\item{allowMissingColumns}{logical}
}
\value{
A SparkDataFrame containing the result of the union.
}
\description{
Return a new SparkDataFrame containing the union of rows in this SparkDataFrame
and another SparkDataFrame. This is different from \code{union} function, and both
\code{UNION ALL} and \code{UNION DISTINCT} in SQL as column positions are not taken
into account. Input SparkDataFrames can have different data types in the schema.
}
\details{
When the parameter \code{allowMissingColumns} is \code{TRUE}, the set of column names
in x and y can differ; missing columns will be filled as null.
Further, the missing columns of x will be added at the end
in the schema of the union result.
Note: This does not remove duplicate rows across the two SparkDataFrames.
This function resolves columns by name (not by position).
}
\note{
unionByName since 2.3.0
}
\examples{
\dontrun{
sparkR.session()
df1 <- select(createDataFrame(mtcars), "carb", "am", "gear")
df2 <- select(createDataFrame(mtcars), "am", "gear", "carb")
head(unionByName(df1, df2))
df3 <- select(createDataFrame(mtcars), "carb")
head(unionByName(df1, df3, allowMissingColumns = TRUE))
}
}
\seealso{
\link{rbind} \link{union}
Other SparkDataFrame functions:
\code{\link{SparkDataFrame-class}},
\code{\link{agg}()},
\code{\link{alias}()},
\code{\link{arrange}()},
\code{\link{as.data.frame}()},
\code{\link{attach,SparkDataFrame-method}},
\code{\link{broadcast}()},
\code{\link{cache}()},
\code{\link{checkpoint}()},
\code{\link{coalesce}()},
\code{\link{collect}()},
\code{\link{colnames}()},
\code{\link{coltypes}()},
\code{\link{createOrReplaceTempView}()},
\code{\link{crossJoin}()},
\code{\link{cube}()},
\code{\link{dapplyCollect}()},
\code{\link{dapply}()},
\code{\link{describe}()},
\code{\link{dim}()},
\code{\link{distinct}()},
\code{\link{dropDuplicates}()},
\code{\link{dropna}()},
\code{\link{drop}()},
\code{\link{dtypes}()},
\code{\link{exceptAll}()},
\code{\link{except}()},
\code{\link{explain}()},
\code{\link{filter}()},
\code{\link{first}()},
\code{\link{gapplyCollect}()},
\code{\link{gapply}()},
\code{\link{getNumPartitions}()},
\code{\link{group_by}()},
\code{\link{head}()},
\code{\link{hint}()},
\code{\link{histogram}()},
\code{\link{insertInto}()},
\code{\link{intersectAll}()},
\code{\link{intersect}()},
\code{\link{isLocal}()},
\code{\link{isStreaming}()},
\code{\link{join}()},
\code{\link{limit}()},
\code{\link{localCheckpoint}()},
\code{\link{merge}()},
\code{\link{mutate}()},
\code{\link{ncol}()},
\code{\link{nrow}()},
\code{\link{persist}()},
\code{\link{printSchema}()},
\code{\link{randomSplit}()},
\code{\link{rbind}()},
\code{\link{rename}()},
\code{\link{repartitionByRange}()},
\code{\link{repartition}()},
\code{\link{rollup}()},
\code{\link{sample}()},
\code{\link{saveAsTable}()},
\code{\link{schema}()},
\code{\link{selectExpr}()},
\code{\link{select}()},
\code{\link{showDF}()},
\code{\link{show}()},
\code{\link{storageLevel}()},
\code{\link{str}()},
\code{\link{subset}()},
\code{\link{summary}()},
\code{\link{take}()},
\code{\link{toJSON}()},
\code{\link{unionAll}()},
\code{\link{union}()},
\code{\link{unpersist}()},
\code{\link{withColumn}()},
\code{\link{withWatermark}()},
\code{\link{with}()},
\code{\link{write.df}()},
\code{\link{write.jdbc}()},
\code{\link{write.json}()},
\code{\link{write.orc}()},
\code{\link{write.parquet}()},
\code{\link{write.stream}()},
\code{\link{write.text}()}
}
\concept{SparkDataFrame functions}
| /man/unionByName.Rd | no_license | cran/SparkR | R | false | true | 4,000 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R, R/DataFrame.R
\name{unionByName}
\alias{unionByName}
\alias{unionByName,SparkDataFrame,SparkDataFrame-method}
\title{Return a new SparkDataFrame containing the union of rows, matched by column names}
\usage{
unionByName(x, y, ...)
\S4method{unionByName}{SparkDataFrame,SparkDataFrame}(x, y, allowMissingColumns = FALSE)
}
\arguments{
\item{x}{A SparkDataFrame}
\item{y}{A SparkDataFrame}
\item{...}{further arguments to be passed to or from other methods.}
\item{allowMissingColumns}{logical}
}
\value{
A SparkDataFrame containing the result of the union.
}
\description{
Return a new SparkDataFrame containing the union of rows in this SparkDataFrame
and another SparkDataFrame. This is different from \code{union} function, and both
\code{UNION ALL} and \code{UNION DISTINCT} in SQL as column positions are not taken
into account. Input SparkDataFrames can have different data types in the schema.
}
\details{
When the parameter \code{allowMissingColumns} is \code{TRUE}, the set of column names
in x and y can differ; missing columns will be filled as null.
Further, the missing columns of x will be added at the end
in the schema of the union result.
Note: This does not remove duplicate rows across the two SparkDataFrames.
This function resolves columns by name (not by position).
}
\note{
unionByName since 2.3.0
}
\examples{
\dontrun{
sparkR.session()
df1 <- select(createDataFrame(mtcars), "carb", "am", "gear")
df2 <- select(createDataFrame(mtcars), "am", "gear", "carb")
head(unionByName(df1, df2))
df3 <- select(createDataFrame(mtcars), "carb")
head(unionByName(df1, df3, allowMissingColumns = TRUE))
}
}
\seealso{
\link{rbind} \link{union}
Other SparkDataFrame functions:
\code{\link{SparkDataFrame-class}},
\code{\link{agg}()},
\code{\link{alias}()},
\code{\link{arrange}()},
\code{\link{as.data.frame}()},
\code{\link{attach,SparkDataFrame-method}},
\code{\link{broadcast}()},
\code{\link{cache}()},
\code{\link{checkpoint}()},
\code{\link{coalesce}()},
\code{\link{collect}()},
\code{\link{colnames}()},
\code{\link{coltypes}()},
\code{\link{createOrReplaceTempView}()},
\code{\link{crossJoin}()},
\code{\link{cube}()},
\code{\link{dapplyCollect}()},
\code{\link{dapply}()},
\code{\link{describe}()},
\code{\link{dim}()},
\code{\link{distinct}()},
\code{\link{dropDuplicates}()},
\code{\link{dropna}()},
\code{\link{drop}()},
\code{\link{dtypes}()},
\code{\link{exceptAll}()},
\code{\link{except}()},
\code{\link{explain}()},
\code{\link{filter}()},
\code{\link{first}()},
\code{\link{gapplyCollect}()},
\code{\link{gapply}()},
\code{\link{getNumPartitions}()},
\code{\link{group_by}()},
\code{\link{head}()},
\code{\link{hint}()},
\code{\link{histogram}()},
\code{\link{insertInto}()},
\code{\link{intersectAll}()},
\code{\link{intersect}()},
\code{\link{isLocal}()},
\code{\link{isStreaming}()},
\code{\link{join}()},
\code{\link{limit}()},
\code{\link{localCheckpoint}()},
\code{\link{merge}()},
\code{\link{mutate}()},
\code{\link{ncol}()},
\code{\link{nrow}()},
\code{\link{persist}()},
\code{\link{printSchema}()},
\code{\link{randomSplit}()},
\code{\link{rbind}()},
\code{\link{rename}()},
\code{\link{repartitionByRange}()},
\code{\link{repartition}()},
\code{\link{rollup}()},
\code{\link{sample}()},
\code{\link{saveAsTable}()},
\code{\link{schema}()},
\code{\link{selectExpr}()},
\code{\link{select}()},
\code{\link{showDF}()},
\code{\link{show}()},
\code{\link{storageLevel}()},
\code{\link{str}()},
\code{\link{subset}()},
\code{\link{summary}()},
\code{\link{take}()},
\code{\link{toJSON}()},
\code{\link{unionAll}()},
\code{\link{union}()},
\code{\link{unpersist}()},
\code{\link{withColumn}()},
\code{\link{withWatermark}()},
\code{\link{with}()},
\code{\link{write.df}()},
\code{\link{write.jdbc}()},
\code{\link{write.json}()},
\code{\link{write.orc}()},
\code{\link{write.parquet}()},
\code{\link{write.stream}()},
\code{\link{write.text}()}
}
\concept{SparkDataFrame functions}
|
library(c060)
### Name: Plot.coef.glmnet
### Title: function to highlight the path of a pre-specified set of
### variables within the coefficient path
### Aliases: Plot.coef.glmnet
### Keywords: coefficient path
### ** Examples
## Not run:
##D set.seed(1010)
##D n=1000;p=100
##D nzc=trunc(p/10)
##D x=matrix(rnorm(n*p),n,p)
##D beta=rnorm(nzc)
##D fx= x[,seq(nzc)] %*% beta
##D eps=rnorm(n)*5
##D y=drop(fx+eps)
##D px=exp(fx)
##D px=px/(1+px)
##D ly=rbinom(n=length(px),prob=px,size=1)
##D set.seed(1011)
##D cvob1=cv.glmnet(x,y)
##D Plot.coef.glmnet(cvob1, c("V1","V100"))
## End(Not run)
| /data/genthat_extracted_code/c060/examples/Plot.coef.glmnet.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 601 | r | library(c060)
### Name: Plot.coef.glmnet
### Title: function to highlight the path of a pre-specified set of
### variables within the coefficient path
### Aliases: Plot.coef.glmnet
### Keywords: coefficient path
### ** Examples
## Not run:
##D set.seed(1010)
##D n=1000;p=100
##D nzc=trunc(p/10)
##D x=matrix(rnorm(n*p),n,p)
##D beta=rnorm(nzc)
##D fx= x[,seq(nzc)] %*% beta
##D eps=rnorm(n)*5
##D y=drop(fx+eps)
##D px=exp(fx)
##D px=px/(1+px)
##D ly=rbinom(n=length(px),prob=px,size=1)
##D set.seed(1011)
##D cvob1=cv.glmnet(x,y)
##D Plot.coef.glmnet(cvob1, c("V1","V100"))
## End(Not run)
|
# Session setup: clear the workspace, attach every package used downstream,
# pull in shared helpers, and create the output directory.
# NOTE(review): rm(list=ls()) in a script is an anti-pattern -- it only clears
# the global environment of whoever sources this file; consider removing.
rm(list=ls())
# Load libraries
suppressPackageStartupMessages(library(sva))
suppressPackageStartupMessages(library(limma))
suppressPackageStartupMessages(library(DESeq2))
suppressPackageStartupMessages(library(scales))
suppressPackageStartupMessages(library(RColorBrewer))
suppressPackageStartupMessages(library(ggplot2))
suppressPackageStartupMessages(library(ggjoy))
suppressPackageStartupMessages(library(knitr))
suppressPackageStartupMessages(library(preprocessCore))
suppressPackageStartupMessages(library(variancePartition))
suppressPackageStartupMessages(library(doParallel))
suppressPackageStartupMessages(library(Biobase))
suppressPackageStartupMessages(library(DMwR))
suppressPackageStartupMessages(library(DT))
suppressPackageStartupMessages(library(randomForest))
suppressPackageStartupMessages(library(pROC))
suppressPackageStartupMessages(library(ggpubr))
suppressPackageStartupMessages(library(dplyr))
suppressPackageStartupMessages(library(reshape2))
# NOTE(review): plyr is attached AFTER dplyr, so plyr masks dplyr verbs such
# as summarise()/mutate()/arrange() -- confirm this ordering is intentional.
suppressPackageStartupMessages(library(plyr))
suppressPackageStartupMessages(library(xlsx))
suppressPackageStartupMessages(library(pheatmap))
suppressPackageStartupMessages(library(edgeR))
suppressPackageStartupMessages(library(tibble))
suppressPackageStartupMessages(library(magrittr))
suppressPackageStartupMessages(library(purrr))
source("Utility_Functions.R")
# Create Output directories (dir.create warns, not errors, if it exists)
folder_names <- c("OUTPUT_DOWNSAMPLE_NeuN")
sapply(folder_names, dir.create)
####################################
# Load Inputs and filter for Neun+ #
####################################
# Load the quantile-normalized NeuN+ exon expression matrix (expQuant) and the
# sample metadata (pd) produced by the WGCNA step, estimate surrogate
# variables with SVA (full model: Species + covariates; null: covariates
# only), append the SVs to the metadata, and build B leave-multiple-out
# resamples of 10 samples per species.
load("OUTPUTS_EXONS_NEUN/WGCNA_EXONS_NEUN.RData")
exp <- as.data.frame(t(expQuant))  # samples x genes for per-gene lm() below
pd <- pd  # no-op; pd comes from the loaded RData
mod <- model.matrix(~Species+Sex+HumAge+RIN, data =pd)
mod0 <- model.matrix(~Sex+HumAge+RIN, data = pd)
svaobj <- sva(as.matrix(expQuant),mod,mod0,n.sv=NULL,B=100,method="two-step")
svs <- svaobj$sv
colnames(svs) <- paste("SV",1:ncol(svs),sep="")
pd <- cbind(pd,svs)
B=100 ## select number of times for leave-multiple-out method
new_pd <- vector("list", length = B)
Y.b <- vector("list",length = B)
# NOTE(review): sample_n() is called without set.seed(), so the B resamples
# are not reproducible across runs -- confirm whether a seed should be set.
for (i in 1:B)
{
  new_pd[[i]] <- pd %>% # Subset the tables by 10 samples per each species and creating a 100 leave-multiple-out table
                rownames_to_column('Sample') %>%
                group_by(Species) %>%
                sample_n(10) %>%
                column_to_rownames('Sample') %>%
                as.data.frame() %>%
                droplevels()
  # Expression rows matching (and ordered like) the resampled metadata.
  Y.b[[i]]=exp[rownames(exp) %in% rownames(new_pd[[i]]),]
  Y.b[[i]]=Y.b[[i]][match(rownames(new_pd[[i]]),rownames(Y.b[[i]])),]
}
save(Y.b,new_pd,file = "OUTPUT_DOWNSAMPLE_NeuN/Downsampled_raw_data.RData")
########################
# Anova across SPECIES #
########################
# Per-bootstrap, per-gene ANOVA of expression against the full metadata model
# (Species + covariates + surrogate variables); the Species F-statistic and
# p-value are recorded, then Bonferroni-adjusted across genes.
Model <- vector("list", length = B)
SPECIES_DGE <- vector("list", length = B)
for (i in seq_len(B))
{
  SPECIES_DGE[[i]] <- data.frame(matrix(
    nrow = ncol(Y.b[[i]]), ncol = 2,
    dimnames = list(colnames(Y.b[[i]]), c("Fstat_Species", "Anova_Species"))))
  for (j in seq_len(ncol(Y.b[[i]])))
  {
    # Fit gene j; warnings are captured so one bad fit does not abort the run.
    Model[[i]] <- tryCatch(
      anova(lm(as.formula(paste("Y.b[[i]][,j] ~ ",
                                paste(colnames(new_pd[[i]]), collapse = " + "))),
               data = new_pd[[i]])),
      warning = function(w) w)
    if (j %% 8372 == 0) {cat(paste("Done on Boot ", i, "\n", sep = ""))}
    # NOTE(review): warning condition objects are also typeof "list", so this
    # guard may not actually exclude captured warnings -- confirm intent.
    if (typeof(Model[[i]]) == "list") {
      SPECIES_DGE[[i]][j, "Fstat_Species"] <- Model[[i]]$"F value"[1]
      SPECIES_DGE[[i]][j, "Anova_Species"] <- Model[[i]]$"Pr(>F)"[1]
    }
  }
  # Adjust once per bootstrap after all genes are fitted; the original
  # recomputed p.adjust inside the inner loop on a partially-filled column
  # (quadratic waste, identical final values).
  SPECIES_DGE[[i]]$AnovaAdj_Species <- p.adjust(SPECIES_DGE[[i]]$Anova_Species, "bonferroni")
}
save(SPECIES_DGE, file = "OUTPUT_DOWNSAMPLE_NeuN/SPECIES_DGE.RData")
##########
# H vs C #
##########
# Pairwise human-vs-chimp DGE within each bootstrap: subset to the two
# species, fit per-gene linear models, record the sign-flipped Species
# estimate with a normal-approximation p-value, then BH-adjust across genes.
new_pd_HC <- vector("list", length = B)
Y.b_HC <- vector("list", length = B)
for (i in seq_len(B))
{
  Y.b_HC[[i]] <- Y.b[[i]][grep("Hsap|PanTro", rownames(Y.b[[i]])), ]
  new_pd_HC[[i]] <- droplevels(new_pd[[i]][grep("Hsap|PanTro", new_pd[[i]]$Species), ])
}
Model <- vector("list", length = B)
HsapVsPanTro_DGE <- vector("list", length = B)
coefs <- vector("list", length = B)
t_value <- vector("list", length = B)
for (i in seq_len(B))
{
  HsapVsPanTro_DGE[[i]] <- data.frame(matrix(
    nrow = ncol(Y.b_HC[[i]]), ncol = 2,
    dimnames = list(colnames(Y.b_HC[[i]]),
                    c("Estimate_HsapVsPanTro", "Pval_HsapVsPanTro"))))
  for (j in seq_len(ncol(Y.b_HC[[i]])))
  {
    # Fit gene j against Species + covariates (incl. surrogate variables).
    Model[[i]] <- tryCatch(
      lm(as.formula(paste("Y.b_HC[[i]][,j] ~ ",
                          paste(colnames(new_pd_HC[[i]]), collapse = " + "))),
         data = new_pd_HC[[i]]),
      warning = function(w) w)
    if (j %% 8372 == 0) {cat(paste("Done on Boot ", i, "\n", sep = ""))}
    if (typeof(Model[[i]]) == "list") {
      coefs[[i]] <- data.frame(coef(summary(Model[[i]])))
      t_value[[i]] <- coefs[[i]]["Species", "t.value"]
      # Sign flipped so positive means higher in the first-named species.
      HsapVsPanTro_DGE[[i]][j, "Estimate_HsapVsPanTro"] <- -1 * coefs[[i]]["Species", "Estimate"]
      # Two-sided p from the normal approximation of the t statistic.
      HsapVsPanTro_DGE[[i]][j, "Pval_HsapVsPanTro"] <- 2 * (1 - pnorm(abs(t_value[[i]])))
    }
  }
  # BH adjustment once per bootstrap (was recomputed inside the inner loop on
  # a partially-filled column -- same final values, far less work).
  HsapVsPanTro_DGE[[i]]$FDR_HsapVsPanTro <- p.adjust(HsapVsPanTro_DGE[[i]]$Pval_HsapVsPanTro, method = "BH")
}
save(HsapVsPanTro_DGE, file = "OUTPUT_DOWNSAMPLE_NeuN/HsapVsPanTro_DGE.RData")
##########
# H vs R #
##########
# Pairwise human-vs-rhesus DGE within each bootstrap (same procedure as the
# H-vs-C block): per-gene lm, sign-flipped Species estimate, normal-
# approximation p-value, BH adjustment across genes.
new_pd_HR <- vector("list", length = B)
Y.b_HR <- vector("list", length = B)
for (i in seq_len(B))
{
  Y.b_HR[[i]] <- Y.b[[i]][grep("Hsap|RheMac", rownames(Y.b[[i]])), ]
  new_pd_HR[[i]] <- droplevels(new_pd[[i]][grep("Hsap|RheMac", new_pd[[i]]$Species), ])
}
Model <- vector("list", length = B)
HsapVsRheMac_DGE <- vector("list", length = B)
coefs <- vector("list", length = B)
t_value <- vector("list", length = B)
for (i in seq_len(B))
{
  HsapVsRheMac_DGE[[i]] <- data.frame(matrix(
    nrow = ncol(Y.b_HR[[i]]), ncol = 2,
    dimnames = list(colnames(Y.b_HR[[i]]),
                    c("Estimate_HsapVsRheMac", "Pval_HsapVsRheMac"))))
  for (j in seq_len(ncol(Y.b_HR[[i]])))
  {
    # Fit gene j against Species + covariates (incl. surrogate variables).
    Model[[i]] <- tryCatch(
      lm(as.formula(paste("Y.b_HR[[i]][,j] ~ ",
                          paste(colnames(new_pd_HR[[i]]), collapse = " + "))),
         data = new_pd_HR[[i]]),
      warning = function(w) w)
    if (j %% 8372 == 0) {cat(paste("Done on Boot ", i, "\n", sep = ""))}
    if (typeof(Model[[i]]) == "list") {
      coefs[[i]] <- data.frame(coef(summary(Model[[i]])))
      t_value[[i]] <- coefs[[i]]["Species", "t.value"]
      # Sign flipped so positive means higher in the first-named species.
      HsapVsRheMac_DGE[[i]][j, "Estimate_HsapVsRheMac"] <- -1 * coefs[[i]]["Species", "Estimate"]
      HsapVsRheMac_DGE[[i]][j, "Pval_HsapVsRheMac"] <- 2 * (1 - pnorm(abs(t_value[[i]])))
    }
  }
  # BH adjustment once per bootstrap (hoisted out of the inner loop).
  HsapVsRheMac_DGE[[i]]$FDR_HsapVsRheMac <- p.adjust(HsapVsRheMac_DGE[[i]]$Pval_HsapVsRheMac, method = "BH")
}
save(HsapVsRheMac_DGE, file = "OUTPUT_DOWNSAMPLE_NeuN/HsapVsRheMac_DGE.RData")
##########
# C vs R #
##########
# Pairwise chimp-vs-rhesus DGE within each bootstrap (same procedure as the
# other pairwise blocks): per-gene lm, sign-flipped Species estimate,
# normal-approximation p-value, BH adjustment across genes.
new_pd_CR <- vector("list", length = B)
Y.b_CR <- vector("list", length = B)
for (i in seq_len(B))
{
  Y.b_CR[[i]] <- Y.b[[i]][grep("PanTro|RheMac", rownames(Y.b[[i]])), ]
  new_pd_CR[[i]] <- droplevels(new_pd[[i]][grep("PanTro|RheMac", new_pd[[i]]$Species), ])
}
Model <- vector("list", length = B)
PanTroVsRheMac_DGE <- vector("list", length = B)
coefs <- vector("list", length = B)
t_value <- vector("list", length = B)
for (i in seq_len(B))
{
  PanTroVsRheMac_DGE[[i]] <- data.frame(matrix(
    nrow = ncol(Y.b_CR[[i]]), ncol = 2,
    dimnames = list(colnames(Y.b_CR[[i]]),
                    c("Estimate_PanTroVsRheMac", "Pval_PanTroVsRheMac"))))
  for (j in seq_len(ncol(Y.b_CR[[i]])))
  {
    # Fit gene j against Species + covariates (incl. surrogate variables).
    Model[[i]] <- tryCatch(
      lm(as.formula(paste("Y.b_CR[[i]][,j] ~ ",
                          paste(colnames(new_pd_CR[[i]]), collapse = " + "))),
         data = new_pd_CR[[i]]),
      warning = function(w) w)
    if (j %% 8372 == 0) {cat(paste("Done on Boot ", i, "\n", sep = ""))}
    if (typeof(Model[[i]]) == "list") {
      coefs[[i]] <- data.frame(coef(summary(Model[[i]])))
      t_value[[i]] <- coefs[[i]]["Species", "t.value"]
      # Sign flipped so positive means higher in the first-named species.
      PanTroVsRheMac_DGE[[i]][j, "Estimate_PanTroVsRheMac"] <- -1 * coefs[[i]]["Species", "Estimate"]
      PanTroVsRheMac_DGE[[i]][j, "Pval_PanTroVsRheMac"] <- 2 * (1 - pnorm(abs(t_value[[i]])))
    }
  }
  # BH adjustment once per bootstrap (hoisted out of the inner loop).
  PanTroVsRheMac_DGE[[i]]$FDR_PanTroVsRheMac <- p.adjust(PanTroVsRheMac_DGE[[i]]$Pval_PanTroVsRheMac, method = "BH")
}
save(PanTroVsRheMac_DGE, file = "OUTPUT_DOWNSAMPLE_NeuN/PanTroVsRheMac_DGE.RData")
# Combine data ----
# Move gene IDs from rownames into an explicit 'Gene' column, then full-join
# the three pairwise tables and the ANOVA table for each bootstrap.
# (The two original 1:B loops were independent per i, so they are merged.)
myfiles <- vector("list", length = B)
for (i in seq_len(B))
{
  HsapVsPanTro_DGE[[i]] <- HsapVsPanTro_DGE[[i]] %>% rownames_to_column('Gene')
  HsapVsRheMac_DGE[[i]] <- HsapVsRheMac_DGE[[i]] %>% rownames_to_column('Gene')
  PanTroVsRheMac_DGE[[i]] <- PanTroVsRheMac_DGE[[i]] %>% rownames_to_column('Gene')
  SPECIES_DGE[[i]] <- SPECIES_DGE[[i]] %>% rownames_to_column('Gene')
  myfiles[[i]] <- Reduce(function(x, y) merge(x, y, by = "Gene", all = TRUE),
                         list(HsapVsPanTro_DGE[[i]], HsapVsRheMac_DGE[[i]],
                              PanTroVsRheMac_DGE[[i]], SPECIES_DGE[[i]]))
}
save(myfiles, file = "OUTPUT_DOWNSAMPLE_NeuN/Combined_DGE.RData")
# Species-specific DGE per bootstrap: a gene is "X-specific" when X differs
# from both other species in the same direction (pos/neg split below), the
# other two species do not differ from each other, and the overall species
# ANOVA is significant.
# Human Specific
posH <- lapply(myfiles, function(mf) {
  mf %>%
    filter(FDR_PanTroVsRheMac > 0.1,
           FDR_HsapVsPanTro < 0.05,
           FDR_HsapVsRheMac < 0.05,
           AnovaAdj_Species < 0.05,
           Estimate_HsapVsPanTro > 0,
           Estimate_HsapVsRheMac > 0) %>%
    as.data.frame()
})
negH <- lapply(myfiles, function(mf) {
  mf %>%
    filter(FDR_PanTroVsRheMac > 0.1,
           FDR_HsapVsPanTro < 0.05,
           FDR_HsapVsRheMac < 0.05,
           AnovaAdj_Species < 0.05,
           Estimate_HsapVsPanTro < 0,
           Estimate_HsapVsRheMac < 0) %>%
    as.data.frame()
})
human_specific <- Map(rbind, posH, negH)
save(human_specific, file = "OUTPUT_DOWNSAMPLE_NeuN/Hsap_Specific_DGE.RData")
# Chimp Specific
# NOTE(review): the FDR cut-offs here (PanTroVsRheMac > 0.05, HsapVsRheMac
# < 0.1) are swapped relative to the 0.1 / 0.05 pattern used for the human
# and macaque sets -- confirm this asymmetry is intentional.
posC <- lapply(myfiles, function(mf) {
  mf %>%
    filter(FDR_PanTroVsRheMac > 0.05,
           FDR_HsapVsPanTro < 0.05,
           FDR_HsapVsRheMac < 0.1,
           AnovaAdj_Species < 0.05,
           Estimate_HsapVsPanTro < 0,
           Estimate_PanTroVsRheMac > 0) %>%
    as.data.frame()
})
negC <- lapply(myfiles, function(mf) {
  mf %>%
    filter(FDR_PanTroVsRheMac > 0.05,
           FDR_HsapVsPanTro < 0.05,
           FDR_HsapVsRheMac < 0.1,
           AnovaAdj_Species < 0.05,
           Estimate_HsapVsPanTro > 0,
           Estimate_PanTroVsRheMac < 0) %>%
    as.data.frame()
})
pantro_specific <- Map(rbind, posC, negC)
save(pantro_specific, file = "OUTPUT_DOWNSAMPLE_NeuN/PanTro_Specific_DGE.RData")
# RheMac Specific
posR <- lapply(myfiles, function(mf) {
  mf %>%
    filter(FDR_PanTroVsRheMac < 0.05,
           FDR_HsapVsPanTro > 0.1,
           FDR_HsapVsRheMac < 0.05,
           AnovaAdj_Species < 0.05,
           Estimate_PanTroVsRheMac < 0,
           Estimate_HsapVsRheMac < 0) %>%
    as.data.frame()
})
negR <- lapply(myfiles, function(mf) {
  mf %>%
    filter(FDR_PanTroVsRheMac < 0.05,
           FDR_HsapVsPanTro > 0.1,
           FDR_HsapVsRheMac < 0.05,
           AnovaAdj_Species < 0.05,
           Estimate_PanTroVsRheMac > 0,
           Estimate_HsapVsRheMac > 0) %>%
    as.data.frame()
})
rhemac_specific <- Map(rbind, posR, negR)
save(rhemac_specific, file = "OUTPUT_DOWNSAMPLE_NeuN/RheMac_Specific_DGE.RData")
| /DGE/CROSS-VALIDATIONS/DOWNSAMPLE/Dge_NeuN_DownSampled.R | no_license | BioV/Primates_CellType | R | false | false | 11,677 | r | rm(list=ls())
# Load libraries
suppressPackageStartupMessages(library(sva))
suppressPackageStartupMessages(library(limma))
suppressPackageStartupMessages(library(DESeq2))
suppressPackageStartupMessages(library(scales))
suppressPackageStartupMessages(library(RColorBrewer))
suppressPackageStartupMessages(library(ggplot2))
suppressPackageStartupMessages(library(ggjoy))
suppressPackageStartupMessages(library(knitr))
suppressPackageStartupMessages(library(preprocessCore))
suppressPackageStartupMessages(library(variancePartition))
suppressPackageStartupMessages(library(doParallel))
suppressPackageStartupMessages(library(Biobase))
suppressPackageStartupMessages(library(DMwR))
suppressPackageStartupMessages(library(DT))
suppressPackageStartupMessages(library(randomForest))
suppressPackageStartupMessages(library(pROC))
suppressPackageStartupMessages(library(ggpubr))
suppressPackageStartupMessages(library(dplyr))
suppressPackageStartupMessages(library(reshape2))
suppressPackageStartupMessages(library(plyr))
suppressPackageStartupMessages(library(xlsx))
suppressPackageStartupMessages(library(pheatmap))
suppressPackageStartupMessages(library(edgeR))
suppressPackageStartupMessages(library(tibble))
suppressPackageStartupMessages(library(magrittr))
suppressPackageStartupMessages(library(purrr))
source("Utility_Functions.R")
# Create Output directories
folder_names <- c("OUTPUT_DOWNSAMPLE_NeuN")
# A plain loop replaces the old sapply() call (its simplified return value
# was discarded anyway); showWarnings = FALSE keeps re-runs quiet when the
# directory already exists.
for (d in folder_names) {
  dir.create(d, showWarnings = FALSE)
}
####################################
# Load Inputs and filter for Neun+ #
####################################
# The WGCNA NeuN+ workspace provides (at least) `expQuant` (genes x samples)
# and the phenotype table `pd`.
load("OUTPUTS_EXONS_NEUN/WGCNA_EXONS_NEUN.RData")
exp <- as.data.frame(t(expQuant))  # samples x genes (NB: masks base::exp)
# (the old no-op line `pd <- pd` was dropped)
# Surrogate-variable analysis: protect Species, adjust for Sex/age/RIN.
mod <- model.matrix(~Species+Sex+HumAge+RIN, data = pd)
mod0 <- model.matrix(~Sex+HumAge+RIN, data = pd)
svaobj <- sva(as.matrix(expQuant), mod, mod0, n.sv = NULL, B = 100, method = "two-step")
svs <- svaobj$sv
colnames(svs) <- paste("SV", seq_len(ncol(svs)), sep = "")
pd <- cbind(pd, svs)
B <- 100 ## select number of times for leave-multiple-out method
new_pd <- vector("list", length = B)
Y.b <- vector("list", length = B)
# NOTE(review): no set.seed() before sample_n(), so the B downsampled
# phenotype tables are not reproducible across runs -- confirm intent.
for (i in seq_len(B)) {
  # Draw 10 samples per species, then align the expression rows to them.
  new_pd[[i]] <- pd %>%
    rownames_to_column('Sample') %>%
    group_by(Species) %>%
    sample_n(10) %>%
    column_to_rownames('Sample') %>%
    as.data.frame() %>%
    droplevels()
  Y.b[[i]] <- exp[rownames(exp) %in% rownames(new_pd[[i]]), ]
  Y.b[[i]] <- Y.b[[i]][match(rownames(new_pd[[i]]), rownames(Y.b[[i]])), ]
}
save(Y.b, new_pd, file = "OUTPUT_DOWNSAMPLE_NeuN/Downsampled_raw_data.RData")
########################
# Anova across SPECIES #
########################
# Per-gene sequential ANOVA across the three species within each bootstrap;
# Species is the first model term, so its F test is row 1 of the table.
Model <- vector("list", length = B)
SPECIES_DGE <- vector("list", length = B)
for (i in seq_len(B)) {
  SPECIES_DGE[[i]] <- data.frame(matrix(
    nrow = ncol(Y.b[[i]]), ncol = 2,
    dimnames = list(colnames(Y.b[[i]]), c("Fstat_Species", "Anova_Species"))))
  for (j in seq_len(ncol(Y.b[[i]]))) {
    Model[[i]] <- tryCatch(
      anova(lm(as.formula(paste("Y.b[[i]][,j] ~ ",
                                paste(colnames(new_pd[[i]]), collapse = " + "))),
               data = new_pd[[i]])),
      warning = function(w) w)
    if (j %% 8372 == 0) {cat(paste("Done on Boot ", i, "\n", sep = ""))}
    # BUGFIX: `typeof(Model[[i]]) == "list"` was always TRUE (caught warning
    # conditions are lists too); test the class so failed fits stay NA.
    if (inherits(Model[[i]], "anova")) {
      SPECIES_DGE[[i]][j, "Fstat_Species"] <- Model[[i]]$"F value"[1]
      SPECIES_DGE[[i]][j, "Anova_Species"] <- Model[[i]]$"Pr(>F)"[1]
    }
  }
  # Bonferroni adjustment once per bootstrap, after the gene loop (it was
  # needlessly recomputed inside the loop for every gene -- same final column).
  SPECIES_DGE[[i]]$AnovaAdj_Species <- p.adjust(SPECIES_DGE[[i]]$Anova_Species, "bonferroni")
}
save(SPECIES_DGE, file = "OUTPUT_DOWNSAMPLE_NeuN/SPECIES_DGE.RData")
##########
# H vs C #
##########
# Subset the bootstrapped expression (Y.b) and phenotype (new_pd) tables to
# the human (Hsap) and chimp (PanTro) samples for the pairwise contrast.
new_pd_HC <- vector("list", length = B)
Y.b_HC <- vector("list", length = B)
for (i in seq_len(B)) {
  Y.b_HC[[i]] <- Y.b[[i]][grep("Hsap|PanTro", rownames(Y.b[[i]])), ]
  new_pd_HC[[i]] <- droplevels(new_pd[[i]][grep("Hsap|PanTro", new_pd[[i]]$Species), ])
}
# Per-gene linear model within each bootstrap: expression ~ Species + covariates.
Model <- vector("list", length = B)
HsapVsPanTro_DGE <- vector("list", length = B)
coefs <- vector("list", length = B)
t_value <- vector("list", length = B)
for (i in seq_len(B)) {
  HsapVsPanTro_DGE[[i]] <- data.frame(matrix(
    nrow = ncol(Y.b_HC[[i]]), ncol = 2,
    dimnames = list(colnames(Y.b_HC[[i]]),
                    c("Estimate_HsapVsPanTro", "Pval_HsapVsPanTro"))))
  for (j in seq_len(ncol(Y.b_HC[[i]]))) {
    Model[[i]] <- tryCatch(
      lm(as.formula(paste("Y.b_HC[[i]][,j] ~ ",
                          paste(colnames(new_pd_HC[[i]]), collapse = " + "))),
         data = new_pd_HC[[i]]),
      warning = function(w) w)
    if (j %% 8372 == 0) {cat(paste("Done on Boot ", i, "\n", sep = ""))}
    # BUGFIX: `typeof(Model[[i]]) == "list"` was always TRUE (a caught
    # warning condition is also a list); inherits() really skips failed fits.
    if (inherits(Model[[i]], "lm")) {
      coefs[[i]] <- data.frame(coef(summary(Model[[i]])))
      # NOTE(review): assumes the species row is literally named "Species";
      # lm() usually names factor terms "Species<level>" -- confirm.
      t_value[[i]] <- coefs[[i]]["Species", "t.value"]
      HsapVsPanTro_DGE[[i]][j, "Estimate_HsapVsPanTro"] <- -1 * coefs[[i]]["Species", "Estimate"]
      HsapVsPanTro_DGE[[i]][j, "Pval_HsapVsPanTro"] <- 2 * (1 - pnorm(abs(t_value[[i]])))
    }
  }
  # BH adjustment once per bootstrap, after the gene loop (the old code
  # recomputed it for every gene -- O(genes^2) for the same final column).
  HsapVsPanTro_DGE[[i]]$FDR_HsapVsPanTro <- p.adjust(HsapVsPanTro_DGE[[i]]$Pval_HsapVsPanTro, method = "BH")
}
save(HsapVsPanTro_DGE, file = "OUTPUT_DOWNSAMPLE_NeuN/HsapVsPanTro_DGE.RData")
##########
# H vs R #
##########
# Subset the bootstrapped expression (Y.b) and phenotype (new_pd) tables to
# the human (Hsap) and macaque (RheMac) samples for the pairwise contrast.
new_pd_HR <- vector("list", length = B)
Y.b_HR <- vector("list", length = B)
for (i in seq_len(B)) {
  Y.b_HR[[i]] <- Y.b[[i]][grep("Hsap|RheMac", rownames(Y.b[[i]])), ]
  new_pd_HR[[i]] <- droplevels(new_pd[[i]][grep("Hsap|RheMac", new_pd[[i]]$Species), ])
}
# Per-gene linear model within each bootstrap: expression ~ Species + covariates.
Model <- vector("list", length = B)
HsapVsRheMac_DGE <- vector("list", length = B)
coefs <- vector("list", length = B)
t_value <- vector("list", length = B)
for (i in seq_len(B)) {
  HsapVsRheMac_DGE[[i]] <- data.frame(matrix(
    nrow = ncol(Y.b_HR[[i]]), ncol = 2,
    dimnames = list(colnames(Y.b_HR[[i]]),
                    c("Estimate_HsapVsRheMac", "Pval_HsapVsRheMac"))))
  for (j in seq_len(ncol(Y.b_HR[[i]]))) {
    Model[[i]] <- tryCatch(
      lm(as.formula(paste("Y.b_HR[[i]][,j] ~ ",
                          paste(colnames(new_pd_HR[[i]]), collapse = " + "))),
         data = new_pd_HR[[i]]),
      warning = function(w) w)
    if (j %% 8372 == 0) {cat(paste("Done on Boot ", i, "\n", sep = ""))}
    # BUGFIX: `typeof(Model[[i]]) == "list"` was always TRUE (a caught
    # warning condition is also a list); inherits() really skips failed fits.
    if (inherits(Model[[i]], "lm")) {
      coefs[[i]] <- data.frame(coef(summary(Model[[i]])))
      # NOTE(review): assumes the species row is literally named "Species";
      # lm() usually names factor terms "Species<level>" -- confirm.
      t_value[[i]] <- coefs[[i]]["Species", "t.value"]
      HsapVsRheMac_DGE[[i]][j, "Estimate_HsapVsRheMac"] <- -1 * coefs[[i]]["Species", "Estimate"]
      HsapVsRheMac_DGE[[i]][j, "Pval_HsapVsRheMac"] <- 2 * (1 - pnorm(abs(t_value[[i]])))
    }
  }
  # BH adjustment once per bootstrap, after the gene loop (the old code
  # recomputed it for every gene -- O(genes^2) for the same final column).
  HsapVsRheMac_DGE[[i]]$FDR_HsapVsRheMac <- p.adjust(HsapVsRheMac_DGE[[i]]$Pval_HsapVsRheMac, method = "BH")
}
save(HsapVsRheMac_DGE, file = "OUTPUT_DOWNSAMPLE_NeuN/HsapVsRheMac_DGE.RData")
##########
# C vs R #
##########
# Subset the bootstrapped expression (Y.b) and phenotype (new_pd) tables to
# the chimp (PanTro) and macaque (RheMac) samples for the pairwise contrast.
new_pd_CR <- vector("list", length = B)
Y.b_CR <- vector("list", length = B)
for (i in seq_len(B)) {
  Y.b_CR[[i]] <- Y.b[[i]][grep("PanTro|RheMac", rownames(Y.b[[i]])), ]
  new_pd_CR[[i]] <- droplevels(new_pd[[i]][grep("PanTro|RheMac", new_pd[[i]]$Species), ])
}
# Per-gene linear model within each bootstrap: expression ~ Species + covariates.
Model <- vector("list", length = B)
PanTroVsRheMac_DGE <- vector("list", length = B)
coefs <- vector("list", length = B)
t_value <- vector("list", length = B)
for (i in seq_len(B)) {
  PanTroVsRheMac_DGE[[i]] <- data.frame(matrix(
    nrow = ncol(Y.b_CR[[i]]), ncol = 2,
    dimnames = list(colnames(Y.b_CR[[i]]),
                    c("Estimate_PanTroVsRheMac", "Pval_PanTroVsRheMac"))))
  for (j in seq_len(ncol(Y.b_CR[[i]]))) {
    Model[[i]] <- tryCatch(
      lm(as.formula(paste("Y.b_CR[[i]][,j] ~ ",
                          paste(colnames(new_pd_CR[[i]]), collapse = " + "))),
         data = new_pd_CR[[i]]),
      warning = function(w) w)
    if (j %% 8372 == 0) {cat(paste("Done on Boot ", i, "\n", sep = ""))}
    # BUGFIX: `typeof(Model[[i]]) == "list"` was always TRUE (a caught
    # warning condition is also a list); inherits() really skips failed fits.
    if (inherits(Model[[i]], "lm")) {
      coefs[[i]] <- data.frame(coef(summary(Model[[i]])))
      # NOTE(review): assumes the species row is literally named "Species";
      # lm() usually names factor terms "Species<level>" -- confirm.
      t_value[[i]] <- coefs[[i]]["Species", "t.value"]
      PanTroVsRheMac_DGE[[i]][j, "Estimate_PanTroVsRheMac"] <- -1 * coefs[[i]]["Species", "Estimate"]
      PanTroVsRheMac_DGE[[i]][j, "Pval_PanTroVsRheMac"] <- 2 * (1 - pnorm(abs(t_value[[i]])))
    }
  }
  # BH adjustment once per bootstrap, after the gene loop (the old code
  # recomputed it for every gene -- O(genes^2) for the same final column).
  PanTroVsRheMac_DGE[[i]]$FDR_PanTroVsRheMac <- p.adjust(PanTroVsRheMac_DGE[[i]]$Pval_PanTroVsRheMac, method = "BH")
}
save(PanTroVsRheMac_DGE, file = "OUTPUT_DOWNSAMPLE_NeuN/PanTroVsRheMac_DGE.RData")
# Combine data: move gene IDs out of the row names, then full-join the four
# per-bootstrap result tables on "Gene".
HsapVsPanTro_DGE <- lapply(HsapVsPanTro_DGE, rownames_to_column, 'Gene')
HsapVsRheMac_DGE <- lapply(HsapVsRheMac_DGE, rownames_to_column, 'Gene')
PanTroVsRheMac_DGE <- lapply(PanTroVsRheMac_DGE, rownames_to_column, 'Gene')
SPECIES_DGE <- lapply(SPECIES_DGE, rownames_to_column, 'Gene')
merge_on_gene <- function(x, y) merge(x, y, by = "Gene", all = TRUE)
myfiles <- lapply(seq_len(B), function(i) {
  Reduce(merge_on_gene,
         list(HsapVsPanTro_DGE[[i]], HsapVsRheMac_DGE[[i]],
              PanTroVsRheMac_DGE[[i]], SPECIES_DGE[[i]]))
})
save(myfiles, file = "OUTPUT_DOWNSAMPLE_NeuN/Combined_DGE.RData")
# Species-specific DGE per bootstrap: a gene is "X-specific" when X differs
# from both other species in the same direction (pos/neg split below), the
# other two species do not differ from each other, and the overall species
# ANOVA is significant.
# Human Specific
posH <- lapply(myfiles, function(mf) {
  mf %>%
    filter(FDR_PanTroVsRheMac > 0.1,
           FDR_HsapVsPanTro < 0.05,
           FDR_HsapVsRheMac < 0.05,
           AnovaAdj_Species < 0.05,
           Estimate_HsapVsPanTro > 0,
           Estimate_HsapVsRheMac > 0) %>%
    as.data.frame()
})
negH <- lapply(myfiles, function(mf) {
  mf %>%
    filter(FDR_PanTroVsRheMac > 0.1,
           FDR_HsapVsPanTro < 0.05,
           FDR_HsapVsRheMac < 0.05,
           AnovaAdj_Species < 0.05,
           Estimate_HsapVsPanTro < 0,
           Estimate_HsapVsRheMac < 0) %>%
    as.data.frame()
})
human_specific <- Map(rbind, posH, negH)
save(human_specific, file = "OUTPUT_DOWNSAMPLE_NeuN/Hsap_Specific_DGE.RData")
# Chimp Specific
# NOTE(review): the FDR cut-offs here (PanTroVsRheMac > 0.05, HsapVsRheMac
# < 0.1) are swapped relative to the 0.1 / 0.05 pattern used for the human
# and macaque sets -- confirm this asymmetry is intentional.
posC <- lapply(myfiles, function(mf) {
  mf %>%
    filter(FDR_PanTroVsRheMac > 0.05,
           FDR_HsapVsPanTro < 0.05,
           FDR_HsapVsRheMac < 0.1,
           AnovaAdj_Species < 0.05,
           Estimate_HsapVsPanTro < 0,
           Estimate_PanTroVsRheMac > 0) %>%
    as.data.frame()
})
negC <- lapply(myfiles, function(mf) {
  mf %>%
    filter(FDR_PanTroVsRheMac > 0.05,
           FDR_HsapVsPanTro < 0.05,
           FDR_HsapVsRheMac < 0.1,
           AnovaAdj_Species < 0.05,
           Estimate_HsapVsPanTro > 0,
           Estimate_PanTroVsRheMac < 0) %>%
    as.data.frame()
})
pantro_specific <- Map(rbind, posC, negC)
save(pantro_specific, file = "OUTPUT_DOWNSAMPLE_NeuN/PanTro_Specific_DGE.RData")
# RheMac Specific
posR <- lapply(myfiles, function(mf) {
  mf %>%
    filter(FDR_PanTroVsRheMac < 0.05,
           FDR_HsapVsPanTro > 0.1,
           FDR_HsapVsRheMac < 0.05,
           AnovaAdj_Species < 0.05,
           Estimate_PanTroVsRheMac < 0,
           Estimate_HsapVsRheMac < 0) %>%
    as.data.frame()
})
negR <- lapply(myfiles, function(mf) {
  mf %>%
    filter(FDR_PanTroVsRheMac < 0.05,
           FDR_HsapVsPanTro > 0.1,
           FDR_HsapVsRheMac < 0.05,
           AnovaAdj_Species < 0.05,
           Estimate_PanTroVsRheMac > 0,
           Estimate_HsapVsRheMac > 0) %>%
    as.data.frame()
})
rhemac_specific <- Map(rbind, posR, negR)
save(rhemac_specific, file = "OUTPUT_DOWNSAMPLE_NeuN/RheMac_Specific_DGE.RData")
|
# K-means clustering
library(dplyr)
library(purrr)  # (was loaded twice; the duplicate call was dropped)
library(ggplot2)
library(gridExtra)
library(RColorBrewer)
library(viridis)
library(ggrepel)
library(stringr)
# set wd to folder where RDS results are stored
# NOTE(review): setwd() in scripts is fragile -- this only works when the
# script is started from the project root; consider explicit paths instead.
setwd("Data/BRT_results")
# read in data: one RDS of BRT variable-importance results per species.
# BUGFIX: the old pattern ".RDS" is an unanchored regex in which "." matches
# any character; anchor it so only files ending in ".RDS" are read.
unscaled_imp <- list.files(pattern = "\\.RDS$") %>%
  map(readRDS) %>%
  bind_rows()
# now scale 0-1 within each species
# (that's how it was in the original script)
data <- unscaled_imp %>%
  dplyr::filter(!var %in% c("number_of_samples", "grid_lat", "grid_lng")) %>%
  group_by(Species) %>%
  mutate(rel.inf_scaled = scales::rescale(rel.inf))
# Return to the starting directory: setwd("Data/BRT_results") went DOWN two
# levels, so two levels up restores it. (The old "../../.." went up three,
# which would break the relative "Data/..." paths used later in the script.)
setwd("../..")
# Need to visualize these results somehow
# summarizing across all species
# NOTE(review): Set1 provides at most 9 colours but `var` has 10 levels, so
# brewer will warn and leave one level unfilled -- confirm a larger palette.
ggplot(data, aes(x = var, y = rel.inf_scaled, color = var)) +
  geom_point(alpha = 0.8) +
  theme_bw() +
  theme(axis.text = element_text(color = "black")) +
  coord_flip() +
  scale_color_brewer(palette = "Set1") +
  guides(color = "none") +  # "none" replaces the deprecated guides(color = FALSE)
  xlab("") +
  ylab("Scaled importance")
# one potential way to visualize species-specific results
length(unique(data$Species))
# Fix the predictor display order before plotting per-species importances.
data$var <- factor(data$var, levels = c("day","mean_vapour", "mean_max_temp","max_temp_10_days", "min_temp_10_days", "mean_min_temp", "phase", "rain_10_days", "rain_3_days", "mean_rainfall"))
P <- ggplot(data, aes(x = Species, y = rel.inf_scaled, color = var)) +
  geom_point(size = 2) +  # the old duplicate default-size geom_point() layer was removed
  theme_bw() +
  theme(axis.text = element_text(color = "black")) +
  theme(axis.text.y = element_text(face = "italic")) +
  coord_flip() +
  # NOTE(review): the old scale_fill_brewer("Dark2") was a no-op (the plot
  # maps colour, not fill, and the first argument is the scale *name*, not
  # the palette), so it was dropped.
  xlab("") +
  scale_x_discrete(limits = rev)
#ylab("Scaled importance")
P
######
# this will get difficult though with too many species
library(tidyverse)
library(gridExtra)
# Reshape to one row per species: for each predictor, copy its scaled
# importance into a dedicated "Im_*" column, then join everything on
# Species. A small helper replaces ten near-identical filter/assign stanzas.
importance_col <- function(d, v, col) {
  s <- filter(d, var == v)
  s[[col]] <- s$rel.inf_scaled
  s
}
regroup <- importance_col(data, "rain_10_days", "Im_rain_10_days")
# BUGFIX: the old lines
#   x <- regroup %>% left_join(select(rain10days, Im_rain_10_days), by = "Species")
# referenced an object `rain10days` that is never defined (a runtime error),
# and their result was overwritten below anyway; they were removed.
meanrain <- importance_col(data, "mean_rainfall", "Im_mean_rain")
rain3days <- importance_col(data, "rain_3_days", "Im_rain_3_days")
phase <- importance_col(data, "phase", "Im_phase")
mintemp <- importance_col(data, "mean_min_temp", "Im_min_temp")
maxtemp <- importance_col(data, "mean_max_temp", "Im_max_temp")
days <- importance_col(data, "day", "Im_day")
vapour <- importance_col(data, "mean_vapour", "Im_vapour")
mintemp10 <- importance_col(data, "min_temp_10_days", "Im_mintemp10")
maxtemp10 <- importance_col(data, "max_temp_10_days", "Im_maxtemp10")
x <- meanrain %>%
  left_join(dplyr::select(regroup, Species, Im_rain_10_days), by = "Species") %>%
  left_join(dplyr::select(phase, Species, Im_phase), by = "Species") %>%
  left_join(dplyr::select(mintemp, Species, Im_min_temp), by = "Species") %>%
  left_join(dplyr::select(maxtemp, Species, Im_max_temp), by = "Species") %>%
  left_join(dplyr::select(days, Species, Im_day), by = "Species") %>%
  left_join(dplyr::select(rain3days, Species, Im_rain_3_days), by = "Species") %>%
  left_join(dplyr::select(vapour, Species, Im_vapour), by = "Species") %>%
  left_join(dplyr::select(mintemp10, Species, Im_mintemp10), by = "Species") %>%
  left_join(dplyr::select(maxtemp10, Species, Im_maxtemp10), by = "Species")
# Drop everything except Species and the Im_* columns.
# NOTE(review): this positional drop (columns 1, 2, 4:10) assumes a fixed
# column order in the RDS inputs -- selecting by name would be safer.
x <- x[-c(1, 2, 4:10)]
write_csv(x, "Data/K_means_outs/k_means_clusters_06302022.csv")
#Okay, now retrying the tutorial
# One jitter "strip chart" per predictor, showing the spread of its scaled
# importance across species. A helper replaces ten copy-pasted plot stanzas.
imp_jitter <- function(d, col, ylab_txt, colour) {
  ggplot(d, aes(x = "species", y = .data[[col]])) +
    geom_jitter(width = .025, size = 2, alpha = .3, color = colour) +
    labs(x = "", y = ylab_txt)
}
plot1 <- imp_jitter(x, "Im_day", "importance of day", "violet")
plot2 <- imp_jitter(x, "Im_phase", "importance of moonphase", "green")
plot3 <- imp_jitter(x, "Im_min_temp", "importance of minimum temperature", "pink")
plot4 <- imp_jitter(x, "Im_max_temp", "importance of maximum temperature", "purple")
plot5 <- imp_jitter(x, "Im_mean_rain", "importance of daily rain", "navy")
plot6 <- imp_jitter(x, "Im_rain_3_days", "importance of previous 3 days rain", "blue")
plot7 <- imp_jitter(x, "Im_rain_10_days", "importance of previous 10 days rain", "turquoise")
plot8 <- imp_jitter(x, "Im_vapour", "importance of vapour pressure", "red")
plot9 <- imp_jitter(x, "Im_mintemp10", "importance of 10 previous days minimum temperature", "navy")
plot10 <- imp_jitter(x, "Im_maxtemp10", "importance of 10 previous days maximum temperature", "purple")
grid.arrange(plot1, plot2, plot3, plot4, plot5, plot6, plot7, plot8, plot9, plot10)
# As the initial centroids are defined randomly,
# we define a seed for purposes of reprodutability
set.seed(123)
# Let's remove the column with the species' names, so it won't be used in the clustering
input <- x[-c(1, 2)]
# The nstart parameter indicates that we want the algorithm to be executed 20 times.
# This number is not the number of iterations, it is like calling the function 20 times and then
# the execution with lower variance within the groups will be selected as the final result.
kmeans(input, centers = 3, nstart = 20)
#' Plots a chart showing the sum of squares within a group for each execution of the kmeans algorithm.
#' In each execution the number of the initial groups increases by one up to the maximum number of centers passed as argument.
#'
#' @param data The dataframe to perform the kmeans
#' @param nc The maximum number of initial centers (must be >= 2)
#' @param seed RNG seed, reset before each kmeans() run for reproducibility
#' @return Invisibly, the numeric vector of within-group sums of squares
#'   (index 1 is the total sum of squares, i.e. the k = 1 case).
wssplot <- function(data, nc = 15, seed = 123) {
  stopifnot(nc >= 2)  # guard: 2:nc would count backwards for nc < 2
  # Total sum of squares serves as the k = 1 "within" value.
  wss <- (nrow(data) - 1) * sum(apply(data, 2, var))
  for (i in 2:nc) {
    set.seed(seed)
    wss[i] <- sum(kmeans(data, centers = i)$withinss)
  }
  plot(1:nc, wss, type = "b", xlab = "Number of groups",
       ylab = "Sum of squares within a group")
  # Return the curve invisibly so callers can inspect it programmatically
  # (backward compatible: the old function returned plot()'s NULL).
  invisible(wss)
}
wssplot(input, nc = 20)
#reran and new results - hinge at 5-8
# Helper: k-means with a fixed seed so every candidate k below is run under
# identical random starts (reproduces the original set.seed/kmeans pairs).
run_kmeans <- function(k) {
  set.seed(123)
  kmeans(input, centers = k, nstart = 20)
}
clustering <- run_kmeans(5)
clustering
#Total _ss = 62%
clustering <- run_kmeans(6)
clustering
#Total _ss = 66%
#but I also saw a big drop at 2
clustering <- run_kmeans(2)
clustering
#Total _ss = 38%
library(cluster)
library(factoextra)
#so keep 7 here?
clustering <- run_kmeans(7)
clustering
#Total _ss = 69%
# Silhouette diagnostics for the chosen k = 7 solution.
sil <- silhouette(clustering$cluster, dist(input))
fviz_silhouette(sil)
#The silhouette plot below gives us evidence that our clustering using seven groups
#is subpar
#even though it's the best option
#because #there's negative silhouette width
#and most of the values are lower than 0.5.
library(GGally)
library(plotly)
# Attach the final (k = 7) cluster assignment and save the labelled table.
x$cluster <- as.factor(clustering$cluster)
write_csv(x, "Data/K_means_outs/K_means_clusters_groups_January.csv")
#new name so you don't erase the other data format
# Parallel-coordinates plot of all species, coloured by cluster.
p <- ggparcoord(data = x, columns = c(2,3,4,5,6,7,8,9,10,11,12), groupColumn = "cluster", scale = "globalminmax") + labs(x = "predictors of calling", y = "importance", title = "Clustering")
ggplotly(p)
## now to try identifying the group members
x$cluster
# Helper: parallel-coordinates plot for the members of one cluster
# (replaces seven copy-pasted ggparcoord stanzas).
cluster_pcp <- function(members) {
  ggparcoord(data = members, columns = c(2:12),
             groupColumn = "cluster", scale = "globalminmax") +
    labs(x = "variables", y = "scaled importance", title = "Clustering")
}
group1 <- x %>%
  filter(cluster == "1")
group1$cluster <- as.factor(group1$cluster)
group1 <- cluster_pcp(group1)  # NB: overwrites the data frame with its plot, as before
ggplotly(group1)
group2 <- x %>%
  filter(cluster == "2")
p2 <- cluster_pcp(group2)
ggplotly(p2)
group3 <- x %>%
  filter(cluster == "3")
p3 <- cluster_pcp(group3)
ggplotly(p3)
group4 <- x %>%
  filter(cluster == "4")
p4 <- cluster_pcp(group4)
ggplotly(p4)
group5 <- x %>%
  filter(cluster == "5")
write_csv(group5, "Data/K_means_outs/group5.csv")
p5 <- cluster_pcp(group5)
ggplotly(p5)
group6 <- x %>%
  filter(cluster == "6")
write_csv(group6, "Data/K_means_outs/group6.csv")
p6 <- cluster_pcp(group6)
ggplotly(p6)
group7 <- x %>%
  filter(cluster == "7")
write_csv(group7, "Data/K_means_outs/group7.csv")
p7 <- cluster_pcp(group7)
ggplotly(p7)
library(vegan)
library(tidyverse)
library(ggfortify)
library(ggrepel)
##problems? try no non numeric other columns
# PCA on the ten importance columns; keep the score matrix (x) and re-attach
# the species labels and cluster assignments for plotting.
pca_scores <- prcomp(dplyr::select(input, 1:10))$x
pca_dat <- bind_cols(as.data.frame(pca_scores),
                     dplyr::select(x, Species, cluster))
# PC1 vs PC2, coloured by cluster (points only).
ggplot(pca_dat, aes(x = PC1, y = PC2, color = cluster, label = Species)) +
  geom_point() +
  theme_bw() +
  theme(axis.text = element_text(color = "black")) +
  scale_color_brewer(palette = "Set1")
# Same scatter, with species names repelled away from the points.
ggplot(pca_dat, aes(x = PC1, y = PC2, color = cluster, label = Species)) +
  geom_point() +
  theme_bw() +
  theme(axis.text = element_text(color = "black")) +
  scale_color_brewer(palette = "Dark2") +
  geom_label_repel()
| /R/BRT_K_means_clustering.R | no_license | Ochotona-princeps/Citizen-Science-reveals-meteorological-determinants-of-frog-calling-at-a-continental-scale | R | false | false | 10,740 | r | # K-means clustering
library(dplyr)
library(purrr)  # (was loaded twice; the duplicate call was dropped)
library(ggplot2)
library(gridExtra)
library(RColorBrewer)
library(viridis)
library(ggrepel)
library(stringr)
# set wd to folder where RDS results are stored
# NOTE(review): setwd() in scripts is fragile -- this only works when the
# script is started from the project root; consider explicit paths instead.
setwd("Data/BRT_results")
# read in data: one RDS of BRT variable-importance results per species.
# BUGFIX: the old pattern ".RDS" is an unanchored regex in which "." matches
# any character; anchor it so only files ending in ".RDS" are read.
unscaled_imp <- list.files(pattern = "\\.RDS$") %>%
  map(readRDS) %>%
  bind_rows()
# now scale 0-1 within each species
# (that's how it was in the original script)
data <- unscaled_imp %>%
  dplyr::filter(!var %in% c("number_of_samples", "grid_lat", "grid_lng")) %>%
  group_by(Species) %>%
  mutate(rel.inf_scaled = scales::rescale(rel.inf))
# Return to the starting directory: setwd("Data/BRT_results") went DOWN two
# levels, so two levels up restores it. (The old "../../.." went up three,
# which would break the relative "Data/..." paths used later in the script.)
setwd("../..")
# Need to visualize these results somehow
# summarizing across all species
# NOTE(review): Set1 provides at most 9 colours but `var` has 10 levels, so
# brewer will warn and leave one level unfilled -- confirm a larger palette.
ggplot(data, aes(x = var, y = rel.inf_scaled, color = var)) +
  geom_point(alpha = 0.8) +
  theme_bw() +
  theme(axis.text = element_text(color = "black")) +
  coord_flip() +
  scale_color_brewer(palette = "Set1") +
  guides(color = "none") +  # "none" replaces the deprecated guides(color = FALSE)
  xlab("") +
  ylab("Scaled importance")
# one potential way to visualize species-specific results
length(unique(data$Species))
# Fix the predictor display order before plotting per-species importances.
data$var <- factor(data$var, levels = c("day","mean_vapour", "mean_max_temp","max_temp_10_days", "min_temp_10_days", "mean_min_temp", "phase", "rain_10_days", "rain_3_days", "mean_rainfall"))
P <- ggplot(data, aes(x = Species, y = rel.inf_scaled, color = var)) +
  geom_point(size = 2) +  # the old duplicate default-size geom_point() layer was removed
  theme_bw() +
  theme(axis.text = element_text(color = "black")) +
  theme(axis.text.y = element_text(face = "italic")) +
  coord_flip() +
  # NOTE(review): the old scale_fill_brewer("Dark2") was a no-op (the plot
  # maps colour, not fill, and the first argument is the scale *name*, not
  # the palette), so it was dropped.
  xlab("") +
  scale_x_discrete(limits = rev)
#ylab("Scaled importance")
P
######
# this will get difficult though with too many species
library(tidyverse)
library(gridExtra)
# Reshape to one row per species: for each predictor, copy its scaled
# importance into a dedicated "Im_*" column, then join everything on
# Species. A small helper replaces ten near-identical filter/assign stanzas.
importance_col <- function(d, v, col) {
  s <- filter(d, var == v)
  s[[col]] <- s$rel.inf_scaled
  s
}
regroup <- importance_col(data, "rain_10_days", "Im_rain_10_days")
# BUGFIX: the old lines
#   x <- regroup %>% left_join(select(rain10days, Im_rain_10_days), by = "Species")
# referenced an object `rain10days` that is never defined (a runtime error),
# and their result was overwritten below anyway; they were removed.
meanrain <- importance_col(data, "mean_rainfall", "Im_mean_rain")
rain3days <- importance_col(data, "rain_3_days", "Im_rain_3_days")
phase <- importance_col(data, "phase", "Im_phase")
mintemp <- importance_col(data, "mean_min_temp", "Im_min_temp")
maxtemp <- importance_col(data, "mean_max_temp", "Im_max_temp")
days <- importance_col(data, "day", "Im_day")
vapour <- importance_col(data, "mean_vapour", "Im_vapour")
mintemp10 <- importance_col(data, "min_temp_10_days", "Im_mintemp10")
maxtemp10 <- importance_col(data, "max_temp_10_days", "Im_maxtemp10")
x <- meanrain %>%
  left_join(dplyr::select(regroup, Species, Im_rain_10_days), by = "Species") %>%
  left_join(dplyr::select(phase, Species, Im_phase), by = "Species") %>%
  left_join(dplyr::select(mintemp, Species, Im_min_temp), by = "Species") %>%
  left_join(dplyr::select(maxtemp, Species, Im_max_temp), by = "Species") %>%
  left_join(dplyr::select(days, Species, Im_day), by = "Species") %>%
  left_join(dplyr::select(rain3days, Species, Im_rain_3_days), by = "Species") %>%
  left_join(dplyr::select(vapour, Species, Im_vapour), by = "Species") %>%
  left_join(dplyr::select(mintemp10, Species, Im_mintemp10), by = "Species") %>%
  left_join(dplyr::select(maxtemp10, Species, Im_maxtemp10), by = "Species")
# Drop everything except Species and the Im_* columns.
# NOTE(review): this positional drop (columns 1, 2, 4:10) assumes a fixed
# column order in the RDS inputs -- selecting by name would be safer.
x <- x[-c(1, 2, 4:10)]
write_csv(x, "Data/K_means_outs/k_means_clusters_06302022.csv")
#Okay, now retrying the tutorial
# One jitter "strip chart" per predictor, showing the spread of its scaled
# importance across species. A helper replaces ten copy-pasted plot stanzas.
imp_jitter <- function(d, col, ylab_txt, colour) {
  ggplot(d, aes(x = "species", y = .data[[col]])) +
    geom_jitter(width = .025, size = 2, alpha = .3, color = colour) +
    labs(x = "", y = ylab_txt)
}
plot1 <- imp_jitter(x, "Im_day", "importance of day", "violet")
plot2 <- imp_jitter(x, "Im_phase", "importance of moonphase", "green")
plot3 <- imp_jitter(x, "Im_min_temp", "importance of minimum temperature", "pink")
plot4 <- imp_jitter(x, "Im_max_temp", "importance of maximum temperature", "purple")
plot5 <- imp_jitter(x, "Im_mean_rain", "importance of daily rain", "navy")
plot6 <- imp_jitter(x, "Im_rain_3_days", "importance of previous 3 days rain", "blue")
plot7 <- imp_jitter(x, "Im_rain_10_days", "importance of previous 10 days rain", "turquoise")
plot8 <- imp_jitter(x, "Im_vapour", "importance of vapour pressure", "red")
plot9 <- imp_jitter(x, "Im_mintemp10", "importance of 10 previous days minimum temperature", "navy")
plot10 <- imp_jitter(x, "Im_maxtemp10", "importance of 10 previous days maximum temperature", "purple")
grid.arrange(plot1, plot2, plot3, plot4, plot5, plot6, plot7, plot8, plot9, plot10)
# As the initial centroids are defined randomly,
# we define a seed for purposes of reprodutability
set.seed(123)
# Let's remove the column with the species' names, so it won't be used in the clustering
input <- x[-c(1, 2)]
# The nstart parameter indicates that we want the algorithm to be executed 20 times.
# This number is not the number of iterations, it is like calling the function 20 times and then
# the execution with lower variance within the groups will be selected as the final result.
kmeans(input, centers = 3, nstart = 20)
#' Plots a chart showing the sum of squares within a group for each execution of the kmeans algorithm.
#' In each execution the number of the initial groups increases by one up to the maximum number of centers passed as argument.
#'
#' @param data The dataframe to perform the kmeans
#' @param nc The maximum number of initial centers (must be >= 2)
#' @param seed RNG seed, reset before each kmeans() run for reproducibility
#' @return Invisibly, the numeric vector of within-group sums of squares
#'   (index 1 is the total sum of squares, i.e. the k = 1 case).
wssplot <- function(data, nc = 15, seed = 123) {
  stopifnot(nc >= 2)  # guard: 2:nc would count backwards for nc < 2
  # Total sum of squares serves as the k = 1 "within" value.
  wss <- (nrow(data) - 1) * sum(apply(data, 2, var))
  for (i in 2:nc) {
    set.seed(seed)
    wss[i] <- sum(kmeans(data, centers = i)$withinss)
  }
  plot(1:nc, wss, type = "b", xlab = "Number of groups",
       ylab = "Sum of squares within a group")
  # Return the curve invisibly so callers can inspect it programmatically
  # (backward compatible: the old function returned plot()'s NULL).
  invisible(wss)
}
wssplot(input, nc = 20)
#reran and new results - hinge at 5-8
# Helper: k-means with a fixed seed so every candidate k below is run under
# identical random starts (reproduces the original set.seed/kmeans pairs).
run_kmeans <- function(k) {
  set.seed(123)
  kmeans(input, centers = k, nstart = 20)
}
clustering <- run_kmeans(5)
clustering
#Total _ss = 62%
clustering <- run_kmeans(6)
clustering
#Total _ss = 66%
#but I also saw a big drop at 2
clustering <- run_kmeans(2)
clustering
#Total _ss = 38%
library(cluster)
library(factoextra)
#so keep 7 here?
clustering <- run_kmeans(7)
clustering
#Total _ss = 69%
# Silhouette diagnostics for the chosen k = 7 solution.
sil <- silhouette(clustering$cluster, dist(input))
fviz_silhouette(sil)
#The silhouette plot below gives us evidence that our clustering using seven groups
#is subpar
#even though it's the best option
#because #there's negative silhouette width
#and most of the values are lower than 0.5.
library(GGally)
library(plotly)
x$cluster <- as.factor(clustering$cluster)
write_csv(x, "Data/K_means_outs/K_means_clusters_groups_January.csv")
#new name so you don't erase the other data format
p <- ggparcoord(data = x, columns = c(2,3,4,5,6,7,8,9,10,11,12), groupColumn = "cluster", scale = "globalminmax") + labs(x = "predictors of calling", y = "importance", title = "Clustering")
ggplotly(p)
## now to try identifying the group members
x$cluster
# Helper: parallel-coordinates plot of the predictor columns (2:12) for the
# rows belonging to a single cluster. Extracted to replace seven copy-pasted
# ggparcoord() calls.
plot_cluster <- function(cluster_df) {
  ggparcoord(data = cluster_df, columns = c(2:12),
             groupColumn = "cluster", scale = "globalminmax") +
    labs(x = "variables", y = "scaled importance", title = "Clustering")
}
group1 <- x %>%
  filter(cluster == "1")
# FIX: previously the plot was assigned back into `group1`, clobbering the
# data frame (inconsistent with p2..p7 below); the redundant re-factoring of
# `cluster` (already a factor, set above) is also dropped.
p1 <- plot_cluster(group1)
ggplotly(p1)
group2 <- x %>%
  filter(cluster == "2")
p2 <- plot_cluster(group2)
ggplotly(p2)
group3 <- x %>%
  filter(cluster == "3")
p3 <- plot_cluster(group3)
ggplotly(p3)
group4 <- x %>%
  filter(cluster == "4")
p4 <- plot_cluster(group4)
ggplotly(p4)
group5 <- x %>%
  filter(cluster == "5")
write_csv(group5, "Data/K_means_outs/group5.csv")
p5 <- plot_cluster(group5)
ggplotly(p5)
group6 <- x %>%
  filter(cluster == "6")
write_csv(group6, "Data/K_means_outs/group6.csv")
p6 <- plot_cluster(group6)
ggplotly(p6)
group7 <- x %>%
  filter(cluster == "7")
write_csv(group7, "Data/K_means_outs/group7.csv")
p7 <- plot_cluster(group7)
ggplotly(p7)
# ---- PCA of the predictors, colored by k-means cluster ----
library(vegan)
library(tidyverse)
library(ggfortify)
library(ggrepel)
##problems? try no non numeric other columns
# NOTE(review): only the first 10 columns of `input` enter the PCA, while the
# parallel-coordinates plots above used 11 predictor columns (x cols 2:12) --
# confirm this off-by-one is intentional.
pca_dat <- input %>%
dplyr::select(1:10) %>%
prcomp(.) %>%
.$x %>%
as.data.frame() %>%
bind_cols(x %>%
dplyr::select(Species, cluster))
# PC1 vs PC2, points only (Set1 palette).
ggplot(pca_dat, aes(x=PC1, y=PC2, color=cluster, label=Species))+
geom_point()+
theme_bw()+
theme(axis.text=element_text(color="black"))+
scale_color_brewer(palette="Set1")
# Same scatter with non-overlapping species labels (Dark2 palette).
ggplot(pca_dat, aes(x=PC1, y=PC2, color=cluster, label=Species))+
geom_point()+
theme_bw()+
theme(axis.text=element_text(color="black"))+
scale_color_brewer(palette="Dark2")+
geom_label_repel()
|
# get_month.R
require( tidyverse )
source('D:/R-Projects/Tornadoes/functions/specify_columns.R')
# Read one month of tornado data from a CSV file.
#
# @param file_path Path to the month's CSV file.
# @return A tibble with the file's contents (readr column guessing applies).
get_month <- function( file_path ){
  # BUG FIX: previously this read the global `fp` instead of the argument
  # (the bare `file_path` line was a no-op), so the function silently
  # ignored its input. It now reads the path it was given.
  read_csv( file_path )
}
fp <- file.path( getwd(),
"data",
"months",
"2000-01.csv")
tornadoes <-
get_month( fp )
spec( tornadoes )
tornadoes <-
tornadoes %>%
mutate
| /functions/get_month.R | no_license | gitHubSlinkman/Tornadoes | R | false | false | 384 | r | # get_month.R
require( tidyverse )
source('D:/R-Projects/Tornadoes/functions/specify_columns.R')
# Read one month of tornado data from a CSV file.
#
# @param file_path Path to the month's CSV file.
# @return A tibble with the file's contents (readr column guessing applies).
get_month <- function( file_path ){
  # BUG FIX: previously this read the global `fp` instead of the argument
  # (the bare `file_path` line was a no-op), so the function silently
  # ignored its input. It now reads the path it was given.
  read_csv( file_path )
}
fp <- file.path( getwd(),
"data",
"months",
"2000-01.csv")
tornadoes <-
get_month( fp )
spec( tornadoes )
tornadoes <-
tornadoes %>%
mutate
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ranges_espace.R
\name{ranges_espace}
\alias{ranges_espace}
\title{Comparison of species ranges in environmental space}
\usage{
ranges_espace(ranges, add_occurrences = TRUE, variables,
max_background = 25000, ranges_representation = "clouds",
background_color = "darkolivegreen", range_colors = NULL, alpha = 0.03,
eye_camera = c(x = 1.95, y = 1.25, z = 1.35))
}
\arguments{
\item{ranges}{object or list of objects produced with any of the following functions:
\code{\link{rangemap_buff}}, \code{\link{rangemap_bound}}, \code{\link{rangemap_hull}},
\code{\link{rangemap_enm}}, and \code{\link{rangemap_tsa}}. For visualization purposes,
using up to three ranges is recommended.}
\item{add_occurrences}{(logical) if TRUE, species occurrences contained in one of
the elements of the list \code{ranges} will be plotted in the figure. Default = TRUE.
If none of the ranges contains occurrences (e.g., a list of one object created
with the \code{\link{rangemap_bound}} function in which occurrences were not used),
this parameter will be ignored.}
\item{variables}{a RasterStack object of environmental variables that will be used
for creating the principal components to represent the environmental space. Projection
is assumed to be Geographic (longitude and latitude).}
\item{max_background}{(numeric) maximum number of data from variables to be used for
representation of the environmental space. Default = 25000. Increasing this number
results in more detailed views of the available environment, but performing analyses
will take longer.}
\item{ranges_representation}{(character) form in which the environmental space
within the ranges will be represented. Options are "clouds" and "ellipsoids".
Default = "clouds".}
\item{background_color}{color of the background to be plotted. Default = "darkolivegreen".
Since transparency is used for representing most of the components of the plot, colors may
look different.}
\item{range_colors}{vector of colors of the ranges to be represented. If not defined,
default = NULL and default colors will be used. Since transparency is used for
representing most of components of the plot, colors may look different.}
\item{alpha}{(numeric) degree of opacity with which the species ranges will be
represented in the plot. Default = 0.03.}
\item{eye_camera}{(numeric) vector of values to be passed to eye in the list of
arguments of the parameter camera in the layout of \code{\link[plotly]{plotly}}. These
values control how the figure is viewed in the plot.}
\value{
A figure showing, in the environmental space, the species ranges generated with
any of the functions: \code{\link{rangemap_buff}}, \code{\link{rangemap_bound}},
\code{\link{rangemap_hull}}, \code{\link{rangemap_enm}}, and \code{\link{rangemap_tsa}}.
}
\description{
ranges_espace generates a three-dimensional comparison of a species'
ranges created using distinct algorithms, to visualize the implications of selecting
one of them if environmental conditions are considered.
}
\examples{
if(!require(rgbif)){
install.packages("rgbif")
library(rgbif)
}
# getting the data from GBIF
species <- name_lookup(query = "Dasypus kappleri",
rank="species", return = "data") # information about the species
occ_count(taxonKey = species$key[14], georeferenced = TRUE) # testing if keys return records
key <- species$key[14] # using species key that return information
occ <- occ_search(taxonKey = key, return = "data") # using the taxon key
# keeping only georeferenced records
occ_g <- occ[!is.na(occ$decimalLatitude) & !is.na(occ$decimalLongitude),
c("name", "decimalLongitude", "decimalLatitude")]
# range based on buffers
dist <- 500000
buff <- rangemap_buff(occurrences = occ_g, buffer_distance = dist)
# range based on concave hulls
dist1 <- 250000
hull1 <- "concave"
concave <- rangemap_hull(occurrences = occ_g, hull_type = hull1, buffer_distance = dist1)
# ranges comparison in environmental space
## list of ranges
ranges <- list(buff, concave)
names(ranges) <- c("buff", "concave")
## other data for environmental comparisson
if(!require(raster)){
install.packages("raster")
library(raster)
}
if(!require(maps)){
install.packages("maps")
library(maps)
}
if(!require(maptools)){
install.packages("maptools")
library(maptools)
}
vars <- getData("worldclim", var = "bio", res = 5)
## mask variables to region of interest
WGS84 <- CRS("+proj=longlat +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0")
w_map <- map(database = "world", regions = c("Ecuador", "Peru", "Bolivia", "Colombia", "Venezuela",
"Suriname", "Guyana", "French Guyana", "Brazil"),
fill = TRUE, plot = FALSE) # map of the world
w_po <- sapply(strsplit(w_map$names, ":"), function(x) x[1]) # preparing data to create polygon
reg <- map2SpatialPolygons(w_map, IDs = w_po, proj4string = WGS84) # map to polygon
e <- extent(reg)
mask <- as(e, 'SpatialPolygons')
variables <- crop(vars, mask)
## comparison
occur <- TRUE
env_comp <- ranges_espace(ranges = ranges, add_occurrences = occur, variables = variables)
# now play around, zoom in and rotate the figure
# saving this figure may be challenging, try using
# the following lines of code and check your download directory
op <- options() # save default options
options(viewer = NULL) # Set viewer to web browser
name <- "ranges_space" # name for figure
# using web browser to save image
p \%>\% htmlwidgets::onRender(
paste("function(el, x)
{var gd = document.getElementById(el.id);
Plotly.downloadImage(gd, {format: 'svg', width: ", 1000, ", height: ",
800, ", filename: ", paste("\\'", name, "\\'", sep = ""), "});
}", sep = "")
)
Sys.sleep(2) # wait for the execution
options(viewer = op$viewer) # restore viewer to old setting (e.g. RStudio)
}
| /man/ranges_espace.Rd | no_license | ynzhangrong/rangemap | R | false | true | 5,908 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ranges_espace.R
\name{ranges_espace}
\alias{ranges_espace}
\title{Comparison of species ranges in environmental space}
\usage{
ranges_espace(ranges, add_occurrences = TRUE, variables,
max_background = 25000, ranges_representation = "clouds",
background_color = "darkolivegreen", range_colors = NULL, alpha = 0.03,
eye_camera = c(x = 1.95, y = 1.25, z = 1.35))
}
\arguments{
\item{ranges}{object or list of objects produced with any of the following functions:
\code{\link{rangemap_buff}}, \code{\link{rangemap_bound}}, \code{\link{rangemap_hull}},
\code{\link{rangemap_enm}}, and \code{\link{rangemap_tsa}}. For visualization purposes,
using up to three ranges is recommended.}
\item{add_occurrences}{(logical) if TRUE, species occurrences contained in one of
the elements of the list \code{ranges} will be ploted in the figure. Default = TRUE.
If the none of the ranges contains occurrences (e.g. a list of one object created
with the \code{\link{rangemap_bound}} function in which occurrences were not used),
this parameter will be ignored.}
\item{variables}{a RasterStack object of environmental variables that will be used
for creating the principal components to represent the environmental space. Projection
is assumed to be Geographic (longitude and latitude).}
\item{max_background}{(numeric) maximum number of data from variables to be used for
representation of the environmental space. Default = 25000. Increasing this number
results in more detailed views of the available environment but preforming analyses
will take longer.}
\item{ranges_representation}{(character) form in which the environmental space
withing the ranges will be represented. Options are "clouds" and "ellipsoids".
Default = "clouds".}
\item{background_color}{color of the background to be ploted. Default = "darkolivegreen".
Since transparency is used for representing most of components of the plot, colors may
look different.}
\item{range_colors}{vector of colors of the ranges to be represented. If not defined,
default = NULL and default colors will be used. Since transparency is used for
representing most of components of the plot, colors may look different.}
\item{alpha}{(numeric) degree of opacity with which the species ranges will be
represented in the plot. Default = 0.03.}
\item{eye_camera}{(numeric) vector of values to be passed to eye in the list of
arguments of the parameter camera in layour of \code{\link[plotly]{plotly}}. This
values control how the figur is seen in the plot.}
}
\value{
A figure showing, in the environmental space, the species ranges generated with
any of the functions: \code{\link{rangemap_buff}}, \code{\link{rangemap_bound}},
\code{\link{rangemap_hull}}, \code{\link{rangemap_enm}}, and \code{\link{rangemap_tsa}}.
}
\description{
ranges_espace generates a three-dimensional comparison of a species'
ranges created using distinct algorithms, to visualize the implications of selecting
one of them if environmental conditions are considered.
}
\examples{
if(!require(rgbif)){
install.packages("rgbif")
library(rgbif)
}
# getting the data from GBIF
species <- name_lookup(query = "Dasypus kappleri",
rank="species", return = "data") # information about the species
occ_count(taxonKey = species$key[14], georeferenced = TRUE) # testing if keys return records
key <- species$key[14] # using species key that return information
occ <- occ_search(taxonKey = key, return = "data") # using the taxon key
# keeping only georeferenced records
occ_g <- occ[!is.na(occ$decimalLatitude) & !is.na(occ$decimalLongitude),
c("name", "decimalLongitude", "decimalLatitude")]
# range based on buffers
dist <- 500000
buff <- rangemap_buff(occurrences = occ_g, buffer_distance = dist)
# range based on concave hulls
dist1 <- 250000
hull1 <- "concave"
concave <- rangemap_hull(occurrences = occ_g, hull_type = hull1, buffer_distance = dist1)
# ranges comparison in environmental space
## list of ranges
ranges <- list(buff, concave)
names(ranges) <- c("buff", "concave")
## other data for environmental comparisson
if(!require(raster)){
install.packages("raster")
library(raster)
}
if(!require(maps)){
install.packages("maps")
library(maps)
}
if(!require(maptools)){
install.packages("maptools")
library(maptools)
}
vars <- getData("worldclim", var = "bio", res = 5)
## mask variables to region of interest
WGS84 <- CRS("+proj=longlat +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0")
w_map <- map(database = "world", regions = c("Ecuador", "Peru", "Bolivia", "Colombia", "Venezuela",
"Suriname", "Guyana", "French Guyana", "Brazil"),
fill = TRUE, plot = FALSE) # map of the world
w_po <- sapply(strsplit(w_map$names, ":"), function(x) x[1]) # preparing data to create polygon
reg <- map2SpatialPolygons(w_map, IDs = w_po, proj4string = WGS84) # map to polygon
e <- extent(reg)
mask <- as(e, 'SpatialPolygons')
variables <- crop(vars, mask)
## comparison
occur <- TRUE
env_comp <- ranges_espace(ranges = ranges, add_occurrences = occur, variables = variables)
# now play around, zoom in and rotate the figure
# saving this figure may be challenging, try using
# the following lines of code and check your download directory
op <- options() # save default options
options(viewer = NULL) # Set viewer to web browser
name <- "ranges_space" # name for figure
# using web browser to save image
p \%>\% htmlwidgets::onRender(
paste("function(el, x)
{var gd = document.getElementById(el.id);
Plotly.downloadImage(gd, {format: 'svg', width: ", 1000, ", height: ",
800, ", filename: ", paste("\\'", name, "\\'", sep = ""), "});
}", sep = "")
)
Sys.sleep(2) # wait for the execution
options(viewer = op$viewer) # restore viewer to old setting (e.g. RStudio)
}
|
# I want to create a sample script for one community scenario in which we go all through the process of
# generating the community, fitting the model, checking for convergence and visualizing the variation partitioning
# Libraries ---------------------------------------------------------------
# Load the libraries
library(HMSC)
library(doParallel)
library(tidyverse)
library(coda)
# Functions ---------------------------------------------------------------
# Load the functions
# This script has a function to organize the parameters and get them in the format wanted for the simulations
source("functions/prep_pars_fx.R")
source("functions/metacom_sim_fx.R")
source("functions/main_sim_fx.R")
source("functions/full_process_fx.R")
# OrigLandscape -----------------------------------------------------------
XY <- readRDS("outputs/fixedLandscapes/orig-no-seed-XY.RDS")
E <- readRDS("outputs/fixedLandscapes/orig-no-seed-E.RDS")
# These include the 78 vectors originally included by Guillaume
MEMsel <- readRDS("outputs/fixedLandscapes/orig-no-seed-MEMsel.RDS")
# Simple run --------------------------------------------------------------
# We will create one set of parameters for one simulated scenario and then fit HMSC one time to that.
# After that, we will check for convergence
# We will repeat the HMSC fit and get another model so that we can compare both
# Create parameters list
# Simulation settings: 1000 patches, 1 environmental axis, 12 species, with
# the given niche breadth, dispersal (alpha) and interaction strengths.
pars <- prep_pars(N = 1000, D = 1, R = 12,
nicheOpt = NULL,
breadth = 0.8, alpha = 0.005, interx_col = 1.5, interx_ext = 1.5)
# Generate a community with these parameters, only creating one iteration
# this is basically a matrix of species occupancy at each of the sites
commSim <- metacom_sim4HMSC(XY = XY, E = E, pars = pars,
nsteps = 200, occupancy = 0.8, niter = 1)
# HMSC part
# First format the data
formData <- as.HMSCdata(Y = commSim[[1]], # This is the occupancy data
X = cbind(scale(E), scale(E)^2, MEMsel), # The explanatory variables which are the env and space by MEM
Random = as.factor(1:1000), # Site-level random effect
scaleX = TRUE, interceptX = TRUE)
# With formatted data, now fit the model
model <- hmsc(formData, family = "probit",
niter = 100000, nburn = 1000, thin = 10)
# NOTE(review): the freshly fitted model above is immediately overwritten by
# the cached model read from disk on the next line, so the expensive hmsc()
# call is wasted -- comment out one of the two depending on whether you want
# to refit or reuse the saved run.
model <- readRDS("outputs/testOutputs/conv_modl.RDS")
#saveRDS(model, file = "outputs/testOutputs/conv_modl.RDS")
# Change the structure of the fitted model to be able to use functions from 'coda'
# NOTE(review): the argument is spelled `burning` here; confirm against the
# HMSC as.mcmc() method signature (it may be intended as `burnin`).
chain <- as.mcmc(model, parameters = "meansParamX", burning = TRUE)
# Get the summary and trace/density plots for the estimated parameters
summary(chain)
pdf("outputs/testOutputs/plotsChain1.pdf")
plot(chain)
dev.off()
raftery.diag(chain) # I find it strange that the burn in suggested is 1-3...
# Also the lower bound min is suggested to be 3746 indep samples
# Look at the autocorrelation between these variables
pdf("outputs/testOutputs/autocorrPlots.pdf")
autocorr.plot(chain) # It shows no autocorrelation... which I find weird...
dev.off()
pdf("outputs/testOutputs/quantileschain.pdf")
cumuplot(chain) # Look at the quantiles of the chain over time
dev.off()
# Repeat the process for a second chain so we can compare them
mod2 <- hmsc(formData, family = "probit",
niter = 100000, nburn = 1000, thin = 10)
#mod2 <- readRDS("outputs/testOutputs/conv_mod2.RDS")
#saveRDS(mod2, file = "outputs/testOutputs/conv_mod2.RDS")
chain2 <- as.mcmc(mod2, parameters = "meansParamX")
combinedChains <- mcmc.list(chain, chain2)
pdf("outputs/testOutputs/combinedChains.pdf")
plot(combinedChains)
dev.off()
# Gelman diagnostic would tell us how different these two chains are... We want them to be similar since they are from the same data
# Values close to 1 mean that between and within chain variance are equal. Larger values indicate notabe differences between the chains
gelman.diag(combinedChains)
pdf("outputs/testOutputs/gelmanplot.pdf")
gelman.plot(combinedChains) # This would show the development of the -scale-reduction over chain steps.
dev.off()
| /old/testingHMSCv2/runScripts/20190402-ex_converg.R | no_license | javirudolph/testingHMSC | R | false | false | 4,140 | r | # I want to create a sample script for one community scenario in which we go all through the process of
# generating the community, fitting the model, checking for convergence and visualizing the variation partitioning
# Libraries ---------------------------------------------------------------
# Load the libraries
library(HMSC)
library(doParallel)
library(tidyverse)
library(coda)
# Functions ---------------------------------------------------------------
# Load the functions
# This script has a function to organize the parameters and get them in the format wanted for the simulations
source("functions/prep_pars_fx.R")
source("functions/metacom_sim_fx.R")
source("functions/main_sim_fx.R")
source("functions/full_process_fx.R")
# OrigLandscape -----------------------------------------------------------
XY <- readRDS("outputs/fixedLandscapes/orig-no-seed-XY.RDS")
E <- readRDS("outputs/fixedLandscapes/orig-no-seed-E.RDS")
# These include the 78 vectors originally included by Guillaume
MEMsel <- readRDS("outputs/fixedLandscapes/orig-no-seed-MEMsel.RDS")
# Simple run --------------------------------------------------------------
# We will create one set of parameters for one simulated scenario and then fit HMSC one time to that.
# After that, we will check for convergence
# We will repeat the HMSC fit and get another model so that we can compare both
# Create parameters list
pars <- prep_pars(N = 1000, D = 1, R = 12,
nicheOpt = NULL,
breadth = 0.8, alpha = 0.005, interx_col = 1.5, interx_ext = 1.5)
# Generate a community with these parameters, only creating one iteration
# this is basically a matrix of species occupancy at each of the sites
commSim <- metacom_sim4HMSC(XY = XY, E = E, pars = pars,
nsteps = 200, occupancy = 0.8, niter = 1)
# HMSC part
# First format the data
formData <- as.HMSCdata(Y = commSim[[1]], # This is the occupancy data
X = cbind(scale(E), scale(E)^2, MEMsel), # The explanatory variables which are the env and space by MEM
Random = as.factor(1:1000), # Site-level random effect
scaleX = TRUE, interceptX = TRUE)
# With formatted data, now fit the model
model <- hmsc(formData, family = "probit",
niter = 100000, nburn = 1000, thin = 10)
model <- readRDS("outputs/testOutputs/conv_modl.RDS")
#saveRDS(model, file = "outputs/testOutputs/conv_modl.RDS")
# Chage the structure of the fitted model to be able to use functions from 'coda'
chain <- as.mcmc(model, parameters = "meansParamX", burning = TRUE)
# Get the summary and trace/density plots for the estimated parameters
summary(chain)
pdf("outputs/testOutputs/plotsChain1.pdf")
plot(chain)
dev.off()
raftery.diag(chain) # I find it strange that the burn in suggested is 1-3...
# Also the lower bound min is suggested to be 3746 indep samples
# Look at the autocorrelation between these variables
pdf("outputs/testOutputs/autocorrPlots.pdf")
autocorr.plot(chain) # It shows no autocorrelation... which I find weird...
dev.off()
pdf("outputs/testOutputs/quantileschain.pdf")
cumuplot(chain) # Look at the quantiles of the chain over time
dev.off()
# Repeat the process for a second chain so we can compare them
mod2 <- hmsc(formData, family = "probit",
niter = 100000, nburn = 1000, thin = 10)
#mod2 <- readRDS("outputs/testOutputs/conv_mod2.RDS")
#saveRDS(mod2, file = "outputs/testOutputs/conv_mod2.RDS")
chain2 <- as.mcmc(mod2, parameters = "meansParamX")
combinedChains <- mcmc.list(chain, chain2)
pdf("outputs/testOutputs/combinedChains.pdf")
plot(combinedChains)
dev.off()
# Gelman diagnostic would tell us how different these two chains are... We want them to be similar since they are from the same data
# Values close to 1 mean that between and within chain variance are equal. Larger values indicate notabe differences between the chains
gelman.diag(combinedChains)
pdf("outputs/testOutputs/gelmanplot.pdf")
gelman.plot(combinedChains) # This would show the development of the -scale-reduction over chain steps.
dev.off()
|
# Auto-generated fuzz-test harness (RcppDeepState/AFL): builds a pathological
# input list (m = NULL, repetitions = 0, and a 5x7 matrix containing extreme
# double values) and feeds it to the internal CNull sampling routine. The
# goal is crash/sanitizer detection, not a correctness assertion; str() just
# records the structure of whatever comes back.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53812015169372e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result)
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result) |
library(GenomicAlignments)
library(plyranges)
library(dplyr)
library(ggplot2)
library(data.table)
source("/Volumes/Elements/MYCNAmplicon/Code/CustomThemes.R")
# ------------------------------------------------------------------------------
# Plot IMR-5/75 de novo assembly
# ------------------------------------------------------------------------------
# ---- IMR-5/75: map the amplicon contig of the de novo assembly to hg19 ----
# Read the PAF alignments of the Flye assembly against hg19.
paf <- read.table("/Volumes/Elements/IMR-5-75.ONT-flye-assembly-meta-genomesize1g/assembly.maptohg19.paf") %>% as_tibble()
colnames(paf) = c(
  "Query", "QueryLength", "QueryStart", "QueryEnd", "RelativeStrand",
  "ReferenceSequence", "ReferenceSequenceLength", "ReferenceSequenceStart", "ReferenceSequenceEnd",
  "NumberOfResidueMatches", "AlignmentBlockLength", "MappingQuality",
  "NoIdea1", "NoIdea2", "NoIdea3", "NoIdea4", "NoIdea5"
)
# Keep only the contig carrying the amplicon.
paf <- paf %>% filter(Query == "contig_499")
# Orient reference coordinates by strand so alignment segments run in the
# direction of the contig (start > end for "-" strand hits).
paf$ReferenceStart <- ifelse(paf$RelativeStrand == "+", paf$ReferenceSequenceStart, paf$ReferenceSequenceEnd)
paf$ReferenceEnd <- ifelse(paf$RelativeStrand == "+", paf$ReferenceSequenceEnd, paf$ReferenceSequenceStart)
# Assembly (x) vs chr2 (y) segment plot.
# BUG FIX: this plot previously used the unoriented ReferenceSequenceStart/End
# columns, unlike every other cell line's plot below; it now uses the
# strand-corrected coordinates computed above.
paf %>%
  filter(ReferenceSequence == "chr2") %>%
  ggplot() +
  geom_segment(aes(x = QueryStart, xend = QueryEnd, y = ReferenceStart, yend = ReferenceEnd)) +
  theme_kons2() +
  xlab("De novo assembly") +
  ylab("chromosome 2 (hg19)") +
  ggtitle("IMR-5/75") +
  ggsave("/Volumes/Elements/MYCNAmplicon/Results/IMR575_NanoporeDeNovoAssembly_contig499_maptochr2.pdf",
         height = 2, width = 1.5)
# Same plot with the axes swapped.
# BUG FIX: the title previously read "Kelly" (copy-paste from the Kelly
# section) although this is the IMR-5/75 contig.
paf %>%
  filter(ReferenceSequence == "chr2") %>%
  ggplot() +
  geom_segment(aes(y = QueryStart, yend = QueryEnd, x = ReferenceStart, xend = ReferenceEnd)) +
  theme_kons2() +
  ylab("De novo assembly") +
  xlab("chromosome 2 (hg19)") +
  ggtitle("IMR-5/75") +
  ggsave("/Volumes/Elements/MYCNAmplicon/Results/IMR575_NanoporeDeNovoAssembly_contig499_maptochr2_Layout2.pdf",
         height = 2, width = 1.5)
# ------------------------------------------------------------------------------
# Plot Kelly de novo assembly
# ------------------------------------------------------------------------------
# samtools view -h -o /Volumes/Elements/Kelly.ONT-flye-assembly-meta-genomesize1g/assembly.maptohg19.sam /Volumes/Elements/Kelly.ONT-flye-assembly-meta-genomesize1g/assembly.maptohg19.bam
# ~/Desktop/nano-wgs/utils/k8-darwin ~/Desktop/nano-wgs/utils/sam2paf.js /Volumes/Elements/Kelly.ONT-flye-assembly-meta-genomesize1g/assembly.maptohg19.sam > /Volumes/Elements/Kelly.ONT-flye-assembly-meta-genomesize1g/assembly.maptohg19.paf
paf = read.table("/Volumes/Elements/Kelly.ONT-flye-assembly-meta-genomesize1g/assembly.maptohg19.paf") %>% as_tibble()
colnames(paf) = c(
"Query", "QueryLength", "QueryStart", "QueryEnd", "RelativeStrand",
"ReferenceSequence", "ReferenceSequenceLength", "ReferenceSequenceStart", "ReferenceSequenceEnd",
"NumberOfResidueMatches", "AlignmentBlockLength", "MappingQuality",
"NoIdea1", "NoIdea2", "NoIdea3", "NoIdea4", "NoIdea5"
)
paf = paf %>% filter(Query == "contig_31")
paf$ReferenceStart = ifelse(paf$RelativeStrand == "+", paf$ReferenceSequenceStart, paf$ReferenceSequenceEnd)
paf$ReferenceEnd = ifelse(paf$RelativeStrand == "+", paf$ReferenceSequenceEnd, paf$ReferenceSequenceStart)
paf %>%
filter(ReferenceSequence == "chr2") %>%
ggplot() +
geom_segment(aes(x=QueryStart, xend=QueryEnd, y=ReferenceStart, yend=ReferenceEnd)) +
theme_kons2() +
xlab("De novo assembly") +
ylab("chromosome 2 (hg19)") +
ggtitle("Kelly") +
ggsave("/Volumes/Elements/MYCNAmplicon/Results/Kelly_NanoporeDeNovoAssembly_contig_31_maptochr2.pdf",
height = 2, width=1.5)
paf %>%
filter(ReferenceSequence == "chr2") %>%
ggplot() +
annotate(ymin=16080683, ymax=16087129, xmin=-Inf, xmax=Inf, geom="rect", color = "steelblue") +
annotate(ymin=16375277, ymax=16385344, xmin=-Inf, xmax=Inf, geom="rect", color = "firebrick3") +
geom_segment(aes(x=QueryStart, xend=QueryEnd, y=ReferenceStart, yend=ReferenceEnd)) +
theme_kons2() +
xlab("De novo assembly") +
ylab("chromosome 2 (hg19)") +
ggtitle("Kelly") +
ggsave("/Volumes/Elements/MYCNAmplicon/Results/Kelly_NanoporeDeNovoAssembly_contig_31_maptochr2_Annot.pdf",
height = 2, width=1.5)
paf %>%
ggplot() +
geom_segment(aes(x=QueryStart, xend=QueryEnd, y=ReferenceStart, yend=ReferenceEnd)) +
theme_kons2() +
xlab("De novo assembly") +
facet_grid(ReferenceSequence ~ .) +
ggtitle("Kelly") +
ggsave("/Volumes/Elements/MYCNAmplicon/Results/Kelly_NanoporeDeNovoAssembly_contig_31_maptogenome.pdf",
height = 1.5, width=2)
paf %>%
filter(ReferenceSequence == "chr2") %>%
ggplot() +
geom_segment(aes(y=QueryStart, yend=QueryEnd, x=ReferenceStart, xend=ReferenceEnd)) +
theme_kons2() +
ylab("De novo assembly") +
xlab("chromosome 2 (hg19)") +
ggtitle("Kelly") +
ggsave("/Volumes/Elements/MYCNAmplicon/Results/Kelly_NanoporeDeNovoAssembly_contig_31_maptochr2_Layout2.pdf",
height = 2, width=1.5)
# ------------------------------------------------------------------------------
# Plot CHP-212 de novo assembly
# ------------------------------------------------------------------------------
# ---- CHP-212: load PAF alignments of the de novo assembly to hg19 ----
# fill = TRUE pads ragged PAF rows (optional SAM-style tags vary per line).
# FIX: use TRUE instead of the reassignable shorthand T.
paf <- fread("/Volumes/Elements/CHP-212.ONT-flye-assembly-meta-genomesize1g/assembly.maptohg19.paf",
             fill = TRUE) %>% as_tibble()
colnames(paf) = c(
  "Query", "QueryLength", "QueryStart", "QueryEnd", "RelativeStrand",
  "ReferenceSequence", "ReferenceSequenceLength", "ReferenceSequenceStart", "ReferenceSequenceEnd",
  "NumberOfResidueMatches", "AlignmentBlockLength", "MappingQuality",
  "NoIdea1", "NoIdea2", "NoIdea3", "NoIdea4", "NoIdea5", "NoIdea6"
)
# Keep only the amplicon contig.
paf <- paf %>% filter(Query == "contig_1695")
# Orient reference coordinates by strand (start > end for "-" strand hits).
paf$ReferenceStart <- ifelse(paf$RelativeStrand == "+", paf$ReferenceSequenceStart, paf$ReferenceSequenceEnd)
paf$ReferenceEnd <- ifelse(paf$RelativeStrand == "+", paf$ReferenceSequenceEnd, paf$ReferenceSequenceStart)
paf %>%
filter(ReferenceSequence == "chr2") %>%
ggplot() +
geom_segment(aes(x=QueryStart, xend=QueryEnd, y=ReferenceStart, yend=ReferenceEnd)) +
theme_kons2() +
xlab("De novo assembly") +
ylab("chromosome 2 (hg19)") +
ggtitle("CHP-212") +
ggsave("/Volumes/Elements/MYCNAmplicon/Results/CHP212_NanoporeDeNovoAssembly_contig1695_maptochr2.pdf",
height = 2, width=1.5)
paf %>%
filter(ReferenceSequence == "chr2") %>%
ggplot() +
geom_segment(aes(y=QueryStart, yend=QueryEnd, x=ReferenceStart, xend=ReferenceEnd)) +
theme_kons2() +
ylab("De novo assembly") +
xlab("chromosome 2 (hg19)") +
ggtitle("CHP-212") +
ggsave("/Volumes/Elements/MYCNAmplicon/Results/CHP212_NanoporeDeNovoAssembly_contig1695_maptochr2_Layout2.pdf",
height = 2, width=1.5)
paf %>%
ggplot() +
geom_segment(aes(x=QueryStart, xend=QueryEnd, y=ReferenceStart, yend=ReferenceEnd)) +
theme_kons2() +
xlab("De novo assembly") +
facet_grid(ReferenceSequence ~ .) +
ggtitle("CHP-212")
# ------------------------------------------------------------------------------
# Plot NGP de novo assembly
# ------------------------------------------------------------------------------
paf = fread("/Volumes/Elements/NGP.ONT-flye-assembly-meta-genomesize1g/assembly.maptohg19.paf",
fill=T) %>% as_tibble()
colnames(paf) = c(
"Query", "QueryLength", "QueryStart", "QueryEnd", "RelativeStrand",
"ReferenceSequence", "ReferenceSequenceLength", "ReferenceSequenceStart", "ReferenceSequenceEnd",
"NumberOfResidueMatches", "AlignmentBlockLength", "MappingQuality",
"NoIdea1", "NoIdea2", "NoIdea3", "NoIdea4", "NoIdea5", "NoIdea6"
)
paf = paf %>% filter(Query == "contig_45")
paf$ReferenceStart = ifelse(paf$RelativeStrand == "+", paf$ReferenceSequenceStart, paf$ReferenceSequenceEnd)
paf$ReferenceEnd = ifelse(paf$RelativeStrand == "+", paf$ReferenceSequenceEnd, paf$ReferenceSequenceStart)
paf %>%
filter(ReferenceSequence == "chr2") %>%
ggplot() +
annotate(ymin=16080683, ymax=16087129, xmin=-Inf, xmax=Inf, geom="rect", color = "steelblue") +
annotate(ymin=16375277, ymax=16385344, xmin=-Inf, xmax=Inf, geom="rect", color = "firebrick3") +
geom_segment(aes(x=QueryStart, xend=QueryEnd, y=ReferenceStart, yend=ReferenceEnd)) +
theme_kons2() +
xlab("De novo assembly") +
ylab("chromosome 2 (hg19)") +
ggtitle("NGP") +
ylim(15000000,18000000) +
ggsave("/Volumes/Elements/MYCNAmplicon/Results/NGP_NanoporeDeNovoAssembly_contig45_maptochr2_annotateMYCNAndE4.pdf",
height = 2, width=1.5)
paf %>%
filter(ReferenceSequence == "chr2") %>%
ggplot() +
geom_segment(aes(x=QueryStart, xend=QueryEnd, y=ReferenceStart, yend=ReferenceEnd)) +
theme_kons2() +
xlab("De novo assembly") +
ylab("chromosome 2 (hg19)") +
ggtitle("NGP") +
ylim(15000000,18000000) +
ggsave("/Volumes/Elements/MYCNAmplicon/Results/NGP_NanoporeDeNovoAssembly_contig45_maptochr2.pdf",
height = 2, width=1.5)
paf %>%
filter(ReferenceSequence == "chr2") %>%
ggplot() +
geom_segment(aes(y=QueryStart, yend=QueryEnd, x=ReferenceStart, xend=ReferenceEnd)) +
theme_kons2() +
ylab("De novo assembly") +
xlab("chromosome 2 (hg19)") +
ggtitle("NGP") +
ggsave("/Volumes/Elements/MYCNAmplicon/Results/NGP_NanoporeDeNovoAssembly_contig45_maptochr2_Layout2.pdf",
height = 2, width=1.5)
paf %>%
ggplot() +
geom_segment(aes(x=QueryStart, xend=QueryEnd, y=ReferenceStart, yend=ReferenceEnd)) +
theme_kons2() +
xlab("De novo assembly") +
facet_grid(ReferenceSequence ~ .) +
ggtitle("NGP") +
ggsave("/Volumes/Elements/MYCNAmplicon/Results/NGP_NanoporeDeNovoAssembly_contig45_allchr.pdf",
height = 2, width=1.5)
| /Code/PlotAssemblyMapping.R | no_license | wisekh6/MYCNAmplicon | R | false | false | 9,657 | r | library(GenomicAlignments)
library(plyranges)
library(dplyr)
library(ggplot2)
library(data.table)
source("/Volumes/Elements/MYCNAmplicon/Code/CustomThemes.R")
# ------------------------------------------------------------------------------
# Plot IMR-5/75 de novo assembly
# ------------------------------------------------------------------------------
# Read the PAF alignment file of the Flye assembly mapped against hg19.
paf = read.table("/Volumes/Elements/IMR-5-75.ONT-flye-assembly-meta-genomesize1g/assembly.maptohg19.paf") %>% as_tibble()
# Standard PAF columns 1-12, followed by unnamed tag columns ("NoIdea*").
colnames(paf) = c(
  "Query", "QueryLength", "QueryStart", "QueryEnd", "RelativeStrand",
  "ReferenceSequence", "ReferenceSequenceLength", "ReferenceSequenceStart", "ReferenceSequenceEnd",
  "NumberOfResidueMatches", "AlignmentBlockLength", "MappingQuality",
  "NoIdea1", "NoIdea2", "NoIdea3", "NoIdea4", "NoIdea5"
)
# Keep only the contig of interest for this cell line.
paf = paf %>% filter(Query == "contig_499")
# Orient reference coordinates by strand so "-" strand alignments are drawn
# reversed (start/end swapped) in the segment plots below.
paf$ReferenceStart = ifelse(paf$RelativeStrand == "+", paf$ReferenceSequenceStart, paf$ReferenceSequenceEnd)
paf$ReferenceEnd = ifelse(paf$RelativeStrand == "+", paf$ReferenceSequenceEnd, paf$ReferenceSequenceStart)
# IMR-5/75: contig_499 vs chr2. Fixed to use the strand-corrected
# ReferenceStart/ReferenceEnd columns computed above (the original used the
# raw ReferenceSequenceStart/End, so "-" strand alignments were drawn with
# the wrong orientation, inconsistent with the Kelly/CHP-212/NGP plots).
paf %>%
  filter(ReferenceSequence == "chr2") %>%
  ggplot() +
  geom_segment(aes(x = QueryStart, xend = QueryEnd,
                   y = ReferenceStart, yend = ReferenceEnd)) +
  theme_kons2() +
  xlab("De novo assembly") +
  ylab("chromosome 2 (hg19)") +
  ggtitle("IMR-5/75") +
  ggsave("/Volumes/Elements/MYCNAmplicon/Results/IMR575_NanoporeDeNovoAssembly_contig499_maptochr2.pdf",
         height = 2, width = 1.5)
# Layout 2 (axes swapped): assembly on the y-axis, chr2 on the x-axis.
paf %>%
  filter(ReferenceSequence == "chr2") %>%
  ggplot() +
  geom_segment(aes(y = QueryStart, yend = QueryEnd,
                   x = ReferenceStart, xend = ReferenceEnd)) +
  theme_kons2() +
  ylab("De novo assembly") +
  xlab("chromosome 2 (hg19)") +
  # Fixed title: was "Kelly", but this section and output file are IMR-5/75.
  ggtitle("IMR-5/75") +
  ggsave("/Volumes/Elements/MYCNAmplicon/Results/IMR575_NanoporeDeNovoAssembly_contig499_maptochr2_Layout2.pdf",
         height = 2, width = 1.5)
# ------------------------------------------------------------------------------
# Plot Kelly de novo assembly
# ------------------------------------------------------------------------------
# samtools view -h -o /Volumes/Elements/Kelly.ONT-flye-assembly-meta-genomesize1g/assembly.maptohg19.sam /Volumes/Elements/Kelly.ONT-flye-assembly-meta-genomesize1g/assembly.maptohg19.bam
# ~/Desktop/nano-wgs/utils/k8-darwin ~/Desktop/nano-wgs/utils/sam2paf.js /Volumes/Elements/Kelly.ONT-flye-assembly-meta-genomesize1g/assembly.maptohg19.sam > /Volumes/Elements/Kelly.ONT-flye-assembly-meta-genomesize1g/assembly.maptohg19.paf
paf = read.table("/Volumes/Elements/Kelly.ONT-flye-assembly-meta-genomesize1g/assembly.maptohg19.paf") %>% as_tibble()
colnames(paf) = c(
"Query", "QueryLength", "QueryStart", "QueryEnd", "RelativeStrand",
"ReferenceSequence", "ReferenceSequenceLength", "ReferenceSequenceStart", "ReferenceSequenceEnd",
"NumberOfResidueMatches", "AlignmentBlockLength", "MappingQuality",
"NoIdea1", "NoIdea2", "NoIdea3", "NoIdea4", "NoIdea5"
)
paf = paf %>% filter(Query == "contig_31")
paf$ReferenceStart = ifelse(paf$RelativeStrand == "+", paf$ReferenceSequenceStart, paf$ReferenceSequenceEnd)
paf$ReferenceEnd = ifelse(paf$RelativeStrand == "+", paf$ReferenceSequenceEnd, paf$ReferenceSequenceStart)
paf %>%
filter(ReferenceSequence == "chr2") %>%
ggplot() +
geom_segment(aes(x=QueryStart, xend=QueryEnd, y=ReferenceStart, yend=ReferenceEnd)) +
theme_kons2() +
xlab("De novo assembly") +
ylab("chromosome 2 (hg19)") +
ggtitle("Kelly") +
ggsave("/Volumes/Elements/MYCNAmplicon/Results/Kelly_NanoporeDeNovoAssembly_contig_31_maptochr2.pdf",
height = 2, width=1.5)
paf %>%
filter(ReferenceSequence == "chr2") %>%
ggplot() +
annotate(ymin=16080683, ymax=16087129, xmin=-Inf, xmax=Inf, geom="rect", color = "steelblue") +
annotate(ymin=16375277, ymax=16385344, xmin=-Inf, xmax=Inf, geom="rect", color = "firebrick3") +
geom_segment(aes(x=QueryStart, xend=QueryEnd, y=ReferenceStart, yend=ReferenceEnd)) +
theme_kons2() +
xlab("De novo assembly") +
ylab("chromosome 2 (hg19)") +
ggtitle("Kelly") +
ggsave("/Volumes/Elements/MYCNAmplicon/Results/Kelly_NanoporeDeNovoAssembly_contig_31_maptochr2_Annot.pdf",
height = 2, width=1.5)
paf %>%
ggplot() +
geom_segment(aes(x=QueryStart, xend=QueryEnd, y=ReferenceStart, yend=ReferenceEnd)) +
theme_kons2() +
xlab("De novo assembly") +
facet_grid(ReferenceSequence ~ .) +
ggtitle("Kelly") +
ggsave("/Volumes/Elements/MYCNAmplicon/Results/Kelly_NanoporeDeNovoAssembly_contig_31_maptogenome.pdf",
height = 1.5, width=2)
paf %>%
filter(ReferenceSequence == "chr2") %>%
ggplot() +
geom_segment(aes(y=QueryStart, yend=QueryEnd, x=ReferenceStart, xend=ReferenceEnd)) +
theme_kons2() +
ylab("De novo assembly") +
xlab("chromosome 2 (hg19)") +
ggtitle("Kelly") +
ggsave("/Volumes/Elements/MYCNAmplicon/Results/Kelly_NanoporeDeNovoAssembly_contig_31_maptochr2_Layout2.pdf",
height = 2, width=1.5)
# ------------------------------------------------------------------------------
# Plot CHP-212 de novo assembly
# ------------------------------------------------------------------------------
paf = fread("/Volumes/Elements/CHP-212.ONT-flye-assembly-meta-genomesize1g/assembly.maptohg19.paf",
fill=T) %>% as_tibble()
colnames(paf) = c(
"Query", "QueryLength", "QueryStart", "QueryEnd", "RelativeStrand",
"ReferenceSequence", "ReferenceSequenceLength", "ReferenceSequenceStart", "ReferenceSequenceEnd",
"NumberOfResidueMatches", "AlignmentBlockLength", "MappingQuality",
"NoIdea1", "NoIdea2", "NoIdea3", "NoIdea4", "NoIdea5", "NoIdea6"
)
paf = paf %>% filter(Query == "contig_1695")
paf$ReferenceStart = ifelse(paf$RelativeStrand == "+", paf$ReferenceSequenceStart, paf$ReferenceSequenceEnd)
paf$ReferenceEnd = ifelse(paf$RelativeStrand == "+", paf$ReferenceSequenceEnd, paf$ReferenceSequenceStart)
paf %>%
filter(ReferenceSequence == "chr2") %>%
ggplot() +
geom_segment(aes(x=QueryStart, xend=QueryEnd, y=ReferenceStart, yend=ReferenceEnd)) +
theme_kons2() +
xlab("De novo assembly") +
ylab("chromosome 2 (hg19)") +
ggtitle("CHP-212") +
ggsave("/Volumes/Elements/MYCNAmplicon/Results/CHP212_NanoporeDeNovoAssembly_contig1695_maptochr2.pdf",
height = 2, width=1.5)
paf %>%
filter(ReferenceSequence == "chr2") %>%
ggplot() +
geom_segment(aes(y=QueryStart, yend=QueryEnd, x=ReferenceStart, xend=ReferenceEnd)) +
theme_kons2() +
ylab("De novo assembly") +
xlab("chromosome 2 (hg19)") +
ggtitle("CHP-212") +
ggsave("/Volumes/Elements/MYCNAmplicon/Results/CHP212_NanoporeDeNovoAssembly_contig1695_maptochr2_Layout2.pdf",
height = 2, width=1.5)
paf %>%
ggplot() +
geom_segment(aes(x=QueryStart, xend=QueryEnd, y=ReferenceStart, yend=ReferenceEnd)) +
theme_kons2() +
xlab("De novo assembly") +
facet_grid(ReferenceSequence ~ .) +
ggtitle("CHP-212")
# ------------------------------------------------------------------------------
# Plot NGP de novo assembly
# ------------------------------------------------------------------------------
paf = fread("/Volumes/Elements/NGP.ONT-flye-assembly-meta-genomesize1g/assembly.maptohg19.paf",
fill=T) %>% as_tibble()
colnames(paf) = c(
"Query", "QueryLength", "QueryStart", "QueryEnd", "RelativeStrand",
"ReferenceSequence", "ReferenceSequenceLength", "ReferenceSequenceStart", "ReferenceSequenceEnd",
"NumberOfResidueMatches", "AlignmentBlockLength", "MappingQuality",
"NoIdea1", "NoIdea2", "NoIdea3", "NoIdea4", "NoIdea5", "NoIdea6"
)
paf = paf %>% filter(Query == "contig_45")
paf$ReferenceStart = ifelse(paf$RelativeStrand == "+", paf$ReferenceSequenceStart, paf$ReferenceSequenceEnd)
paf$ReferenceEnd = ifelse(paf$RelativeStrand == "+", paf$ReferenceSequenceEnd, paf$ReferenceSequenceStart)
paf %>%
filter(ReferenceSequence == "chr2") %>%
ggplot() +
annotate(ymin=16080683, ymax=16087129, xmin=-Inf, xmax=Inf, geom="rect", color = "steelblue") +
annotate(ymin=16375277, ymax=16385344, xmin=-Inf, xmax=Inf, geom="rect", color = "firebrick3") +
geom_segment(aes(x=QueryStart, xend=QueryEnd, y=ReferenceStart, yend=ReferenceEnd)) +
theme_kons2() +
xlab("De novo assembly") +
ylab("chromosome 2 (hg19)") +
ggtitle("NGP") +
ylim(15000000,18000000) +
ggsave("/Volumes/Elements/MYCNAmplicon/Results/NGP_NanoporeDeNovoAssembly_contig45_maptochr2_annotateMYCNAndE4.pdf",
height = 2, width=1.5)
paf %>%
filter(ReferenceSequence == "chr2") %>%
ggplot() +
geom_segment(aes(x=QueryStart, xend=QueryEnd, y=ReferenceStart, yend=ReferenceEnd)) +
theme_kons2() +
xlab("De novo assembly") +
ylab("chromosome 2 (hg19)") +
ggtitle("NGP") +
ylim(15000000,18000000) +
ggsave("/Volumes/Elements/MYCNAmplicon/Results/NGP_NanoporeDeNovoAssembly_contig45_maptochr2.pdf",
height = 2, width=1.5)
paf %>%
filter(ReferenceSequence == "chr2") %>%
ggplot() +
geom_segment(aes(y=QueryStart, yend=QueryEnd, x=ReferenceStart, xend=ReferenceEnd)) +
theme_kons2() +
ylab("De novo assembly") +
xlab("chromosome 2 (hg19)") +
ggtitle("NGP") +
ggsave("/Volumes/Elements/MYCNAmplicon/Results/NGP_NanoporeDeNovoAssembly_contig45_maptochr2_Layout2.pdf",
height = 2, width=1.5)
paf %>%
ggplot() +
geom_segment(aes(x=QueryStart, xend=QueryEnd, y=ReferenceStart, yend=ReferenceEnd)) +
theme_kons2() +
xlab("De novo assembly") +
facet_grid(ReferenceSequence ~ .) +
ggtitle("NGP") +
ggsave("/Volumes/Elements/MYCNAmplicon/Results/NGP_NanoporeDeNovoAssembly_contig45_allchr.pdf",
height = 2, width=1.5)
|
#' Piper Diagram
#'
#' Produces a Piper diagram.
#'
#' @details The \code{what} component of the \code{Plot} argument must be either
#' "points" or "none."
#'
#' The units for \code{xCat}, \code{yCat}, \code{zCat}, \code{xAn},
#'\code{yAn}, and \code{zAn} should be in milli-equivalents.
#'
#' @param xCat data for the cation x-axis, generally calcium.
#' @param yCat data for the cation y-axis, generally magnesium.
#' @param zCat data for the cation z-axis, generally sodium plus potassium.
#' @param xAn data for the anion x-axis, generally chloride plus other minor
#' constituents.
#' @param yAn data for the anion y-axis, generally carbonate plus bicarbonate.
#' @param zAn data for the anion z-axis, generally sulfate.
#' @param Plot control parameters of the plot, see \code{link{setMultiPlot}}
#' and \bold{Details} for details.
#' @param axis.range the range of the axes. Must be either c(0, 1) or c(0,
#' 100).
#' @param num.labels the number of labels to draw on each axis. Best selections
#' are 2 giving (0, 100), 3 (0, 50, 100), 5 (0, 25, 50, 75, 100), or 6 (0, 20,
#' 40, 60, 80, 100).
#' @param ticks logical, if \code{TRUE}, then draw ticks.
#' @param grids logical, if \code{TRUE}, then draw grid lines.
#' @param xCat.title title (also called caption) for the cation x-axis.
#' @param yCat.title title (also called caption) for the cation y-axis.
#' @param zCat.title title (also called caption) for the cation z-axis.
#' @param xAn.title title (also called caption) for the anion x-axis.
#' @param yAn.title title (also called caption) for the anion y-axis.
#' @param zAn.title title (also called caption) for the anion z-axis.
#' @param x.yCat.title title for the cation x- and y-axis for the central Piper
#' graph.
#' @param x.zAn.title title for the anion x- and z-axis for the central Piper
#' graph.
#' @param units.title the units titles, should be either "Percent" or
#' "Proportion" depending on \code{axis.range}.
#' @param caption the figure caption.
#' @param margin set up the plot area margins---ignored, included for
#' consistency with other plotting functions in this package.
#' @return Information about the graph and current plot.
#' @note A call should be made to \code{setPage} to set up the graphics
#' environment before calling \code{piperPlot}.
#'
#' @seealso \code{\link{setPage}}, \code{\link{setMultiPlot}},
#' \code{\link{ternaryPlot}}, \code{\link{addPiper}}
#' @references Hem J.D., 1989, Study and interpretation of the chemical
#' characteristics of natural water: U.S. Geological Survey Water-Supply Paper
#' 2254, 263 p.
#' @keywords hplot
#' @examples
#' \dontrun{
#' # See for examples of piperPlot:
#' vignette(topic="PiperPlot", package="smwrGraphs")
#' demo(topic="PiperScript", package="smwrGraphs")
#' }
#' @export piperPlot
piperPlot <- function(xCat, yCat, zCat, xAn, yAn, zAn, # data (need not sum to 100)
                      Plot=list(name="", what="points", type="solid",
                                width="standard", symbol="circle", filled=TRUE,
                                size=0.09, color="black"), # plot controls (for each point)
                      axis.range=c(0,100), num.labels=6, ticks=FALSE,
                      grids=!ticks, # axis controls and labels
                      xCat.title="Calcium", yCat.title="Magnesium",
                      zCat.title="Sodium plus Potassium",
                      xAn.title="Chloride, Fluoride, Nitrite plus Nitrate",
                      yAn.title="Carbonate plus Bicarbonate",
                      zAn.title="Sulfate",
                      x.yCat.title="Calcium plus Magnesium",
                      x.zAn.title="Sulfate plus Chloride",
                      units.title="Percent", # axis titles
                      caption="", # caption
                      margin=c(NA, NA, NA, NA)) {# margin controls (ignored; kept for API consistency)
  # Draws a Piper diagram as three linked subplots on one square device:
  # a cation ternary triangle (lower left), an anion ternary triangle
  # (lower right), and the central diamond. An empty "scaffold" plot spanning
  # [-1, 1] on both axes is drawn first; each subplot is then positioned
  # inside it by padding the margins (see setSubRgn below).
  # Coding history:
  # 2001Mar02 DLLorenz Original coding.
  # 2002Apr05 DLLorenz Add explanation.
  # 2002Jul24 DLLorenz Revise color and symbol argument usage and error trapping.
  # 2004Apr30 DLLorenz Fixed units bug in piperplot.
  # 2008Jul08 DLLorenz Start revisions for pub-quality output
  # 2011Jan22 DLLorenz Conversion to R
  # 2011Apr07 DLLorenz First working version in R
  # 2011Apr16 DLLorenz Added complete complement of args to setMultiPlot
  # 2011May24 DLLorenz Argument documentation
  # 2011Oct19 DLLorenz Fix logical abbrevs and others
  # 2012Feb18 DLLorenz Fixed for specified figure instead of centered
  # 2012Nov02 DLLorenz Tweak ternarySub for centered axis titles
  # 2013Apr09 DLLorenz Added setGD
  # 2014Jun26 DLLorenz Converted to roxygen
  ##
  ## Simple sub region set up function:
  # Repositions the plotting region so it covers exactly the rectangle
  # (xll, yll)-(xur, yur), expressed in the user coordinates of the outer
  # scaffold plot, by padding the current margins. Invisibly returns the
  # resulting full par() state so the caller can record it.
  setSubRgn <- function(xll, yll, xur, yur, fig, curmai, usr, pin) {
    ## Set margins according to the user coordinates in the call
    ## Compute the desired plotting region by setting up new margins
    uin <- pin/c(usr[2] - usr[1], usr[4] - usr[3]) # inches per user unit, x and y
    newmai <- c(curmai[1] + (yll - usr[3]) * uin[2], curmai[2] + (xll - usr[1]) *
                  uin[1], curmai[3] + (usr[4] - yur) * uin[2], curmai[4] +
                  (usr[2] - xur) * uin[1])
    par(fig=fig, mai = newmai, xpd=TRUE)
    invisible(par(no.readonly=TRUE))
  } # End of setSubRgn function
  ## Begin Execution, normalize the data according to range
  ## and revert to old names
  units.max <- axis.range[2]
  # Normalize the cation triples so each observation sums to units.max (100 or 1).
  tsum <- sumComposition(xCat, yCat, zCat, Range=units.max)
  cations <- as.data.frame(tsum) # part of returned data
  names(cations) <- make.names(c(xCat.title, yCat.title, zCat.title))
  ca <- tsum[,1]
  mg <- tsum[,2]
  na.k <- tsum[,3]
  # Same normalization for the anion triples.
  tsum <- sumComposition(xAn, yAn, zAn, Range=units.max)
  anions <- as.data.frame(tsum) # part of returned data
  names(anions) <- make.names(c(xAn.title, yAn.title, zAn.title))
  cl.f.no2.no3 <- tsum[,1]
  co3.hco3 <- tsum[,2]
  so4 <- tsum[,3]
  ## Set up the main plotting axis as square, ranging from -1 to 1 on both axes.
  ## This is manual set up for square plot--larger than the default
  if(dev.cur() == 1)
    setGD("PiperPlot") # no graphics device open yet: set one up
  fin <- par("fin")
  vdist <- fin[2] - 1.24
  hdist <- fin[1] - 0.44
  # Pad margins so the plotting region stays square whatever the device shape.
  if(vdist > hdist)
    mai <- c(1.02 + (vdist - hdist), .22, .22, .22)
  else
    mai <- c(1.02, .22 + (hdist - vdist)/2, .22, .22 + (hdist - vdist)/2)
  par(xaxs="i", yaxs="i", mai=mai, family="USGS")
  fig <- par("fig")
  # Empty scaffold plot; all subplots are placed inside its [-1, 1] x [-1, 1] area.
  plot(c(-1,1),c(-1,1),axes=FALSE,type="n",xlab=units.title,ylab="")
  ## Process plot control
  what=Plot$what[1]
  parms <- setMultiPlot(Plot, length(ca), name="", what='points', type='solid',
                        width='standard', symbol='circle', filled=TRUE,
                        size=0.09, color='black')
  symbol <- parms$current$pch
  color <- parms$current$col
  size <- parms$current$csi
  plot.info <- as.data.frame(parms$current, stringsAsFactors=FALSE) # part of returned data
  adjSize <- par('csi') # This just happens to work out nicely!
  adjHei <- (1 - adjSize) * sqrt(3) / 2 # height of an equilateral triangle of side (1 - adjSize)
  ## Plot the ternary diagrams and get the x-y data
  oldpars <- par(no.readonly=TRUE)
  ## Set up subregion for cation plot
  catplot <- setSubRgn(-1, -1, -adjSize, adjHei - 1, fig, mai,
                       oldpars$usr, oldpars$pin)
  catplot$usr <- ternarySubplot(ca, mg, na.k, what, symbol, color, size,
                                axis.range=axis.range, num.labels=num.labels,
                                ticks=ticks,grids=grids,
                                xtitle=xCat.title, ytitle=yCat.title,
                                ztitle=zCat.title)
  # Second call with plot=FALSE returns the x-y coordinates for the return value.
  cations <- cbind(cations, ternarySubplot(ca, mg, na.k, axis.range=axis.range,
                                           plot=FALSE))
  par(oldpars) # Reset to original
  # Anion triangle in the lower-right subregion (orient="a").
  anplot <- setSubRgn(adjSize,-1, 1, adjHei - 1, fig, mai,
                      oldpars$usr, oldpars$pin)
  anplot$usr <- ternarySubplot(cl.f.no2.no3, co3.hco3, so4, what, symbol,
                               color, size, axis.range=axis.range,
                               num.labels=num.labels, ticks=ticks,
                               grids=grids, orient="a",
                               xtitle=xAn.title,ytitle=yAn.title, ztitle=zAn.title)
  anions <- cbind(anions, ternarySubplot(cl.f.no2.no3, co3.hco3, so4,
                                         axis.range=axis.range, orient="a",
                                         plot=FALSE))
  ## Plot the piper diagram
  y <- so4 + cl.f.no2.no3
  adjBot <- adjSize*sqrt(3) - 1
  adjWid <- (.732 - adjBot)/2/sqrt(3)
  par(oldpars)
  # Central diamond, centered horizontally above the two triangles.
  piperplot <- setSubRgn(-adjWid, adjBot, adjWid, .732, fig, mai,
                         oldpars$usr, oldpars$pin)
  piperplot$usr <- piperSubplot(na.k, y, what, symbol, color, size,
                                axis.range=axis.range, num.labels=num.labels,
                                ticks=ticks, grids=grids, x1title="",
                                y1title=x.zAn.title, x2title=x.yCat.title,
                                y2title="")
  piper <- as.data.frame(piperSubplot(na.k, y, axis.range=axis.range,
                                      plot=FALSE))
  ## Finish the axis labels and caption
  par(oldpars)
  par(fig=fig)
  toff <- 0.5 + par("csi")
  # Angled units labels (srt = 60 and 300 degrees), presumably aligned with
  # the sloped axes of the central diamond -- confirm against rendered output.
  text(x=-toff, y=0.0, labels=units.title, srt=60, family="USGS")
  text(x=toff, y=0.0, labels=units.title, srt=300, family="USGS")
  if(caption != "")
    mtext(text=caption, side=1, line=toff - 0.5,
          at=par("usr")[1], adj=0, family="USGS")
  ## Return
  # Subplot par() states are included so later calls (see addPiper in the
  # roxygen seealso) can add to each subregion; returned invisibly.
  retval <- list(cations=cations, anions=anions, piper=piper, plotInfo=plot.info,
                 axis.range=axis.range, catplot=catplot, anplot=anplot,
                 piperplot=piperplot, explanation=parms$Explan)
  invisible(retval)
}
| /R/piperPlot.R | permissive | ldecicco-USGS/smwrGraphs | R | false | false | 9,676 | r | #' Piper Diagram
#'
#' Produces a Piper diagram.
#'
#' @details The \code{what} component of the \code{Plot} argument must be either
#' "points" or "none."
#'
#' The units for \code{xCat}, \code{yCat}, \code{zCat}, \code{xAn},
#'\code{yAn}, and \code{zAn} should be in milli-equivalents.
#'
#' @param xCat data for the cation x-axis, generally calcium.
#' @param yCat data for the cation y-axis, generally magnesium.
#' @param zCat data for the cation z-axis, generally sodium plus potassium.
#' @param xAn data for the anion x-axis, generally chloride plus other minor
#' constituents.
#' @param yAn data for the anion y-axis, generally carbonate plus bicarbonate.
#' @param zAn data for the anion z-axis, generally sulfate.
#' @param Plot control parameters of the plot, see \code{link{setMultiPlot}}
#' and \bold{Details} for details.
#' @param axis.range the range of the axes. Must be either c(0, 1) or c(0,
#' 100).
#' @param num.labels the number of labels to draw on each axis. Best selections
#' are 2 giving (0, 100), 3 (0, 50, 100), 5 (0, 25, 50, 75, 100), or 6 (0, 20,
#' 40, 60, 80, 100).
#' @param ticks logical, if \code{TRUE}, then draw ticks.
#' @param grids logical, if \code{TRUE}, then draw grid lines.
#' @param xCat.title title (also called caption) for the cation x-axis.
#' @param yCat.title title (also called caption) for the cation y-axis.
#' @param zCat.title title (also called caption) for the cation z-axis.
#' @param xAn.title title (also called caption) for the anion x-axis.
#' @param yAn.title title (also called caption) for the anion y-axis.
#' @param zAn.title title (also called caption) for the anion z-axis.
#' @param x.yCat.title title for the cation x- and y-axis for the central Piper
#' graph.
#' @param x.zAn.title title for the anion x- and z-axis for the central Piper
#' graph.
#' @param units.title the units titles, should be either "Percent" or
#' "Proportion" depending on \code{axis.range}.
#' @param caption the figure caption.
#' @param margin set up the plot area margins---ignored, included for
#' consistency with other plotting functions in this package.
#' @return Information about the graph and current plot.
#' @note A call should be made to \code{setPage} to set up the graphics
#' environment before calling \code{piperPlot}.
#'
#' @seealso \code{\link{setPage}}, \code{\link{setMultiPlot}},
#' \code{\link{ternaryPlot}}, \code{\link{addPiper}}
#' @references Hem J.D., 1989, Study and interpretation of the chemical
#' characteristics of natural water: U.S. Geological Survey Water-Supply Paper
#' 2254, 263 p.
#' @keywords hplot
#' @examples
#' \dontrun{
#' # See for examples of piperPlot:
#' vignette(topic="PiperPlot", package="smwrGraphs")
#' demo(topic="PiperScript", package="smwrGraphs")
#' }
#' @export piperPlot
piperPlot <- function(xCat, yCat, zCat, xAn, yAn, zAn, # data (need not sum to 100)
                      Plot=list(name="", what="points", type="solid",
                                width="standard", symbol="circle", filled=TRUE,
                                size=0.09, color="black"), # plot controls (for each point)
                      axis.range=c(0,100), num.labels=6, ticks=FALSE,
                      grids=!ticks, # axis controls and labels
                      xCat.title="Calcium", yCat.title="Magnesium",
                      zCat.title="Sodium plus Potassium",
                      xAn.title="Chloride, Fluoride, Nitrite plus Nitrate",
                      yAn.title="Carbonate plus Bicarbonate",
                      zAn.title="Sulfate",
                      x.yCat.title="Calcium plus Magnesium",
                      x.zAn.title="Sulfate plus Chloride",
                      units.title="Percent", # axis titles
                      caption="", # caption
                      margin=c(NA, NA, NA, NA)) {# margin controls (ignored; kept for API consistency)
  # Draws a Piper diagram as three linked subplots on one square device:
  # a cation ternary triangle (lower left), an anion ternary triangle
  # (lower right), and the central diamond. An empty "scaffold" plot spanning
  # [-1, 1] on both axes is drawn first; each subplot is then positioned
  # inside it by padding the margins (see setSubRgn below).
  # Coding history:
  # 2001Mar02 DLLorenz Original coding.
  # 2002Apr05 DLLorenz Add explanation.
  # 2002Jul24 DLLorenz Revise color and symbol argument usage and error trapping.
  # 2004Apr30 DLLorenz Fixed units bug in piperplot.
  # 2008Jul08 DLLorenz Start revisions for pub-quality output
  # 2011Jan22 DLLorenz Conversion to R
  # 2011Apr07 DLLorenz First working version in R
  # 2011Apr16 DLLorenz Added complete complement of args to setMultiPlot
  # 2011May24 DLLorenz Argument documentation
  # 2011Oct19 DLLorenz Fix logical abbrevs and others
  # 2012Feb18 DLLorenz Fixed for specified figure instead of centered
  # 2012Nov02 DLLorenz Tweak ternarySub for centered axis titles
  # 2013Apr09 DLLorenz Added setGD
  # 2014Jun26 DLLorenz Converted to roxygen
  ##
  ## Simple sub region set up function:
  # Repositions the plotting region so it covers exactly the rectangle
  # (xll, yll)-(xur, yur), expressed in the user coordinates of the outer
  # scaffold plot, by padding the current margins. Invisibly returns the
  # resulting full par() state so the caller can record it.
  setSubRgn <- function(xll, yll, xur, yur, fig, curmai, usr, pin) {
    ## Set margins according to the user coordinates in the call
    ## Compute the desired plotting region by setting up new margins
    uin <- pin/c(usr[2] - usr[1], usr[4] - usr[3]) # inches per user unit, x and y
    newmai <- c(curmai[1] + (yll - usr[3]) * uin[2], curmai[2] + (xll - usr[1]) *
                  uin[1], curmai[3] + (usr[4] - yur) * uin[2], curmai[4] +
                  (usr[2] - xur) * uin[1])
    par(fig=fig, mai = newmai, xpd=TRUE)
    invisible(par(no.readonly=TRUE))
  } # End of setSubRgn function
  ## Begin Execution, normalize the data according to range
  ## and revert to old names
  units.max <- axis.range[2]
  # Normalize the cation triples so each observation sums to units.max (100 or 1).
  tsum <- sumComposition(xCat, yCat, zCat, Range=units.max)
  cations <- as.data.frame(tsum) # part of returned data
  names(cations) <- make.names(c(xCat.title, yCat.title, zCat.title))
  ca <- tsum[,1]
  mg <- tsum[,2]
  na.k <- tsum[,3]
  # Same normalization for the anion triples.
  tsum <- sumComposition(xAn, yAn, zAn, Range=units.max)
  anions <- as.data.frame(tsum) # part of returned data
  names(anions) <- make.names(c(xAn.title, yAn.title, zAn.title))
  cl.f.no2.no3 <- tsum[,1]
  co3.hco3 <- tsum[,2]
  so4 <- tsum[,3]
  ## Set up the main plotting axis as square, ranging from -1 to 1 on both axes.
  ## This is manual set up for square plot--larger than the default
  if(dev.cur() == 1)
    setGD("PiperPlot") # no graphics device open yet: set one up
  fin <- par("fin")
  vdist <- fin[2] - 1.24
  hdist <- fin[1] - 0.44
  # Pad margins so the plotting region stays square whatever the device shape.
  if(vdist > hdist)
    mai <- c(1.02 + (vdist - hdist), .22, .22, .22)
  else
    mai <- c(1.02, .22 + (hdist - vdist)/2, .22, .22 + (hdist - vdist)/2)
  par(xaxs="i", yaxs="i", mai=mai, family="USGS")
  fig <- par("fig")
  # Empty scaffold plot; all subplots are placed inside its [-1, 1] x [-1, 1] area.
  plot(c(-1,1),c(-1,1),axes=FALSE,type="n",xlab=units.title,ylab="")
  ## Process plot control
  what=Plot$what[1]
  parms <- setMultiPlot(Plot, length(ca), name="", what='points', type='solid',
                        width='standard', symbol='circle', filled=TRUE,
                        size=0.09, color='black')
  symbol <- parms$current$pch
  color <- parms$current$col
  size <- parms$current$csi
  plot.info <- as.data.frame(parms$current, stringsAsFactors=FALSE) # part of returned data
  adjSize <- par('csi') # This just happens to work out nicely!
  adjHei <- (1 - adjSize) * sqrt(3) / 2 # height of an equilateral triangle of side (1 - adjSize)
  ## Plot the ternary diagrams and get the x-y data
  oldpars <- par(no.readonly=TRUE)
  ## Set up subregion for cation plot
  catplot <- setSubRgn(-1, -1, -adjSize, adjHei - 1, fig, mai,
                       oldpars$usr, oldpars$pin)
  catplot$usr <- ternarySubplot(ca, mg, na.k, what, symbol, color, size,
                                axis.range=axis.range, num.labels=num.labels,
                                ticks=ticks,grids=grids,
                                xtitle=xCat.title, ytitle=yCat.title,
                                ztitle=zCat.title)
  # Second call with plot=FALSE returns the x-y coordinates for the return value.
  cations <- cbind(cations, ternarySubplot(ca, mg, na.k, axis.range=axis.range,
                                           plot=FALSE))
  par(oldpars) # Reset to original
  # Anion triangle in the lower-right subregion (orient="a").
  anplot <- setSubRgn(adjSize,-1, 1, adjHei - 1, fig, mai,
                      oldpars$usr, oldpars$pin)
  anplot$usr <- ternarySubplot(cl.f.no2.no3, co3.hco3, so4, what, symbol,
                               color, size, axis.range=axis.range,
                               num.labels=num.labels, ticks=ticks,
                               grids=grids, orient="a",
                               xtitle=xAn.title,ytitle=yAn.title, ztitle=zAn.title)
  anions <- cbind(anions, ternarySubplot(cl.f.no2.no3, co3.hco3, so4,
                                         axis.range=axis.range, orient="a",
                                         plot=FALSE))
  ## Plot the piper diagram
  y <- so4 + cl.f.no2.no3
  adjBot <- adjSize*sqrt(3) - 1
  adjWid <- (.732 - adjBot)/2/sqrt(3)
  par(oldpars)
  # Central diamond, centered horizontally above the two triangles.
  piperplot <- setSubRgn(-adjWid, adjBot, adjWid, .732, fig, mai,
                         oldpars$usr, oldpars$pin)
  piperplot$usr <- piperSubplot(na.k, y, what, symbol, color, size,
                                axis.range=axis.range, num.labels=num.labels,
                                ticks=ticks, grids=grids, x1title="",
                                y1title=x.zAn.title, x2title=x.yCat.title,
                                y2title="")
  piper <- as.data.frame(piperSubplot(na.k, y, axis.range=axis.range,
                                      plot=FALSE))
  ## Finish the axis labels and caption
  par(oldpars)
  par(fig=fig)
  toff <- 0.5 + par("csi")
  # Angled units labels (srt = 60 and 300 degrees), presumably aligned with
  # the sloped axes of the central diamond -- confirm against rendered output.
  text(x=-toff, y=0.0, labels=units.title, srt=60, family="USGS")
  text(x=toff, y=0.0, labels=units.title, srt=300, family="USGS")
  if(caption != "")
    mtext(text=caption, side=1, line=toff - 0.5,
          at=par("usr")[1], adj=0, family="USGS")
  ## Return
  # Subplot par() states are included so later calls (see addPiper in the
  # roxygen seealso) can add to each subregion; returned invisibly.
  retval <- list(cations=cations, anions=anions, piper=piper, plotInfo=plot.info,
                 axis.range=axis.range, catplot=catplot, anplot=anplot,
                 piperplot=piperplot, explanation=parms$Explan)
  invisible(retval)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/write_results.R
\name{write_results}
\alias{write_results}
\title{Write a data frame as a .xlsx, .xls, or .csv file}
\usage{
write_results(data, file_name, digits)
}
\arguments{
\item{data}{Data frame to be written to a directory.}
\item{file_name}{Character string naming the output file. Required.}
\item{digits}{Integer indicating the number of decimal places or significant digits used to round flow values. Use follows
that of base::round() digits argument.}
}
\description{
Write a data frame to a directory with all numbers rounded to specified digits. Can write as .xls, .xlsx, or .csv
file types. Writing as .xlsx or .xls uses the 'writexl' package.
}
\examples{
\dontrun{
# Working examples:
# Example data to write
data_results <- calc_longterm_daily_stats(station_number = c("08HA002", "08HA011"),
start_year = 1971, end_year = 2000)
# Write the data and round numbers to 1 decimal place
write_results(data = data_results,
file_name = "Cowichan River Long-term Flows (1971-2000).xlsx",
digits = 1)
}
}
| /man/write_results.Rd | permissive | gravitytrope/fasstr | R | false | true | 1,189 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/write_results.R
\name{write_results}
\alias{write_results}
\title{Write a data frame as a .xlsx, .xls, or .csv file}
\usage{
write_results(data, file_name, digits)
}
\arguments{
\item{data}{Data frame to be written to a directory.}
\item{file_name}{Character string naming the output file. Required.}
\item{digits}{Integer indicating the number of decimal places or significant digits used to round flow values. Use follows
that of base::round() digits argument.}
}
\description{
Write a data frame to a directory with all numbers rounded to specified digits. Can write as .xls, .xlsx, or .csv
file types. Writing as .xlsx or .xls uses the 'writexl' package.
}
\examples{
\dontrun{
# Working examples:
# Example data to write
data_results <- calc_longterm_daily_stats(station_number = c("08HA002", "08HA011"),
start_year = 1971, end_year = 2000)
# Write the data and round numbers to 1 decimal place
write_results(data = data_results,
file_name = "Cowichan River Long-term Flows (1971-2000).xlsx",
digits = 1)
}
}
|
library(SpatialExtremes)
### Name: USHCNTemp
### Title: Summer/Winter annual maxima/minima temperature in continental
### US.
### Aliases: USHCNTemp
### Keywords: datasets
### ** Examples
data(USHCNTemp)
require(maps)
maps::map("usa")
points(metadata[,2:3], pch = 15)
| /data/genthat_extracted_code/SpatialExtremes/examples/USHCNTemp.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 277 | r | library(SpatialExtremes)
### Name: USHCNTemp
### Title: Summer/Winter annual maxima/minima temperature in continental
### US.
### Aliases: USHCNTemp
### Keywords: datasets
### ** Examples
data(USHCNTemp)
require(maps)
maps::map("usa")
points(metadata[,2:3], pch = 15)
|
root.dir <- "~/Documents/U/ubb/sccc/math146"
data.dir <- paste(root.dir, "data/bps/PC-Text", sep = "/")
hw3.data.dir <- paste(data.dir, "ch03", sep = "/")
hw.dir <- paste(root.dir, "week03/hw", sep = "/")
figures.dir <- paste(hw.dir, "figures", sep = "/")
setwd(hw3.data.dir)
# exercise 25
# Exercise 25: interval within two standard deviations of the mean
# (the ~95% range of the 68-95-99.7 rule). Returns c(lower, upper).
ex25.range <- function(mean, sd) {
  half_width <- 2 * sd
  c(mean - half_width, mean + half_width)
}
ex25.range(373, 67)
# exercise 27
pnorm(70, 100, 15)
# exercise 28
plot <- ggplot(data.frame(x = c(-25, 525)), aes(x = x)) +
stat_function(fun = function(x) dnorm(x, 250, 176)) +
stat_function(fun = limit.domain(function(x) dnorm(x, 250, 176), -25, 25),
geom = "area", fill = "lightblue") +
stat_function(fun = limit.domain(function(x) dnorm(x, 250, 176), 475, 525),
geom = "area", fill = "lightblue")
print(plot)
ggsave(paste(figures.dir, 'ex28c.pdf', sep = '/'))
# exercise 46
ex46 <- read.delim("ex03-46.dat", header = TRUE, sep = '\t')
str(ex46)
plot <- ggplot(ex46, aes(x = IQ)) +
geom_histogram(fill = "lightblue", color = "black", binwidth = 10) +
labs(x = "IQ", y = "Count") +
ggtitle("IQ")
print(plot)
ggsave("~/Documents/U/ubb/sccc/math146/week03/hw/figures/ex46.eps", width = 4, height = 2.5)
iq <- ex46$IQ
round(mean(iq), digits = 2)
round(sd(iq), digits = 2)
ex46.expected.1sd <- round( .68 * 31 )
ex46.actual.1sd <- length(iq[mean(iq) - 1 * sd(iq) < iq & iq < mean(iq) + 1 * sd(iq)])
ex46.expected.2sd <- round( .95 * 31 )
ex46.actual.2sd <- length(iq[mean(iq) - 2 * sd(iq) < iq & iq < mean(iq) + 2 * sd(iq)])
ex46.expected.1sd
ex46.actual.1sd
ex46.expected.2sd
ex46.actual.2sd
round(mean(iq) - 2 * sd(iq), digits = 2)
# exercise 48
ex48 <- c(4.33, 5.05, 5.44, 5.79, 6.81)  # summary values from the exercise
diff(ex48)  # gaps between successive values
# Standardize against the N(5.43, 0.54) reference distribution.
z1 <- round((5.05 - 5.43)/0.54, digits = 4)
round(pnorm(z1), 4)  # fixed: original called pnorm(z2) before z2 was defined
z2 <- round((5.79 - 5.43)/0.54, digits = 4)
round(pnorm(z2), 4)
z2
# Same probabilities computed directly, without pre-rounding the z-scores.
pnorm(5.05, 5.43, 0.54)
pnorm(5.79, 5.43, 0.54)
| /sccc/math146/week03/hw/hw03.R | no_license | etellman/ubb | R | false | false | 1,913 | r | root.dir <- "~/Documents/U/ubb/sccc/math146"
data.dir <- paste(root.dir, "data/bps/PC-Text", sep = "/")
hw3.data.dir <- paste(data.dir, "ch03", sep = "/")
hw.dir <- paste(root.dir, "week03/hw", sep = "/")
figures.dir <- paste(hw.dir, "figures", sep = "/")
setwd(hw3.data.dir)
# exercise 25
ex25.range <- function(mean, sd) {
c(mean - 2 * sd, mean + 2 * sd)
}
ex25.range(373, 67)
# exercise 27
pnorm(70, 100, 15)
# exercise 28
plot <- ggplot(data.frame(x = c(-25, 525)), aes(x = x)) +
stat_function(fun = function(x) dnorm(x, 250, 176)) +
stat_function(fun = limit.domain(function(x) dnorm(x, 250, 176), -25, 25),
geom = "area", fill = "lightblue") +
stat_function(fun = limit.domain(function(x) dnorm(x, 250, 176), 475, 525),
geom = "area", fill = "lightblue")
print(plot)
ggsave(paste(figures.dir, 'ex28c.pdf', sep = '/'))
# exercise 46
ex46 <- read.delim("ex03-46.dat", header = TRUE, sep = '\t')
str(ex46)
plot <- ggplot(ex46, aes(x = IQ)) +
geom_histogram(fill = "lightblue", color = "black", binwidth = 10) +
labs(x = "IQ", y = "Count") +
ggtitle("IQ")
print(plot)
ggsave("~/Documents/U/ubb/sccc/math146/week03/hw/figures/ex46.eps", width = 4, height = 2.5)
iq <- ex46$IQ
round(mean(iq), digits = 2)
round(sd(iq), digits = 2)
ex46.expected.1sd <- round( .68 * 31 )
ex46.actual.1sd <- length(iq[mean(iq) - 1 * sd(iq) < iq & iq < mean(iq) + 1 * sd(iq)])
ex46.expected.2sd <- round( .95 * 31 )
ex46.actual.2sd <- length(iq[mean(iq) - 2 * sd(iq) < iq & iq < mean(iq) + 2 * sd(iq)])
ex46.expected.1sd
ex46.actual.1sd
ex46.expected.2sd
ex46.actual.2sd
round(mean(iq) - 2 * sd(iq), digits = 2)
# exercise 48
ex48 <- c(4.33, 5.05, 5.44, 5.79, 6.81)
diff(ex48)
z1 <- round((5.05 - 5.43)/0.54, digits = 4)
round(pnorm(z2), 4)
z2 <- round((5.79 - 5.43)/0.54, digits = 4)
round(pnorm(z2), 4)
z2
pnorm(5.05, 5.43, 0.54)
pnorm(5.79, 5.43, 0.54)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/is_effectsize_name.R
\name{is_effectsize_name}
\alias{is_effectsize_name}
\alias{get_effectsize_name}
\alias{get_effectsize_label}
\title{Checks for a Valid Effect Size Name}
\usage{
is_effectsize_name(x, ignore_case = TRUE)
get_effectsize_name(x, ignore_case = TRUE)
get_effectsize_label(
x,
ignore_case = TRUE,
use_symbols = getOption("es.use_symbols", FALSE)
)
}
\arguments{
\item{x}{A character, or a vector.}
\item{ignore_case}{Should case of input be ignored?}
\item{use_symbols}{Should proper symbols be printed (\code{TRUE}) instead of
transliterated effect size names (\code{FALSE}). See \link{effectsize_options}.}
}
\description{
For use by other functions and packages.
}
| /man/is_effectsize_name.Rd | no_license | cran/effectsize | R | false | true | 801 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/is_effectsize_name.R
\name{is_effectsize_name}
\alias{is_effectsize_name}
\alias{get_effectsize_name}
\alias{get_effectsize_label}
\title{Checks for a Valid Effect Size Name}
\usage{
is_effectsize_name(x, ignore_case = TRUE)
get_effectsize_name(x, ignore_case = TRUE)
get_effectsize_label(
x,
ignore_case = TRUE,
use_symbols = getOption("es.use_symbols", FALSE)
)
}
\arguments{
\item{x}{A character, or a vector.}
\item{ignore_case}{Should case of input be ignored?}
\item{use_symbols}{Should proper symbols be printed (\code{TRUE}) instead of
transliterated effect size names (\code{FALSE}). See \link{effectsize_options}.}
}
\description{
For use by other functions and packages.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/OMEROServer.R
\name{getProjects}
\alias{getProjects}
\title{Get all projects of the logged in user}
\usage{
getProjects(server)
}
\arguments{
\item{server}{The server}
}
\value{
The projects. See also \linkS4class{Project}.
}
\description{
Get all projects of the logged in user
}
| /man/getProjects.Rd | no_license | manics/rOMERO-gateway | R | false | true | 357 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/OMEROServer.R
\name{getProjects}
\alias{getProjects}
\title{Get all projects of the logged in user}
\usage{
getProjects(server)
}
\arguments{
\item{server}{The server}
}
\value{
The projects @seealso \linkS4class{Project}
}
\description{
Get all projects of the logged in user
}
|
#------------------------------------------------
#' @title Draw from Dirichlet distribution
#'
#' @description Draw one sample from the Dirichlet distribution given a vector
#'   of shape parameters. The result is a probability vector summing to one.
#'
#' @param shape vector of shape parameters.
#'
#' @importFrom stats rgamma
#' @export
rdirichlet <- function(shape = rep(1,3)) {
  # A Dirichlet draw is a vector of independent Gamma(shape_i, rate) deviates
  # normalised to sum to one. Any common rate cancels in the normalisation,
  # so use rate 1 instead of the previous, arbitrary max(shape) -- which read
  # as if the rate mattered to the result.
  x <- rgamma(length(shape), shape = shape, rate = 1)
  return(x/sum(x))
}
#------------------------------------------------
#' @title Draw from Beta-binomial distribution
#'
#' @description Draw from the Beta-binomial distribution by compound sampling:
#'   a success probability is drawn from Beta(alpha, beta) for each replicate,
#'   then a binomial count is drawn at that probability.
#'
#' @param n number of draws.
#' @param k number of binomial trials.
#' @param alpha first shape parameter of beta distribution.
#' @param beta second shape parameter of beta distribution.
#'
#' @importFrom stats rbeta rbinom
#' @export
rbetabinom <- function(n = 1, k = 10, alpha = 1, beta = 1) {
  # One Beta-distributed success probability per replicate.
  success.prob <- rbeta(n = n, shape1 = alpha, shape2 = beta)
  # Binomial counts conditional on each replicate's probability.
  rbinom(n = n, size = k, prob = success.prob)
}
| /R/probability_v2.R | permissive | bobverity/bobfunctions2 | R | false | false | 972 | r |
#------------------------------------------------
#' @title Draw from Dirichlet distribution
#'
#' @description Draw from Dirichlet distribution given a vector of shape
#' parameters.
#'
#' @param shape vector of shape parameters.
#'
#' @importFrom stats rgamma
#' @export
rdirichlet <- function(shape = rep(1,3)) {
x <- rgamma(length(shape), shape = shape, rate = max(shape))
return(x/sum(x))
}
#------------------------------------------------
#' @title Draw from Beta-binomial distribution
#'
#' @description Draw from Beta-binomial distribution.
#'
#' @param n number of draws.
#' @param k number of binomial trials.
#' @param alpha first shape parameter of beta distribution.
#' @param beta second shape parameter of beta distribution.
#'
#' @importFrom stats rbeta rbinom
#' @export
rbetabinom <- function(n = 1, k = 10, alpha = 1, beta = 1) {
p <- rbeta(n = n, shape1 = alpha, shape2 = beta)
ret <- rbinom(n = n, size = k, prob = p)
return(ret)
}
|
# 01-05-2020
# Week 3
# Peer-graded assignment: R Markdown Presentation & Plotly
# Read car and truck values from tab-delimited autos.dat
autos_data <- read.table("C:/R/autos.dat", header=TRUE, sep="\t")
# Compute the largest y value used in the data (or we could
# just use range again)
max_y <- max(autos_data)
# Define colors to be used for cars, trucks, suvs
plot_colors <- c("blue","red","forestgreen")
# Start PNG device driver to save output to figure.png
png(filename="C:/R/figure.png", height=295, width=300,
bg="white")
# Graph autos using y axis that ranges from 0 to max_y.
# Turn off axes and annotations (axis labels) so we can
# specify them ourself
plot(autos_data$cars, type="o", col=plot_colors[1],
ylim=c(0,max_y), axes=FALSE, ann=FALSE)
# Make x axis using Mon-Fri labels
axis(1, at=1:5, lab=c("Mon", "Tue", "Wed", "Thu", "Fri"))
# Make y axis with horizontal labels that display ticks at
# every 4 marks. 4*0:max_y is equivalent to c(0,4,8,12).
axis(2, las=1, at=4*0:max_y)
# Create box around plot
box()
# Graph trucks with red dashed line and square points
lines(autos_data$trucks, type="o", pch=22, lty=2,
col=plot_colors[2])
# Graph suvs with green dotted line and diamond points
lines(autos_data$suvs, type="o", pch=23, lty=3,
col=plot_colors[3])
# Create a title with a red, bold/italic font
title(main="Autos", col.main="red", font.main=4)
# Label the x and y axes with dark green text
title(xlab= "Days", col.lab=rgb(0,0.5,0))
title(ylab= "Total", col.lab=rgb(0,0.5,0))
# Create a legend at (1, max_y) that is slightly smaller
# (cex) and uses the same line colors and points used by
# the actual plots
legend(1, max_y, names(autos_data), cex=0.8, col=plot_colors,
pch=21:23, lty=1:3);
# Turn off device driver (to flush output to png)
dev.off()
# Fix: the line below was bare prose, which is a syntax error that aborts
# parsing of the whole file; keep it as a comment.
# Graph plotting data from autos.dat
| /markdown presentation plotly.r | no_license | SiddhikaArun/R-Markdown-Presentation-Plotly | R | false | false | 1,858 | r | # 01-05-2020
# Week 3
#Peer-graded assigment: R Markdown Presentation & Plotly
#Read car and truck values from tab-delimited autos.dat
autos_data <- read.table("C:/R/autos.dat", header=T, sep="\t")
# Compute the largest y value used in the data (or we could
# just use range again)
max_y <- max(autos_data)
# Define colors to be used for cars, trucks, suvs
plot_colors <- c("blue","red","forestgreen")
# Start PNG device driver to save output to figure.png
png(filename="C:/R/figure.png", height=295, width=300,
bg="white")
# Graph autos using y axis that ranges from 0 to max_y.
# Turn off axes and annotations (axis labels) so we can
# specify them ourself
plot(autos_data$cars, type="o", col=plot_colors[1],
ylim=c(0,max_y), axes=FALSE, ann=FALSE)
# Make x axis using Mon-Fri labels
axis(1, at=1:5, lab=c("Mon", "Tue", "Wed", "Thu", "Fri"))
# Make y axis with horizontal labels that display ticks at
# every 4 marks. 4*0:max_y is equivalent to c(0,4,8,12).
axis(2, las=1, at=4*0:max_y)
# Create box around plot
box()
# Graph trucks with red dashed line and square points
lines(autos_data$trucks, type="o", pch=22, lty=2,
col=plot_colors[2])
# Graph suvs with green dotted line and diamond points
lines(autos_data$suvs, type="o", pch=23, lty=3,
col=plot_colors[3])
# Create a title with a red, bold/italic font
title(main="Autos", col.main="red", font.main=4)
# Label the x and y axes with dark green text
title(xlab= "Days", col.lab=rgb(0,0.5,0))
title(ylab= "Total", col.lab=rgb(0,0.5,0))
# Create a legend at (1, max_y) that is slightly smaller
# (cex) and uses the same line colors and points used by
# the actual plots
legend(1, max_y, names(autos_data), cex=0.8, col=plot_colors,
pch=21:23, lty=1:3);
# Turn off device driver (to flush output to png)
dev.off()
Graph plotting data from autos.dat
|
# Load GML networks, merge them, and annotate nodes with Entrez IDs, gene
# symbols and GO Biological Process terms (mouse annotation, org.Mm.eg.db).
library(igraph)
# NOTE(review): listFiles() and mergeGraphs() are presumably defined in
# code/vars.R, along with outPath and dataPath -- confirm.
source("code/vars.R")
myOutPath = paste0(outPath, 'networks/');
netPath = paste0(dataPath, 'networks/')
# Cache the merged, annotated graphs so the expensive Bioconductor
# annotation step only runs once.
cacheFile = paste(myOutPath, "graphs.RData", sep="")
if(file.exists(cacheFile)) {
load(cacheFile)
} else {
## Read network
files = listFiles(netPath, "gml")
graphs = lapply(files, read.graph, "gml")
names(graphs) = files
graph = mergeGraphs(graphs)
## Add gene symbols
library(org.Mm.eg.db)
library(GO.db)
# Node names carry an "L:" prefix; strip it to obtain Entrez gene IDs.
ids = sub("L:", "", V(graph)$name)
V(graph)$entrez = ids
V(graph)$label = as.character(as.list(org.Mm.egSYMBOL)[ids])
## Add GO annotation
goids = lapply(as.list(org.Mm.egGO)[ids], names)
id2term = Term(GOTERM)
id2ont = Ontology(GOTERM)
# Keep Biological Process terms only, dropping the root BP term GO:0008150.
go.bp = lapply(goids, function(g) {
g = setdiff(g, "GO:0008150")
id2term[g[id2ont[g] == "BP"]]
})
V(graph)$goids = sapply(goids, paste, collapse=";")
V(graph)$goBP = sapply(go.bp, paste, collapse=";")
# Keep the directed original, then work with a simplified undirected copy.
graph.full = graph
graph = as.undirected(graph.full, "each")
graph = simplify(graph)
save(graph.full, graph, file=cacheFile)
}
| /code/loadNetworks.R | permissive | thomaskelder/ADT-liver-network | R | false | false | 1,060 | r | library(igraph)
source("code/vars.R")
myOutPath = paste0(outPath, 'networks/');
netPath = paste0(dataPath, 'networks/')
cacheFile = paste(myOutPath, "graphs.RData", sep="")
if(file.exists(cacheFile)) {
load(cacheFile)
} else {
## Read network
files = listFiles(netPath, "gml")
graphs = lapply(files, read.graph, "gml")
names(graphs) = files
graph = mergeGraphs(graphs)
## Add gene symbols
library(org.Mm.eg.db)
library(GO.db)
ids = sub("L:", "", V(graph)$name)
V(graph)$entrez = ids
V(graph)$label = as.character(as.list(org.Mm.egSYMBOL)[ids])
## Add GO annotation
goids = lapply(as.list(org.Mm.egGO)[ids], names)
id2term = Term(GOTERM)
id2ont = Ontology(GOTERM)
go.bp = lapply(goids, function(g) {
g = setdiff(g, "GO:0008150")
id2term[g[id2ont[g] == "BP"]]
})
V(graph)$goids = sapply(goids, paste, collapse=";")
V(graph)$goBP = sapply(go.bp, paste, collapse=";")
graph.full = graph
graph = as.undirected(graph.full, "each")
graph = simplify(graph)
save(graph.full, graph, file=cacheFile)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/yingtools2.R
\name{pretty_scientific}
\alias{pretty_scientific}
\title{Fancy Scientific Notation}
\usage{
pretty_scientific(l, parse = TRUE)
}
\arguments{
\item{l}{numeric vector to be formatted.}

\item{parse}{logical; if \code{TRUE} (the default), the labels are returned as parsed plotmath expressions rather than character strings.}
}
\value{
Expression for l, in scientific notation.
}
\description{
Use to format axes in ggplot with scientific notation.
}
\examples{
pretty_scientific(c(111e-12,230000022.11111,0.1234567))
ggplot(mtcars,aes(mpg*1e-6)) + geom_bar() + scale_x_continuous(label=pretty_scientific)
}
| /man/pretty_scientific.Rd | no_license | ying14/yingtools2 | R | false | true | 556 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/yingtools2.R
\name{pretty_scientific}
\alias{pretty_scientific}
\title{Fancy Scientific Notation}
\usage{
pretty_scientific(l, parse = TRUE)
}
\arguments{
\item{l}{number vector to be formatted.}
}
\value{
Expression for l, in scientific notation.
}
\description{
Use to format axes in ggplot with scientific notation.
}
\examples{
pretty_scientific(c(111e-12,230000022.11111,0.1234567))
ggplot(mtcars,aes(mpg*1e-6)) + geom_bar() + scale_x_continuous(label=pretty_scientific)
}
|
#' Convert rows of a matrix to images
#'
#' Unmasks rows of a matrix and writes as images.
#'
#'
#' @param dataMatrix A matrix such as that created by \code{imagesToMatrix};
#' each row holds the values for the masked voxels of one image.
#' @param mask An \code{antsImage} containing a binary mask.
#' Rows of the matrix
#' are unmasked and written as images. The mask defines the output image space.
#' @return A list of \code{antsImage} objects, one per row of \code{dataMatrix}.
#' @author Cook PA
#' @seealso \code{\link{imagesToMatrix}, \link{getMask}}
#' @examples
#'
#' \dontrun{
#' # mat = matrixToImages( aMat, mask )
#' }
#'
#'
#' @export matrixToImages
matrixToImages <- function(dataMatrix, mask) {
  # mask should be an antsImage of the correct dimensions and physical space;
  # each matrix row is written into the masked voxels of a clone of it.
  if (!is(mask, "antsImage")) {
    stop("Mask must be an antsImage")
  }
  numImages <- nrow(dataMatrix)
  numVoxelsInMatrix <- ncol(dataMatrix)
  numVoxelsInMask <- length(which(mask > 0))
  if (numVoxelsInMatrix != numVoxelsInMask) {
    stop(paste("Number of masked voxels", numVoxelsInMask, " do not match data",
      numVoxelsInMatrix))
  }
  # Preallocate the result: the previous lappend() call grew the list one
  # element per iteration, copying it each time (quadratic in numImages).
  # seq_len() also handles a zero-row matrix correctly, unlike 1:numImages.
  imagelist <- vector("list", numImages)
  for (i in seq_len(numImages)) {
    img <- antsImageClone(mask)
    img[mask <= 0] <- 0
    img[mask > 0] <- dataMatrix[i, ]
    imagelist[[i]] <- img
  }
  return(imagelist)
}
| /R/matrixToImages.R | no_license | bkandel/ANTsRCheck | R | false | false | 1,354 | r | #' Convert rows of a matrix to images
#'
#' Unmasks rows of a matrix and writes as images.
#'
#'
#' @param dataMatrix A matrix such as that created by \code{imagesToMatrix}.
#' @param mask An \code{antsImage} containing a binary mask.
#' Rows of the matrix
#' are unmasked and written as images. The mask defines the output image space.
#' @author Cook PA
#' @seealso \code{\link{imagesToMatrix}, \link{getMask}}
#' @examples
#'
#' \dontrun{
#' # mat = matrixToImages( aMat, mask )
#' }
#'
#'
#' @export matrixToImages
matrixToImages <- function(dataMatrix, mask) {
# Writes rows of a matrix to 3D images. mask should be an antsImage of the
# correct dimensions and physical space
if (!is(mask, "antsImage")) {
stop("Mask must be an antsImage")
}
numImages <- dim(dataMatrix)[1]
numVoxelsInMatrix <- dim(dataMatrix)[2]
numVoxelsInMask <- length(which(mask > 0))
if (numVoxelsInMatrix != numVoxelsInMask) {
stop(paste("Number of masked voxels", numVoxelsInMask, " do not match data",
numVoxelsInMatrix))
}
imagelist <- list()
for (i in 1:numImages) {
img <- antsImageClone(mask)
vec <- dataMatrix[i, ]
img[mask <= 0] <- 0
img[mask > 0] <- vec
imagelist <- lappend(imagelist, img)
# antsImageWrite( img, paste(outputRoot, sprintf('%03d.nii.gz', i), sep = '') )
}
return(imagelist)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add_node_df.R
\name{add_node_df}
\alias{add_node_df}
\title{Add nodes from a node data frame to an existing graph object}
\usage{
add_node_df(graph, node_df)
}
\arguments{
\item{graph}{a graph object of class \code{dgr_graph}.}
\item{node_df}{a node data frame that is created using
\code{\link{create_node_df}()}.}
}
\value{
a graph object of class \code{dgr_graph}.
}
\description{
With a graph object of class \code{dgr_graph} add nodes from a node data
frame to that graph.
}
\examples{
# Create an empty graph
graph <- create_graph()
# Create a node data frame (ndf)
ndf <-
create_node_df(n = 2)
# Add the node data frame to
# the graph object to create
# a graph with nodes
graph <-
graph \%>\%
add_node_df(
node_df = ndf)
# Inspect the graph's ndf
graph \%>\% get_node_df()
# Create another ndf
ndf_2 <-
create_node_df(n = 3)
# Add the second node data
# frame to the graph object
# to add more nodes with
# attributes to the graph
graph <-
graph \%>\%
add_node_df(
node_df = ndf_2)
# View the graph's internal
# node data frame using the
# `get_node_df()` function
graph \%>\% get_node_df()
}
| /man/add_node_df.Rd | permissive | lionel-/DiagrammeR | R | false | true | 1,208 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add_node_df.R
\name{add_node_df}
\alias{add_node_df}
\title{Add nodes from a node data frame to an existing graph object}
\usage{
add_node_df(graph, node_df)
}
\arguments{
\item{graph}{a graph object of class \code{dgr_graph}.}
\item{node_df}{a node data frame that is created using
\code{\link{create_node_df}()}.}
}
\value{
a graph object of class \code{dgr_graph}.
}
\description{
With a graph object of class \code{dgr_graph} add nodes from a node data
frame to that graph.
}
\examples{
# Create an empty graph
graph <- create_graph()
# Create a node data frame (ndf)
ndf <-
create_node_df(n = 2)
# Add the node data frame to
# the graph object to create
# a graph with nodes
graph <-
graph \%>\%
add_node_df(
node_df = ndf)
# Inspect the graph's ndf
graph \%>\% get_node_df()
# Create another ndf
ndf_2 <-
create_node_df(n = 3)
# Add the second node data
# frame to the graph object
# to add more nodes with
# attributes to the graph
graph <-
graph \%>\%
add_node_df(
node_df = ndf_2)
# View the graph's internal
# node data frame using the
# `get_node_df()` function
graph \%>\% get_node_df()
}
|
context("tbl_df/grouped_df")
# Regression tests for tbl_df / grouped_df subsetting behaviour.
test_that("grouped_df returns tbl_df if no groups", {
df <- grouped_df(mtcars, list())
expect_equal(class(df), c("tbl_df", "tbl", "data.frame"))
})
# Unlike base data.frames, single-column `[` on a tbl_df must not drop to a
# bare vector.
test_that("[ never drops", {
mtcars2 <- tbl_df(mtcars)
expect_is(mtcars2[, 1], "data.frame")
expect_is(mtcars2[, 1], "tbl_df")
expect_equal(mtcars2[, 1], mtcars2[1])
})
# Two-argument form df[, 0]: selects zero columns but must keep all rows.
test_that("[ with 0 cols creates correct row names (#656)", {
zero_row <- iris %>% tbl_df() %>% `[`(, 0)
expect_is(zero_row, "tbl_df")
expect_equal(nrow(zero_row), 150)
expect_equal(ncol(zero_row), 0)
expect_output(print(zero_row), "[150 x 0]", fixed = TRUE)
})
# Single-argument form df[0]: same zero-column selection as df[, 0] above.
# Fix: the description previously duplicated the preceding test's name
# verbatim, which makes the two indistinguishable in testthat reports.
test_that("[ with 0 cols (single-argument indexing) creates correct row names (#656)", {
zero_row <- iris %>% tbl_df() %>% `[`(0)
expect_is(zero_row, "tbl_df")
expect_equal(nrow(zero_row), 150)
expect_equal(ncol(zero_row), 0)
expect_output(print(zero_row), "[150 x 0]", fixed = TRUE)
})
| /tests/testthat/test-tbl-df.r | no_license | mattwg/dplyr | R | false | false | 904 | r | context("tbl_df/grouped_df")
test_that("grouped_df returns tbl_df if no groups", {
df <- grouped_df(mtcars, list())
expect_equal(class(df), c("tbl_df", "tbl", "data.frame"))
})
test_that("[ never drops", {
mtcars2 <- tbl_df(mtcars)
expect_is(mtcars2[, 1], "data.frame")
expect_is(mtcars2[, 1], "tbl_df")
expect_equal(mtcars2[, 1], mtcars2[1])
})
test_that("[ with 0 cols creates correct row names (#656)", {
zero_row <- iris %>% tbl_df() %>% `[`(, 0)
expect_is(zero_row, "tbl_df")
expect_equal(nrow(zero_row), 150)
expect_equal(ncol(zero_row), 0)
expect_output(print(zero_row), "[150 x 0]", fixed = TRUE)
})
test_that("[ with 0 cols creates correct row names (#656)", {
zero_row <- iris %>% tbl_df() %>% `[`(0)
expect_is(zero_row, "tbl_df")
expect_equal(nrow(zero_row), 150)
expect_equal(ncol(zero_row), 0)
expect_output(print(zero_row), "[150 x 0]", fixed = TRUE)
})
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# Auto-generated binding: forwards all arguments unchanged to the compiled
# _QRADMM_QRADM routine (presumably quantile regression via ADMM, per the
# package name -- see the package's exported documentation).
QRADM <- function(xr, yr, ta, rhor, lambdar, iter, intercept, M, penalty, a) {
.Call('_QRADMM_QRADM', PACKAGE = 'QRADMM', xr, yr, ta, rhor, lambdar, iter, intercept, M, penalty, a)
}
# Auto-generated binding for the compiled _QRADMM_QRADMMCPP routine.
QRADMMCPP <- function(x, y, tau, rho, lambda, iter, intercept, penalty, a, lambda1, lambda2) {
.Call('_QRADMM_QRADMMCPP', PACKAGE = 'QRADMM', x, y, tau, rho, lambda, iter, intercept, penalty, a, lambda1, lambda2)
}
| /R/RcppExports.R | no_license | DawnGnius/QRADMM | R | false | false | 535 | r | # Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
QRADM <- function(xr, yr, ta, rhor, lambdar, iter, intercept, M, penalty, a) {
.Call('_QRADMM_QRADM', PACKAGE = 'QRADMM', xr, yr, ta, rhor, lambdar, iter, intercept, M, penalty, a)
}
QRADMMCPP <- function(x, y, tau, rho, lambda, iter, intercept, penalty, a, lambda1, lambda2) {
.Call('_QRADMM_QRADMMCPP', PACKAGE = 'QRADMM', x, y, tau, rho, lambda, iter, intercept, penalty, a, lambda1, lambda2)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mirror.R
\name{mirror}
\alias{mirror}
\alias{mirror.coords}
\alias{mirror.pdb}
\title{Reflexion of Atomic Coordinates}
\usage{
mirror(...)
\method{mirror}{coords}(x, p1, p2 = NULL, p3 = NULL, mask = TRUE,
cryst1 = NULL, ...)
\method{mirror}{pdb}(x, p1, p2 = NULL, p3 = NULL, mask = TRUE,
cryst1 = x$cryst1, ...)
}
\arguments{
\item{x}{an R object containing atomic coordinates.}
\item{p1}{a numeric vector of length 3 containing the coordinates of the
first point defining the reflexion plan. Can also be a 3x3 matrix or
data.frame containing by row \code{p1}, \code{p2} and \code{p3}.}
\item{p2}{a numeric vector of length 3 containing the coordinates of the
second point defining the reflexion plan.}
\item{p3}{a numeric vector of length 3 containing the coordinates of the
third point defining the reflexion plan.}
\item{mask}{a logical vector indicating the set of coordinates to which to
apply the reflexion.}
\item{cryst1}{an object of class \sQuote{cryst1} use to convert fractional
into Cartesian coordinates when need.}
\item{\dots}{further arguments passed to or from other methods.}
}
\value{
An object of the same class as \code{x} with reflected coordinates.
}
\description{
Perform a reflexion (or mirror) operation on atomic coordinates with respect
to a given reflexion plan.
}
\details{
\code{mirror} is generic functions. Method for objects of class
\sQuote{coords} first convert the coordinates into Cartesian coordinates
using \code{cryst1} if needed. Once reflected, the coordinates are
reconverted back to the orginal basis set using again \code{cryst1}. Method
for objects of class \sQuote{pdb} first extract coordinates from the object
using the function \code{coords}, perform the reflection, and update the
coordinates of the \sQuote{pdb} object using the function \code{coords<-}.
}
\examples{
# First lets read a pdb file
x <- read.pdb(system.file("examples/PCBM_ODCB.pdb",package="Rpdb"))
cell <- cell.coords(x)
visualize(x, mode = NULL)
# Mirror operation with respect to the ab-plan
visualize(mirror(x, rep(0,3), p1=cell[,"a"], p2=cell[,"b"]), mode = NULL)
# Mirror operation with respect to the ab-plan for residue 1
visualize(mirror(x, rep(0,3), p1=cell[,"a"], p2=cell[,"b"], mask=x$atoms$resid==1), mode = NULL)
}
\seealso{
Helper functions for reflection with respect to a given Cartesian
plan or a plan defined by two lattice vectors:\cr \code{\link{Mxy}},
\code{\link{Myz}}, \code{\link{Mzx}}, \code{\link{Mab}}, \code{\link{Mbc}},
\code{\link{Mca}}\cr Passing from Cartesian to fractional coordinates (or vice
versa):\cr \code{\link{xyz2abc}}, \code{\link{abc2xyz}}
}
\keyword{manip}
| /man/mirror.Rd | no_license | cran/Rpdb | R | false | true | 2,720 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mirror.R
\name{mirror}
\alias{mirror}
\alias{mirror.coords}
\alias{mirror.pdb}
\title{Reflexion of Atomic Coordinates}
\usage{
mirror(...)
\method{mirror}{coords}(x, p1, p2 = NULL, p3 = NULL, mask = TRUE,
cryst1 = NULL, ...)
\method{mirror}{pdb}(x, p1, p2 = NULL, p3 = NULL, mask = TRUE,
cryst1 = x$cryst1, ...)
}
\arguments{
\item{x}{an R object containing atomic coordinates.}
\item{p1}{a numeric vector of length 3 containing the coordinates of the
first point defining the reflexion plan. Can also be a 3x3 matrix or
data.frame containing by row \code{p1}, \code{p2} and \code{p3}.}
\item{p2}{a numeric vector of length 3 containing the coordinates of the
second point defining the reflexion plan.}
\item{p3}{a numeric vector of length 3 containing the coordinates of the
thrid point defining the reflexion plan.}
\item{mask}{a logical vector indicating the set of coordinates to which to
apply the reflexion.}
\item{cryst1}{an object of class \sQuote{cryst1} use to convert fractional
into Cartesian coordinates when need.}
\item{\dots}{further arguments passed to or from other methods.}
}
\value{
An object of the same class as \code{x} with reflected coordinates.
}
\description{
Perform a reflexion (or mirror) operation on atomic coordinates with respect
to a given reflexion plan.
}
\details{
\code{mirror} is generic functions. Method for objects of class
\sQuote{coords} first convert the coordinates into Cartesian coordinates
using \code{cryst1} if needed. Once reflected, the coordinates are
reconverted back to the orginal basis set using again \code{cryst1}. Method
for objects of class \sQuote{pdb} first extract coordinates from the object
using the function \code{coords}, perform the reflection, and update the
coordinates of the \sQuote{pdb} object using the function \code{coords<-}.
}
\examples{
# First lets read a pdb file
x <- read.pdb(system.file("examples/PCBM_ODCB.pdb",package="Rpdb"))
cell <- cell.coords(x)
visualize(x, mode = NULL)
# Mirror operation with respect to the ab-plan
visualize(mirror(x, rep(0,3), p1=cell[,"a"], p2=cell[,"b"]), mode = NULL)
# Mirror operation with respect to the ab-plan for residue 1
visualize(mirror(x, rep(0,3), p1=cell[,"a"], p2=cell[,"b"], mask=x$atoms$resid==1), mode = NULL)
}
\seealso{
Helper functions for reflection with respect to a given Cartesian
plan or a plan defined by two lattice vectors:\cr \code{\link{Mxy}},
\code{\link{Myz}}, \code{\link{Mzx}}, \code{\link{Mab}}, \code{\link{Mbc}},
\code{\link{Mca}}\cr Passing from Cartesian to fractional coordinates (or Vis
Versa):\cr \code{\link{xyz2abc}}, \code{\link{abc2xyz}}
}
\keyword{manip}
|
library(data.table)
# Plot 4: 2x2 panel of household power-consumption measures for 1-2 Feb 2007.
Dataset<-fread("household_power_consumption.txt")
subSetData <- Dataset[Dataset$Date %in% c("1/2/2007","2/2/2007") ,]
datetime <- strptime(paste(subSetData$Date, subSetData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
# Coerce measurement columns (read as character because of "?" missing
# markers in the raw file) to numeric.
globalReactivePower <- as.numeric(subSetData$Global_reactive_power)
globalActivePower <- as.numeric(subSetData$Global_active_power)
voltage <- as.numeric(subSetData$Voltage)
subMetering1 <- as.numeric(subSetData$Sub_metering_1)
subMetering2 <- as.numeric(subSetData$Sub_metering_2)
subMetering3 <- as.numeric(subSetData$Sub_metering_3)
png("plot4.png", width=480, height=480)
par(mfrow = c(2, 2))
plot(datetime, globalActivePower, type="l", xlab="", ylab="Global Active Power", cex=0.2)
plot(datetime, voltage, type="l", xlab="datetime", ylab="Voltage")
plot(datetime, subMetering1, type="l", ylab="Energy Submetering", xlab="")
lines(datetime, subMetering2, type="l", col="red")
lines(datetime, subMetering3, type="l", col="blue")
# Fix: `lty=` was left as an empty (missing) argument, so legend() treated
# lty as absent and drew no line samples; lty=1:3 mirrors the line types of
# the three plotted series.
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1:3, lwd=2.5, col=c("black", "red", "blue"), bty="o")
plot(datetime, globalReactivePower, type="l", xlab="datetime", ylab="Global_reactive_power")
dev.off() | /plot4.R | no_license | Akshay-1024/ExData_Plotting1 | R | false | false | 1,202 | r | library(data.table)
Dataset<-fread("household_power_consumption.txt")
subSetData <- Dataset[Dataset$Date %in% c("1/2/2007","2/2/2007") ,]
datetime <- strptime(paste(subSetData$Date, subSetData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
globalReactivePower <- as.numeric(subSetData$Global_reactive_power)
globalActivePower <- as.numeric(subSetData$Global_active_power)
voltage <- as.numeric(subSetData$Voltage)
subMetering1 <- as.numeric(subSetData$Sub_metering_1)
subMetering2 <- as.numeric(subSetData$Sub_metering_2)
subMetering3 <- as.numeric(subSetData$Sub_metering_3)
png("plot4.png", width=480, height=480)
par(mfrow = c(2, 2))
plot(datetime, globalActivePower, type="l", xlab="", ylab="Global Active Power", cex=0.2)
plot(datetime, voltage, type="l", xlab="datetime", ylab="Voltage")
plot(datetime, subMetering1, type="l", ylab="Energy Submetering", xlab="")
lines(datetime, subMetering2, type="l", col="red")
lines(datetime, subMetering3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=, lwd=2.5, col=c("black", "red", "blue"), bty="o")
plot(datetime, globalReactivePower, type="l", xlab="datetime", ylab="Global_reactive_power")
dev.off() |
# Registry of cached GDSC data files, loaded on demand via the ebits io module.
.io = import('ebits/io')
# Map of dataset identifiers to file names under this module's cache/
# directory. NOTE(review): ANALYSIS_SET is empty -- confirm whether it is
# intentionally a placeholder.
.files = list(
    MASTER_LIST = "20140320_MASTER_LIST.ro",
    ANALYSIS_SET = "",
    BASAL_EXPRESSION = "BASAL_EXPRESSION_12062013v2.ro",
    DRUG_PROPS = "DRUG_ANALYSIS_SET_20150123.rdata",
    DRUG_IC50 = "v17a_IC50s.Rdata",
    DRUG_AUC = "v17a_AUCs.Rdata",
    INTOGEN_DRIVERS = "cancer_drivers_5_2_2014.ro",
    CONC = "SCREEN_CONC.RData",
    NGS_BEM = "NGS_BEM_COSMIC_NURIAS_26022014.ro"
)
# Load the cached file registered under `id`; mustWork=TRUE errors if missing.
get = function(id) .io$load(module_file('cache', .files[[id]], mustWork=TRUE))
| /gdsc/file.r | no_license | ipstone/data | R | false | false | 505 | r | .io = import('ebits/io')
.files = list(
MASTER_LIST = "20140320_MASTER_LIST.ro",
ANALYSIS_SET = "",
BASAL_EXPRESSION = "BASAL_EXPRESSION_12062013v2.ro",
DRUG_PROPS = "DRUG_ANALYSIS_SET_20150123.rdata",
DRUG_IC50 = "v17a_IC50s.Rdata",
DRUG_AUC = "v17a_AUCs.Rdata",
INTOGEN_DRIVERS = "cancer_drivers_5_2_2014.ro",
CONC = "SCREEN_CONC.RData",
NGS_BEM = "NGS_BEM_COSMIC_NURIAS_26022014.ro"
)
get = function(id) .io$load(module_file('cache', .files[[id]], mustWork=TRUE))
|
##########################################################################
# This script reads in:
# 1. Madison Work Area Characteristic (WAC) Data
# 2. Selects the tract ID and total jobs columns
# 3. Madison Residence Area Characteristic (RAC) Data
# 4. Selects the tract ID and total jobs columns
# 5. Joins them.
#
##########################################################################
# Read in WAC Data
# Geocodes are truncated to their first 11 characters, i.e. aggregated from
# census block to census tract, before summing the C000 total-jobs counts.
MD_WAC_file <- file.path(data_directory,
"LODES/wi_wac_S000_JT00_2017.csv.gz")
MD_WAC <- read_csv(MD_WAC_file) %>%
dplyr::select(geocode = w_geocode, C000) %>%
mutate(geocode = as.character(substr(geocode, 1, 11))) %>%
group_by(geocode) %>%
summarize(jobs_in_tract = sum(C000, na.rm = TRUE)) %>%
filter(geocode %in% MD_Census_geoinfo$GEOID) # from MD - 20 - Collect Census Data
# Read in RAC Data
MD_RAC_file <- file.path(data_directory,
"LODES/wi_rac_S000_JT00_2017.csv.gz")
MD_RAC <- read_csv(MD_RAC_file) %>%
dplyr::select(geocode = h_geocode, C000) %>%
mutate(geocode = as.character(substr(geocode, 1, 11))) %>%
group_by(geocode) %>%
summarize(workers_in_tract = sum(C000, na.rm = TRUE)) %>%
filter(geocode %in% MD_Census_geoinfo$GEOID) # from MD - 20 - Collect Census Data
# Join them
# Left join keeps every tract present in the WAC data, even without workers.
MD_LODES <- left_join(MD_WAC, MD_RAC, by = c("geocode"))
# NOTE(review): the output path says "Philadelphia" for a Madison dataset --
# confirm the intended directory.
MD_LODES_RDS <- file.path(data_directory,
"~RData/Philadelphia/MD_LODES")
# saveRDS(MD_LODES,
# file = MD_LODES_RDS)
| /~code/Madison/MD - 03 - LODE data (Close).R | no_license | Louisville-Scooters/Practicum | R | false | false | 1,501 | r | ##########################################################################
# This script reads in:
# 1. Madison Work Area Characteristic (WAC) Data
# 2. Selects the tract ID and total jobs columns
# 3. Madison Residence Area Characteristic (RAC) Data
# 4. Selects the tract ID and total jobs columns
# 5. Joins them.
#
##########################################################################
# Read in WAC Data
MD_WAC_file <- file.path(data_directory,
"LODES/wi_wac_S000_JT00_2017.csv.gz")
MD_WAC <- read_csv(MD_WAC_file) %>%
dplyr::select(geocode = w_geocode, C000) %>%
mutate(geocode = as.character(substr(geocode, 1, 11))) %>%
group_by(geocode) %>%
summarize(jobs_in_tract = sum(C000, na.rm = TRUE)) %>%
filter(geocode %in% MD_Census_geoinfo$GEOID) # from MD - 20 - Collect Census Data
# Read in RAC Data
MD_RAC_file <- file.path(data_directory,
"LODES/wi_rac_S000_JT00_2017.csv.gz")
MD_RAC <- read_csv(MD_RAC_file) %>%
dplyr::select(geocode = h_geocode, C000) %>%
mutate(geocode = as.character(substr(geocode, 1, 11))) %>%
group_by(geocode) %>%
summarize(workers_in_tract = sum(C000, na.rm = TRUE)) %>%
filter(geocode %in% MD_Census_geoinfo$GEOID) # from MD - 20 - Collect Census Data
# Join them
MD_LODES <- left_join(MD_WAC, MD_RAC, by = c("geocode"))
MD_LODES_RDS <- file.path(data_directory,
"~RData/Philadelphia/MD_LODES")
# saveRDS(MD_LODES,
# file = MD_LODES_RDS)
|
# read.xlsx() from the xlsx package can read Excel files.
# Installing/using xlsx requires a Java runtime (stored in Baidu cloud under
# Software/Java; local copy: JavaJRE\jre-8u91-windows-x64.exe).
#install.packages("xlsx")
#library(xlsx)
# When loading a worksheet with read.xlsx(), the sheet can be chosen either
# by position via the sheetIndex argument or by name via sheetName, e.g.:
# read.xlsx("test.xlsx", sheetIndex=2) read.xlsx("test.xlsx", sheetName="test")
#data <- read.xlsx("test.xlsx", 1)
# For old-format Excel files (.xls) the gdata package provides read.xls(),
# but it needs Perl and so on, which is a hassle; read.xlsx() can be used
# to read .xls files directly instead.
install.packages("gdata")
library(gdata)
data <- read.xlsx("testxls.xls", 1)
data$名字   # column "名字" ("name"); the column name itself is data, left as-is
# SPSS files: not used yet, skipping for now.
| /DemoTwo/DemoTwo.r | no_license | UglyMelon007/RTestDemo | R | false | false | 773 | r | # xlsx包中的函数read.xlsx()可以读取Excel文件
# Using the xlsx package requires installing Java (originally kept in a
# Baidu cloud folder, software/Java).
# Also noted here for convenience: JavaJRE\jre-8u91-windows-x64.exe
#install.packages("xlsx")
#library(xlsx)
# When loading a worksheet with read.xlsx(), the sheet can be selected either
# by position via the sheetIndex argument or by name via the sheetName argument:
# read.xlsx("test.xlsx", sheetIndex=2) read.xlsx("test.xlsx", sheetName="test")
#data <- read.xlsx("test.xlsx", 1)
# For old-format Excel files (.xls) the gdata package provides read.xls(), but
# it needs Perl and similar dependencies, which is a hassle; read.xlsx() can be
# used directly to read .xls files instead.
# NOTE(review): read.xlsx() belongs to the xlsx package, which is not loaded
# below (only gdata is) -- confirm this script actually runs as written.
install.packages("gdata")
library(gdata)
data <- read.xlsx("testxls.xls", 1)
data$名字
# SPSS files: not used yet, so not covered here.
|
\name{O3prep}
\alias{O3prep}
\title{
Identify outliers for different combinations of variables
}
\description{
Check the dataset and parameters prior to analysis. Identify outliers for the variable combinations and methods/tolerance levels specified. Prepare input for the two plotting functions \code{O3plotT} and \code{O3plotM}.
}
\usage{
O3prep(data, k1=1, K=ncol(data), method="HDo", tols=0.05, boxplotLimits=6,
tolHDo=0.05, tolPCS=0.01, tolBAC=0.001, toladj=0.05, tolDDC=0.01, tolMCD=0.000001)
}
\arguments{
\item{data}{
dataset to be checked for outliers
}
\item{k1}{
lowest number of variables in a combination
}
\item{K}{
highest number of variables in a combination
}
\item{method}{
method(s) used for identifying outliers (up to six can be used)
}
\item{tols}{
outlier tolerance level(s) (up to three can be used if only one method is specified, otherwise just one). For consistent use of the argument, it is transformed for some of the methods. See details below of how the argument is applied for each approach.
}
\item{boxplotLimits}{
boxplot limit(s) used if a method does not apply for a single variable (there should be the same number of boxplotLimits as tols)
}
\item{tolHDo}{
an individual outlier tolerance level for the HDoutliers method. The default in \pkg{HDoutliers}, alpha, is 0.05.
}
\item{tolPCS}{
an individual outlier tolerance level for the FastPCS method. This equals (1-alpha) for the argument in \pkg{FastPCS}, where the default is 0.5.
}
\item{tolBAC}{
an individual outlier tolerance level for the mvBACON method. The default for alpha in \pkg{robustX} is 0.95. This seems high, but it is divided by n, the dataset size.
}
\item{toladj}{
an individual outlier tolerance level for the adjOutlyingness method. This equals (1-alpha.cutoff) for the argument in \pkg{robustbase}, where the default is 0.75.
}
\item{tolDDC}{
an individual outlier tolerance level for the DetectDeviatingCells method. This equals (1-tolProb) for the argument in \pkg{cellWise}, where the default is 0.99.
}
\item{tolMCD}{
an individual outlier tolerance level for the covMcd method. The default is 0.025 (based on the help page for plot.mcd in \pkg{robustbase}). This is NOT the alpha argument in \command{covMcd}, which is used for determining subset size and set to 0.9 in OutliersO3.
}
}
\details{
To check outliers for all possible combinations of variables choose k1=1 and K=number of variables in the dataset (the default).
The optional methods are "HDo" \command{HDoutliers} (from \pkg{HDoutliers}), "PCS" \command{FastPCS} (\pkg{FastPCS}), "BAC" \command{mvBACON} (\pkg{robustX}), "adjOut" \command{adjOutlyingness} (\pkg{robustbase}), "DDC" \command{DetectDeviatingCells} (\pkg{cellWise}), "MCD" \command{covMcd} (\pkg{robustbase}). References for all these methods can be found on their help pages, linked below.
If only one method is specified, then up to three tolerance levels (tols) and three boxplot limits (boxplotLimits) can be specified.
\code{tol} is the argument determining outlyingness and should be set low, as in \command{HDoutliers} and \command{mvBACON}, where it is called alpha, and in \command{covMcd}. For the other methods \code{(1-tol)} is used. In \command{DetectDeviatingCells} the argument is called tolProb. Using the same tolerance level for all methods does not make them directly comparable, which is why it is recommended to set them individually when drawing a comparative O3 plot. The defaults suggested on the methods' help pages mostly found too many outliers and so other defaults have been set. Users need to decide for themselves, possibly dependent on the dataset they are analysing.
Methods "HDo", "adjOut", and "MCD" can analyse single variables. For the other methods, boxplot limits are used for single variables, and any case > (Q3 + boxplotLimit*IQR) or < (Q1 - boxplotLimit*IQR) is classed as an outlier, where \code{boxplotLimit} is the limit specified.
}
\value{
\item{data}{
the dataset analysed
}
\item{nw}{
the number of variable combinations analysed
}
\item{mm}{
the outlier methods used
}
\item{tols}{
the individual tolerance levels for the outlier methods used (if more than one), otherwise up to 3 tolerance levels used for one method
}
\item{outList}{
a list for each method/tolerance level, and within that for each variable combination, of the variables used, the indices of cases identified as outliers, and the outlier distances for all cases in the dataset.
}
}
\author{
Antony Unwin unwin@math.uni-augsburg.de
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{\code{\link[HDoutliers]{HDoutliers}} in \pkg{HDoutliers}, \code{\link[FastPCS]{FastPCS}} in \pkg{FastPCS}, \code{\link[robustX]{mvBACON}} in \pkg{robustX}, \code{\link[robustbase]{adjOutlyingness}} in \pkg{robustbase}, \code{\link[cellWise]{DetectDeviatingCells}} in \pkg{cellWise}, \code{\link[robustbase]{covMcd}} in \pkg{robustbase}
}
\examples{
a0 <- O3prep(stackloss, method="PCS", tols=0.05, boxplotLimits=3)
b0 <- O3prep(stackloss, method=c("BAC", "adjOut"), k1=2, tols=0.01, boxplotLimits=6)
\dontrun{
a1 <- O3prep(stackloss, method="PCS", tols=c(0.1, 0.05, 0.01), boxplotLimits=c(3, 6, 10))
b1 <- O3prep(stackloss, method=c("HDo", "BAC", "DDC"), tolHDo=0.025, tolBAC=0.01, tolDDC=0.05)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{multivariate}
| /man/O3prep.Rd | no_license | pridiltal/OutliersO3 | R | false | false | 5,547 | rd | \name{O3prep}
\alias{O3prep}
\title{
Identify outliers for different combinations of variables
}
\description{
Check the dataset and parameters prior to analysis. Identify outliers for the variable combinations and methods/tolerance levels specified. Prepare input for the two plotting functions \code{O3plotT} and \code{O3plotM}.
}
\usage{
O3prep(data, k1=1, K=ncol(data), method="HDo", tols=0.05, boxplotLimits=6,
tolHDo=0.05, tolPCS=0.01, tolBAC=0.001, toladj=0.05, tolDDC=0.01, tolMCD=0.000001)
}
\arguments{
\item{data}{
dataset to be checked for outliers
}
\item{k1}{
lowest number of variables in a combination
}
\item{K}{
highest number of variables in a combination
}
\item{method}{
method(s) used for identifying outliers (up to six can be used)
}
\item{tols}{
outlier tolerance level(s) (up to three can be used if only one method is specified, otherwise just one). For consistent use of the argument, it is transformed for some of the methods. See details below of how the argument is applied for each approach.
}
\item{boxplotLimits}{
boxplot limit(s) used if a method does not apply for a single variable (there should be the same number of boxplotLimits as tols)
}
\item{tolHDo}{
an individual outlier tolerance level for the HDoutliers method. The default in \pkg{HDoutliers}, alpha, is 0.05.
}
\item{tolPCS}{
an individual outlier tolerance level for the FastPCS method. This equals (1-alpha) for the argument in \pkg{FastPCS}, where the default is 0.5.
}
\item{tolBAC}{
an individual outlier tolerance level for the mvBACON method. The default for alpha in \pkg{robustX} is 0.95. This seems high, but it is divided by n, the dataset size.
}
\item{toladj}{
an individual outlier tolerance level for the adjOutlyingness method. This equals (1-alpha.cutoff) for the argument in \pkg{robustbase}, where the default is 0.75.
}
\item{tolDDC}{
an individual outlier tolerance level for the DetectDeviatingCells method. This equals (1-tolProb) for the argument in \pkg{cellWise}, where the default is 0.99.
}
\item{tolMCD}{
an individual outlier tolerance level for the covMcd method. The default is 0.025 (based on the help page for plot.mcd in \pkg{robustbase}). This is NOT the alpha argument in \command{covMcd}, which is used for determining subset size and set to 0.9 in OutliersO3.
}
}
\details{
To check outliers for all possible combinations of variables choose k1=1 and K=number of variables in the dataset (the default).
The optional methods are "HDo" \command{HDoutliers} (from \pkg{HDoutliers}), "PCS" \command{FastPCS} (\pkg{FastPCS}), "BAC" \command{mvBACON} (\pkg{robustX}), "adjOut" \command{adjOutlyingness} (\pkg{robustbase}), "DDC" \command{DetectDeviatingCells} (\pkg{cellWise}), "MCD" \command{covMcd} (\pkg{robustbase}). References for all these methods can be found on their help pages, linked below.
If only one method is specified, then up to three tolerance levels (tols) and three boxplot limits (boxplotLimits) can be specified.
\code{tol} is the argument determining outlyingness and should be set low, as in \command{HDoutliers} and \command{mvBACON}, where it is called alpha, and in \command{covMcd}. For the other methods \code{(1-tol)} is used. In \command{DetectDeviatingCells} the argument is called tolProb. Using the same tolerance level for all methods does not make them directly comparable, which is why it is recommended to set them individually when drawing a comparative O3 plot. The defaults suggested on the methods' help pages mostly found too many outliers and so other defaults have been set. Users need to decide for themselves, possibly dependent on the dataset they are analysing.
Methods "HDo", "adjOut", and "MCD" can analyse single variables. For the other methods, boxplot limits are used for single variables, and any case > (Q3 + boxplotLimit*IQR) or < (Q1 - boxplotLimit*IQR) is classed as an outlier, where \code{boxplotLimit} is the limit specified.
}
\value{
\item{data}{
the dataset analysed
}
\item{nw}{
the number of variable combinations analysed
}
\item{mm}{
the outlier methods used
}
\item{tols}{
the individual tolerance levels for the outlier methods used (if more than one), otherwise up to 3 tolerance levels used for one method
}
\item{outList}{
a list for each method/tolerance level, and within that for each variable combination, of the variables used, the indices of cases identified as outliers, and the outlier distances for all cases in the dataset.
}
}
\author{
Antony Unwin unwin@math.uni-augsburg.de
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{\code{\link[HDoutliers]{HDoutliers}} in \pkg{HDoutliers}, \code{\link[FastPCS]{FastPCS}} in \pkg{FastPCS}, \code{\link[robustX]{mvBACON}} in \pkg{robustX}, \code{\link[robustbase]{adjOutlyingness}} in \pkg{robustbase}, \code{\link[cellWise]{DetectDeviatingCells}} in \pkg{cellWise}, \code{\link[robustbase]{covMcd}} in \pkg{robustbase}
}
\examples{
a0 <- O3prep(stackloss, method="PCS", tols=0.05, boxplotLimits=3)
b0 <- O3prep(stackloss, method=c("BAC", "adjOut"), k1=2, tols=0.01, boxplotLimits=6)
\dontrun{
a1 <- O3prep(stackloss, method="PCS", tols=c(0.1, 0.05, 0.01), boxplotLimits=c(3, 6, 10))
b1 <- O3prep(stackloss, method=c("HDo", "BAC", "DDC"), tolHDo=0.025, tolBAC=0.01, tolDDC=0.05)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{multivariate}
|
# -- Libraries: plotting, diagnostics, model selection, cross-validation ------
library(gridExtra)
library(tidyverse)
library(knitr)
library(ggfortify)
library(lindia)
library(leaps)
library(car)
library(GGally)
library(caret)
library(dplyr)
# US border crossing entry data (rows identified by port, measure, and date).
border <- read.csv("Border_Crossing_Entry_Data.csv")
head(border)
# Derive Year and a 5-year bucket (ModYear); drop zero counts and missing rows.
# NOTE(review): format="%d/%m/%Y" assumes day-first dates; BTS border-crossing
# exports are commonly month-first ("%m/%d/%Y") -- verify against the raw CSV.
# Year is character here (output of format()), so the >= / <= comparisons below
# coerce the numeric bounds to character; that is safe for 4-digit years.
border <- border %>%
  mutate(Year = format(as.Date(border$Date, format="%d/%m/%Y"),"%Y"),
         ModYear = case_when(Year >= 1996 & Year <= 2000 ~ "1996 - 2000",
                             Year >= 2001 & Year <= 2005 ~ "2001 - 2005",
                             Year >= 2006 & Year <= 2010 ~ "2006 - 2010",
                             Year >= 2011 & Year <= 2015 ~ "2011 - 2015",
                             Year >= 2016 & Year <= 2020 ~ "2016 - 2020")) %>%
  filter(!Value %in% c(0)) %>%
  drop_na()
head(border)
# Crossings by state, faceted by 5-year period.
ggplot(border, aes(x = State, y = Value, col = State)) +
  geom_bar(stat = "identity") +
  facet_wrap(~ModYear) +
  theme(axis.text.x = element_text(angle = 60))
# Crossings by state, faceted by border.
ggplot(border, aes(x = State, y = Value, col = State)) +
  geom_bar(stat = "identity") +
  facet_wrap(~Border) +
  theme(axis.text.x = element_text(angle = 45))
# -- Linear models for crossing volume ----------------------------------------
# Full model with all candidate predictors.
border.full <- lm(Value ~ Port.Name + State + Port.Code + Border + Measure + Year, data = border)
summary(border.full)
# Reduced model without State.
border.reduced <- lm(Value ~ Port.Name + Port.Code + Border + Measure + Year, data = border)
summary(border.reduced)
# Backward stepwise selection starting from the reduced model.
border.backward <- step(border.reduced, direction = "backward")
summary(border.backward)
autoplot(border.backward)   # diagnostic plots for the selected model
gg_boxcox(border.backward)  # Box-Cox plot motivating the log transform below
# Log-response model on the remaining predictors.
border.fit <- lm(log(Value) ~ Port.Name + Measure + Year, data = border)
summary(border.fit)
autoplot(border.fit)
# -- 5-fold cross-validation of the log model ---------------------------------
set.seed(536255)
train_control <- trainControl(method = "cv", number = 5)
border.cv <- train(log(Value) ~ Port.Name + Measure + Year, data = border, trControl = train_control, method="lm")
print(border.cv)
# Predict bus crossings at Santa Teresa in 2010; exp() undoes the log response.
new_data = data.frame(Port.Name = "Santa Teresa", Measure = "Buses", Year = "2010")
exp(predict(border.cv, newdata = new_data))
# Observed values for the same port/measure/year, for comparison with the
# prediction (their mean is computed on the following line).
test.df <- data.frame(filter(border, Port.Name == "Santa Teresa" & Measure == "Buses" & Year == "2010"))
mean(test.df$Value) | /us-border-entry.R | no_license | varunvasudeva1/usborderentry | R | false | false | 2,036 | r | library(gridExtra)
# -- Libraries: plotting, diagnostics, model selection, cross-validation ------
library(tidyverse)
library(knitr)
library(ggfortify)
library(lindia)
library(leaps)
library(car)
library(GGally)
library(caret)
library(dplyr)
# US border crossing entry data (rows identified by port, measure, and date).
border <- read.csv("Border_Crossing_Entry_Data.csv")
head(border)
# Derive Year and a 5-year bucket (ModYear); drop zero counts and missing rows.
# NOTE(review): format="%d/%m/%Y" assumes day-first dates; BTS border-crossing
# exports are commonly month-first ("%m/%d/%Y") -- verify against the raw CSV.
# Year is character here (output of format()), so the >= / <= comparisons below
# coerce the numeric bounds to character; that is safe for 4-digit years.
border <- border %>%
  mutate(Year = format(as.Date(border$Date, format="%d/%m/%Y"),"%Y"),
         ModYear = case_when(Year >= 1996 & Year <= 2000 ~ "1996 - 2000",
                             Year >= 2001 & Year <= 2005 ~ "2001 - 2005",
                             Year >= 2006 & Year <= 2010 ~ "2006 - 2010",
                             Year >= 2011 & Year <= 2015 ~ "2011 - 2015",
                             Year >= 2016 & Year <= 2020 ~ "2016 - 2020")) %>%
  filter(!Value %in% c(0)) %>%
  drop_na()
head(border)
# Crossings by state, faceted by 5-year period.
ggplot(border, aes(x = State, y = Value, col = State)) +
  geom_bar(stat = "identity") +
  facet_wrap(~ModYear) +
  theme(axis.text.x = element_text(angle = 60))
# Crossings by state, faceted by border.
ggplot(border, aes(x = State, y = Value, col = State)) +
  geom_bar(stat = "identity") +
  facet_wrap(~Border) +
  theme(axis.text.x = element_text(angle = 45))
# -- Linear models for crossing volume ----------------------------------------
# Full model with all candidate predictors.
border.full <- lm(Value ~ Port.Name + State + Port.Code + Border + Measure + Year, data = border)
summary(border.full)
# Reduced model without State.
border.reduced <- lm(Value ~ Port.Name + Port.Code + Border + Measure + Year, data = border)
summary(border.reduced)
# Backward stepwise selection starting from the reduced model.
border.backward <- step(border.reduced, direction = "backward")
summary(border.backward)
autoplot(border.backward)   # diagnostic plots for the selected model
gg_boxcox(border.backward)  # Box-Cox plot motivating the log transform below
# Log-response model on the remaining predictors.
border.fit <- lm(log(Value) ~ Port.Name + Measure + Year, data = border)
summary(border.fit)
autoplot(border.fit)
# -- 5-fold cross-validation of the log model ---------------------------------
set.seed(536255)
train_control <- trainControl(method = "cv", number = 5)
border.cv <- train(log(Value) ~ Port.Name + Measure + Year, data = border, trControl = train_control, method="lm")
print(border.cv)
# Predict bus crossings at Santa Teresa in 2010; exp() undoes the log response.
new_data = data.frame(Port.Name = "Santa Teresa", Measure = "Buses", Year = "2010")
exp(predict(border.cv, newdata = new_data))
# Observed values for the same port/measure/year, for comparison with the
# prediction (their mean is computed on the following line).
test.df <- data.frame(filter(border, Port.Name == "Santa Teresa" & Measure == "Buses" & Year == "2010"))
mean(test.df$Value) |
library(tidyr)
library(dplyr)
library(tidycensus)
library(sf)
library(ggplot2)
grafton <- get_acs(year=2018, geography = "tract", state="NH", county="Grafton", geometry = TRUE,
variables = c(median.household.income = "B19013_001"))
other.vars <- get_acs(year=2018, geography = "tract", state="NH", county="Grafton", geometry = TRUE,
variables = c(med.age = "B01002H_001",
total = "B03002_001",
white = "B03002_003"))
grafton$med.age = other.vars$estimate[other.vars$variable == "med.age"]
grafton$prop.white = other.vars$estimate[other.vars$variable == "white"] /
other.vars$estimate[other.vars$variable == "total"]
grafton$town = c("Bethlehem, Sugar Hill, Franconia",
"Canaan",
"Ellsworth, Rumney, Campton",
"Holderness and Ashland",
"Orange, Alexandria, Grafton",
"Littleton",
"Haverhill",
"Bridgewater and Bristol",
"Enfield",
"Hanover",
"Lincoln, Livermore, Waterville Valley, Thorton",
"Benton, Woodstock, Warren",
"Piermont, Orford, Wentworth",
"Lebanon",
"Lyme, Dorchester, Groton, Hebron",
"Monroe, Lyman, Lisbon, Bath, Landaff, Easton",
"Hanover",
"Plymouth",
"Lebanon")
town.votes = read.csv("C:/Users/katri/Downloads/grafton.pres.2020.csv")
votes.2016 = read.csv("C:/Users/katri/Downloads/grafton2016.csv")
colnames(town.votes) <- c("town.name", "Trump","Biden","Other","total","X2")
colnames(votes.2016) <- c("town.name", "Trump","Biden","Other","total","X2")
for (i in 2:6){
votes.2016[,i] = as.numeric(gsub(",", "", votes.2016[,i]))
}
votes.2016$total[is.na(votes.2016$total)]= 0
votes.2016 = votes.2016[!is.na(town.votes$Biden),]
votes.2016$Other = votes.2016$Other + votes.2016$total + votes.2016$X2
votes.2016$total = votes.2016$Biden + votes.2016$Trump + votes.2016$Other
votes.2016$town.name[grepl("Lebanon", votes.2016$town.name, fixed=T)] = "Lebanon"
town.votes$Biden = as.numeric(gsub(",", "", town.votes$Biden))
town.votes$Trump = as.numeric(gsub(",", "", town.votes$Trump))
town.votes$Other = as.numeric(gsub(",", "", town.votes$Other))
town.votes$total = town.votes$Biden + town.votes$Trump + town.votes$Other
town.votes = town.votes[!is.na(town.votes$Biden),]
town.votes$town.name[grepl("Lebanon", town.votes$town.name, fixed=T)] = "Lebanon"
grafton$biden.votes = 0
grafton$trump.votes = 0
grafton$other.votes = 0
for (i in 1:19){
for (j in 1:dim(town.votes)[1]) {
if (grepl(town.votes$town.name[j], grafton$town[i], fixed=T)){
grafton$biden.votes.2020[i] = grafton$biden.votes[i] + town.votes$Biden[j]
grafton$trump.votes.2020[i] = grafton$trump.votes[i] + town.votes$Trump[j]
grafton$other.votes.2020[i] = grafton$other.votes[i] + town.votes$Other[j]
grafton$biden.votes.2016[i] = grafton$biden.votes[i] + votes.2016$Biden[j]
grafton$trump.votes.2016[i] = grafton$trump.votes[i] + votes.2016$Trump[j]
grafton$other.votes.2016[i] = grafton$other.votes[i] + votes.2016$Other[j]
}
}
}
grafton$total.2020 = grafton$biden.votes.2020 + grafton$trump.votes.2020 + grafton$other.votes.2020
grafton$biden.share.2020 = grafton$biden.votes.2020 / grafton$total.2020
grafton$trump.share.2020 = grafton$trump.votes.2020 / grafton$total.2020
grafton$other.share.2020 = grafton$other.votes.2020 / grafton$total.2020
grafton$biden.margin.2020 = (grafton$biden.share.2020 - grafton$trump.share.2020) * 100
grafton$total.2016 = grafton$biden.votes.2016 + grafton$trump.votes.2016 + grafton$other.votes.2016
grafton$biden.share.2016 = grafton$biden.votes.2016 / grafton$total.2016
grafton$trump.share.2016 = grafton$trump.votes.2016 / grafton$total.2016
grafton$other.share.2016 = grafton$other.votes.2016 / grafton$total.2016
grafton$biden.margin.2016 = (grafton$biden.share.2016 - grafton$trump.share.2016) * 100
grafton$biden.margin.diff = grafton$biden.margin.2020 - grafton$biden.margin.2016
grafton.data = grafton[,7:18]
grafton.data$med.hh.inc = grafton$estimate
grafton.data$high.inc = grafton.data$med.hh.inc > 60000
grafton.data = grafton.data[-c(17,19),]
ed <- get_acs(year=2018, geography = "tract", state="NH", county="Grafton", geometry = TRUE,
variables = c(total = "B06009_001",
some.college = "B06009_004",
college = "B06009_005",
grad.degree = "B06009_006"))
grafton$ed = (ed$estimate[ed$variable == "some.college"] +
ed$estimate[ed$variable == "college"] +
ed$estimate[ed$variable == "grad.degree"]) /
ed$estimate[ed$variable == "total"]
grafton.data$ed = grafton$ed[-c(17,19)]
grafton.data$total.2020 = grafton.data$biden.votes.2020 + grafton.data$trump.votes.2020 + grafton.data$other.votes.2020
| /Data_cleaning.R | no_license | kwheelan/GraftonVoting2020 | R | false | false | 5,162 | r | library(tidyr)
library(dplyr)
library(tidycensus)
library(sf)
library(ggplot2)
grafton <- get_acs(year=2018, geography = "tract", state="NH", county="Grafton", geometry = TRUE,
variables = c(median.household.income = "B19013_001"))
other.vars <- get_acs(year=2018, geography = "tract", state="NH", county="Grafton", geometry = TRUE,
variables = c(med.age = "B01002H_001",
total = "B03002_001",
white = "B03002_003"))
grafton$med.age = other.vars$estimate[other.vars$variable == "med.age"]
grafton$prop.white = other.vars$estimate[other.vars$variable == "white"] /
other.vars$estimate[other.vars$variable == "total"]
grafton$town = c("Bethlehem, Sugar Hill, Franconia",
"Canaan",
"Ellsworth, Rumney, Campton",
"Holderness and Ashland",
"Orange, Alexandria, Grafton",
"Littleton",
"Haverhill",
"Bridgewater and Bristol",
"Enfield",
"Hanover",
"Lincoln, Livermore, Waterville Valley, Thorton",
"Benton, Woodstock, Warren",
"Piermont, Orford, Wentworth",
"Lebanon",
"Lyme, Dorchester, Groton, Hebron",
"Monroe, Lyman, Lisbon, Bath, Landaff, Easton",
"Hanover",
"Plymouth",
"Lebanon")
town.votes = read.csv("C:/Users/katri/Downloads/grafton.pres.2020.csv")
votes.2016 = read.csv("C:/Users/katri/Downloads/grafton2016.csv")
colnames(town.votes) <- c("town.name", "Trump","Biden","Other","total","X2")
colnames(votes.2016) <- c("town.name", "Trump","Biden","Other","total","X2")
for (i in 2:6){
votes.2016[,i] = as.numeric(gsub(",", "", votes.2016[,i]))
}
votes.2016$total[is.na(votes.2016$total)]= 0
votes.2016 = votes.2016[!is.na(town.votes$Biden),]
votes.2016$Other = votes.2016$Other + votes.2016$total + votes.2016$X2
votes.2016$total = votes.2016$Biden + votes.2016$Trump + votes.2016$Other
votes.2016$town.name[grepl("Lebanon", votes.2016$town.name, fixed=T)] = "Lebanon"
town.votes$Biden = as.numeric(gsub(",", "", town.votes$Biden))
town.votes$Trump = as.numeric(gsub(",", "", town.votes$Trump))
town.votes$Other = as.numeric(gsub(",", "", town.votes$Other))
town.votes$total = town.votes$Biden + town.votes$Trump + town.votes$Other
town.votes = town.votes[!is.na(town.votes$Biden),]
town.votes$town.name[grepl("Lebanon", town.votes$town.name, fixed=T)] = "Lebanon"
grafton$biden.votes = 0
grafton$trump.votes = 0
grafton$other.votes = 0
for (i in 1:19){
for (j in 1:dim(town.votes)[1]) {
if (grepl(town.votes$town.name[j], grafton$town[i], fixed=T)){
grafton$biden.votes.2020[i] = grafton$biden.votes[i] + town.votes$Biden[j]
grafton$trump.votes.2020[i] = grafton$trump.votes[i] + town.votes$Trump[j]
grafton$other.votes.2020[i] = grafton$other.votes[i] + town.votes$Other[j]
grafton$biden.votes.2016[i] = grafton$biden.votes[i] + votes.2016$Biden[j]
grafton$trump.votes.2016[i] = grafton$trump.votes[i] + votes.2016$Trump[j]
grafton$other.votes.2016[i] = grafton$other.votes[i] + votes.2016$Other[j]
}
}
}
grafton$total.2020 = grafton$biden.votes.2020 + grafton$trump.votes.2020 + grafton$other.votes.2020
grafton$biden.share.2020 = grafton$biden.votes.2020 / grafton$total.2020
grafton$trump.share.2020 = grafton$trump.votes.2020 / grafton$total.2020
grafton$other.share.2020 = grafton$other.votes.2020 / grafton$total.2020
grafton$biden.margin.2020 = (grafton$biden.share.2020 - grafton$trump.share.2020) * 100
grafton$total.2016 = grafton$biden.votes.2016 + grafton$trump.votes.2016 + grafton$other.votes.2016
grafton$biden.share.2016 = grafton$biden.votes.2016 / grafton$total.2016
grafton$trump.share.2016 = grafton$trump.votes.2016 / grafton$total.2016
grafton$other.share.2016 = grafton$other.votes.2016 / grafton$total.2016
grafton$biden.margin.2016 = (grafton$biden.share.2016 - grafton$trump.share.2016) * 100
grafton$biden.margin.diff = grafton$biden.margin.2020 - grafton$biden.margin.2016
grafton.data = grafton[,7:18]
grafton.data$med.hh.inc = grafton$estimate
grafton.data$high.inc = grafton.data$med.hh.inc > 60000
grafton.data = grafton.data[-c(17,19),]
ed <- get_acs(year=2018, geography = "tract", state="NH", county="Grafton", geometry = TRUE,
variables = c(total = "B06009_001",
some.college = "B06009_004",
college = "B06009_005",
grad.degree = "B06009_006"))
grafton$ed = (ed$estimate[ed$variable == "some.college"] +
ed$estimate[ed$variable == "college"] +
ed$estimate[ed$variable == "grad.degree"]) /
ed$estimate[ed$variable == "total"]
grafton.data$ed = grafton$ed[-c(17,19)]
grafton.data$total.2020 = grafton.data$biden.votes.2020 + grafton.data$trump.votes.2020 + grafton.data$other.votes.2020
|
\name{ellipseCA}
\alias{ellipseCA}
\title{Draw confidence ellipses in CA}
\description{
Draw confidence ellipses in CA around rows and/or columns.
}
\usage{
ellipseCA (x, ellipse=c("col","row"), method="multinomial", nbsample=100,
axes=c(1,2), xlim=NULL, ylim=NULL, col.row="red", col.col="blue",
col.row.ell=col.row, col.col.ell=col.col, \dots)
}
\arguments{
\item{x}{an object of class CA}
\item{ellipse}{a vector of character that defines which ellipses are drawn}
\item{method}{the method to construct ellipses (see details below)}
\item{nbsample}{number of samples drawn to evaluate the stability of the points}
\item{axes}{a length 2 vector specifying the components to plot}
\item{xlim}{range for the plotted 'x' values, defaulting to the range of the finite values of 'x'}
\item{ylim}{range for the plotted 'y' values, defaulting to the range of the finite values of 'y'}
\item{col.row}{a color for the rows points}
\item{col.col}{a color for columns points}
\item{col.row.ell}{a color for the ellipses of rows points (the color "transparent" can be used if an ellipse should not be drawn)}
\item{col.col.ell}{a color for the ellipses of columns points (the color "transparent" can be used if an ellipse should not be drawn)}
\item{\dots}{further arguments passed to or from the plot.CA function, such as title, invisible, ...}
}
\value{
Returns the factor map with the joint plot of CA with ellipses around some elements.
}
\details{
With \code{method="multinomial"}, the table X with the active elements is taken as a reference. Then new data tables are drawn in the following way:
N (the sum of X) values are drawn from a multinomial distribution with theoretical frequencies equals to the values in the cells divided by N.\cr
With \code{method="boot"}, the values are bootstrapped row by row: Ni (the sum of row i in the X table) values are taken in a vector with Nij equals to column j (with j varying from 1 to J).
Thus \code{nbsample} new datasets are drawn and projected as supplementary rows and/or supplementary columns. Then confidence ellipses are drawn for each elements thanks to the \code{nbsample} supplementary points.
}
\references{
Lebart, L., Morineau, A. and Piron, M. (1995) Statistique exploratoire multidimensionnelle, \emph{Dunod}.
}
\author{Francois Husson \email{Francois.Husson@agrocampus-ouest.fr}}
\seealso{ \code{\link{plot.CA}}, \code{\link{CA}}}
\examples{
data(children)
res.ca <- CA (children, col.sup = 6:8, row.sup = 15:18)
## Ellipses for all the active elements
ellipseCA(res.ca)
## Ellipses around some columns only
ellipseCA(res.ca,ellipse="col",col.col.ell=c(rep("blue",2),rep("transparent",3)),
invisible=c("row.sup","col.sup"))
}
\keyword{multivariate}
| /man/ellipseCA.Rd | no_license | Rpython1/FactoMineR | R | false | false | 2,754 | rd | \name{ellipseCA}
\alias{ellipseCA}
\title{Draw confidence ellipses in CA}
\description{
Draw confidence ellipses in CA around rows and/or columns.
}
\usage{
ellipseCA (x, ellipse=c("col","row"), method="multinomial", nbsample=100,
axes=c(1,2), xlim=NULL, ylim=NULL, col.row="red", col.col="blue",
col.row.ell=col.row, col.col.ell=col.col, \dots)
}
\arguments{
\item{x}{an object of class CA}
\item{ellipse}{a vector of character that defines which ellipses are drawn}
\item{method}{the method to construct ellipses (see details below)}
\item{nbsample}{number of samples drawn to evaluate the stability of the points}
\item{axes}{a length 2 vector specifying the components to plot}
\item{xlim}{range for the plotted 'x' values, defaulting to the range of the finite values of 'x'}
\item{ylim}{range for the plotted 'y' values, defaulting to the range of the finite values of 'y'}
\item{col.row}{a color for the rows points}
\item{col.col}{a color for columns points}
\item{col.row.ell}{a color for the ellipses of rows points (the color "transparent" can be used if an ellipse should not be drawn)}
\item{col.col.ell}{a color for the ellipses of columns points (the color "transparent" can be used if an ellipse should not be drawn)}
\item{\dots}{further arguments passed to or from the plot.CA function, such as title, invisible, ...}
}
\value{
Returns the factor map with the joint plot of CA with ellipses around some elements.
}
\details{
With \code{method="multinomial"}, the table X with the active elements is taken as a reference. Then new data tables are drawn in the following way:
N (the sum of X) values are drawn from a multinomial distribution with theoretical frequencies equals to the values in the cells divided by N.\cr
With \code{method="boot"}, the values are bootstrapped row by row: Ni (the sum of row i in the X table) values are taken in a vector with Nij equals to column j (with j varying from 1 to J).
Thus \code{nbsample} new datasets are drawn and projected as supplementary rows and/or supplementary columns. Then confidence ellipses are drawn for each elements thanks to the \code{nbsample} supplementary points.
}
\references{
Lebart, L., Morineau, A. and Piron, M. (1995) Statistique exploratoire multidimensionnelle, \emph{Dunod}.
}
\author{Francois Husson \email{Francois.Husson@agrocampus-ouest.fr}}
\seealso{ \code{\link{plot.CA}}, \code{\link{CA}}}
\examples{
data(children)
res.ca <- CA (children, col.sup = 6:8, row.sup = 15:18)
## Ellipses for all the active elements
ellipseCA(res.ca)
## Ellipses around some columns only
ellipseCA(res.ca,ellipse="col",col.col.ell=c(rep("blue",2),rep("transparent",3)),
invisible=c("row.sup","col.sup"))
}
\keyword{multivariate}
|
#' Symmetric AES encryption
#'
#' Low-level symmetric encryption/decryption using the AES block cipher in CBC mode.
#' The key is a raw vector, for example a hash of some secret. When no shared
#' secret is available, a random key can be used which is exchanged via an
#' asymettric protocol such as RSA. See \code{\link{rsa_encrypt}} for a worked example
#' or \code{\link{encrypt_envelope}} for a high-level wrapper combining AES and RSA.
#'
#' @export
#' @rdname aes_cbc
#' @name aes_cbc
#' @param data raw vector or path to file with data to encrypt or decrypt
#' @param key raw vector of length 16, 24 or 32, e.g. the hash of a shared secret
#' @param iv raw vector of length 16 (aes block size) or NULL. The initialization vector
#' is not secret but should be random
#' @examples # aes-256 requires 32 byte key
#' passphrase <- charToRaw("This is super secret")
#' key <- sha256(passphrase)
#'
#' # symmetric encryption uses same key for decryption
#' x <- serialize(iris, NULL)
#' y <- aes_cbc_encrypt(x, key = key)
#' x2 <- aes_cbc_decrypt(y, key = key)
#' stopifnot(identical(x, x2))
aes_cbc_encrypt <- function(data, key, iv = rand_bytes(16)){
  # Accept either a file path (read into a raw vector) or a raw vector.
  data <- path_or_raw(data)
  if(!is.raw(data))
    stop("The 'data' must be a path to a file or a raw vector")
  # Encrypt, then attach the (non-secret) IV as an attribute so the ciphertext
  # is self-describing: aes_cbc_decrypt() reads attr(data, "iv") by default.
  out <- aes_cbc(data, key, iv, TRUE)
  structure(out, iv = iv)
}
#' @export
#' @rdname aes_cbc
aes_cbc_decrypt <- function(data, key, iv = attr(data, "iv")){
  # Accept either a file path or a raw vector, mirroring aes_cbc_encrypt().
  data <- path_or_raw(data)
  if(!is.raw(data))
    stop("The 'data' must be a path to a file or a raw vector")
  # The IV defaults to the attribute stored by aes_cbc_encrypt().
  aes_cbc(data, key, iv, FALSE)
}
#' @useDynLib openssl R_aes_cbc
aes_cbc <- function(x, key, iv = NULL, encrypt){
  # Internal workhorse: dispatch to the compiled AES-CBC routine in either
  # direction (encrypt = TRUE/FALSE).
  # A NULL IV stands for one all-zero AES block (16 zero bytes).
  if (is.null(iv)) {
    iv <- raw(16)
  }
  # Validate argument types before handing off to the native routine.
  stopifnot(is.raw(x), is.raw(key), is.raw(iv), is.logical(encrypt))
  .Call(R_aes_cbc, x, key, iv, encrypt)
}
| /openssl/R/aes.R | no_license | ingted/R-Examples | R | false | false | 1,826 | r | #' Symmetric AES encryption
#'
#' Low-level symmetric encryption/decryption using the AES block cipher in CBC mode.
#' The key is a raw vector, for example a hash of some secret. When no shared
#' secret is available, a random key can be used which is exchanged via an
#' asymmetric protocol such as RSA. See \code{\link{rsa_encrypt}} for a worked example
#' or \code{\link{encrypt_envelope}} for a high-level wrapper combining AES and RSA.
#'
#' @export
#' @rdname aes_cbc
#' @name aes_cbc
#' @param data raw vector or path to file with data to encrypt or decrypt
#' @param key raw vector of length 16, 24 or 32, e.g. the hash of a shared secret
#' @param iv raw vector of length 16 (aes block size) or NULL. The initialization vector
#' is not secret but should be random
#' @examples # aes-256 requires 32 byte key
#' passphrase <- charToRaw("This is super secret")
#' key <- sha256(passphrase)
#'
#' # symmetric encryption uses same key for decryption
#' x <- serialize(iris, NULL)
#' y <- aes_cbc_encrypt(x, key = key)
#' x2 <- aes_cbc_decrypt(y, key = key)
#' stopifnot(identical(x, x2))
aes_cbc_encrypt <- function(data, key, iv = rand_bytes(16)){
  data <- path_or_raw(data)
  # Reject anything path_or_raw() could not resolve to raw bytes.
  # (Original message read "must path to a file" -- grammar fixed.)
  if(!is.raw(data))
    stop("The 'data' must be a path to a file or a raw vector")
  out <- aes_cbc(data, key, iv, TRUE)
  # Attach the IV as an attribute so the ciphertext is self-describing
  # for aes_cbc_decrypt(), whose 'iv' default reads attr(data, "iv").
  structure(out, iv = iv)
}
#' @export
#' @rdname aes_cbc
aes_cbc_decrypt <- function(data, key, iv = attr(data, "iv")){
  data <- path_or_raw(data)
  # path_or_raw() accepts a file path as well, so the error message must say
  # so (the original claimed only raw vectors were accepted; inconsistent
  # with aes_cbc_encrypt).
  if(!is.raw(data))
    stop("The 'data' must be a path to a file or a raw vector")
  # Trailing semicolon removed; last expression is the return value.
  aes_cbc(data, key, iv, FALSE)
}
#' Internal workhorse for aes_cbc_encrypt()/aes_cbc_decrypt().
#'
#' @param x raw vector with the data to process
#' @param key raw AES key (16, 24 or 32 bytes)
#' @param iv raw 16-byte initialization vector, or NULL for an all-zero IV
#' @param encrypt logical: TRUE to encrypt, FALSE to decrypt
#' @useDynLib openssl R_aes_cbc
aes_cbc <- function(x, key, iv = NULL, encrypt){
  # A missing IV defaults to 16 zero bytes (raw(16) is all zeros).
  if(is.null(iv))
    iv <- raw(16)
  stopifnot(is.raw(x), is.raw(key), is.raw(iv), is.logical(encrypt))
  .Call(R_aes_cbc, x, key, iv, encrypt)
}
|
getwd()
# NOTE(review): setwd() makes the script fragile; consider here::here() -- confirm.
setwd('~/Documents/Github/morphological-processing-project/')

# load libraries
library('WpmWithLdl')
library('readr')
library('dplyr')
library('MASS')

# load data ('/data')
# file1: chars_pic.csv -- character-bitmap filename mapping created by match_char_pic.py
# file2: dict.csv -- dict created by read_dict.py
# file3: Sbig.rda -- semantic matrix

# Read the character-to-bitmap-filename mapping, then extract graphic (HOG)
# cues for each character bitmap (get_graphic_cues -- presumably from
# WpmWithLdl; confirm).
char_to_pic <- read_csv('data/chars_pic.csv')
all_chars <- char_to_pic$char
pic_files <- char_to_pic$pic
cues <- get_graphic_cues(allwords = all_chars,
                         fileNames = pic_files,
                         folder = 'data/files_char',
                         ext = ".png")
write.csv(cues, file = 'data/chars_hog.csv')
# Build a cleaned dictionary (dict_new) from the raw dict:
#   - one-component characters should have structure 'SG'
#   - two-component characters should NOT have structure 'SG'
dict <- read_csv('data/dict.csv', col_names = TRUE)
unique(dict$Str)  # inspect: 6 unique structures

one <- dict[is.na(dict$Com2), ]            # 408 characters with 1 component
one_problematic <- one[one$Str != 'SG', ]  # with 1 component but not SG structure
one_clean <- setdiff(one, one_problematic)

two <- dict[!is.na(dict$Com2) & is.na(dict$Com3), ]
two_problematic <- two[two$Str == 'SG', ]  # with 2 components but SG structure
two_clean <- setdiff(two, two_problematic) # 2 components only AND not SG structure
twochar <- two_clean$Cha
more <- dict[!is.na(dict$Com3), ]          # 3+ components (not used below)

sg <- read_csv('data/sg.csv', col_names = TRUE, col_types = cols('SeR' = col_character(),
                                                                 'PhR' = col_character(),
                                                                 'Com2' = col_character(),
                                                                 'Com3' = col_character(),
                                                                 'Com4' = col_character(),
                                                                 'Com5' = col_character()))

# Characters appearing in both one_clean and sg; sg supersedes them.
overlap <- intersect(one_clean$Cha, sg$Cha)
# BUG FIX: the original 'one_clean$Cha != overlap' recycled 'overlap'
# element-wise whenever it had length > 1, silently keeping/dropping the
# wrong rows. Set membership needs %in%.
one_new <- union(one_clean[!(one_clean$Cha %in% overlap), ], sg)
distinct(one_new)  # sanity check only (result discarded); union() already de-duplicates

dict_new <- union(one_new, two_clean)
save(dict_new, file = 'dict_new.Rda')
write.csv(dict_new, file = 'data/dict_new.csv', fileEncoding = 'UTF-8', row.names = FALSE, quote = FALSE)
# Re-read so downstream code works with exactly what was written to disk
dict <- read_csv('data/dict_new.csv', col_names = TRUE)
# Align the radical/position matrix, the HOG matrix, and the semantic matrix
# (Sbig) on the set of characters common to all three, then export them.
rad_po <- read.csv('data/rad_po_data.csv')
ch_rad_po <- read.csv('data/chars_rad_po_matrix.csv', header = TRUE, row.names = 1)
ch_hog <- read.csv('data/chars_hog_matrix.csv', header = TRUE, row.names = 1)
load('data/Sbig.rda')  # provides Sbig

# Promote row names (the characters) to an explicit key column "Cha"
ch_rad_po_temp <- tibble::rownames_to_column(ch_rad_po, "Cha")
ch_hog_temp <- tibble::rownames_to_column(ch_hog, "Cha")
Sbig_temp <- tibble::rownames_to_column(as.data.frame(Sbig), "Cha")

# Characters present in all three data sets
Cha_common <- Reduce(intersect, list(ch_rad_po_temp$Cha, ch_hog_temp$Cha, Sbig_temp$Cha))

# Keep only the shared characters, order rows identically across the three
# frames, then drop the key column (first column).
keep_common <- function(df) {
  out <- df %>%
    filter(Cha %in% Cha_common) %>%
    arrange(Cha)
  out[, -1]
}
ch_rad_po_clean <- keep_common(ch_rad_po_temp)
ch_hog_clean <- keep_common(ch_hog_temp)
Sbig_clean <- keep_common(Sbig_temp)

ch_hog_clean_matrix <- as.matrix(ch_hog_clean)
ch_rad_po_clean_matrix <- as.matrix(ch_rad_po_clean)
Sbig_clean_matrix <- as.matrix(Sbig_clean)

# Drop two problematic rows by position from each matrix.
# NOTE(review): hard-coded indices 1314/3670 -- document which characters these are.
drop_rows <- c(1314, 3670)
ch_hog_clean_matrix_removed <- ch_hog_clean_matrix[-drop_rows, ]
ch_rad_po_clean_matrix_removed <- ch_rad_po_clean_matrix[-drop_rows, ]
Sbig_clean_matrix_removed <- Sbig_clean_matrix[-drop_rows, ]

save(ch_hog_clean_matrix, file = 'ch_hog_clean_matrix.Rda')
save(ch_rad_po_clean_matrix, file = 'ch_rad_po_clean_matrix.Rda')
save(Sbig_clean_matrix, file = 'Sbig_clean_matrix.Rda')

# Headerless, unquoted CSV export with fixed encoding, shared by all matrices
write_matrix_csv <- function(m, path) {
  write.table(m, file = path, quote = FALSE, sep = ",", row.names = FALSE,
              col.names = FALSE, fileEncoding = "UTF-8")
}
write_matrix_csv(ch_hog_clean_matrix, 'hog_matrix.csv')
write_matrix_csv(ch_rad_po_clean_matrix, 'rad_matrix.csv')
write_matrix_csv(Sbig_clean_matrix, 'Sbig_matrix.csv')
write_matrix_csv(ch_hog_clean_matrix_removed, 'hog_matrix_removed.csv')
write_matrix_csv(ch_rad_po_clean_matrix_removed, 'rad_matrix_removed.csv')
write_matrix_csv(Sbig_clean_matrix_removed, 'Sbig_matrix_removed.csv')
| /script.R | no_license | MegamindHenry/morphological-processing-project | R | false | false | 4,523 | r | getwd()
# NOTE(review): setwd() makes the script fragile; consider here::here() -- confirm.
setwd('~/Documents/Github/morphological-processing-project/')

# load libraries
library('WpmWithLdl')
library('readr')
library('dplyr')
library('MASS')

# load data ('/data')
# file1: chars_pic.csv -- character-bitmap filename mapping created by match_char_pic.py
# file2: dict.csv -- dict created by read_dict.py
# file3: Sbig.rda -- semantic matrix

# Read the character-to-bitmap-filename mapping, then extract graphic (HOG)
# cues for each character bitmap (get_graphic_cues -- presumably from
# WpmWithLdl; confirm).
char_to_pic <- read_csv('data/chars_pic.csv')
all_chars <- char_to_pic$char
pic_files <- char_to_pic$pic
cues <- get_graphic_cues(allwords = all_chars,
                         fileNames = pic_files,
                         folder = 'data/files_char',
                         ext = ".png")
write.csv(cues, file = 'data/chars_hog.csv')
# Build a cleaned dictionary (dict_new) from the raw dict:
#   - one-component characters should have structure 'SG'
#   - two-component characters should NOT have structure 'SG'
dict <- read_csv('data/dict.csv', col_names = TRUE)
unique(dict$Str)  # inspect: 6 unique structures

one <- dict[is.na(dict$Com2), ]            # 408 characters with 1 component
one_problematic <- one[one$Str != 'SG', ]  # with 1 component but not SG structure
one_clean <- setdiff(one, one_problematic)

two <- dict[!is.na(dict$Com2) & is.na(dict$Com3), ]
two_problematic <- two[two$Str == 'SG', ]  # with 2 components but SG structure
two_clean <- setdiff(two, two_problematic) # 2 components only AND not SG structure
twochar <- two_clean$Cha
more <- dict[!is.na(dict$Com3), ]          # 3+ components (not used below)

sg <- read_csv('data/sg.csv', col_names = TRUE, col_types = cols('SeR' = col_character(),
                                                                 'PhR' = col_character(),
                                                                 'Com2' = col_character(),
                                                                 'Com3' = col_character(),
                                                                 'Com4' = col_character(),
                                                                 'Com5' = col_character()))

# Characters appearing in both one_clean and sg; sg supersedes them.
overlap <- intersect(one_clean$Cha, sg$Cha)
# BUG FIX: the original 'one_clean$Cha != overlap' recycled 'overlap'
# element-wise whenever it had length > 1, silently keeping/dropping the
# wrong rows. Set membership needs %in%.
one_new <- union(one_clean[!(one_clean$Cha %in% overlap), ], sg)
distinct(one_new)  # sanity check only (result discarded); union() already de-duplicates

dict_new <- union(one_new, two_clean)
save(dict_new, file = 'dict_new.Rda')
write.csv(dict_new, file = 'data/dict_new.csv', fileEncoding = 'UTF-8', row.names = FALSE, quote = FALSE)
# Re-read so downstream code works with exactly what was written to disk
dict <- read_csv('data/dict_new.csv', col_names = TRUE)
# Align the radical/position matrix, the HOG matrix, and the semantic matrix
# (Sbig) on the set of characters common to all three, then export them.
rad_po <- read.csv('data/rad_po_data.csv')
ch_rad_po <- read.csv('data/chars_rad_po_matrix.csv', header = TRUE, row.names = 1)
ch_hog <- read.csv('data/chars_hog_matrix.csv', header = TRUE, row.names = 1)
load('data/Sbig.rda')  # provides Sbig

# Promote row names (the characters) to an explicit key column "Cha"
ch_rad_po_temp <- tibble::rownames_to_column(ch_rad_po, "Cha")
ch_hog_temp <- tibble::rownames_to_column(ch_hog, "Cha")
Sbig_temp <- tibble::rownames_to_column(as.data.frame(Sbig), "Cha")

# Characters present in all three data sets
Cha_common <- Reduce(intersect, list(ch_rad_po_temp$Cha, ch_hog_temp$Cha, Sbig_temp$Cha))

# Keep only the shared characters, order rows identically across the three
# frames, then drop the key column (first column).
keep_common <- function(df) {
  out <- df %>%
    filter(Cha %in% Cha_common) %>%
    arrange(Cha)
  out[, -1]
}
ch_rad_po_clean <- keep_common(ch_rad_po_temp)
ch_hog_clean <- keep_common(ch_hog_temp)
Sbig_clean <- keep_common(Sbig_temp)

ch_hog_clean_matrix <- as.matrix(ch_hog_clean)
ch_rad_po_clean_matrix <- as.matrix(ch_rad_po_clean)
Sbig_clean_matrix <- as.matrix(Sbig_clean)

# Drop two problematic rows by position from each matrix.
# NOTE(review): hard-coded indices 1314/3670 -- document which characters these are.
drop_rows <- c(1314, 3670)
ch_hog_clean_matrix_removed <- ch_hog_clean_matrix[-drop_rows, ]
ch_rad_po_clean_matrix_removed <- ch_rad_po_clean_matrix[-drop_rows, ]
Sbig_clean_matrix_removed <- Sbig_clean_matrix[-drop_rows, ]

save(ch_hog_clean_matrix, file = 'ch_hog_clean_matrix.Rda')
save(ch_rad_po_clean_matrix, file = 'ch_rad_po_clean_matrix.Rda')
save(Sbig_clean_matrix, file = 'Sbig_clean_matrix.Rda')

# Headerless, unquoted CSV export with fixed encoding, shared by all matrices
write_matrix_csv <- function(m, path) {
  write.table(m, file = path, quote = FALSE, sep = ",", row.names = FALSE,
              col.names = FALSE, fileEncoding = "UTF-8")
}
write_matrix_csv(ch_hog_clean_matrix, 'hog_matrix.csv')
write_matrix_csv(ch_rad_po_clean_matrix, 'rad_matrix.csv')
write_matrix_csv(Sbig_clean_matrix, 'Sbig_matrix.csv')
write_matrix_csv(ch_hog_clean_matrix_removed, 'hog_matrix_removed.csv')
write_matrix_csv(ch_rad_po_clean_matrix_removed, 'rad_matrix_removed.csv')
write_matrix_csv(Sbig_clean_matrix_removed, 'Sbig_matrix_removed.csv')
|
# Plot 4: 2x2 panel of household power-consumption plots for 2007-02-01/02.
# NOTE(review): file.choose() is interactive; kept to preserve the workflow.
power <- read.table(file.choose(), sep = ";", header = TRUE,
                    stringsAsFactors = FALSE, na.strings = "?")
power$Date <- as.Date(power$Date, format = "%d/%m/%Y")
df <- subset(power, Date == "2007-02-01" | Date == "2007-02-02")
df$Global_active_power <- as.numeric(df$Global_active_power)
# BUG FIX: the time format string was passed as a stray, unnamed extra
# argument to transform() (which just appended a junk constant column);
# it belongs inside as.POSIXct() as the 'format' argument.
df <- transform(df, timestamp = as.POSIXct(paste(Date, Time),
                                           format = "%Y-%m-%d %H:%M:%S"))
df$Sub_metering_1 <- as.numeric(as.character(df$Sub_metering_1))
df$Sub_metering_2 <- as.numeric(as.character(df$Sub_metering_2))
df$Sub_metering_3 <- as.numeric(as.character(df$Sub_metering_3))

par(mfrow = c(2, 2), mar = c(4, 4, 2, 1), oma = c(0, 0, 2, 0))
with(df, {
  plot(Global_active_power ~ timestamp, type = "l", ylab = "Global Active Power", xlab = "")
  plot(Voltage ~ timestamp, type = "l", ylab = "Voltage", xlab = "datetime")
  plot(Sub_metering_1 ~ timestamp, type = "l", xlab = "", ylab = "Energy sub metering")
  lines(Sub_metering_2 ~ timestamp, col = "red")
  lines(Sub_metering_3 ~ timestamp, col = "blue")
  legend("topright", col = c("black", "red", "blue"),
         c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
         lty = 1, lwd = 2, bty = "n")
  plot(Global_reactive_power ~ timestamp, type = "l",
       ylab = "Global Reactive Power", xlab = "datetime")
})
# Copy the on-screen device to a 480x480 PNG, then close the PNG device
dev.copy(png, file = "plot4.png", width = 480, height = 480)
dev.off()
cat("plot4.png has been saved in", getwd())
| /plot4.R | no_license | Mahsa-Kiani/ExData_Plotting1 | R | false | false | 1,238 | r | power <- read.table(file.choose(), sep=";", header=TRUE, stringsAsFactors=FALSE, na.strings="?")
power$Date <- as.Date(power$Date, format = "%d/%m/%Y")
df <- subset(power, Date == "2007-02-01" | Date == "2007-02-02")
df$Global_active_power <- as.numeric(df$Global_active_power)
# BUG FIX: the time format string was passed as a stray, unnamed extra
# argument to transform() (which just appended a junk constant column);
# it belongs inside as.POSIXct() as the 'format' argument.
df <- transform(df, timestamp = as.POSIXct(paste(Date, Time),
                                           format = "%Y-%m-%d %H:%M:%S"))
df$Sub_metering_1 <- as.numeric(as.character(df$Sub_metering_1))
df$Sub_metering_2 <- as.numeric(as.character(df$Sub_metering_2))
df$Sub_metering_3 <- as.numeric(as.character(df$Sub_metering_3))

par(mfrow = c(2, 2), mar = c(4, 4, 2, 1), oma = c(0, 0, 2, 0))
with(df, {
  plot(Global_active_power ~ timestamp, type = "l", ylab = "Global Active Power", xlab = "")
  plot(Voltage ~ timestamp, type = "l", ylab = "Voltage", xlab = "datetime")
  plot(Sub_metering_1 ~ timestamp, type = "l", xlab = "", ylab = "Energy sub metering")
  lines(Sub_metering_2 ~ timestamp, col = "red")
  lines(Sub_metering_3 ~ timestamp, col = "blue")
  legend("topright", col = c("black", "red", "blue"),
         c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
         lty = 1, lwd = 2, bty = "n")
  plot(Global_reactive_power ~ timestamp, type = "l",
       ylab = "Global Reactive Power", xlab = "datetime")
})
# Copy the on-screen device to a 480x480 PNG, then close the PNG device
dev.copy(png, file = "plot4.png", width = 480, height = 480)
dev.off()
cat("plot4.png has been saved in", getwd())
|
\name{ciTableMean}
\alias{ciTableMean}
\title{
Table of Confidence Intervals for Mean or Difference Between Two Means
}
\description{
Create a table of confidence intervals for the mean of a normal distribution
or the difference between two means following Bacchetti (2010), by varying
the estimated standard deviation and the estimated mean or difference between
the two estimated means given the sample size(s).
}
\usage{
ciTableMean(n1 = 10, n2 = n1, diff.or.mean = 2:0, SD = 1:3,
sample.type = "two.sample", ci.type = "two.sided", conf.level = 0.95,
digits = 1)
}
\arguments{
\item{n1}{
positive integer greater than 1 specifying the sample size when \cr
\code{sample.type="one.sample"} or the sample size for group 1 when \cr
\code{sample.type="two.sample"}. The default value is \code{n1=10}.
}
\item{n2}{
positive integer greater than 1 specifying the sample size for group 2 when
\code{sample.type="two.sample"}. The default value is \code{n2=n1}, i.e.,
equal sample sizes. This argument is ignored when \code{sample.type="one.sample"}.
}
\item{diff.or.mean}{
numeric vector indicating either the assumed difference between the two sample means
when \code{sample.type="two.sample"} or the value of the sample mean when
\code{sample.type="one.sample"}. The default value is \code{diff.or.mean=2:0}.
Missing (\code{NA}), undefined (\code{NaN}), an infinite
(\code{-Inf}, \code{Inf}) values are not allowed.
}
\item{SD}{
numeric vector of positive values specifying the assumed estimated standard
deviation. The default value is \code{SD=1:3}.
Missing (\code{NA}), undefined (\code{NaN}), an infinite
(\code{-Inf}, \code{Inf}) values are not allowed.
}
\item{sample.type}{
character string specifying whether to create confidence intervals for the difference
between two means (\code{sample.type="two.sample"}; the default) or confidence
intervals for a single mean (\code{sample.type="one.sample"}).
}
\item{ci.type}{
character string indicating what kind of confidence interval to compute. The
possible values are \code{"two-sided"} (the default), \code{"lower"}, and
\code{"upper"}.
}
\item{conf.level}{
a scalar between 0 and 1 indicating the confidence level of the confidence interval.
The default value is \code{conf.level=0.95}.
}
\item{digits}{
positive integer indicating how many decimal places to display in the table. The
default value is \code{digits=1}.
}
}
\details{
Following Bacchetti (2010) (see NOTE below), the function \code{ciTableMean}
allows you to perform sensitivity analyses while planning future studies by
producing a table of confidence intervals for the mean or the difference
between two means by varying the estimated standard deviation and the
estimated mean or difference between the two estimated means given the
sample size(s).
\emph{One Sample Case} (\code{sample.type="one.sample"}) \cr
Let \eqn{\underline{x} = (x_1, x_2, \ldots, x_n)} be a vector of
\eqn{n} observations from an \link[stats:Normal]{normal (Gaussian) distribution} with
parameters \code{mean=}\eqn{\mu} and \code{sd=}\eqn{\sigma}.
The usual confidence interval for \eqn{\mu} is constructed as follows.
If \code{ci.type="two-sided"}, the \eqn{(1-\alpha)}100\% confidence interval for
\eqn{\mu} is given by:
\deqn{[\hat{\mu} - t(n-1, 1-\alpha/2) \frac{\hat{\sigma}}{\sqrt{n}}, \, \hat{\mu} + t(n-1, 1-\alpha/2) \frac{\hat{\sigma}}{\sqrt{n}}] \;\;\;\;\;\; (1)}
where
\deqn{\hat{\mu} = \bar{x} = \frac{1}{n} \sum_{i=1}^n x_i \;\;\;\;\;\; (2)}
\deqn{\hat{\sigma}^2 = s^2 = \frac{1}{n-1} \sum_{i=1}^n (x_i - \bar{x})^2 \;\;\;\;\;\; (3)}
and \eqn{t(\nu, p)} is the \eqn{p}'th quantile of
\link[stats:TDist]{Student's t-distribution} with \eqn{\nu} degrees of freedom
(Zar, 2010; Gilbert, 1987; Ott, 1995; Helsel and Hirsch, 1992).
If \code{ci.type="lower"}, the \eqn{(1-\alpha)}100\% confidence interval for
\eqn{\mu} is given by:
\deqn{[\hat{\mu} - t(n-1, 1-\alpha) \frac{\hat{\sigma}}{\sqrt{n}}, \, \infty] \;\;\;\; (4)}
and if \code{ci.type="upper"}, the confidence interval is given by:
\deqn{[-\infty, \, \hat{\mu} + t(n-1, 1-\alpha) \frac{\hat{\sigma}}{\sqrt{n}}] \;\;\;\; (5)}
For the one-sample case, the argument \code{n1} corresponds to \eqn{n} in
Equation (1), the argument \cr
\code{diff.or.mean} corresponds to
\eqn{\hat{\mu} = \bar{x}} in Equation (2), and the argument \code{SD} corresponds to
\eqn{\hat{\sigma} = s} in Equation (3).
\cr
\emph{Two Sample Case} (\code{sample.type="two.sample"}) \cr
Let \eqn{\underline{x}_1 = (x_{11}, x_{21}, \ldots, x_{n_11})} be a vector of
\eqn{n_1} observations from an \link[stats:Normal]{normal (Gaussian) distribution}
with parameters \code{mean=}\eqn{\mu_1} and \code{sd=}\eqn{\sigma}, and let
\eqn{\underline{x}_2 = (x_{12}, x_{22}, \ldots, x_{n_22})} be a vector of
\eqn{n_2} observations from an \link[stats:Normal]{normal (Gaussian) distribution}
with parameters \code{mean=}\eqn{\mu_2} and \code{sd=}\eqn{\sigma}.
The usual confidence interval for the difference between the two population means
\eqn{\mu_1 - \mu_2} is constructed as follows.
If \code{ci.type="two-sided"}, the \eqn{(1-\alpha)}100\% confidence interval for
\eqn{\mu_1 - \mu_2} is given by:
\deqn{[(\hat{\mu}_1 - \hat{\mu}_2) - t(n_1 + n_2 -2, 1-\alpha/2) \hat{\sigma}\sqrt{\frac{1}{n_1} + \frac{1}{n_2}}, \; (\hat{\mu}_1 - \hat{\mu}_2) + t(n_1 + n_2 -2, 1-\alpha/2) \hat{\sigma}\sqrt{\frac{1}{n_1} + \frac{1}{n_2}}] \;\;\;\;\;\; (6)}
where
\deqn{\hat{\mu}_1 = \bar{x}_1 = \frac{1}{n_1} \sum_{i=1}^{n_1} x_{i1} \;\;\;\;\;\; (7)}
\deqn{\hat{\mu}_2 = \bar{x}_2 = \frac{1}{n_2} \sum_{i=1}^{n_2} x_{i2} \;\;\;\;\;\; (8)}
\deqn{\hat{\sigma}^2 = s_p^2 = \frac{(n_1-1) s_1^2 + (n_2-1)s_2^2}{n_1 + n_2 - 2} \;\;\;\;\;\; (9)}
\deqn{s_1^2 = \frac{1}{n_1-1} \sum_{i=1}^{n_1} (x_{i1} - \bar{x}_1)^2 \;\;\;\;\;\; (10)}
\deqn{s_2^2 = \frac{1}{n_2-1} \sum_{i=1}^{n_2} (x_{i2} - \bar{x}_2)^2 \;\;\;\;\;\; (11)}
and \eqn{t(\nu, p)} is the \eqn{p}'th quantile of
\link[stats:TDist]{Student's t-distribution} with \eqn{\nu} degrees of freedom
(Zar, 2010; Gilbert, 1987; Ott, 1995; Helsel and Hirsch, 1992).
If \code{ci.type="lower"}, the \eqn{(1-\alpha)}100\% confidence interval for
\eqn{\mu_1 - \mu_2} is given by:
\deqn{[(\hat{\mu}_1 - \hat{\mu}_2) - t(n_1 + n_2 -2, 1-\alpha) \hat{\sigma}\sqrt{\frac{1}{n_1} + \frac{1}{n_2}}, \; \infty] \;\;\;\;\;\; (12)}
and if \code{ci.type="upper"}, the confidence interval is given by:
\deqn{[-\infty, \; (\hat{\mu}_1 - \hat{\mu}_2) + t(n_1 + n_2 -2, 1-\alpha) \hat{\sigma}\sqrt{\frac{1}{n_1} + \frac{1}{n_2}}] \;\;\;\;\;\; (13)}
For the two-sample case, the arguments \code{n1} and \code{n2} correspond to
\eqn{n_1} and \eqn{n_2} in Equation (6), the argument \code{diff.or.mean} corresponds
to \eqn{\hat{\mu_1} - \hat{\mu_2} = \bar{x}_1 - \bar{x}_2} in Equations (7) and (8),
and the argument \code{SD} corresponds to \eqn{\hat{\sigma} = s_p} in Equation (9).
}
\value{
a data frame with the rows varying the standard deviation and the columns
varying the estimated mean or difference between the means. Elements of the
data frame are character strings indicating the confidence intervals.
}
\references{
Bacchetti, P. (2010). Current sample size conventions: Flaws, Harms, and
Alternatives. \emph{BMC Medicine} \bold{8}, 17--23.
Berthouex, P.M., and L.C. Brown. (2002).
\emph{Statistics for Environmental Engineers}. Second Edition.
Lewis Publishers, Boca Raton, FL.
Gilbert, R.O. (1987). \emph{Statistical Methods for Environmental Pollution Monitoring}.
Van Nostrand Reinhold, New York, NY.
Helsel, D.R., and R.M. Hirsch. (1992).
\emph{Statistical Methods in Water Resources Research}.
Elsevier, New York, NY.
Millard, S.P., and N.K. Neerchal. (2001). \emph{Environmental Statistics with S-PLUS}.
CRC Press, Boca Raton, FL.
Ott, W.R. (1995). \emph{Environmental Statistics and Data Analysis}.
Lewis Publishers, Boca Raton, FL.
Zar, J.H. (2010). \emph{Biostatistical Analysis}. Fifth Edition.
Prentice-Hall, Upper Saddle River, NJ.
}
\author{
Steven P. Millard (\email{EnvStats@ProbStatInfo.com})
}
\note{
Bacchetti (2010) presents strong arguments against the current convention in
scientific research for computing sample size that is based on formulas that
use a fixed Type I error (usually 5\%) and a fixed minimal power (often 80\%)
without regard to costs. He notes that a key input to these formulas is a
measure of variability (usually a standard deviation) that is difficult to
measure accurately "unless there is so much preliminary data that the study
isn't really needed." Also, study designers often avoid defining what a
scientifically meaningful difference is by presenting sample size results in
terms of the effect size (i.e., the difference of interest divided by the
elusive standard deviation). Bacchetti (2010) encourages study designers to use
simple tables in a sensitivity analysis to see what results of a study may look
like for low, moderate, and high rates of variability and large, intermediate,
and no underlying differences in the populations or processes being studied.
}
\seealso{
\code{\link{enorm}}, \code{\link{t.test}}, \code{\link{ciTableProp}},
\code{\link{ciNormHalfWidth}}, \code{\link{ciNormN}},
\code{\link{plotCiNormDesign}}.
}
\examples{
# Show how potential confidence intervals for the difference between two means
# will look assuming standard deviations of 1, 2, or 3, differences between
# the two means of 2, 1, or 0, and a sample size of 10 in each group.
ciTableMean()
# Diff=2 Diff=1 Diff=0
#SD=1 [ 1.1, 2.9] [ 0.1, 1.9] [-0.9, 0.9]
#SD=2 [ 0.1, 3.9] [-0.9, 2.9] [-1.9, 1.9]
#SD=3 [-0.8, 4.8] [-1.8, 3.8] [-2.8, 2.8]
#==========
# Show how a potential confidence interval for a mean will look assuming
# standard deviations of 1, 2, or 5, a sample mean of 5, 3, or 1, and
# a sample size of 15.
ciTableMean(n1 = 15, diff.or.mean = c(5, 3, 1), SD = c(1, 2, 5), sample.type = "one")
# Mean=5 Mean=3 Mean=1
#SD=1 [ 4.4, 5.6] [ 2.4, 3.6] [ 0.4, 1.6]
#SD=2 [ 3.9, 6.1] [ 1.9, 4.1] [-0.1, 2.1]
#SD=5 [ 2.2, 7.8] [ 0.2, 5.8] [-1.8, 3.8]
#==========
# The data frame EPA.09.Ex.16.1.sulfate.df contains sulfate concentrations
# (ppm) at one background and one downgradient well. The estimated
# mean and standard deviation for the background well are 536 and 27 ppm,
# respectively, based on a sample size of n = 8 quarterly samples taken over
# 2 years. A two-sided 95% confidence interval for this mean is [514, 559],
# which has a half-width of 23 ppm.
#
# The estimated mean and standard deviation for the downgradient well are
# 608 and 18 ppm, respectively, based on a sample size of n = 6 quarterly
# samples. A two-sided 95% confidence interval for the difference between
# this mean and the background mean is [44, 100] ppm.
#
# Suppose we want to design a future sampling program and are interested in
# the size of the confidence interval for the difference between the two means.
# We will use ciTableMean to generate a table of possible confidence intervals
# by varying the assumed standard deviation and assumed differences between
# the means.
# Look at the data
#-----------------
EPA.09.Ex.16.1.sulfate.df
# Month Year Well.type Sulfate.ppm
#1 Jan 1995 Background 560
#2 Apr 1995 Background 530
#3 Jul 1995 Background 570
#4 Oct 1995 Background 490
#5 Jan 1996 Background 510
#6 Apr 1996 Background 550
#7 Jul 1996 Background 550
#8 Oct 1996 Background 530
#9 Jan 1995 Downgradient NA
#10 Apr 1995 Downgradient NA
#11 Jul 1995 Downgradient 600
#12 Oct 1995 Downgradient 590
#13 Jan 1996 Downgradient 590
#14 Apr 1996 Downgradient 630
#15 Jul 1996 Downgradient 610
#16 Oct 1996 Downgradient 630
# Compute the estimated mean and standard deviation for the
# background well.
#-----------------------------------------------------------
Sulfate.back <- with(EPA.09.Ex.16.1.sulfate.df,
Sulfate.ppm[Well.type == "Background"])
enorm(Sulfate.back, ci = TRUE)
#Results of Distribution Parameter Estimation
#--------------------------------------------
#
#Assumed Distribution: Normal
#
#Estimated Parameter(s): mean = 536.2500
# sd = 26.6927
#
#Estimation Method: mvue
#
#Data: Sulfate.back
#
#Sample Size: 8
#
#Confidence Interval for: mean
#
#Confidence Interval Method: Exact
#
#Confidence Interval Type: two-sided
#
#Confidence Level: 95%
#
#Confidence Interval: LCL = 513.9343
# UCL = 558.5657
# Compute the estimated mean and standard deviation for the
# downgradient well.
#----------------------------------------------------------
Sulfate.down <- with(EPA.09.Ex.16.1.sulfate.df,
Sulfate.ppm[Well.type == "Downgradient"])
enorm(Sulfate.down, ci = TRUE)
#Results of Distribution Parameter Estimation
#--------------------------------------------
#
#Assumed Distribution: Normal
#
#Estimated Parameter(s): mean = 608.33333
# sd = 18.34848
#
#Estimation Method: mvue
#
#Data: Sulfate.down
#
#Sample Size: 6
#
#Number NA/NaN/Inf's: 2
#
#Confidence Interval for: mean
#
#Confidence Interval Method: Exact
#
#Confidence Interval Type: two-sided
#
#Confidence Level: 95%
#
#Confidence Interval: LCL = 589.0778
# UCL = 627.5889
# Compute the estimated difference between the means and the confidence
# interval for the difference:
#----------------------------------------------------------------------
t.test(Sulfate.down, Sulfate.back, var.equal = TRUE)
#Results of Hypothesis Test
#--------------------------
#
#Null Hypothesis: difference in means = 0
#
#Alternative Hypothesis: True difference in means is not equal to 0
#
#Test Name: Two Sample t-test
#
#Estimated Parameter(s): mean of x = 608.3333
# mean of y = 536.2500
#
#Data: Sulfate.down and Sulfate.back
#
#Test Statistic: t = 5.660985
#
#Test Statistic Parameter: df = 12
#
#P-value: 0.0001054306
#
#95% Confidence Interval: LCL = 44.33974
# UCL = 99.82693
# Use ciTableMean to look how the confidence interval for the difference
# between the background and downgradient means in a future study using eight
# quarterly samples at each well varies with assumed value of the pooled standard
# deviation and the observed difference between the sample means.
#--------------------------------------------------------------------------------
# Our current estimate of the pooled standard deviation is 24 ppm:
summary(lm(Sulfate.ppm ~ Well.type, data = EPA.09.Ex.16.1.sulfate.df))$sigma
#[1] 23.57759
# We can see that if this is overly optimistic and in our next study the
# pooled standard deviation is around 50 ppm, then if the observed difference
# between the means is 50 ppm, the lower end of the confidence interval for
# the difference between the two means will include 0, so we may want to
# increase our sample size.
ciTableMean(n1 = 8, n2 = 8, diff = c(100, 50, 0), SD = c(15, 25, 50), digits = 0)
# Diff=100 Diff=50 Diff=0
#SD=15 [ 84, 116] [ 34, 66] [-16, 16]
#SD=25 [ 73, 127] [ 23, 77] [-27, 27]
#SD=50 [ 46, 154] [ -4, 104] [-54, 54]
#==========
# Clean up
#---------
rm(Sulfate.back, Sulfate.down)
}
\keyword{ distribution }
\keyword{ design }
\keyword{ htest }
| /man/ciTableMean.Rd | no_license | alexkowa/EnvStats | R | false | false | 16,885 | rd | \name{ciTableMean}
\alias{ciTableMean}
\title{
Table of Confidence Intervals for Mean or Difference Between Two Means
}
\description{
Create a table of confidence intervals for the mean of a normal distribution
or the difference between two means following Bacchetti (2010), by varying
the estimated standard deviation and the estimated mean or difference between
the two estimated means given the sample size(s).
}
\usage{
ciTableMean(n1 = 10, n2 = n1, diff.or.mean = 2:0, SD = 1:3,
sample.type = "two.sample", ci.type = "two.sided", conf.level = 0.95,
digits = 1)
}
\arguments{
\item{n1}{
positive integer greater than 1 specifying the sample size when \cr
\code{sample.type="one.sample"} or the sample size for group 1 when \cr
\code{sample.type="two.sample"}. The default value is \code{n1=10}.
}
\item{n2}{
positive integer greater than 1 specifying the sample size for group 2 when
\code{sample.type="two.sample"}. The default value is \code{n2=n1}, i.e.,
equal sample sizes. This argument is ignored when \code{sample.type="one.sample"}.
}
\item{diff.or.mean}{
numeric vector indicating either the assumed difference between the two sample means
when \code{sample.type="two.sample"} or the value of the sample mean when
\code{sample.type="one.sample"}. The default value is \code{diff.or.mean=2:0}.
Missing (\code{NA}), undefined (\code{NaN}), an infinite
(\code{-Inf}, \code{Inf}) values are not allowed.
}
\item{SD}{
numeric vector of positive values specifying the assumed estimated standard
deviation. The default value is \code{SD=1:3}.
Missing (\code{NA}), undefined (\code{NaN}), an infinite
(\code{-Inf}, \code{Inf}) values are not allowed.
}
\item{sample.type}{
character string specifying whether to create confidence intervals for the difference
between two means (\code{sample.type="two.sample"}; the default) or confidence
intervals for a single mean (\code{sample.type="one.sample"}).
}
\item{ci.type}{
character string indicating what kind of confidence interval to compute. The
possible values are \code{"two-sided"} (the default), \code{"lower"}, and
\code{"upper"}.
}
\item{conf.level}{
a scalar between 0 and 1 indicating the confidence level of the confidence interval.
The default value is \code{conf.level=0.95}.
}
\item{digits}{
positive integer indicating how many decimal places to display in the table. The
default value is \code{digits=1}.
}
}
\details{
Following Bacchetti (2010) (see NOTE below), the function \code{ciTableMean}
allows you to perform sensitivity analyses while planning future studies by
producing a table of confidence intervals for the mean or the difference
between two means by varying the estimated standard deviation and the
estimated mean or difference between the two estimated means given the
sample size(s).
\emph{One Sample Case} (\code{sample.type="one.sample"}) \cr
Let \eqn{\underline{x} = (x_1, x_2, \ldots, x_n)} be a vector of
\eqn{n} observations from an \link[stats:Normal]{normal (Gaussian) distribution} with
parameters \code{mean=}\eqn{\mu} and \code{sd=}\eqn{\sigma}.
The usual confidence interval for \eqn{\mu} is constructed as follows.
If \code{ci.type="two-sided"}, the \eqn{(1-\alpha)}100\% confidence interval for
\eqn{\mu} is given by:
\deqn{[\hat{\mu} - t(n-1, 1-\alpha/2) \frac{\hat{\sigma}}{\sqrt{n}}, \, \hat{\mu} + t(n-1, 1-\alpha/2) \frac{\hat{\sigma}}{\sqrt{n}}] \;\;\;\;\;\; (1)}
where
\deqn{\hat{\mu} = \bar{x} = \frac{1}{n} \sum_{i=1}^n x_i \;\;\;\;\;\; (2)}
\deqn{\hat{\sigma}^2 = s^2 = \frac{1}{n-1} \sum_{i=1}^n (x_i - \bar{x})^2 \;\;\;\;\;\; (3)}
and \eqn{t(\nu, p)} is the \eqn{p}'th quantile of
\link[stats:TDist]{Student's t-distribution} with \eqn{\nu} degrees of freedom
(Zar, 2010; Gilbert, 1987; Ott, 1995; Helsel and Hirsch, 1992).
If \code{ci.type="lower"}, the \eqn{(1-\alpha)}100\% confidence interval for
\eqn{\mu} is given by:
\deqn{[\hat{\mu} - t(n-1, 1-\alpha) \frac{\hat{\sigma}}{\sqrt{n}}, \, \infty] \;\;\;\; (4)}
and if \code{ci.type="upper"}, the confidence interval is given by:
\deqn{[-\infty, \, \hat{\mu} + t(n-1, 1-\alpha) \frac{\hat{\sigma}}{\sqrt{n}}] \;\;\;\; (5)}
For the one-sample case, the argument \code{n1} corresponds to \eqn{n} in
Equation (1), the argument \cr
\code{diff.or.mean} corresponds to
\eqn{\hat{\mu} = \bar{x}} in Equation (2), and the argument \code{SD} corresponds to
\eqn{\hat{\sigma} = s} in Equation (3).
\cr
\emph{Two Sample Case} (\code{sample.type="two.sample"}) \cr
Let \eqn{\underline{x}_1 = (x_{11}, x_{21}, \ldots, x_{n_11})} be a vector of
\eqn{n_1} observations from an \link[stats:Normal]{normal (Gaussian) distribution}
with parameters \code{mean=}\eqn{\mu_1} and \code{sd=}\eqn{\sigma}, and let
\eqn{\underline{x}_2 = (x_{12}, x_{22}, \ldots, x_{n_22})} be a vector of
\eqn{n_2} observations from an \link[stats:Normal]{normal (Gaussian) distribution}
with parameters \code{mean=}\eqn{\mu_2} and \code{sd=}\eqn{\sigma}.
The usual confidence interval for the difference between the two population means
\eqn{\mu_1 - \mu_2} is constructed as follows.
If \code{ci.type="two-sided"}, the \eqn{(1-\alpha)}100\% confidence interval for
\eqn{\mu_1 - \mu_2} is given by:
\deqn{[(\hat{\mu}_1 - \hat{\mu}_2) - t(n_1 + n_2 -2, 1-\alpha/2) \hat{\sigma}\sqrt{\frac{1}{n_1} + \frac{1}{n_2}}, \; (\hat{\mu}_1 - \hat{\mu}_2) + t(n_1 + n_2 -2, 1-\alpha/2) \hat{\sigma}\sqrt{\frac{1}{n_1} + \frac{1}{n_2}}] \;\;\;\;\;\; (6)}
where
\deqn{\hat{\mu}_1 = \bar{x}_1 = \frac{1}{n_1} \sum_{i=1}^{n_1} x_{i1} \;\;\;\;\;\; (7)}
\deqn{\hat{\mu}_2 = \bar{x}_2 = \frac{1}{n_2} \sum_{i=1}^{n_2} x_{i2} \;\;\;\;\;\; (8)}
\deqn{\hat{\sigma}^2 = s_p^2 = \frac{(n_1-1) s_1^2 + (n_2-1)s_2^2}{n_1 + n_2 - 2} \;\;\;\;\;\; (9)}
\deqn{s_1^2 = \frac{1}{n_1-1} \sum_{i=1}^{n_1} (x_{i1} - \bar{x}_1)^2 \;\;\;\;\;\; (10)}
\deqn{s_2^2 = \frac{1}{n_2-1} \sum_{i=1}^{n_2} (x_{i2} - \bar{x}_2)^2 \;\;\;\;\;\; (11)}
and \eqn{t(\nu, p)} is the \eqn{p}'th quantile of
\link[stats:TDist]{Student's t-distribution} with \eqn{\nu} degrees of freedom
(Zar, 2010; Gilbert, 1987; Ott, 1995; Helsel and Hirsch, 1992).
If \code{ci.type="lower"}, the \eqn{(1-\alpha)}100\% confidence interval for
\eqn{\mu_1 - \mu_2} is given by:
\deqn{[(\hat{\mu}_1 - \hat{\mu}_2) - t(n_1 + n_2 -2, 1-\alpha) \hat{\sigma}\sqrt{\frac{1}{n_1} + \frac{1}{n_2}}, \; \infty] \;\;\;\;\;\; (12)}
and if \code{ci.type="upper"}, the confidence interval is given by:
\deqn{[-\infty, \; (\hat{\mu}_1 - \hat{\mu}_2) + t(n_1 + n_2 -2, 1-\alpha) \hat{\sigma}\sqrt{\frac{1}{n_1} + \frac{1}{n_2}}] \;\;\;\;\;\; (13)}
For the two-sample case, the arguments \code{n1} and \code{n2} correspond to
\eqn{n_1} and \eqn{n_2} in Equation (6), the argument \code{diff.or.mean} corresponds
to \eqn{\hat{\mu_1} - \hat{\mu_2} = \bar{x}_1 - \bar{x}_2} in Equations (7) and (8),
and the argument \code{SD} corresponds to \eqn{\hat{\sigma} = s_p} in Equation (9).
}
\value{
a data frame with the rows varying the standard deviation and the columns
varying the estimated mean or difference between the means. Elements of the
data frame are character strings indicating the confidence intervals.
}
\references{
Bacchetti, P. (2010). Current sample size conventions: Flaws, Harms, and
Alternatives. \emph{BMC Medicine} \bold{8}, 17--23.
Berthouex, P.M., and L.C. Brown. (2002).
\emph{Statistics for Environmental Engineers}. Second Edition.
Lewis Publishers, Boca Raton, FL.
Gilbert, R.O. (1987). \emph{Statistical Methods for Environmental Pollution Monitoring}.
Van Nostrand Reinhold, New York, NY.
Helsel, D.R., and R.M. Hirsch. (1992).
\emph{Statistical Methods in Water Resources Research}.
Elsevier, New York, NY.
Millard, S.P., and N.K. Neerchal. (2001). \emph{Environmental Statistics with S-PLUS}.
CRC Press, Boca Raton, FL.
Ott, W.R. (1995). \emph{Environmental Statistics and Data Analysis}.
Lewis Publishers, Boca Raton, FL.
Zar, J.H. (2010). \emph{Biostatistical Analysis}. Fifth Edition.
Prentice-Hall, Upper Saddle River, NJ.
}
\author{
Steven P. Millard (\email{EnvStats@ProbStatInfo.com})
}
\note{
Bacchetti (2010) presents strong arguments against the current convention in
scientific research for computing sample size that is based on formulas that
use a fixed Type I error (usually 5\%) and a fixed minimal power (often 80\%)
without regard to costs. He notes that a key input to these formulas is a
measure of variability (usually a standard deviation) that is difficult to
measure accurately "unless there is so much preliminary data that the study
isn't really needed." Also, study designers often avoid defining what a
scientifically meaningful difference is by presenting sample size results in
terms of the effect size (i.e., the difference of interest divided by the
elusive standard deviation). Bacchetti (2010) encourages study designers to use
simple tables in a sensitivity analysis to see what results of a study may look
like for low, moderate, and high rates of variability and large, intermediate,
and no underlying differences in the populations or processes being studied.
}
\seealso{
\code{\link{enorm}}, \code{\link{t.test}}, \code{\link{ciTableProp}},
\code{\link{ciNormHalfWidth}}, \code{\link{ciNormN}},
\code{\link{plotCiNormDesign}}.
}
\examples{
# Show how potential confidence intervals for the difference between two means
# will look assuming standard deviations of 1, 2, or 3, differences between
# the two means of 2, 1, or 0, and a sample size of 10 in each group.
ciTableMean()
# Diff=2 Diff=1 Diff=0
#SD=1 [ 1.1, 2.9] [ 0.1, 1.9] [-0.9, 0.9]
#SD=2 [ 0.1, 3.9] [-0.9, 2.9] [-1.9, 1.9]
#SD=3 [-0.8, 4.8] [-1.8, 3.8] [-2.8, 2.8]
#==========
# Show how a potential confidence interval for a mean will look assuming
# standard deviations of 1, 2, or 5, a sample mean of 5, 3, or 1, and
# a sample size of 15.
ciTableMean(n1 = 15, diff.or.mean = c(5, 3, 1), SD = c(1, 2, 5), sample.type = "one")
# Mean=5 Mean=3 Mean=1
#SD=1 [ 4.4, 5.6] [ 2.4, 3.6] [ 0.4, 1.6]
#SD=2 [ 3.9, 6.1] [ 1.9, 4.1] [-0.1, 2.1]
#SD=5 [ 2.2, 7.8] [ 0.2, 5.8] [-1.8, 3.8]
#==========
# The data frame EPA.09.Ex.16.1.sulfate.df contains sulfate concentrations
# (ppm) at one background and one downgradient well. The estimated
# mean and standard deviation for the background well are 536 and 27 ppm,
# respectively, based on a sample size of n = 8 quarterly samples taken over
# 2 years. A two-sided 95% confidence interval for this mean is [514, 559],
# which has a half-width of 23 ppm.
#
# The estimated mean and standard deviation for the downgradient well are
# 608 and 18 ppm, respectively, based on a sample size of n = 6 quarterly
# samples. A two-sided 95% confidence interval for the difference between
# this mean and the background mean is [44, 100] ppm.
#
# Suppose we want to design a future sampling program and are interested in
# the size of the confidence interval for the difference between the two means.
# We will use ciTableMean to generate a table of possible confidence intervals
# by varying the assumed standard deviation and assumed differences between
# the means.
# Look at the data
#-----------------
EPA.09.Ex.16.1.sulfate.df
# Month Year Well.type Sulfate.ppm
#1 Jan 1995 Background 560
#2 Apr 1995 Background 530
#3 Jul 1995 Background 570
#4 Oct 1995 Background 490
#5 Jan 1996 Background 510
#6 Apr 1996 Background 550
#7 Jul 1996 Background 550
#8 Oct 1996 Background 530
#9 Jan 1995 Downgradient NA
#10 Apr 1995 Downgradient NA
#11 Jul 1995 Downgradient 600
#12 Oct 1995 Downgradient 590
#13 Jan 1996 Downgradient 590
#14 Apr 1996 Downgradient 630
#15 Jul 1996 Downgradient 610
#16 Oct 1996 Downgradient 630
# Compute the estimated mean and standard deviation for the
# background well.
#-----------------------------------------------------------
Sulfate.back <- with(EPA.09.Ex.16.1.sulfate.df,
Sulfate.ppm[Well.type == "Background"])
enorm(Sulfate.back, ci = TRUE)
#Results of Distribution Parameter Estimation
#--------------------------------------------
#
#Assumed Distribution: Normal
#
#Estimated Parameter(s): mean = 536.2500
# sd = 26.6927
#
#Estimation Method: mvue
#
#Data: Sulfate.back
#
#Sample Size: 8
#
#Confidence Interval for: mean
#
#Confidence Interval Method: Exact
#
#Confidence Interval Type: two-sided
#
#Confidence Level: 95%
#
#Confidence Interval: LCL = 513.9343
# UCL = 558.5657
# Compute the estimated mean and standard deviation for the
# downgradient well.
#----------------------------------------------------------
Sulfate.down <- with(EPA.09.Ex.16.1.sulfate.df,
Sulfate.ppm[Well.type == "Downgradient"])
enorm(Sulfate.down, ci = TRUE)
#Results of Distribution Parameter Estimation
#--------------------------------------------
#
#Assumed Distribution: Normal
#
#Estimated Parameter(s): mean = 608.33333
# sd = 18.34848
#
#Estimation Method: mvue
#
#Data: Sulfate.down
#
#Sample Size: 6
#
#Number NA/NaN/Inf's: 2
#
#Confidence Interval for: mean
#
#Confidence Interval Method: Exact
#
#Confidence Interval Type: two-sided
#
#Confidence Level: 95%
#
#Confidence Interval: LCL = 589.0778
# UCL = 627.5889
# Compute the estimated difference between the means and the confidence
# interval for the difference:
#----------------------------------------------------------------------
t.test(Sulfate.down, Sulfate.back, var.equal = TRUE)
#Results of Hypothesis Test
#--------------------------
#
#Null Hypothesis: difference in means = 0
#
#Alternative Hypothesis: True difference in means is not equal to 0
#
#Test Name: Two Sample t-test
#
#Estimated Parameter(s): mean of x = 608.3333
# mean of y = 536.2500
#
#Data: Sulfate.down and Sulfate.back
#
#Test Statistic: t = 5.660985
#
#Test Statistic Parameter: df = 12
#
#P-value: 0.0001054306
#
#95% Confidence Interval: LCL = 44.33974
# UCL = 99.82693
# Use ciTableMean to look how the confidence interval for the difference
# between the background and downgradient means in a future study using eight
# quarterly samples at each well varies with assumed value of the pooled standard
# deviation and the observed difference between the sample means.
#--------------------------------------------------------------------------------
# Our current estimate of the pooled standard deviation is 24 ppm:
summary(lm(Sulfate.ppm ~ Well.type, data = EPA.09.Ex.16.1.sulfate.df))$sigma
#[1] 23.57759
# We can see that if this is overly optimistic and in our next study the
# pooled standard deviation is around 50 ppm, then if the observed difference
# between the means is 50 ppm, the lower end of the confidence interval for
# the difference between the two means will include 0, so we may want to
# increase our sample size.
ciTableMean(n1 = 8, n2 = 8, diff.or.mean = c(100, 50, 0), SD = c(15, 25, 50), digits = 0)
# Diff=100 Diff=50 Diff=0
#SD=15 [ 84, 116] [ 34, 66] [-16, 16]
#SD=25 [ 73, 127] [ 23, 77] [-27, 27]
#SD=50 [ 46, 154] [ -4, 104] [-54, 54]
#==========
# Clean up
#---------
rm(Sulfate.back, Sulfate.down)
}
\keyword{ distribution }
\keyword{ design }
\keyword{ htest }
|
# Simulate a directed relational matrix with additive sender/receiver random
# effects and correlated dyadic errors (a social-relations model without
# covariates), then fit it with amen::ame() under the fixed-rank-nomination
# ("frn") likelihood.
library("MASS", lib.loc="/usr/lib/R/library")
library("amen", lib.loc="/usr/lib/R/library")
# Simulation settings.
# NOTE(review): `sim` is never used below -- confirm whether replicate loops
# were intended.
sim <- 10
n <- 100
maxRank <- 100
# Covariance of the per-node (sender, receiver) random-effect pair.
Sigma_ab <- matrix(c(1,0.5,0.5,1),2,2)
# Covariance of the dyadic error pair (eps_ij, eps_ji); 0.9 reciprocity.
Sigma_eps <- matrix(c(1,0.9,0.9,1),2,2)
# n draws of (a_i, b_i): column 1 = sender effects, column 2 = receiver effects.
sample_ab <- mvrnorm(n = n, rep(0, 2), Sigma_ab)
# One correlated error pair per unordered dyad: n*(n-1)/2 rows, 2 columns.
sample_eps <- mvrnorm(n = n*(n-1)/(2), rep(0, 2), Sigma_eps)
# Intercept.
beta <- 1
# Build the latent relational matrix Y; the diagonal (self-ties) is NA.
Y <- matrix(rep(0,n*n), n, n)
for (i in 1:n){
for (j in 1:n){
if(i == j){
Y[i,i] <- NA
} else if (i < j){
# Upper triangle. Linear indexing into the n x 2 matrix sample_ab picks
# a_i (= element i, column 1) and b_j (= element j + n, column 2).
# The eps index (i-1)*(n - i/2) + j - i enumerates upper-triangle dyads
# (1,2),(1,3),...,(2,3),... sequentially, selecting column 1 of sample_eps.
Y[i,j] <- beta + sample_ab[i] + sample_ab[j + n] + sample_eps[(i-1)*(n - (i/2)) + j - i]
} else {
# Lower triangle: same dyad position as (j, i) but offset by n*(n-1)/2,
# i.e. the second (correlated) eps component -- column 2 via linear indexing.
Y[i,j] <- beta + sample_ab[i] + sample_ab[j + n] + sample_eps[(j-1)*(n - (j/2)) + i - j + n*(n-1)/(2)]
}
}
}
# Convert each row of Y into fixed-rank nominations: positive entries are
# ranked so the largest Y value gets the highest rank, keeping at most
# maxRank nominations per row; everything else stays 0.
Yrank <- matrix(rep(0,n*n), n, n)
for (i in 1:n){
Yrank[i,i] <- NA
# Column indices of row i sorted by decreasing Y value; NAs dropped.
lRank <- order(Y[i, ], na.last = NA, decreasing = TRUE)
# Keep only the positive-valued candidates.
lRank <- lRank[Y[i, lRank[]] > 0]
indice <- min(maxRank, length(lRank))
for (j in 1:indice){
# Best candidate gets rank `indice`, the last kept candidate gets 1.
Yrank[i,lRank[j]] <- indice - j + 1
}
}
# Fit the AME model with a rank-2 multiplicative effect and at most 4
# nominations per sender.
# NOTE(review): the model is fit to the latent Y rather than the Yrank
# matrix constructed above, and odmax=4 differs from maxRank=100 --
# confirm which matrix/limit was intended.
fit<-ame(Y,R=2,model = "frn",odmax=4)
summary(fit)
| /Rfiles/SimulationNoCov.R | no_license | mfouda/Dissertation_Scripts | R | false | false | 1,001 | r | library("MASS", lib.loc="/usr/lib/R/library")
library("amen", lib.loc="/usr/lib/R/library")
sim <- 10
n <- 100
maxRank <- 100
Sigma_ab <- matrix(c(1,0.5,0.5,1),2,2)
Sigma_eps <- matrix(c(1,0.9,0.9,1),2,2)
sample_ab <- mvrnorm(n = n, rep(0, 2), Sigma_ab)
sample_eps <- mvrnorm(n = n*(n-1)/(2), rep(0, 2), Sigma_eps)
beta <- 1
Y <- matrix(rep(0,n*n), n, n)
for (i in 1:n){
for (j in 1:n){
if(i == j){
Y[i,i] <- NA
} else if (i < j){
Y[i,j] <- beta + sample_ab[i] + sample_ab[j + n] + sample_eps[(i-1)*(n - (i/2)) + j - i]
} else {
Y[i,j] <- beta + sample_ab[i] + sample_ab[j + n] + sample_eps[(j-1)*(n - (j/2)) + i - j + n*(n-1)/(2)]
}
}
}
Yrank <- matrix(rep(0,n*n), n, n)
for (i in 1:n){
Yrank[i,i] <- NA
lRank <- order(Y[i, ], na.last = NA, decreasing = TRUE)
lRank <- lRank[Y[i, lRank[]] > 0]
indice <- min(maxRank, length(lRank))
for (j in 1:indice){
Yrank[i,lRank[j]] <- indice - j + 1
}
}
fit<-ame(Y,R=2,model = "frn",odmax=4)
summary(fit)
|
#' get_mentionees
#'
#' This function takes a tibble of tweets and records which users were mentioned,
#' and how many times. Mentions include replies and retweets, but not quotes.
#' Each user is counted at most once per tweet, even when mentioned several
#' times within the same tweet.
#' @param tweets a tibble of tweets with a \code{mentions_user_id} column
#'   holding space-separated user IDs (or NA when nobody is mentioned)
#' @return a tibble with two columns noting the user ID and how many times they
#'   were mentioned, sorted so that the most-mentioned users come first
#' @examples get_mentionees(tweets)
#' @export
get_mentionees <- function(tweets) {
  # Keep only tweets that actually mention someone, then split each
  # space-separated mention string into its individual user IDs.
  mention_strings <- tweets$mentions_user_id[!is.na(tweets$mentions_user_id)]
  per_tweet_ids <- strsplit(mention_strings, split = " ", fixed = TRUE)
  # Count each user at most once per tweet. (The previous version applied
  # unique() to the list returned by strsplit() rather than to the ID vector
  # inside it, so tweets with more than one mention produced a length > 1
  # condition in if() and within-tweet duplicates were double-counted.)
  per_tweet_ids <- lapply(per_tweet_ids, unique)
  # Tally all mentions in one pass with table() instead of growing a tibble
  # via rbind() inside a loop (which is O(n^2)).
  counts <- table(unlist(per_tweet_ids))
  mentionees <- tibble("user_ID" = as.character(names(counts)),
                       "count" = as.numeric(counts))
  # Sort so that higher mention counts come first.
  mentionees[order(mentionees$count, decreasing = TRUE), ]
}
| /R/get_mentionees.R | no_license | llsigerson/twittnet | R | false | false | 1,509 | r | #' get_mentionees
#'
#' This function takes a tibble of tweets and records which users were mentioned,
#' and how many times. Mentions include replies and retweets, but not quotes.
#' @return a tibble with two columns noting the user ID and how many times they were mentioned
#' @examples get_mentionees(tweets)
#' @export
get_mentionees= function(tweets){
#build tibble to record mentionees
mentionees<- tibble("user_ID"=character(), "count"= numeric())
#Then, iterate through each row of the tweets and record the mentionees
for (i in 1:nrow(tweets)){
#this if statement checks that there are actually mentions in the tweet
if(!is.na(tweets$mentions_user_id[i])){
#create list of all unique ID's mentioned in the tweet
IDlist<- unique(strsplit(tweets$mentions_user_id[[i]], fixed=T, split=" "))
#Then, iterate through this list and update the mentionees tibble accordingly
#If the ID is already in the tibble, add one to the count value for it. If not, add it to the tibble
for (j in 1:length(IDlist)){
ID= IDlist[[j]]
if(ID%in%mentionees$user_ID){
mentionees$count[which(mentionees$user_ID==ID)]<-mentionees$count[which(mentionees$user_ID==ID)]+1
}
else{
mentionees=rbind(mentionees, tibble("user_ID"=ID, "count"=1))
}
}
}
}
#last, sort the output so that higher mentions come first
mentionees<-mentionees[order(mentionees$count, decreasing = TRUE),]
return(mentionees)
}
|
# Geostatistical workflow for carbon-density samples: exploratory statistics,
# variogram modelling, and one conditional Gaussian simulation over a
# prediction grid. The simulation result is saved to the path given as the
# first command-line argument.
library(raster)
library(rgdal)
library(foreign)
# args[1]: output file path for save() below.
args <- commandArgs(trailingOnly = TRUE)
# Sample points with band values, read from a DBF attribute table.
Pts <- read.dbf(file="Samples_Bands.dbf")
# Boundary raster of the prediction area; cells outside the area are NA.
Lin.grid <- raster("boundary2.img")
# Lin.grid
# coordinates(Lin.grid)[!is.na(values(Lin.grid)),]
# Convert the non-NA raster cells into a SpatialPixels prediction grid.
Lin.grid <- SpatialPixels(SpatialPoints(coordinates(Lin.grid)[!is.na(values(Lin.grid)),]))
proj4string(Lin.grid)=CRS("+proj=tmerc +lat_0=0 +lon_0=117 +k=1 +x_0=500000 +y_0=0 +ellps=krass +towgs84=0,0,0,0,0,0,0 +units=m +no_defs")
# Traditional statistical exploration: kernel density of the raw samples.
d <- density(Pts$CDENSITY)
plot(d, main="Kernel Density of Carbon Density Samples")
polygon(d, col="blue")
# Q-Q plot of the raw samples against the normal distribution.
qqnorm(Pts$CDENSITY, main="Normal Q-Q Plot of Carbon Density Samples")
qqline(Pts$CDENSITY, col=2)
# Same checks on the log-transformed column Log_CD (zeros were offset by
# 0.0001 before the log, per the original comment).
d <- density(Pts$Log_CD)
plot(d, main="Kernel Density of Carbon Density Samples")
polygon(d, col="blue")
# Q-Q plot of the log-transformed samples.
qqnorm(Pts$Log_CD, main="Normal Q-Q Plot of Carbon Density Samples")
qqline(Pts$Log_CD, col=2)
# Geostatistical exploration.
require(gstat)
require(sp)
class(Pts)
# Promote Pts to a SpatialPointsDataFrame using its X/Y columns and give it
# the same projection as the prediction grid.
coordinates(Pts) = ~X+Y
proj4string(Pts)=CRS("+proj=tmerc +lat_0=0 +lon_0=117 +k=1 +x_0=500000 +y_0=0 +ellps=krass +towgs84=0,0,0,0,0,0,0 +units=m +no_defs")
Pts
summary(Pts)
bubble(Pts, "CDENSITY",
col=c("#00ff0088", "#00ff0088"), main = "Carbon Density")
# Omnidirectional empirical variogram, fitted with a spherical model.
Pts.vgm = variogram(CDENSITY~1, Pts)
Pts.vgm
Pts.vgm.fit = fit.variogram(Pts.vgm, model=vgm(1, "Sph", 58110, 1))
Pts.vgm.fit
plot(Pts.vgm, Pts.vgm.fit)
# Directional variograms at azimuths 32/77/122/167 degrees, fitted with an
# anisotropic spherical model.
Pts.dir = variogram(CDENSITY~1, Pts, alpha = c(32, 77, 122, 167))
Pts.dir
Pts.dir.fit = fit.variogram(Pts.dir, model = vgm(psill=15.74, model="Sph", range=58110, nugget=279, anis=c(9.84, 0.95)))
Pts.dir.fit
plot(Pts.dir, Pts.dir.fit)
# Conditional Gaussian simulation over the prediction grid: one realization
# (nsim = 1) using the 30 nearest observations and the omnidirectional fit.
lzn.condsim = krige(CDENSITY~1, Pts, Lin.grid, model = Pts.vgm.fit,
nmax = 30, nsim = 1) #simulation result, nsim is the simulation times
save(lzn.condsim, file = args[1])
# spplot(lzn.condsim, main = "indicator simulations for 4 deciles") #plot simulation result
| /rscripts/Ana2.R | no_license | the-fool/fastGIS | R | false | false | 2,122 | r | library(raster)
library(rgdal)
library(foreign)
args <- commandArgs(trailingOnly = TRUE)
Pts <- read.dbf(file="Samples_Bands.dbf")
Lin.grid <- raster("boundary2.img")
# Lin.grid
# coordinates(Lin.grid)[!is.na(values(Lin.grid)),]
Lin.grid <- SpatialPixels(SpatialPoints(coordinates(Lin.grid)[!is.na(values(Lin.grid)),]))
proj4string(Lin.grid)=CRS("+proj=tmerc +lat_0=0 +lon_0=117 +k=1 +x_0=500000 +y_0=0 +ellps=krass +towgs84=0,0,0,0,0,0,0 +units=m +no_defs")
# Traditional Statistic exploration
d <- density(Pts$CDENSITY)
plot(d, main="Kernel Density of Carbon Density Samples")
polygon(d, col="blue")
# Q-Q plot
qqnorm(Pts$CDENSITY, main="Normal Q-Q Plot of Carbon Density Samples")
qqline(Pts$CDENSITY, col=2)
# log transformation and add 0.0001 to zero
d <- density(Pts$Log_CD)
plot(d, main="Kernel Density of Carbon Density Samples")
polygon(d, col="blue")
# Q-Q plot
qqnorm(Pts$Log_CD, main="Normal Q-Q Plot of Carbon Density Samples")
qqline(Pts$Log_CD, col=2)
# geo-statistical exploration
require(gstat)
require(sp)
class(Pts)
coordinates(Pts) = ~X+Y
proj4string(Pts)=CRS("+proj=tmerc +lat_0=0 +lon_0=117 +k=1 +x_0=500000 +y_0=0 +ellps=krass +towgs84=0,0,0,0,0,0,0 +units=m +no_defs")
Pts
summary(Pts)
bubble(Pts, "CDENSITY",
col=c("#00ff0088", "#00ff0088"), main = "Carbon Density")
# omini direction
Pts.vgm = variogram(CDENSITY~1, Pts)
Pts.vgm
Pts.vgm.fit = fit.variogram(Pts.vgm, model=vgm(1, "Sph", 58110, 1))
Pts.vgm.fit
plot(Pts.vgm, Pts.vgm.fit)
# directional with tolerence 45 degree start from 9.5
Pts.dir = variogram(CDENSITY~1, Pts, alpha = c(32, 77, 122, 167))
Pts.dir
Pts.dir.fit = fit.variogram(Pts.dir, model = vgm(psill=15.74, model="Sph", range=58110, nugget=279, anis=c(9.84, 0.95)))
Pts.dir.fit
plot(Pts.dir, Pts.dir.fit)
# simulation for
lzn.condsim = krige(CDENSITY~1, Pts, Lin.grid, model = Pts.vgm.fit,
nmax = 30, nsim = 1) #simulation result, nsim is the simulation times
save(lzn.condsim, file = args[1])
# spplot(lzn.condsim, main = "indicator simulations for 4 deciles") #plot simulation result
|
\name{interaction_investigator}
\alias{interaction_investigator}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Explore Pairwise Interactions in BART Model
}
\description{
Explore the pairwise interaction counts for a BART model to learn about interactions fit by the model. This function includes an option to generate a plot of the pairwise interaction counts.
}
\usage{
interaction_investigator(bart_machine, plot = TRUE,
num_replicates_for_avg = 5, num_trees_bottleneck = 20,
num_var_plot = 50, cut_bottom = NULL, bottom_margin = 10)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{bart_machine}{
An object of class ``bartMachine''.
}
\item{plot}{
If TRUE, a plot of the pairwise interaction counts is generated.
}
\item{num_replicates_for_avg}{
The number of replicates of BART to be used to generate pairwise interaction inclusion counts.
Averaging across multiple BART models improves stability of the estimates.
}
\item{num_trees_bottleneck}{
Number of trees to be used in the sum-of-trees model for computing pairwise interactions counts.
A small number of trees should be used to force the variables to compete for entry into the model.
}
\item{num_var_plot}{
Number of variables to be shown on the plot. If ``Inf,'' all variables are plotted (not recommended if
the number of predictors is large). Default is 50.
}
\item{cut_bottom}{
A display parameter between 0 and 1 that controls where the y-axis is plotted. A value of 0 would begin the y-axis at 0; a value of 1 begins
the y-axis at the minimum of the average pairwise interaction inclusion count (the smallest bar in the bar plot). Values between 0 and 1 begin the
y-axis as a percentage of that minimum.
}
\item{bottom_margin}{
A display parameter that adjusts the bottom margin of the graph if labels are clipped. The scale of this parameter is the same as set with \code{par(mar = c(....))} in R.
Higher values allow for more space if the crossed covariate names are long. Note that making this parameter too large will prevent plotting and the plot function in R will throw an error.
}
}
\details{
An interaction between two variables is considered to occur whenever a path from any node of a tree to
any of its terminal node contains splits using those two variables. See Kapelner and Bleich, 2013, Section 4.11.
}
\value{
\item{interaction_counts_avg}{For each of the \eqn{p \times p}{p times p} interactions, what is the average count across all \code{num_replicates_for_avg}
BART model replicates' post burn-in Gibbs samples in all trees.}
\item{interaction_counts_sd}{For each of the \eqn{p \times p}{p times p} interactions, what is the average sd of the interaction counts across the \code{num_replicates_for_avg}
BART models replicates.}
}
\references{
A Kapelner and J Bleich. bartMachine: Machine Learning with Bayesian Additive Regression Trees, arXiv preprints, 2013
}
\author{
Adam Kapelner and Justin Bleich
}
\note{
In the plot, the red bars correspond to the standard error of the variable inclusion proportion estimates (since multiple replicates were used).
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{investigate_var_importance}}
}
\examples{
\dontrun{
#generate Friedman data
set.seed(11)
n = 200
p = 10
X = data.frame(matrix(runif(n * p), ncol = p))
y = 10 * sin(pi* X[ ,1] * X[,2]) +20 * (X[,3] -.5)^2 + 10 * X[ ,4] + 5 * X[,5] + rnorm(n)
##build BART regression model
bart_machine = bartMachine(X, y, num_trees = 20)
#investigate interactions
interaction_investigator(bart_machine)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
| /dartMachine/man/interaction_investigator.Rd | permissive | theodds/dartMachine | R | false | false | 3,775 | rd | \name{interaction_investigator}
\alias{interaction_investigator}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Explore Pairwise Interactions in BART Model
}
\description{
Explore the pairwise interaction counts for a BART model to learn about interactions fit by the model. This function includes an option to generate a plot of the pairwise interaction counts.
}
\usage{
interaction_investigator(bart_machine, plot = TRUE,
num_replicates_for_avg = 5, num_trees_bottleneck = 20,
num_var_plot = 50, cut_bottom = NULL, bottom_margin = 10)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{bart_machine}{
An object of class ``bartMachine''.
}
\item{plot}{
If TRUE, a plot of the pairwise interaction counts is generated.
}
\item{num_replicates_for_avg}{
The number of replicates of BART to be used to generate pairwise interaction inclusion counts.
Averaging across multiple BART models improves stability of the estimates.
}
\item{num_trees_bottleneck}{
Number of trees to be used in the sum-of-trees model for computing pairwise interactions counts.
A small number of trees should be used to force the variables to compete for entry into the model.
}
\item{num_var_plot}{
Number of variables to be shown on the plot. If ``Inf,'' all variables are plotted (not recommended if
the number of predictors is large). Default is 50.
}
\item{cut_bottom}{
A display parameter between 0 and 1 that controls where the y-axis is plotted. A value of 0 would begin the y-axis at 0; a value of 1 begins
the y-axis at the minimum of the average pairwise interaction inclusion count (the smallest bar in the bar plot). Values between 0 and 1 begin the
y-axis as a percentage of that minimum.
}
\item{bottom_margin}{
A display parameter that adjusts the bottom margin of the graph if labels are clipped. The scale of this parameter is the same as set with \code{par(mar = c(....))} in R.
Higher values allow for more space if the crossed covariate names are long. Note that making this parameter too large will prevent plotting and the plot function in R will throw an error.
}
}
\details{
An interaction between two variables is considered to occur whenever a path from any node of a tree to
any of its terminal node contains splits using those two variables. See Kapelner and Bleich, 2013, Section 4.11.
}
\value{
\item{interaction_counts_avg}{For each of the \eqn{p \times p}{p times p} interactions, what is the average count across all \code{num_replicates_for_avg}
BART model replicates' post burn-in Gibbs samples in all trees.}
\item{interaction_counts_sd}{For each of the \eqn{p \times p}{p times p} interactions, what is the average sd of the interaction counts across the \code{num_replicates_for_avg}
BART models replicates.}
}
\references{
A Kapelner and J Bleich. bartMachine: Machine Learning with Bayesian Additive Regression Trees, arXiv preprints, 2013
}
\author{
Adam Kapelner and Justin Bleich
}
\note{
In the plot, the red bars correspond to the standard error of the variable inclusion proportion estimates (since multiple replicates were used).
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{investigate_var_importance}}
}
\examples{
\dontrun{
#generate Friedman data
set.seed(11)
n = 200
p = 10
X = data.frame(matrix(runif(n * p), ncol = p))
y = 10 * sin(pi* X[ ,1] * X[,2]) +20 * (X[,3] -.5)^2 + 10 * X[ ,4] + 5 * X[,5] + rnorm(n)
##build BART regression model
bart_machine = bartMachine(X, y, num_trees = 20)
#investigate interactions
interaction_investigator(bart_machine)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
# With Timestamps
#
# Summarise every column of a data.frame/data.table: its name, the number
# and proportion of NAs, its (first) class, and -- for factor columns --
# its number of levels. Multi-class columns such as POSIXct timestamps are
# reported by their first class.
#
# Fixes two defects in the previous version:
#  * it piped through dplyr verbs (`%>%`, mutate, select) without ever
#    loading dplyr, so it only worked if the caller had attached it;
#  * do.call(rbind, sapply(dt, levelCount)) passed a plain numeric vector
#    to do.call(), which requires a list and therefore errored.
factorSummary <- function(dt) {
  require(data.table)
  # NA count for one column.
  countNA <- function(a) {
    sum(is.na(a))
  }
  # Number of levels for a factor column, 0L otherwise.
  # (is.factor() avoids the vectorised ifelse() on a multi-element class().)
  levelCount <- function(v) {
    if (is.factor(v)) length(levels(v)) else 0L
  }
  # vapply() guarantees one typed value per column, so multi-class columns
  # (e.g. POSIXct) cannot break the data.frame construction.
  summ <- data.frame(
    colname = names(dt),
    NAs = vapply(dt, countNA, integer(1)),
    class = vapply(dt, function(v) class(v)[1], character(1)),
    levels = vapply(dt, levelCount, integer(1))
  )
  summ$perc.NAs <- round(summ$NAs / nrow(dt), digits = 2)
  # Same column order as the original select() call.
  summ <- summ[, c("colname", "NAs", "perc.NAs", "class", "levels")]
  setDT(summ)
  return(summ)
}
# Without Timestamps
#
# Same per-column summary as factorSummary(): name, NA count and proportion,
# (first) class, and factor level count.
#
# Fixes two defects in the previous version:
#  * it piped through dplyr verbs (`%>%`, mutate, select) without ever
#    loading dplyr, so it only worked if the caller had attached it;
#  * class = sapply(dt, class) returns a list when any column has more
#    than one class (e.g. POSIXct), which corrupted the data.frame --
#    taking class(v)[1] makes the function safe for timestamps too.
factorSummary2 <- function(dt) {
  require(data.table)
  # NA count for one column.
  countNA <- function(a) {
    sum(is.na(a))
  }
  # Number of levels for a factor column, 0L otherwise.
  levelCount <- function(v) {
    if (is.factor(v)) length(levels(v)) else 0L
  }
  # vapply() returns exactly one typed value per column.
  summ <- data.frame(
    colname = names(dt),
    NAs = vapply(dt, countNA, integer(1)),
    class = vapply(dt, function(v) class(v)[1], character(1)),
    levels = vapply(dt, levelCount, integer(1))
  )
  summ$perc.NAs <- round(summ$NAs / nrow(dt), digits = 2)
  # Same column order as the original select() call.
  summ <- summ[, c("colname", "NAs", "perc.NAs", "class", "levels")]
  setDT(summ)
  return(summ)
}
| /factorSummary.R | no_license | ajphilpotts/DataVisualisationR | R | false | false | 1,318 | r | # With Timestamps
factorSummary <- function(dt) {
require(data.table)
#Check NA function for metadata tables
countNA <- function(a) {
return(sum(is.na(a)))
}
#Count levels in factor
levelCount <- function(v) {
ifelse(class(v)=="factor",length(levels(v)),0)
}
# Collect in Table
summ <- data.frame(colname=names(dt),
NAs=sapply(dt,countNA),
class=do.call(rbind,sapply(dt,class))[,1],
levels=do.call(rbind,sapply(dt,levelCount))[,1]) %>%
mutate(perc.NAs=round(NAs/nrow(dt),digits=2)) %>%
select(colname,NAs,perc.NAs,class,levels)
setDT(summ)
return(summ)
}
# Without Timestamps
factorSummary2 <- function(dt) {
require(data.table)
#Check NA function for metadata tables
countNA <- function(a) {
return(sum(is.na(a)))
}
#Count levels in factor
levelCount <- function(v) {
ifelse(class(v)=="factor",length(levels(v)),0)
}
# Collect in Table
summ <- data.frame(colname=names(dt),
NAs=sapply(dt,countNA),
class=sapply(dt,class),
levels=sapply(dt,levelCount)) %>%
mutate(perc.NAs=round(NAs/nrow(dt),digits=2)) %>%
select(colname,NAs,perc.NAs,class,levels)
setDT(summ)
return(summ)
}
|
#######################################################################
# stream - Infrastructure for Data Stream Mining
# Copyright (C) 2013 Michael Hahsler, Matthew Bolanos, John Forrest
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
## DSD - Data Stream Data interface
## all DSD classes have these functions
## and an additional function to create the DSD
##
## A new DSD class (e.g., myDSD) needs the following:
## 1. a constructor function. myDSD <- function(PARAMETERS) which
## returns an object with the class c("DSD_myDSD","DSD_R","DSD")
## 2. get_points.myDSD <- function(x, n=1, ...)
##
## See DSD_Gaussian_Static.R for an example
DSD <- function(...) stop("DSD is an abstract class and cannot be instantiated!")
DSD_R <- function(...) stop("DSD_R is an abstract class and cannot be instantiated!")
# S3 generic: fetch the next n points from a data stream object.
get_points <- function(x, n=1, outofpoints=c("stop", "warn", "ignore"), ...) {
  UseMethod("get_points")
}
# Default method: fail with an error that names the unsupported class.
get_points.default <- function(x, n=1,
  outofpoints=c("stop", "warn", "ignore"), ...) {
  cls <- paste(class(x), collapse=", ")
  stop(gettextf("get_points not implemented for class '%s'.", cls))
}
### in case the stream can be reset (e.g., a stream from a file)
# S3 generic: rewind a resettable stream to position `pos`.
reset_stream <- function(dsd, pos=1) {
  UseMethod("reset_stream")
}
# Fallback for DSD objects: most streams cannot be reset, so signal an
# error naming the offending class.
reset_stream.DSD <- function(dsd, pos=1) {
  cls <- paste(class(dsd), collapse=", ")
  stop(gettextf("reset_stream not implemented for class '%s'.", cls))
}
### end of interface
#############################################################
### helper
# Print a compact description of a DSD object: its description string, its
# class hierarchy, and the number of clusters (k) and dimensions (d), with
# NA shown when either attribute is absent.
print.DSD <- function(x, ...) {
  .nodots(...)  # reject stray arguments
  k <- x[["k"]]
  d <- x[["d"]]
  if (is.null(k)) k <- NA
  if (is.null(d)) d <- NA
  cat(.line_break(x$description))
  cat("Class:", paste(class(x), collapse = ", "), "\n")
  cat(paste("With", k, "clusters", "in", d, "dimensions", "\n"))
}
summary.DSD <- function(object, ...) print(object)
## Plot n points sampled from a DSD stream.
##
## x       : DSD object to draw points from.
## n       : number of points requested via get_points().
## col, pch: optional color/plotting-symbol overrides; when NULL they are
##           derived from the cluster assignment.
## method  : "pairs" (scatterplot matrix), "pc" (first two principal
##           components), or anything else for a plain 2-d scatter plot;
##           only consulted when the data has more than two dimensions.
## dim     : optional subset of dimensions to plot.
## alpha   : opacity applied when deriving colors from assignments.
plot.DSD <- function(x, n = 500, col= NULL, pch= NULL,
..., method="pairs", dim=NULL, alpha=.6) {
## method can be pairs, plot or pc (projection with PCA)
d <- get_points(x, n, cluster = !is.null(x$class))
assignment <- attr(d, "cluster")
### stream has no assignments: treat every point as one cluster
if(length(assignment)==0) assignment <- rep(1L, nrow(d))
## NA assignments mark noise points
noise <- is.na(assignment)
### assignment is not numeric: convert labels to integer codes
if(!is.numeric(assignment)) assignment <- as.integer(as.factor(assignment))
### add alpha shading to color
if(is.null(col)) {
col <- rgb(cbind(t(col2rgb(assignment)/255)), alpha=alpha)
}else{
## recycle a single user-supplied color to all points
if(length(col)==1L) col <- rep(col, length(assignment))
}
## noise points use the package-wide noise color and symbol
col[noise] <- .noise_col
if(is.null(pch)) {
#pch <- rep(1, n)
pch <- as.integer(assignment)
pch[noise] <- .noise_pch
}
## optionally restrict the plot to user-selected dimensions
if(!is.null(dim)) d <- d[,dim, drop = FALSE]
if(ncol(d)>2 && method=="pairs") {
pairs(d, col=col, pch=pch, ...)
} else if(ncol(d)>2 && method=="pc") {
## we assume Euclidean here; project onto the first two PCs
p <- prcomp(d)
plot(p$x, col=col, pch=pch, ...)
## NOTE(review): the ratio below uses sdev rather than sdev^2, so it is a
## share of total standard deviation, not of variance -- confirm intended.
title(sub = paste("Explains ",
round(sum(p$sdev[1:2]) / sum(p$sdev)* 100, 2),
"% of the point variability", sep=""))
} else if(ncol(d) == 1) {
## 1-d data: spread the values along the x-axis at y = 0
plot(d[[1]], rep(0, length(d[[1]])), col = assignment, pch= pch,
ylab = "", xlab = colnames(d)[1], ...)
} else {
## 2-d scatter plot; extra dimensions are dropped for unrecognized methods
if(ncol(d)>2) d <- d[,1:2, drop = FALSE]
plot(d, col=col, pch=pch, ...)
}
}
| /R/DSD.R | no_license | shuxiangzhang/stream | R | false | false | 3,905 | r | #######################################################################
# stream - Infrastructure for Data Stream Mining
# Copyright (C) 2013 Michael Hahsler, Matthew Bolanos, John Forrest
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
## DSD - Data Stream Data interface
## all DSD classes have these functions
## and an additional function to create the DSD
##
## A new DSD class (e.g., myDSD) needs the following:
## 1. a constructor function. myDSD <- function(PARAMETERS) which
## returns an object with the class c("DSD_myDSD","DSD_R","DSD")
## 2. get_points.myDSD <- function(x, n=1, ...)
##
## See DSD_Gaussian_Static.R for an example
DSD <- function(...) stop("DSD is an abstract class and cannot be instantiated!")
DSD_R <- function(...) stop("DSD_R is an abstract class and cannot be instantiated!")
get_points <- function(x, n=1, outofpoints=c("stop", "warn", "ignore"), ...)
UseMethod("get_points")
get_points.default <- function(x, n=1,
outofpoints=c("stop", "warn", "ignore"), ...) {
stop(gettextf("get_points not implemented for class '%s'.",
paste(class(x), collapse=", ")))
}
### in case the stream can be reset (e.g., a stream from a file)
reset_stream <- function(dsd, pos=1) UseMethod("reset_stream")
reset_stream.DSD <- function(dsd, pos=1) {
stop(gettextf("reset_stream not implemented for class '%s'.",
paste(class(dsd), collapse=", ")))
}
### end of interface
#############################################################
### helper
print.DSD <- function(x, ...) {
.nodots(...)
k <- x[["k"]]
if(is.null(k)) k <- NA
d <- x[["d"]]
if(is.null(d)) d <- NA
cat(.line_break(x$description))
cat("Class:", paste(class(x), collapse=", "), "\n")
cat(paste('With', k, 'clusters', 'in', d, 'dimensions', '\n'))
}
summary.DSD <- function(object, ...) print(object)
# Plot n points drawn from stream x.
# method can be pairs, plot or pc (projection with PCA); dim optionally
# restricts the plot to selected dimensions; alpha shades the point colors.
plot.DSD <- function(x, n = 500, col = NULL, pch = NULL,
                     ..., method = "pairs", dim = NULL, alpha = .6) {
  d <- get_points(x, n, cluster = !is.null(x$class))
  assignment <- attr(d, "cluster")

  ### stream has no assignments!
  if (length(assignment) == 0) assignment <- rep(1L, nrow(d))
  noise <- is.na(assignment)

  ### assignment is not numeric
  if (!is.numeric(assignment)) assignment <- as.integer(as.factor(assignment))

  ### add alpha shading to color
  if (is.null(col)) {
    col <- rgb(cbind(t(col2rgb(assignment) / 255)), alpha = alpha)
  } else {
    if (length(col) == 1L) col <- rep(col, length(assignment))
  }
  col[noise] <- .noise_col

  if (is.null(pch)) {
    pch <- as.integer(assignment)
    pch[noise] <- .noise_pch
  }

  if (!is.null(dim)) d <- d[, dim, drop = FALSE]

  if (ncol(d) > 2 && method == "pairs") {
    pairs(d, col = col, pch = pch, ...)
  } else if (ncol(d) > 2 && method == "pc") {
    ## we assume Euclidean here
    p <- prcomp(d)
    plot(p$x, col = col, pch = pch, ...)
    ## FIX: proportion of variance explained is based on the squared
    ## standard deviations (sdev^2), not the sdev values themselves.
    title(sub = paste("Explains ",
                      round(sum(p$sdev[1:2]^2) / sum(p$sdev^2) * 100, 2),
                      "% of the point variability", sep = ""))
  } else if (ncol(d) == 1) {
    plot(d[[1]], rep(0, length(d[[1]])), col = assignment, pch = pch,
         ylab = "", xlab = colnames(d)[1], ...)
  } else {
    if (ncol(d) > 2) d <- d[, 1:2, drop = FALSE]
    plot(d, col = col, pch = pch, ...)
  }
}
|
library(shiny)

# UI for the "Overview of Star Ratings" dashboard.
# One contract-type selector (Input1) drives which contract-ID selector
# (Input2/3/4) and which output panels are shown; Input11 maps measure
# labels to the column index (2..55) of the ratings data set.
shinyUI(fluidPage(
  # NOTE(review): tags$body() here renders a nested <body> tag inside the
  # page body; browsers tolerate it, but a CSS background rule on body
  # would be cleaner.
  tags$body(
    background = "s10.jpg"
  ),
  #tags$link(rel = "stylesheet", type = "text/css", href = "bootstrap.css"),
  #tags$head(tags$script("alert('Hello!');")),
  tags$head(tags$style("body{ color: black; }")),
  tags$style(HTML("
    @import url('//fonts.googleapis.com/css?family=Lobster|Cabin:400,700');
    h1 {
      font-family: 'Lobster', cursive;
      font-weight: 500;
      line-height: 1.1;
      color: #48ca3b;
    }
  ")),
  img(src = 'karvy-analytics.jpg', align = "right"), theme = "bootstrap.css",
  headerPanel("Overview of Star Ratings"),
  sidebarLayout(
    sidebarPanel(
      selectInput("Input1", "Choose Part",
                  c("PART C" = "PART C",
                    "PART D" = "PART D",
                    "MA PD" = "MA PD"),
                  selected = "PART C", selectize = TRUE),
      # selectInput("Input11","Measures", c("Breast.Cancer.Screening"=2,
      #                                     "Colorectal.Cancer.Screening"=3),selectize = TRUE ),
      # FIX: in the three panels below, 'selected' and 'selectize' were
      # previously placed after the closing paren of selectInput(), so they
      # were silently passed to conditionalPanel() as HTML attributes of the
      # wrapper div. They now configure the selectInput itself.
      conditionalPanel(
        condition = "input.Input1 == 'PART C'",
        selectInput("Input2", "Choose Contract_ID",
                    c("H1651" = "H1651",
                      "H2461" = "H2461",
                      "H5256" = "H5256",
                      "H5264" = "H5264"),
                    selected = "H1651", selectize = TRUE)),
      conditionalPanel(
        condition = "input.Input1 == 'PART D'",
        selectInput("Input3", "Choose Contract_ID",
                    c("H0141" = "H0141",
                      "H0901" = "H0901",
                      "H1291" = "H1291",
                      "H1419" = "H1419"),
                    selected = "H0141", selectize = TRUE)),
      conditionalPanel(
        condition = "input.Input1 == 'MA PD'",
        selectInput("Input4", "Choose Contract_ID",
                    c("H0084" = "H0084",
                      "H0104" = "H0104",
                      "H0108" = "H0108",
                      "H0117" = "H0117"),
                    selected = "H0084", selectize = TRUE)),
      # Measure label -> column index of the ratings table.
      # NOTE(review): some labels carry stray leading/trailing spaces and a
      # few labels repeat with different column values; kept verbatim so the
      # indices still line up with the server-side data set.
      selectInput("Input11", "Measures",
                  c("Breast.Cancer.Screening" = 2,
                    "Colorectal.Cancer.Screening" = 3,
                    "Cardiovascular.Care...Cholesterol.Screening" = 4,
                    "Diabetes.Care...Cholesterol.Screening" = 5,
                    "Glaucoma.Testing" = 6,
                    "Annual.Flu.Vaccine" = 7,
                    "Improving.or.Maintaining.Physical.Health" = 8,
                    "Improving.or.Maintaining.Mental.Health" = 9,
                    "Monitoring.Physical.Activity" = 10,
                    "Adult.BMI.Assessment" = 11,
                    "Care.for.Older.Adults...Medication.Review" = 12,
                    "Care.for.Older.Adults...Functional.Status.Assessment " = 13,
                    "Care.for.Older.Adults...Pain.Screening" = 14,
                    " Osteoporosis.Management.in.Women.who.had.a.Fracture " = 15,
                    "Diabetes.Care...Eye.Exam " = 16,
                    "Diabetes.Care...Kidney.Disease.Monitoring " = 17,
                    "Diabetes.Care...Blood.Sugar.Controlled" = 18,
                    "Diabetes.Care...Cholesterol.Controlled" = 19,
                    "Controlling.Blood.Pressure" = 20,
                    "Rheumatoid.Arthritis.Management" = 21,
                    "Improving.Bladder.Control" = 22,
                    "Reducing.the.Risk.of.Falling" = 23,
                    "Plan.All.Cause.Readmissions" = 24,
                    "Getting.Needed.Care" = 25,
                    "Getting.Appointments.and.Care.Quickly" = 26,
                    "Customer.Service" = 27,
                    "Overall.Rating.of.Health.Care.Quality" = 28,
                    "Overall.Rating.of.Plan Care.Coordination" = 29,
                    "Complaints.about.the.Health.Plan" = 30,
                    "Beneficiary.Access.and.Performance.Problems" = 31,
                    "Members.Choosing.to.Leave.the.Plan" = 32,
                    "Improvement" = 33,
                    "Plan.Makes.Timely.Decisions.about.Appeals" = 34,
                    "Reviewing.Appeals.Decisions" = 35,
                    "Call.Center...Foreign.Language.Interpreter.and.TTY.TDD.Availability" = 36,
                    "Enrollment.Timeliness" = 37,
                    "Call.Center...Pharmacy.Hold.Time" = 38,
                    "Call.Center...Foreign.Language.Interpreter.and.TTY.TDD.Availability" = 39,
                    "Appeals.Auto.Forward" = 40,
                    "Appeals.Upheld Enrollment.Timeliness" = 41,
                    "Complaints.about.the.Drug.Plan" = 42,
                    "Beneficiary.Access.and.Performance.Problems" = 43,
                    "Members.Choosing.to.Leave.the.Plan" = 44,
                    "Drug.Plan.Quality.Improvement" = 45,
                    "Getting.Information.From.Drug.Plan" = 46,
                    "Rating.of.Drug.Plan" = 47,
                    "Getting.Needed.Prescription.Drugs" = 48,
                    "MPF.Price.Accuracy" = 49,
                    "High.Risk.Medication" = 50,
                    "Diabetes.Treatment" = 51,
                    "Part.D.Medication.Adherence.for.Oral.Diabetes.Medications" = 52,
                    "Part.D.Medication.Adherence.for.Hypertension..RAS.antagonists." = 53,
                    "Part.D.Medication.Adherence.for.Cholesterol..Statins." = 54,
                    "Special Needs Plan (SNP) Care Management" = 55),
                  selectize = TRUE)
    ),
    mainPanel(
      tabsetPanel(
        # Raw data table plus a summary plot for the selected contract.
        tabPanel("Contract Details",
                 conditionalPanel(
                   condition = "input.Input1 == 'PART C'",
                   DT::dataTableOutput('contents2'),
                   plotOutput("contents5", width = "25%")),
                 conditionalPanel(
                   condition = "input.Input1 == 'PART D'",
                   DT::dataTableOutput('contents3'),
                   plotOutput("contents6", width = "25%")),
                 conditionalPanel(
                   condition = "input.Input1 == 'MA PD'",
                   DT::dataTableOutput('contents4'),
                   plotOutput("contents7"))),
        tabPanel("Measures Rating visualization",
                 conditionalPanel(
                   condition = "input.Input1 == 'PART C'",
                   scatterD3Output("phonePlot1", height = "700px")),
                 conditionalPanel(
                   condition = "input.Input1 == 'PART D'",
                   scatterD3Output("phonePlot2", height = "700px")),
                 conditionalPanel(
                   condition = "input.Input1 == 'MA PD'",
                   scatterD3Output("phonePlot3", height = "700px"))),
        tabPanel("Measures Raw Score Visualization",
                 conditionalPanel(
                   condition = "input.Input1 == 'PART C'",
                   plotlyOutput("phonePlot4")),
                 conditionalPanel(
                   condition = "input.Input1 == 'PART D'",
                   plotlyOutput("phonePlot5")),
                 conditionalPanel(
                   condition = "input.Input1 == 'MA PD'",
                   plotlyOutput("phonePlot6"))
        )
      )
    )
  )
))
| /ui.R | no_license | LeelaAmbati/Data-science | R | false | false | 8,407 | r | library(shiny)
shinyUI(fluidPage(
tags$body(
background="s10.jpg"
),
#tags$link(rel = "stylesheet", type = "text/css", href = "bootstrap.css"),
#tags$head(tags$script("alert('Hello!');")),
tags$head(tags$style("body{ color: black; }")),
tags$style(HTML("
@import url('//fonts.googleapis.com/css?family=Lobster|Cabin:400,700');
h1 {
font-family: 'Lobster', cursive;
font-weight: 500;
line-height: 1.1;
color: #48ca3b;
}
")),
img(src='karvy-analytics.jpg', align = "right"),theme = "bootstrap.css",
headerPanel("Overview of Star Ratings"),
sidebarLayout(
sidebarPanel(
selectInput("Input1","Choose Part", c("PART C"="PART C",
"PART D"= "PART D",
"MA PD"="MA PD"
),selected ="PART C",selectize = TRUE ),
# selectInput("Input11","Measures", c("Breast.Cancer.Screening"=2,
# "Colorectal.Cancer.Screening"=3),selectize = TRUE ),
conditionalPanel(
condition = "input.Input1 == 'PART C'",
selectInput("Input2","Choose Contract_ID", c("H1651"="H1651",
"H2461"="H2461",
"H5256"="H5256",
"H5264"="H5264")),selected ="H1651",selectize = TRUE ),
conditionalPanel(
condition = "input.Input1 == 'PART D'",
selectInput("Input3","Choose Contract_ID", c("H0141"="H0141",
"H0901"="H0901",
"H1291"="H1291",
"H1419"="H1419")),selected ="H0141",selectize = TRUE ),
conditionalPanel(
condition = "input.Input1 == 'MA PD'",
selectInput("Input4","Choose Contract_ID", c("H0084"="H0084",
"H0104"="H0104",
"H0108"="H0108",
"H0117"="H0117")),selected ="H0084",selectize = TRUE ),
selectInput("Input11","Measures", c("Breast.Cancer.Screening"=2,
"Colorectal.Cancer.Screening"=3,
"Cardiovascular.Care...Cholesterol.Screening"=4,
"Diabetes.Care...Cholesterol.Screening"=5,
"Glaucoma.Testing"=6,
"Annual.Flu.Vaccine"=7,
"Improving.or.Maintaining.Physical.Health"=8,
"Improving.or.Maintaining.Mental.Health"=9,
"Monitoring.Physical.Activity"=10,
"Adult.BMI.Assessment"=11,
"Care.for.Older.Adults...Medication.Review"=12,
"Care.for.Older.Adults...Functional.Status.Assessment "=13,
"Care.for.Older.Adults...Pain.Screening"=14,
" Osteoporosis.Management.in.Women.who.had.a.Fracture "=15,
"Diabetes.Care...Eye.Exam "=16,
"Diabetes.Care...Kidney.Disease.Monitoring "=17,
"Diabetes.Care...Blood.Sugar.Controlled"=18,
"Diabetes.Care...Cholesterol.Controlled"=19,
"Controlling.Blood.Pressure"=20,
"Rheumatoid.Arthritis.Management"=21,
"Improving.Bladder.Control"=22,
"Reducing.the.Risk.of.Falling"=23 ,
"Plan.All.Cause.Readmissions"=24,
"Getting.Needed.Care"=25 ,
"Getting.Appointments.and.Care.Quickly" =26,
"Customer.Service"=27,
"Overall.Rating.of.Health.Care.Quality" =28,
"Overall.Rating.of.Plan Care.Coordination"=29 ,
"Complaints.about.the.Health.Plan"=30,
"Beneficiary.Access.and.Performance.Problems"=31,
"Members.Choosing.to.Leave.the.Plan"=32 ,
"Improvement"=33,
"Plan.Makes.Timely.Decisions.about.Appeals"=34,
"Reviewing.Appeals.Decisions"=35,
"Call.Center...Foreign.Language.Interpreter.and.TTY.TDD.Availability"=36,
"Enrollment.Timeliness"=37,
"Call.Center...Pharmacy.Hold.Time"=38,
"Call.Center...Foreign.Language.Interpreter.and.TTY.TDD.Availability"=39,
"Appeals.Auto.Forward"=40 ,
"Appeals.Upheld Enrollment.Timeliness"=41,
"Complaints.about.the.Drug.Plan"=42 ,
"Beneficiary.Access.and.Performance.Problems"=43,
"Members.Choosing.to.Leave.the.Plan"=44,
"Drug.Plan.Quality.Improvement"=45,
"Getting.Information.From.Drug.Plan"=46 ,
"Rating.of.Drug.Plan"=47,
"Getting.Needed.Prescription.Drugs"=48,
"MPF.Price.Accuracy"=49 ,
"High.Risk.Medication"=50 ,
"Diabetes.Treatment"=51,
"Part.D.Medication.Adherence.for.Oral.Diabetes.Medications"=52,
"Part.D.Medication.Adherence.for.Hypertension..RAS.antagonists."=53,
"Part.D.Medication.Adherence.for.Cholesterol..Statins."=54,
"Special Needs Plan (SNP) Care Management"=55),selectize = TRUE )
),
mainPanel(
tabsetPanel(
tabPanel("Contract Details",
conditionalPanel(
condition = "input.Input1 == 'PART C'",
DT::dataTableOutput('contents2'),
plotOutput("contents5",width = "25%")),
conditionalPanel(
condition = "input.Input1 == 'PART D'",
DT::dataTableOutput('contents3'),
plotOutput("contents6",width = "25%")),
conditionalPanel(
condition = "input.Input1 == 'MA PD'",
DT::dataTableOutput('contents4'),
plotOutput("contents7"))),
tabPanel("Measures Rating visualization",
conditionalPanel(
condition = "input.Input1 == 'PART C'",
scatterD3Output("phonePlot1", height = "700px")),
conditionalPanel(
condition = "input.Input1 == 'PART D'",
scatterD3Output("phonePlot2", height = "700px")),
conditionalPanel(
condition = "input.Input1 == 'MA PD'",
scatterD3Output("phonePlot3", height = "700px"))),
tabPanel("Measures Raw Score Visualization",
conditionalPanel(
condition = "input.Input1 == 'PART C'",
plotlyOutput("phonePlot4")),
conditionalPanel(
condition = "input.Input1 == 'PART D'",
plotlyOutput("phonePlot5")),
conditionalPanel(
condition = "input.Input1 == 'MA PD'",
plotlyOutput("phonePlot6"))
)
)
)
)
)
)
|
library(rstan)
library(ggplot2)

# Diagnostics helpers are sourced into their own environment so they do not
# pollute the global namespace.
util <- new.env()
source("stan_utility.R", local = util)

rstan_options(auto_write = TRUE)
options(mc.cores = parallel::detectCores())

#############
## Scope out your problem
#############
df <- read.csv("data.csv")
head(df)

#############
## Model!
#############
## 1. Create a new Stan file with your proposed model. We'll start with pooled.stan

#############
## Check the model with fake data! (skip if short on time)
## 1. Create a new Stan program (or use R, or some other language) and
##    generate fake data according to the data-generating process described by
##    your model.
## 2. Follow the rest of this workflow with your fake data!
#############
# To use Stan to generate fake data, we use the `generated quantities` block
# and the 'Fixed_param' algorithm. The following snippet will generate
# 200 fake data points.
# NOTE(review): the sampling() call below is missing the model object
# (should be sampling(gen.model, data=d, ...)) -- fix before enabling.
# gen.model = stan_model("gen.stan")
# gen.data = sampling(data=d, algorithm='Fixed_param', warmup=0, iter=200)
# gendf = as.data.frame(gen.data)
# dataset.1 = gendf[1,]

#############
## Fit the model
#############
model <- stan_model("../pooled.stan")
fit <- sampling(model, list(log_radon = df$log_radon,
                            basement = df$basement,
                            N = nrow(df)))

#############
## Check diagnostics
#############
util$check_all_diagnostics(fit)

#############
## Check & graph fit
#############
print(fit)
# Renamed from 'sample' to avoid masking base::sample().
draws <- extract(fit)

# This line just plots our data by basement indicator
ggplot(df, aes(basement, log_radon)) + geom_count() +
  # This next snippet will select every 10th posterior sample and plot the
  # fit lines corresponding to that sample. It's a good way to get a sense of
  # the uncertainty / distribution of the posterior.
  # NROW() (not nrow()) also works when the alpha draws are a plain vector,
  # as they are for the pooled model with a single alpha.
  sapply(seq(1, NROW(draws$alpha), 10), function(i) {
    geom_abline(intercept = draws$alpha[i],
                slope = draws$basement_effect[i], alpha = 0.03)
  })

## This snippet makes the unpooled and hierarchical fit checking graph
## This will fail if you try to use it on the pooled model with a single alpha,
## so I'm starting with it commented out.
# estimates <- apply(draws$a, 2, mean)
# sd <- apply(draws$a, 2, sd)
# fit.df <- data.frame(estimates, 1:85,
#                      estimates + 2*sd,
#                      estimates - 2*sd,
#                      sd)
# names(fit.df) <- c("a", "county", "upper", "lower", "sd")
# fit.df <- fit.df[order(fit.df$a),]
#
# ggplot(fit.df, aes(x=1:85, y=a)) + geom_pointrange(aes(ymin = lower, ymax = upper)) +
#   ylim(-1.1, 3.5)

#############
## Check PPCs
#############
# Plot a single house's posterior predictive distribution (house 919 here)
ppcs <- as.data.frame(draws$yppc)
ggplot(ppcs) + geom_density(aes(V919)) + geom_vline(xintercept = df$log_radon[919])

# Plot an entire replication's distribution against the actual data's distribution
rep_id <- 2000  # renamed from 'rep' to avoid masking base::rep()
ppcs <- data.frame(log_radon = df$log_radon,
                   model = draws$yppc[rep_id, ])
library(reshape2)
ppcs <- reshape2::melt(ppcs)
ggplot(ppcs, aes(x = value, linetype = variable)) + geom_density(alpha = 0.2)

# Shinystan!
#launch_shinystan(fit) | /workflow.R | permissive | dougmet/intro-bayesian-workflow | R | false | false | 3,112 | r | library(rstan)
library(ggplot2)
util = new.env()
source("stan_utility.R", local = util)
rstan_options(auto_write = TRUE)
options(mc.cores = parallel::detectCores())
#############
## Scope out your problem
#############
df = read.csv("data.csv")
head(df)
#############
## Model!
#############
## 1. Create a new Stan file with your proposed model. We'll start with pooled.stan
#############
## Check the model with fake data! (skip if short on time)
## 1. Create a new Stan program (or use R, or some other language) and
## generate fake data according to the data-generating process described by
## your model.
## 2. Follow the rest of this workflow with your fake data!
#############
# To use Stan to generate fake data, we use the `generated quantities` block
# and the 'Fixed_param' algorithm. The following snippet will generate
# 200 fake data points.
# gen.model = stan_model("gen.stan")
# gen.data = sampling(data=d, algorithm='Fixed_param', warmup=0, iter=200)
# gendf = as.data.frame(gen.data)
# dataset.1 = gendf[1,]
#############
## Fit the model
#############
model = stan_model("../pooled.stan")
fit = sampling(model, list(log_radon=df$log_radon,
basement = df$basement,
N=nrow(df)))
#############
## Check diagnostics
#############
util$check_all_diagnostics(fit)
#############
## Check & graph fit
#############
print(fit)
sample = extract(fit)
# This line just plots our data by basement indicator
ggplot(df, aes(basement, log_radon)) + geom_count() +
# This next snippet will select every 10th posterior sample and plot the
# fit lines corresponding to that sample. It's a good way to get a sense of
# the uncertainty / distribution of the posterior
sapply(seq(1, nrow(sample$alpha), 10), function(i) {
geom_abline(intercept = sample$alpha[i],
slope = sample$basement_effect[i], alpha=0.03)
})
## This snippet makes the unpooled and hierarchical fit checking graph
## This will fail if you try to use it on the pooled model with a single alpha,
## so I'm starting with it commented out.
# estimates = apply(sample$a, 2, mean)
# sd = apply(sample$a, 2, sd)
# fit.df = data.frame(estimates, 1:85,
# estimates + 2*sd,
# estimates - 2*sd,
# sd)
# names(fit.df) = c("a", "county", "upper", "lower", "sd")
# fit.df = fit.df[order(fit.df$a),]
#
# ggplot(fit.df, aes(x=1:85, y=a)) + geom_pointrange(aes(ymin = lower, ymax = upper)) +
# ylim(-1.1, 3.5)
#############
## Check PPCs
#############
# Plot a single house's posterior predictive distribution (house 919 here)
ppcs = as.data.frame(sample$yppc)
ggplot(ppcs) + geom_density(aes(V919)) + geom_vline(xintercept=df$log_radon[919])
# Plot an entire replication's distribution against the actual data's distribution
rep = 2000
ppcs = data.frame(log_radon = df$log_radon,
model = sample$yppc[rep,])
library(reshape2)
ppcs = reshape2::melt(ppcs)
ggplot(ppcs, aes(x=value, linetype=variable)) + geom_density(alpha=0.2)
# Shinystan!
#launch_shinystan(fit) |
# This function takes data as input and produced a network. It is used inside bootnet:
# Pipeline: input <- prepFun(data, prepArgs...); network <- estFun(input, estArgs...).
# Both steps are dispatched via do.call(), so prepFun/estFun may be any
# functions whose first argument accepts the previous stage's output.
estimateNetwork <- function(
data,                # raw data (passed as first argument to prepFun)
prepFun, # Fun to produce the correlation or covariance matrix
prepArgs = list(), # list with arguments for the correlation function
estFun, # function that results in a network
estArgs # arguments sent to the graph estimation function (if missing automatically sample size is included)
){
# Compute input: correlation/covariance (or other summary) produced by prepFun.
input <- do.call(prepFun, c(list(data), prepArgs))
# Compute network: estimation routine applied to that summary.
Res <- do.call(estFun, c(list(input),estArgs))
# Return network:
return(Res)
} | /bootnet/R/estimateNetwork.R | no_license | ingted/R-Examples | R | false | false | 605 | r | # This function takes data as input and produced a network. It is used inside bootnet:
estimateNetwork <- function(
data,
prepFun, # Fun to produce the correlation or covariance matrix
prepArgs = list(), # list with arguments for the correlation function
estFun, # function that results in a network
estArgs # arguments sent to the graph estimation function (if missing automatically sample size is included)
){
# Compute input:
input <- do.call(prepFun, c(list(data), prepArgs))
# Compute network:
Res <- do.call(estFun, c(list(input),estArgs))
# Return network:
return(Res)
} |
# Packages installer
# Installs any CRAN package from the required list that is not yet present
# in the local library.
options(repos = c("https://pbil.univ-lyon1.fr/CRAN/"))
cran_packages <- c('tidyverse', 'shiny', 'shinyWidgets', 'DT', 'ggpubr', 'reshape2', 'cowplot', 'shinydashboard', 'shinybusy')
# setdiff() against the installed-package index; explicit length comparison
# instead of relying on integer truthiness in if().
new_cran_packages <- setdiff(cran_packages, installed.packages()[, "Package"])
if (length(new_cran_packages) > 0) install.packages(new_cran_packages)
| /Source/install_packages.R | no_license | WANGDI0212/retinoblastoma-retina-markers | R | false | false | 363 | r | # Packages installer
options(repos = c("https://pbil.univ-lyon1.fr/CRAN/"))
cran_packages <- c('tidyverse', 'shiny', 'shinyWidgets', 'DT', 'ggpubr', 'reshape2', 'cowplot', 'shinydashboard', 'shinybusy')
new_cran_packages <- cran_packages[!(cran_packages %in% installed.packages()[,"Package"])]
if(length(new_cran_packages)) install.packages(new_cran_packages)
|
# Script to generate a normalized coverage vs. position along transcript plot.
#
# @author Tim Fennell

# Parse the arguments: metrics file, output PDF path, BAM name, optional subtitle.
args <- commandArgs(trailing = TRUE)
metricsFile <- args[1]
outputFile <- args[2]
bamName <- args[3]
subtitle <- ifelse(length(args) < 4, "", args[4])

# Figure out where the metrics and the histogram are in the file and parse them out
# (the histogram table starts after the second blank line of the metrics file).
startFinder <- scan(metricsFile, what="character", sep="\n", quiet=TRUE, blank.lines.skip=FALSE)
firstBlankLine=0
for (i in 1:length(startFinder)) {
  if (startFinder[i] == "") {
    if (firstBlankLine==0) {
      firstBlankLine=i+1
    } else {
      secondBlankLine=i+1
      break
    }
  }
}
data <- read.table(metricsFile, header=T, sep="\t", skip=secondBlankLine, check.names=FALSE)

# The histogram has a normalized_position and normalized_coverage column for each metric "level"
# This code parses out the distinct levels so we can output one graph per level
headers <- sapply(sub(".normalized_coverage","",names(data),fixed=TRUE), "[[" ,1)

## Duplicated header names cause this to barf. KT & Yossi report that this is going to be extremely difficult to
## resolve and it's unlikely that anyone cares anyways. Trap this situation and avoid the PDF so it won't cause
## the workflow to fail
if (any(duplicated(headers))) {
  # NOTE(review): message says "insert size PDF" but this script writes the
  # coverage-vs-position PDF -- looks like a copy/paste from another script.
  print(paste("Not creating insert size PDF as there are duplicated header names:", headers[which(duplicated(headers))]))
} else {
  pdf(outputFile)
  # Collect the distinct levels, preserving first-seen order; headers[1] is
  # the normalized_position column and is skipped.
  levels <- c()
  for (i in 2:length(headers)) {
    if (!(headers[i] %in% levels)) {
      levels[length(levels)+1] <- headers[i]
    }
  }
  # Some constants that are used below
  COLORS = c("royalblue", "#FFAAAA", "palegreen3");
  # For each level, plot of the normalized coverage by GC
  for (i in 1:length(levels)) {
    # Reconstitutes the histogram column header for this level
    nc <- paste(levels[i], "normalized_coverage", sep=".")
    plot(x=data$normalized_position, y=as.matrix(data[nc]),
         type="o",
         xlab="Normalized Distance Along Transcript",
         ylab="Normalized Coverage",
         xlim=c(0, 100),
         ylim=range(0, max(data[nc])),
         col="royalblue",
         main=paste("RNA-Seq Coverage vs. Transcript Position\n", levels[i], " ", ifelse(subtitle=="", "", paste("(", subtitle, ")", sep="")), "\nin file ", bamName,sep=""))
    # Add a horizontal line at coverage=1
    abline(h=1, col="lightgrey");
  }
  dev.off();
} | /src/main/resources/picard/analysis/rnaSeqCoverage.R | permissive | broadinstitute/picard | R | false | false | 2,603 | r | # Script to generate a normalized coverage vs. position along transcript plot.
#
# @author Tim Fennell
# Parse the arguments
args <- commandArgs(trailing = TRUE)
metricsFile <- args[1]
outputFile <- args[2]
bamName <- args[3]
subtitle <- ifelse(length(args) < 4, "", args[4])
# Figure out where the metrics and the histogram are in the file and parse them out
startFinder <- scan(metricsFile, what="character", sep="\n", quiet=TRUE, blank.lines.skip=FALSE)
firstBlankLine=0
for (i in 1:length(startFinder)) {
if (startFinder[i] == "") {
if (firstBlankLine==0) {
firstBlankLine=i+1
} else {
secondBlankLine=i+1
break
}
}
}
data <- read.table(metricsFile, header=T, sep="\t", skip=secondBlankLine, check.names=FALSE)
# The histogram has a normalized_position and normalized_coverage column for each metric "level"
# This code parses out the distinct levels so we can output one graph per level
headers <- sapply(sub(".normalized_coverage","",names(data),fixed=TRUE), "[[" ,1)
## Duplicated header names cause this to barf. KT & Yossi report that this is going to be extremely difficult to
## resolve and it's unlikely that anyone cares anyways. Trap this situation and avoid the PDF so it won't cause
## the workflow to fail
if (any(duplicated(headers))) {
print(paste("Not creating insert size PDF as there are duplicated header names:", headers[which(duplicated(headers))]))
} else {
pdf(outputFile)
levels <- c()
for (i in 2:length(headers)) {
if (!(headers[i] %in% levels)) {
levels[length(levels)+1] <- headers[i]
}
}
# Some constants that are used below
COLORS = c("royalblue", "#FFAAAA", "palegreen3");
# For each level, plot of the normalized coverage by GC
for (i in 1:length(levels)) {
# Reconstitutes the histogram column header for this level
nc <- paste(levels[i], "normalized_coverage", sep=".")
plot(x=data$normalized_position, y=as.matrix(data[nc]),
type="o",
xlab="Normalized Distance Along Transcript",
ylab="Normalized Coverage",
xlim=c(0, 100),
ylim=range(0, max(data[nc])),
col="royalblue",
main=paste("RNA-Seq Coverage vs. Transcript Position\n", levels[i], " ", ifelse(subtitle=="", "", paste("(", subtitle, ")", sep="")), "\nin file ", bamName,sep=""))
# Add a horizontal line at coverage=1
abline(h=1, col="lightgrey");
}
dev.off();
} |
# Compile and prepare the US data used in the HLW r-star estimation:
# log real GDP, core PCE inflation, a 4-quarter inflation-expectations proxy,
# and a spliced short-term interest rate, written to inputData/rstar.data.us.csv.
prep.rstar.us <- function(){
  #------------------------------------------------------------------------------#
  # File:        prepare.rstar.data.us.R
  #
  # Description: This file (1) compiles and (2) prepares the data used in
  #              HLW for the US.
  #------------------------------------------------------------------------------#
  # NOTE(review): 'Root' must exist in the calling environment, and 'Loc' is
  # never used below -- confirm whether this line is still needed.
  Loc <- paste0(Root, "C:/FinMetricsProject/Code")
  # Load time series library (installed on the fly if absent).
  if (!require("tis")) {install.packages("tis"); library('tis')}
  #------------------------------------------------------------------------------#
  # Get Raw Data
  #------------------------------------------------------------------------------#
  # Set the start and end dates of the data used in the estimation
  data.start <- c(1960,1)
  data.end   <- c(2016,3)
  # Import data using the function getFRED() in utilities.R
  # If the connection does not work, try the old URL:
  # "https://research.stlouisfed.org/fred2/data/GDPC1.txt"
  # NOTE: The getFRED() function requires the wget command line utility;
  #       users can also manually download the text files.
  gdp.us         <- read.csv("C:/FinMetricsProject/rawData/GDPC1.csv")
  price.index.us <- read.csv("C:/FinMetricsProject/rawData/PCEPILFE.csv")
  ny.discount.us <- read.csv("C:/FinMetricsProject/rawData/INTDSRUSM193N.csv")
  fed.funds.us   <- read.csv("C:/FinMetricsProject/rawData/FEDFUNDS.csv")
  #------------------------------------------------------------------------------#
  # Prepare Data
  #------------------------------------------------------------------------------#
  # Take log of real GDP (drop the raw level column, keep DATE + log level).
  gdp.log <- log(gdp.us$GDPC1)
  gdp.log <- cbind(gdp.us, gdp.log)
  gdp.log <- gdp.log[-2]
  # Create an annualized inflation series using the price index
  # First convert data.frame to time series object
  price.index.us$DATE <- as.Date(as.character(price.index.us$DATE),format="%Y-%m-%d")
  # NOTE(review): xts() is used here but only 'tis' is loaded above -- confirm
  # the xts package is attached by the calling script.
  price.index <- xts(price.index.us, order.by = as.POSIXct(price.index.us$DATE))
  # Calculate inflation
  # NOTE(review): this applies lag() to the raw data.frame (not the xts
  # object) with k=12 -- verify the intended series and lag length.
  inflation <- 400*log(price.index.us/lag(price.index.us,k=12))
  # Inflation expectations measure: 4-quarter moving average of past inflation
  # FIX: 'Llagpad' was a typo for 'lagpad' (used for lags 1 and 2 on the
  # same line).
  inflation.expectations <- (inflation + lagpad(inflation,1) + lagpad(inflation,2) + lagpad(inflation,3))/4
  # Express interest rate data on a 365-day basis
  ny.discount.eff <- 100*((1+ny.discount.us/36000)^365 -1)
  fed.funds.eff   <- 100*((1+fed.funds.us/36000)^365 -1)
  # NY Fed discount rate is used prior to 1965; thereafter, use the effective federal funds rate
  interest <- mergeSeries(window(ny.discount.eff, end = c(1964,4)),window(fed.funds.eff, start = c(1965,1)))
  #------------------------------------------------------------------------------#
  # Output Data
  #------------------------------------------------------------------------------#
  data.out <- window(cbind(gdp.log, inflation, inflation.expectations, interest),start = data.start, end = data.end)
  write.table(data.out,file = 'inputData/rstar.data.us.csv', sep = ',',
              col.names = TRUE, quote = FALSE, na = '.', row.names = FALSE)
}
| /Code/prepare.rstar.data.us.R | no_license | acemacdonald/FinMetricsProject | R | false | false | 3,015 | r | prep.rstar.us <- function(){
#------------------------------------------------------------------------------#
# File: prepare.rstar.data.us.R
#
# Description: This file (1) compiles and (2) prepares the data used in
# HLW for the US.
#
# Builds log real GDP, core PCE inflation, a 4-quarter moving average of
# past inflation (expectations proxy), and an effective nominal short rate,
# then writes the merged data set to inputData/rstar.data.us.csv.
#------------------------------------------------------------------------------#
# NOTE(review): `Root` is not defined in this function or visible here, and
# `Loc` is never used below -- confirm whether this line is dead code.
Loc <- paste0(Root, "C:/FinMetricsProject/Code")
# Load time series library
if (!require("tis")) {install.packages("tis"); library('tis')}
#------------------------------------------------------------------------------#
# Get Raw Data
#------------------------------------------------------------------------------#
# Set the start and end dates of the data used in the estimation
data.start <- c(1960,1)
data.end <- c(2016,3)
# Import data using the function getFRED() in utilities.R
# If the connection does not work, try the old URL:
# "https://research.stlouisfed.org/fred2/data/GDPC1.txt"
# NOTE: The getFRED() function requires the wget command line utility;
# users can also manually download the text files.
gdp.us <- read.csv("C:/FinMetricsProject/rawData/GDPC1.csv")
price.index.us <- read.csv("C:/FinMetricsProject/rawData/PCEPILFE.csv")
ny.discount.us <- read.csv("C:/FinMetricsProject/rawData/INTDSRUSM193N.csv")
fed.funds.us <- read.csv("C:/FinMetricsProject/rawData/FEDFUNDS.csv")
#------------------------------------------------------------------------------#
# Prepare Data
#------------------------------------------------------------------------------#
# Take log of real GDP
gdp.log <- log(gdp.us$GDPC1)
gdp.log <- cbind(gdp.us, gdp.log)
gdp.log <- gdp.log[-2]   # drop the raw GDPC1 level column, keep DATE + log GDP
# Create an annualized inflation series using the price index
# First convert data.frame to time series object
price.index.us$DATE <- as.Date(as.character(price.index.us$DATE),format="%Y-%m-%d")
price.index <- xts(price.index.us, order.by = as.POSIXct(price.index.us$DATE))
# NOTE(review): the xts object `price.index` is never used afterwards; the
# line below operates on the raw data.frame `price.index.us` (including its
# DATE column) -- confirm this is intended.
#Calculate inflation
inflation <- 400*log(price.index.us/lag(price.index.us,k=12))
# Inflation expectations measure: 4-quarter moving average of past inflation
# BUGFIX: the last term previously called the non-existent `Llagpad`; it
# should be `lagpad`, matching the two preceding terms on this line.
inflation.expectations <- (inflation + lagpad(inflation,1) + lagpad(inflation,2) + lagpad(inflation,3))/4
# Express interest rate data on a 365-day basis
ny.discount.eff <- 100*((1+ny.discount.us/36000)^365 -1)
fed.funds.eff <- 100*((1+fed.funds.us/36000)^365 -1)
# NY Fed discount rate is used prior to 1965; thereafter, use the effective federal funds rate
interest <- mergeSeries(window(ny.discount.eff, end = c(1964,4)),window(fed.funds.eff, start = c(1965,1)))
#------------------------------------------------------------------------------#
# Output Data
#------------------------------------------------------------------------------#
data.out <- window(cbind(gdp.log, inflation, inflation.expectations, interest),start = data.start, end = data.end)
write.table(data.out,file = 'inputData/rstar.data.us.csv', sep = ',',
            col.names = TRUE, quote = FALSE, na = '.', row.names = FALSE)
}
|
# Check whether a package is present in the local library.
# Returns TRUE if `mypkg` appears among the installed packages, FALSE otherwise.
is.installed <- function(mypkg){
  known_pkgs <- installed.packages()[, 1]
  mypkg %in% known_pkgs
}
# Smoke tests for the rkafka Java bindings against a local Kafka broker on
# 127.0.0.1:9092 (a running broker is required for these checks to pass).
# NOTE(review): the CRAN package is named "rkafka"; confirm that
# "rkafka1.0" resolves to an installed library on this system.
library(rkafka1.0)
# Start a producer and verify it is backed by the expected Java class.
producer1=rkafka.startProducer("127.0.0.1:9092")
condition=rJava::.jinstanceof(producer1,"com/musigma/producer/MuProducer")
RUnit::checkTrue(condition,"check1")
# Create a simple consumer and verify its backing Java class as well.
# NOTE(review): argument meanings (host, port, timeouts/buffer, topic)
# inferred from the call site -- confirm against the rkafka documentation.
simpleConsumer1=rkafka.createSimpleConsumer("127.0.0.1","9092","10000","10000","test")
condition=rJava::.jinstanceof(simpleConsumer1,"com/musigma/consumer/MuSimpleConsumer")
RUnit::checkTrue(condition,"check2")
| /src/tests/unitTests.R | no_license | zamaterian/rkafka | R | false | false | 478 | r | is.installed <- function(mypkg){
is.element(mypkg, installed.packages()[,1])
}
# Smoke tests for the rkafka Java bindings against a local Kafka broker on
# 127.0.0.1:9092 (a running broker is required for these checks to pass).
# NOTE(review): the CRAN package is named "rkafka"; confirm that
# "rkafka1.0" resolves to an installed library on this system.
library(rkafka1.0)
# Start a producer and verify it is backed by the expected Java class.
producer1=rkafka.startProducer("127.0.0.1:9092")
condition=rJava::.jinstanceof(producer1,"com/musigma/producer/MuProducer")
RUnit::checkTrue(condition,"check1")
# Create a simple consumer and verify its backing Java class as well.
# NOTE(review): argument meanings (host, port, timeouts/buffer, topic)
# inferred from the call site -- confirm against the rkafka documentation.
simpleConsumer1=rkafka.createSimpleConsumer("127.0.0.1","9092","10000","10000","test")
condition=rJava::.jinstanceof(simpleConsumer1,"com/musigma/consumer/MuSimpleConsumer")
RUnit::checkTrue(condition,"check2")
|
# Auto-generated fuzz-regression input: call the internal routine
# diffrprojects:::dist_mat_absolute with a fixed integer vector `x` and an
# empty integer vector `y` (input recorded by the fuzzer; do not edit values).
testlist <- list(x = c(1277774848L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), y = integer(0))
# Invoke the function with the recorded arguments; the result is inspected
# below (under valgrind in the original harness) for memory errors.
result <- do.call(diffrprojects:::dist_mat_absolute,testlist)
str(result) | /diffrprojects/inst/testfiles/dist_mat_absolute/libFuzzer_dist_mat_absolute/dist_mat_absolute_valgrind_files/1609962876-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 309 | r | testlist <- list(x = c(1277774848L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), y = integer(0))
result <- do.call(diffrprojects:::dist_mat_absolute,testlist)
str(result) |
## Cachematrix and its helper function make CacheMatrix calculate the inverse of a given
## matrix, either retrieving an already calculated results from cache, or making the
## calculation for the first time and saving the result to cache
## makeCacheMatrix takes a matrix as an input and returns a list of 4 functions
## usage e.g.:
## m = matrix(c(2,4,3,4,4,9,2,1,3),nrow=3)
## m1 <- makeCacheMatrix(m)
## OBS.: for some reason, m <- makeCacheMatrix(m) does not work
## Build a cache-aware wrapper around a matrix. The returned list exposes
## four closures that share one environment:
##   set(y)    -- replace the stored matrix and invalidate the cached inverse
##   get()     -- return the stored matrix
##   setinv(v) -- store a computed inverse in the cache
##   getinv()  -- return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setinv <- function(inv) {
    cached_inverse <<- inv
  }
  getinv <- function() {
    cached_inverse
  }
  list(set = set,
       get = get,
       setinv = setinv,
       getinv = getinv)
}
## cacheSolve takes a matrix as an input and returns its inverse, either from cache,
## if possible, or calculating the inverse for the first time and saving it to cache
## usage e.g. cacheSolve(m1)
## Compute (or retrieve) the inverse of the special "matrix" object `x`
## created by makeCacheMatrix(). On a cache hit a message is emitted and the
## stored inverse is returned; on a miss the inverse is computed with
## solve() (extra arguments are forwarded), written back to the cache, and
## returned.
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (is.null(cached)) {
    fresh <- solve(x$get(), ...)
    x$setinv(fresh)
    return(fresh)
  }
  message("getting cached data")
  cached
}
| /cachematrix.R | no_license | rrrafi/ProgrammingAssignment2 | R | false | false | 1,333 | r | ## Cachematrix and its helper function make CacheMatrix calculate the inverse of a given
## matrix, either retrieving an already calculated results from cache, or making the
## calculation for the first time and saving the result to cache
## makeCacheMatrix takes a matrix as an input and returns a list of 4 functions
## usage e.g.:
## m = matrix(c(2,4,3,4,4,9,2,1,3),nrow=3)
## m1 <- makeCacheMatrix(m)
## OBS.: for some reason, m <- makeCacheMatrix(m) does not work
## Build a cache-aware wrapper around a matrix. The returned list exposes
## four closures that share one environment:
##   set(y)    -- replace the stored matrix and invalidate the cached inverse
##   get()     -- return the stored matrix
##   setinv(v) -- store a computed inverse in the cache
##   getinv()  -- return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setinv <- function(inv) {
    cached_inverse <<- inv
  }
  getinv <- function() {
    cached_inverse
  }
  list(set = set,
       get = get,
       setinv = setinv,
       getinv = getinv)
}
## cacheSolve takes a matrix as an input and returns its inverse, either from cache,
## if possible, or calculating the inverse for the first time and saving it to cache
## usage e.g. cacheSolve(m1)
## Compute (or retrieve) the inverse of the special "matrix" object `x`
## created by makeCacheMatrix(). On a cache hit a message is emitted and the
## stored inverse is returned; on a miss the inverse is computed with
## solve() (extra arguments are forwarded), written back to the cache, and
## returned.
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (is.null(cached)) {
    fresh <- solve(x$get(), ...)
    x$setinv(fresh)
    return(fresh)
  }
  message("getting cached data")
  cached
}
|
library(datarobot)
### Name: GetModelRecommendation
### Title: Retrieve a model recommendation from DataRobot for your
### project.
### Aliases: GetModelRecommendation
### ** Examples
## Not run:
##D projectId <- "5984b4d7100d2b31c1166529"
##D GetModelRecommendation(projectId)
## End(Not run)
| /data/genthat_extracted_code/datarobot/examples/GetModelRecommendation.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 310 | r | library(datarobot)
### Name: GetModelRecommendation
### Title: Retrieve a model recommendation from DataRobot for your
### project.
### Aliases: GetModelRecommendation
### ** Examples
## Not run:
##D projectId <- "5984b4d7100d2b31c1166529"
##D GetModelRecommendation(projectId)
## End(Not run)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/makeScatterPlots.R
\name{qualityScatter}
\alias{qualityScatter}
\title{plots a scatter of the VAF between two samples}
\usage{
qualityScatter(q1, q2, ps = NA, covScale = 100, maxCex = 1.5,
minCov = 10, main = "", xlab = "variant frequency: sample1",
ylab = "variant frequency: sample2", plotFlagged = T, cpus = 1,
verbose = T, print = F, printRedCut = 0.99, printOnlyNovel = F,
plotPosition = F, genome = "hg19", xlim = c(0, 1), ylim = c(0,
1), outputHighlighted = F, frame.plot = F, legend = T,
redCut = 0.75, forceCol = NA, add = F, GoI = c(), printCex = 1,
doPlot = T, minSomaticP = 0, ignoreFlagsInOne = c("Svr", "Mv",
"Nab"), ignoreFlagsInBoth = c("Srr"), flagOpacity = 0.4,
severityWidth = 0.5, cosmicWidth = 3, ...)
}
\arguments{
\item{q1}{data.frame. The variants of the x sample, taken from the loaded data$allVariants$variants$variants$mySample}
\item{q2}{data.frame. The variants of the y sample, taken from the loaded data$allVariants$variants$variants$mySample}
\item{ps}{numeric. p-values of the variants. Default NA calculates them, but supplying them directly can save time.}
\item{covScale}{numeric. The coverage where the point size is one. Larger coverage scale will make the points smaller. Default 100.}
\item{maxCex}{numeric. Cutoff for maximum point size. Default 1.5.}
\item{minCov}{numeric. Variants with coverage below this level are not plotted.}
\item{plotFlagged}{logical. Whether flagged variants should be plotted. Default T.}
\item{cpus}{numeric. The maximum number of cpus to run on.}
\item{verbose}{logical. If some diagnostic outputs should be printed to terminal. Default T.}
\item{print}{logical. If gene names of significantly different VAFs should be printed on the plot. Default F.}
\item{printRedCut}{numeric. The redness (0 to 1) above which gene names are printed if print is TRUE. Default 0.99.}
\item{printOnlyNovel}{logical. Only print gene names if the VAF is small in one sample if print is TRUE. Default FALSE.}
\item{plotPosition}{logical. Show the genomic position of each variant with a thin line to the top of the plot, as well as colour coding. Default FALSE.}
\item{genome}{character. The genome aligned to. Default 'hg19'.}
\item{outputHighlighted}{logical. Prints the printed genes to terminal. Default FALSE.}
\item{legend}{logical. If the legend should be included. Default TRUE.}
\item{redCut}{numeric. Sets how significantly different the VAFs have to be for the dot to be coloured red. Default 0.75.}
\item{forceCol}{colour. Forces all the dots to this colour. Default NA does colour by significance.}
\item{add}{logical. If TRUE, the dots are added to the existing plot. Default FALSE.}
\item{GoI}{character. vector of genes of interest that always get their gene name printed on the plot. Default c().}
\item{printCex}{numeric. A scaling factor for the size of the printed gene names. Default 1.}
\item{doPlot}{numeric. If FALSE, the plot isn't made, but p values are returned. Default TRUE.}
\item{minSomaticP}{numeric. Variants with a somaticP below this cut in both samples are excluded. Default 0 includes all points.}
\item{ignoreFlagsInOne}{character. Variants with this flag in one (but not both) sample are plotted even if plotFlagged is FALSE. Default c('Svr', 'Mv', 'Nab').}
\item{ignoreFlagsInBoth}{character. Variants with this flag are plotted even if plotFlagged is FALSE. Default c('Srr').}
\item{flagOpacity}{numeric. The opacity of the flagged variants. Default 0.4.}
\item{severityWidth}{numeric. A scaling factor for the size of the orange circle for protein altering SNVs. Default 0.5.}
\item{cosmicWidth}{numeric. A scaling factor for the size of the green circle around COSMIC census genes. Default 3.}
\item{...}{remaining arguments are passed to plot(...)}
}
\description{
plots a scatter of the VAF between two samples
}
\details{
Produces a scatter plot comparing the variant allele frequencies (VAFs) of shared variants between two samples. Point size scales with coverage (see \code{covScale} and \code{maxCex}), points are coloured by how significantly the two VAFs differ (see \code{redCut}), and flagged variants can optionally be shown or hidden via \code{plotFlagged}.
}
\examples{
\dontrun{
# Compare the variant allele frequencies of two samples from a loaded
# data object.
q1 = data$allVariants$variants$variants$sample1
q2 = data$allVariants$variants$variants$sample2
qualityScatter(q1, q2, plotFlagged = FALSE)
}
}
| /man/qualityScatter.Rd | permissive | shulp2211/superFreq | R | false | true | 4,471 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/makeScatterPlots.R
\name{qualityScatter}
\alias{qualityScatter}
\title{plots a scatter of the VAF between two samples}
\usage{
qualityScatter(q1, q2, ps = NA, covScale = 100, maxCex = 1.5,
minCov = 10, main = "", xlab = "variant frequency: sample1",
ylab = "variant frequency: sample2", plotFlagged = T, cpus = 1,
verbose = T, print = F, printRedCut = 0.99, printOnlyNovel = F,
plotPosition = F, genome = "hg19", xlim = c(0, 1), ylim = c(0,
1), outputHighlighted = F, frame.plot = F, legend = T,
redCut = 0.75, forceCol = NA, add = F, GoI = c(), printCex = 1,
doPlot = T, minSomaticP = 0, ignoreFlagsInOne = c("Svr", "Mv",
"Nab"), ignoreFlagsInBoth = c("Srr"), flagOpacity = 0.4,
severityWidth = 0.5, cosmicWidth = 3, ...)
}
\arguments{
\item{q1}{data.frame. The variants of the x sample, taken from the loaded data$allVariants$variants$variants$mySample}
\item{q2}{data.frame. The variants of the y sample, taken from the loaded data$allVariants$variants$variants$mySample}
\item{ps}{numeric. p-values of the variants. Default NA calculates them, but supplying them directly can save time.}
\item{covScale}{numeric. The coverage where the point size is one. Larger coverage scale will make the points smaller. Default 100.}
\item{maxCex}{numeric. Cutoff for maximum point size. Default 1.5.}
\item{minCov}{numeric. Variants with coverage below this level are not plotted.}
\item{plotFlagged}{logical. Whether flagged variants should be plotted. Default T.}
\item{cpus}{numeric. The maximum number of cpus to run on.}
\item{verbose}{logical. If some diagnostic outputs should be printed to terminal. Default T.}
\item{print}{logical. If gene names of significantly different VAFs should be printed on the plot. Default F.}
\item{printRedCut}{numeric. The redness (0 to 1) above which gene names are printed if print is TRUE. Default 0.99.}
\item{printOnlyNovel}{logical. Only print gene names if the VAF is small in one sample if print is TRUE. Default FALSE.}
\item{plotPosition}{logical. Show the genomic position of each variant with a thin line to the top of the plot, as well as colour coding. Default FALSE.}
\item{genome}{character. The genome aligned to. Default 'hg19'.}
\item{outputHighlighted}{logical. Prints the printed genes to terminal. Default FALSE.}
\item{legend}{logical. If the legend should be included. Default TRUE.}
\item{redCut}{numeric. Sets how significantly different the VAFs have to be for the dot to be coloured red. Default 0.75.}
\item{forceCol}{colour. Forces all the dots to this colour. Default NA does colour by significance.}
\item{add}{logical. If TRUE, the dots are added to the existing plot. Default FALSE.}
\item{GoI}{character. vector of genes of interest that always get their gene name printed on the plot. Default c().}
\item{printCex}{numeric. A scaling factor for the size of the printed gene names. Default 1.}
\item{doPlot}{numeric. If FALSE, the plot isn't made, but p values are returned. Default TRUE.}
\item{minSomaticP}{numeric. Variants with a somaticP below this cut in both samples are excluded. Default 0 includes all points.}
\item{ignoreFlagsInOne}{character. Variants with this flag in one (but not both) sample are plotted even if plotFlagged is FALSE. Default c('Svr', 'Mv', 'Nab').}
\item{ignoreFlagsInBoth}{character. Variants with this flag are plotted even if plotFlagged is FALSE. Default c('Srr').}
\item{flagOpacity}{numeric. The opacity of the flagged variants. Default 0.4.}
\item{severityWidth}{numeric. A scaling factor for the size of the orange circle for protein altering SNVs. Default 0.5.}
\item{cosmicWidth}{numeric. A scaling factor for the size of the green circle around COSMIC census genes. Default 3.}
\item{...}{remaining arguments are passed to plot(...)}
}
\description{
plots a scatter of the VAF between two samples
}
\details{
Produces a scatter plot comparing the variant allele frequencies (VAFs) of shared variants between two samples. Point size scales with coverage (see \code{covScale} and \code{maxCex}), points are coloured by how significantly the two VAFs differ (see \code{redCut}), and flagged variants can optionally be shown or hidden via \code{plotFlagged}.
}
\examples{
\dontrun{
# Compare the variant allele frequencies of two samples from a loaded
# data object.
q1 = data$allVariants$variants$variants$sample1
q2 = data$allVariants$variants$variants$sample2
qualityScatter(q1, q2, plotFlagged = FALSE)
}
}
|
# Map differentially expressed genes (Ensembl IDs) to gene symbols, Entrez
# IDs, and KEGG pathway IDs using the chicken (org.Gg.eg.db) and human
# (org.Hs.eg.db) annotation databases, then write annotation tables and
# ID maps for downstream use.
library("org.Gg.eg.db")
library("org.Hs.eg.db")
# Top hits of DE genes against the chicken reference.
degenes.gg.table<-read.table('gallus/gimme/line7u_vs_i.cuffref.tophits.nucl.txt',
                            stringsAsFactors=F, sep="\t", header=T)
# BUGFIX: `keys` previously referenced the undefined object `degenes.table`;
# the chicken lookup must use the chicken DE-gene table loaded above.
ggannots<-select(org.Gg.eg.db,
                keys=degenes.gg.table$geneID,
                columns=c("SYMBOL","ENTREZID", "PATH"),
                keytype="ENSEMBL")
# Top hits of DE genes against the human reference.
degenes.hs.table<-read.table('human/gimme/line7u_vs_i.cuffref.tophits.nucl.txt',
                            stringsAsFactors=F, sep="\t", header=T)
# BUGFIX: likewise, the human lookup must use the human DE-gene table.
hsannots<-select(org.Hs.eg.db,
                keys=degenes.hs.table$geneID,
                columns=c("SYMBOL","ENTREZID", "PATH"),
                keytype="ENSEMBL")
# Write the annotation tables and the (rowname, geneID) maps.
write.table(ggannots,
            'human_gallus/line7u_vs_i.degenes.cuffref.tophits.nucl.gg.annots.txt',
            sep='\t', quote=F, row.names=F, col.names=F)
write.table(hsannots,
            'human_gallus/line7u_vs_i.degenes.cuffref.tophits.nucl.hs.annots.txt',
            sep='\t', quote=F, row.names=F, col.names=F)
write.table(data.frame(rownames(degenes.gg.table), degenes.gg.table$geneID),
            'human_gallus/line7u_vs_i.degenes.cuffref.tophits.nucl.gg.txt',
            sep='\t', quote=F, row.names=F, col.names=F)
write.table(data.frame(rownames(degenes.hs.table), degenes.hs.table$geneID),
            'human_gallus/line7u_vs_i.degenes.cuffref.tophits.nucl.hs.txt',
            sep='\t', quote=F, row.names=F, col.names=F) | /combine_kegg_annots.R | no_license | likit/RNASeq-methods-comparison | R | false | false | 1,417 | r | library("org.Gg.eg.db")
library("org.Hs.eg.db")
# Map differentially expressed genes (Ensembl IDs) to gene symbols, Entrez
# IDs, and KEGG pathway IDs using the chicken (org.Gg.eg.db, loaded above)
# and human (org.Hs.eg.db) annotation databases, then write annotation
# tables and ID maps for downstream use.
# Top hits of DE genes against the chicken reference.
degenes.gg.table<-read.table('gallus/gimme/line7u_vs_i.cuffref.tophits.nucl.txt',
                            stringsAsFactors=F, sep="\t", header=T)
# BUGFIX: `keys` previously referenced the undefined object `degenes.table`;
# the chicken lookup must use the chicken DE-gene table loaded above.
ggannots<-select(org.Gg.eg.db,
                keys=degenes.gg.table$geneID,
                columns=c("SYMBOL","ENTREZID", "PATH"),
                keytype="ENSEMBL")
# Top hits of DE genes against the human reference.
degenes.hs.table<-read.table('human/gimme/line7u_vs_i.cuffref.tophits.nucl.txt',
                            stringsAsFactors=F, sep="\t", header=T)
# BUGFIX: likewise, the human lookup must use the human DE-gene table.
hsannots<-select(org.Hs.eg.db,
                keys=degenes.hs.table$geneID,
                columns=c("SYMBOL","ENTREZID", "PATH"),
                keytype="ENSEMBL")
# Write the annotation tables and the (rowname, geneID) maps.
write.table(ggannots,
            'human_gallus/line7u_vs_i.degenes.cuffref.tophits.nucl.gg.annots.txt',
            sep='\t', quote=F, row.names=F, col.names=F)
write.table(hsannots,
            'human_gallus/line7u_vs_i.degenes.cuffref.tophits.nucl.hs.annots.txt',
            sep='\t', quote=F, row.names=F, col.names=F)
write.table(data.frame(rownames(degenes.gg.table), degenes.gg.table$geneID),
            'human_gallus/line7u_vs_i.degenes.cuffref.tophits.nucl.gg.txt',
            sep='\t', quote=F, row.names=F, col.names=F)
write.table(data.frame(rownames(degenes.hs.table), degenes.hs.table$geneID),
            'human_gallus/line7u_vs_i.degenes.cuffref.tophits.nucl.hs.txt',
            sep='\t', quote=F, row.names=F, col.names=F) |
#' Table 7_12
#'
#' Real Consumption Expenditure, Real Income, Real Wealth, and Real Interest Rates for the U.S., 1947 to 2000
#' Sources: C, Yd, and quarterly and annual chain-type price indexes (1996 = 100): Bureau of Economic Analysis, U.S. Department of Commerce (http://www.bea.doc.gov/bea/dn1.htm). Nominal annual yield on 3-month Treasury securities: Economic Report of the President, 2002. Nominal wealth = end-of-year nominal net worth of households and nonprofits (from Federal Reserve flow of funds data: http://www.federalreserve.gov).
#'
#' @docType data
#' @usage data('Table7_12')
#' @format
#' \itemize{
#' \item \strong{Year}
#' \item \strong{C: }real consumption expenditures in billions of chained 1996 dollars.
#' \item \strong{Yd: } real personal disposable income in billions of chained 1996 dollars
#' \item \strong{wealth: } real wealth in billions of chained 1996 dollars
#' \item \strong{interest: }nominal annual yield on 3-month Treasury securities–inflation rate (measured by the annual % change in annual chained price index).
#' }
'Table7_12'
| /R/table7_12.R | no_license | brunoruas2/gujarati | R | false | false | 1,077 | r | #' Table 7_12
#'
#' Real Consumption Expenditure, Real Income, Real Wealth, and Real Interest Rates for the U.S., 1947 to 2000
#' Sources: C, Yd, and quarterly and annual chain-type price indexes (1996 = 100): Bureau of Economic Analysis, U.S. Department of Commerce (http://www.bea.doc.gov/bea/dn1.htm). Nominal annual yield on 3-month Treasury securities: Economic Report of the President, 2002. Nominal wealth = end-of-year nominal net worth of households and nonprofits (from Federal Reserve flow of funds data: http://www.federalreserve.gov).
#'
#' @docType data
#' @usage data('Table7_12')
#' @format
#' \itemize{
#' \item \strong{Year}
#' \item \strong{C: }real consumption expenditures in billions of chained 1996 dollars.
#' \item \strong{Yd: } real personal disposable income in billions of chained 1996 dollars
#' \item \strong{wealth: } real wealth in billions of chained 1996 dollars
#' \item \strong{interest: }nominal annual yield on 3-month Treasury securities–inflation rate (measured by the annual % change in annual chained price index).
#' }
'Table7_12'
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.