content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# Exploratory plot 2: Global Active Power over time for 1-2 Feb 2007.
# Reads the UCI household power consumption file; "?" marks missing values.
epc.df <- read.delim("household_power_consumption.txt", sep=";", na.strings="?",as.is=TRUE)
# Keep only the two target days (Date is stored as a d/m/Y character string).
epc.df <- epc.df[ epc.df$Date=="1/2/2007" | epc.df$Date=="2/2/2007",]
# Combine the Date and Time columns into a POSIXlt date-time for the x-axis.
epc.df$Date_Time <- strptime( paste( epc.df$Date, epc.df$Time), format="%d/%m/%Y %H:%M:%S")
# Render the line plot to plot2.png (default 480x480 px device).
png("plot2.png")
with( epc.df, plot(Date_Time, Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)"))
dev.off()
| /plot2.R | no_license | RonWilkinson/ExData_Plotting1 | R | false | false | 404 | r | epc.df <- read.delim("household_power_consumption.txt", sep=";", na.strings="?",as.is=TRUE)
# Keep only observations from 1-2 Feb 2007 (Date is a d/m/Y character string).
epc.df <- epc.df[ epc.df$Date=="1/2/2007" | epc.df$Date=="2/2/2007",]
# Combine the Date and Time columns into a POSIXlt date-time for the x-axis.
epc.df$Date_Time <- strptime( paste( epc.df$Date, epc.df$Time), format="%d/%m/%Y %H:%M:%S")
# Render the line plot to plot2.png (default 480x480 px device).
png("plot2.png")
with( epc.df, plot(Date_Time, Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)"))
dev.off()
|
# Calculates model-averaged abundance estimates for sloop data using objects
# held in the calling environment: ModList (RMark model set), site_species,
# input.ch (capture histories), sec.periods, AICctable and top.estimates.
#
# Args:
#   start_year: numeric; first survey year. Later years are derived from the
#     cumulative survey intervals read from "<site_species>_surveys.txt".
#
# Returns a list of (1) a data.frame of model-averaged estimates with
# Mt+1-corrected log-normal confidence limits and (2) the comparison ggplot
# (also assigned globally to `ave.graph`). Side effects: writes a .png and a
# .csv named after site_species into the working directory.
sloop_model_average <- function (start_year) {
  suppressMessages(conflict_prefer("filter", "dplyr"))
  suppressMessages(conflict_prefer("here", "here"))
  ModSet <- ModList
  # Read survey intervals (first line of the surveys file); zeros are
  # placeholders and are dropped before computing survey years.
  intervals.ch <- scan(paste0(site_species, "_surveys.txt"), nlines = 1)
  intervals.ch[intervals.ch == 0] <- NA
  intervals <- na.omit(intervals.ch) %>% round(1)
  year <- round(c(start_year, cumsum(intervals) + start_year), 0)
  # Split the encounter-history string into one column per secondary session.
  sep.history <- input.ch[1] %>%
    separate(ch, into = str_c("Session", 1:length(sec.periods)),
             sep = cumsum(sec.periods))
  sep.history[] <- sapply(sep.history, as.numeric)
  sep.history[sep.history == 0] <- NA
  # Count of known individuals captured per session, usually called 'Mt+1'.
  Mt1 <- colSums(!is.na(sep.history))
  # Matrix of derived population-size estimates: one row per candidate model.
  estm <- matrix(0,
                 ncol = length(ModSet[[1]]$results$derived$`N Population Size`$estimate),
                 nrow = nrow(ModSet$model.table))
  # AICc model weights used for the averaging.
  wt <- ModSet$model.table$weight
  # One variance-covariance matrix per model.
  vcv <- vector("list", length = nrow(ModSet$model.table))
  # Model numbers are loop-invariant: row names of the model table index into
  # ModSet, so compute them once instead of on every iteration.
  mod.num <- as.numeric(row.names(ModSet$model.table))
  for (i in 1:nrow(ModSet$model.table)) {
    x <- ModSet[[mod.num[i]]]$results
    estm[i, ] <- x$derived$`N Population Size`$estimate
    vcv[[i]] <- as.matrix(x$derived.vcv$`N Population Size`)
  }
  # model.average() errors on NaN entries in the vcv matrices, so replace
  # any NaN with zero before averaging.
  vcv <- rapply(vcv, f = function(x) ifelse(is.nan(x), 0, x), how = "replace")
  # Model-average the per-model estimates using the AICc weights.
  mod.ave <- model.average(list(estimate = estm, weight = wt, vcv = vcv))
  mod.ave$vcv <- NULL
  estimate <- mod.ave$estimate
  se <- mod.ave$se
  # Log-normal confidence limits conditioned on the known minimum Mt+1
  # (standard capture-recapture correction applied to f0 = N-hat - Mt+1).
  f0 <- mod.ave$estimate - Mt1
  C <- exp(1.96 * sqrt(log(1 + (mod.ave$se / f0)^2)))
  lcl <- Mt1 + f0 / C
  ucl <- Mt1 + f0 * C
  average.results <- data.frame(estimate, se, lcl, ucl, Mt1)
  average.results$year <- year
  # Label of the top-ranked model with the site/species prefix stripped.
  top.model <- str_remove(as.character(AICctable[1, 1]), paste0(site_species, "."))
  # Rename Mt1 to something human-readable for output.
  average.results <- average.results %>% rename(individuals = Mt1)
  # Manual colours/shapes so the legend covers all three plotted series.
  colors <- c("top model" = "purple", "model average" = "black",
              "no. individuals" = "forestgreen")
  shapes <- c("top model" = 16, "model average" = 16, "no. individuals" = 10)
  # NOTE: assigned globally (<<-) because downstream code expects `ave.graph`
  # to exist in the workspace.
  ave.graph <<- ggplot() +
    # Top-model trend and estimates (x offset by -0.1 to avoid overplotting).
    geom_smooth(data = top.estimates, aes(x = year, y = estimate),
                fill = NA, colour = "red", linetype = "dotted") +
    geom_point(data = top.estimates,
               aes(x = year - 0.1, y = estimate,
                   colour = "top model", shape = "top model"), size = 3) +
    geom_errorbar(data = top.estimates,
                  aes(x = year - 0.1, ymin = lcl, ymax = ucl,
                      colour = "top model"), width = 0.08) +
    # Known individuals; the column was renamed above, so map `individuals`
    # from the data (the original mapped `Mt1`, which silently fell back to
    # the function-environment vector because the column no longer existed).
    geom_point(data = average.results,
               aes(x = year, y = individuals,
                   colour = "no. individuals", shape = "no. individuals"),
               size = 3) +
    # Model-averaged estimates with the Mt+1-corrected confidence limits.
    geom_point(data = average.results,
               aes(x = year, y = estimate,
                   colour = "model average", shape = "model average"),
               size = 3) +
    geom_errorbar(data = average.results,
                  aes(x = year, ymin = lcl, ymax = ucl,
                      colour = "model average"), width = 0.08) +
    ggtitle(site_species) +
    labs(subtitle = paste("Model average vs", top.model, "(top-ranked model)")) +
    scale_shape_manual(values = shapes) +
    scale_color_manual(values = colors) +
    theme(plot.subtitle = element_text(face = 3, size = 10)) +
    labs(color = "Legend", shape = "Legend") +
    guides(color = guide_legend(override.aes = list(linetype = 0))) +
    scale_y_continuous(breaks = pretty_breaks(10)) +
    scale_x_continuous(breaks = seq(from = min(top.estimates$year) - 1,
                                    to = max(top.estimates$year), by = 1)) +
    xlab("\nYear") +
    ylab("Estimate\n")
  # Capture the return value BEFORE the provenance columns are added, so the
  # annotated columns appear in the saved CSV but not in the returned
  # data.frame (preserves the original behaviour).
  outputs <- list(average.results, ave.graph)
  average.results$analysis <- "robust design"
  average.results$estimator <- "model average"
  average.results$site_species <- site_species
  # Persist the figure and the annotated results table. Pass the plot
  # explicitly: the original relied on ggplot2::last_plot(), which is only
  # set once a plot has been printed.
  ggsave(paste0(site_species, "_average_robust", ".png"), plot = ave.graph,
         width = 10, height = 10, units = "cm", scale = 2)
  write.csv(average.results, paste0(site_species, "_average_robust", ".csv"),
            row.names = FALSE)
  return(outputs)
}
| /R/sloop_model_average.R | no_license | NathanWhitmore/GAOSrmark | R | false | false | 4,730 | r | # calculates model averages for sloop data using
# data held in memory
# Calculates model-averaged abundance estimates for sloop data using objects
# held in the calling environment: ModList (RMark model set), site_species,
# input.ch (capture histories), sec.periods, AICctable and top.estimates.
#
# Args:
#   start_year: numeric; first survey year. Later years are derived from the
#     cumulative survey intervals read from "<site_species>_surveys.txt".
#
# Returns a list of (1) a data.frame of model-averaged estimates with
# Mt+1-corrected log-normal confidence limits and (2) the comparison ggplot
# (also assigned globally to `ave.graph`). Side effects: writes a .png and a
# .csv named after site_species into the working directory.
sloop_model_average <- function (start_year) {
  suppressMessages(conflict_prefer("filter", "dplyr"))
  suppressMessages(conflict_prefer("here", "here"))
  ModSet <- ModList
  # Read survey intervals (first line of the surveys file); zeros are
  # placeholders and are dropped before computing survey years.
  intervals.ch <- scan(paste0(site_species, "_surveys.txt"), nlines = 1)
  intervals.ch[intervals.ch == 0] <- NA
  intervals <- na.omit(intervals.ch) %>% round(1)
  year <- round(c(start_year, cumsum(intervals) + start_year), 0)
  # Split the encounter-history string into one column per secondary session.
  sep.history <- input.ch[1] %>%
    separate(ch, into = str_c("Session", 1:length(sec.periods)),
             sep = cumsum(sec.periods))
  sep.history[] <- sapply(sep.history, as.numeric)
  sep.history[sep.history == 0] <- NA
  # Count of known individuals captured per session, usually called 'Mt+1'.
  Mt1 <- colSums(!is.na(sep.history))
  # Matrix of derived population-size estimates: one row per candidate model.
  estm <- matrix(0,
                 ncol = length(ModSet[[1]]$results$derived$`N Population Size`$estimate),
                 nrow = nrow(ModSet$model.table))
  # AICc model weights used for the averaging.
  wt <- ModSet$model.table$weight
  # One variance-covariance matrix per model.
  vcv <- vector("list", length = nrow(ModSet$model.table))
  # Model numbers are loop-invariant: row names of the model table index into
  # ModSet, so compute them once instead of on every iteration.
  mod.num <- as.numeric(row.names(ModSet$model.table))
  for (i in 1:nrow(ModSet$model.table)) {
    x <- ModSet[[mod.num[i]]]$results
    estm[i, ] <- x$derived$`N Population Size`$estimate
    vcv[[i]] <- as.matrix(x$derived.vcv$`N Population Size`)
  }
  # model.average() errors on NaN entries in the vcv matrices, so replace
  # any NaN with zero before averaging.
  vcv <- rapply(vcv, f = function(x) ifelse(is.nan(x), 0, x), how = "replace")
  # Model-average the per-model estimates using the AICc weights.
  mod.ave <- model.average(list(estimate = estm, weight = wt, vcv = vcv))
  mod.ave$vcv <- NULL
  estimate <- mod.ave$estimate
  se <- mod.ave$se
  # Log-normal confidence limits conditioned on the known minimum Mt+1
  # (standard capture-recapture correction applied to f0 = N-hat - Mt+1).
  f0 <- mod.ave$estimate - Mt1
  C <- exp(1.96 * sqrt(log(1 + (mod.ave$se / f0)^2)))
  lcl <- Mt1 + f0 / C
  ucl <- Mt1 + f0 * C
  average.results <- data.frame(estimate, se, lcl, ucl, Mt1)
  average.results$year <- year
  # Label of the top-ranked model with the site/species prefix stripped.
  top.model <- str_remove(as.character(AICctable[1, 1]), paste0(site_species, "."))
  # Rename Mt1 to something human-readable for output.
  average.results <- average.results %>% rename(individuals = Mt1)
  # Manual colours/shapes so the legend covers all three plotted series.
  colors <- c("top model" = "purple", "model average" = "black",
              "no. individuals" = "forestgreen")
  shapes <- c("top model" = 16, "model average" = 16, "no. individuals" = 10)
  # NOTE: assigned globally (<<-) because downstream code expects `ave.graph`
  # to exist in the workspace.
  ave.graph <<- ggplot() +
    # Top-model trend and estimates (x offset by -0.1 to avoid overplotting).
    geom_smooth(data = top.estimates, aes(x = year, y = estimate),
                fill = NA, colour = "red", linetype = "dotted") +
    geom_point(data = top.estimates,
               aes(x = year - 0.1, y = estimate,
                   colour = "top model", shape = "top model"), size = 3) +
    geom_errorbar(data = top.estimates,
                  aes(x = year - 0.1, ymin = lcl, ymax = ucl,
                      colour = "top model"), width = 0.08) +
    # Known individuals; the column was renamed above, so map `individuals`
    # from the data (the original mapped `Mt1`, which silently fell back to
    # the function-environment vector because the column no longer existed).
    geom_point(data = average.results,
               aes(x = year, y = individuals,
                   colour = "no. individuals", shape = "no. individuals"),
               size = 3) +
    # Model-averaged estimates with the Mt+1-corrected confidence limits.
    geom_point(data = average.results,
               aes(x = year, y = estimate,
                   colour = "model average", shape = "model average"),
               size = 3) +
    geom_errorbar(data = average.results,
                  aes(x = year, ymin = lcl, ymax = ucl,
                      colour = "model average"), width = 0.08) +
    ggtitle(site_species) +
    labs(subtitle = paste("Model average vs", top.model, "(top-ranked model)")) +
    scale_shape_manual(values = shapes) +
    scale_color_manual(values = colors) +
    theme(plot.subtitle = element_text(face = 3, size = 10)) +
    labs(color = "Legend", shape = "Legend") +
    guides(color = guide_legend(override.aes = list(linetype = 0))) +
    scale_y_continuous(breaks = pretty_breaks(10)) +
    scale_x_continuous(breaks = seq(from = min(top.estimates$year) - 1,
                                    to = max(top.estimates$year), by = 1)) +
    xlab("\nYear") +
    ylab("Estimate\n")
  # Capture the return value BEFORE the provenance columns are added, so the
  # annotated columns appear in the saved CSV but not in the returned
  # data.frame (preserves the original behaviour).
  outputs <- list(average.results, ave.graph)
  average.results$analysis <- "robust design"
  average.results$estimator <- "model average"
  average.results$site_species <- site_species
  # Persist the figure and the annotated results table. Pass the plot
  # explicitly: the original relied on ggplot2::last_plot(), which is only
  # set once a plot has been printed.
  ggsave(paste0(site_species, "_average_robust", ".png"), plot = ave.graph,
         width = 10, height = 10, units = "cm", scale = 2)
  write.csv(average.results, paste0(site_species, "_average_robust", ".csv"),
            row.names = FALSE)
  return(outputs)
}
|
# Array-job setup: tidyverse for wrangling; SGE_TASK_ID selects which
# fragment file this cluster task processes.
library(tidyverse)
tx <-as.numeric(Sys.getenv("SGE_TASK_ID"))
# GC-bias correction via double loess smoothing: fit coverage ~ GC bias,
# re-smooth the fitted trend on a fine GC grid, then subtract the predicted
# bias from each bin and re-centre on the median coverage.
#
# Args:
#   coverage: numeric vector of per-bin coverage/counts.
#   bias:     numeric vector of per-bin GC content (may contain NA).
# Returns: numeric vector of corrected coverage, same length as `coverage`.
gc.correct <- function(coverage, bias) {
  # Fine grid spanning the observed GC range for the second-pass smooth.
  i <- seq(min(bias, na.rm=TRUE), max(bias, na.rm=TRUE), by = 0.001)
  coverage.trend <- loess(coverage ~ bias)
  coverage.model <- loess(predict(coverage.trend, i) ~ i)
  coverage.pred <- predict(coverage.model, bias)
  # Return the corrected vector explicitly; the original relied on the
  # invisible value of a trailing assignment as the function's result.
  coverage - coverage.pred + median(coverage)
}
# Per-sample paths: derive the sample id from the fragment file name and skip
# this task entirely if the 100 kb bin output already exists.
fragpath <- "../fragments"
fragfiles <- list.files(fragpath, pattern=".rds",full.name=TRUE)
fragfile <- fragfiles[tx]
id <- strsplit(basename(fragfile), "\\.")[[1]][1]
outdir <- "." ####
filename <- file.path(outdir, paste0(id, "_bin_100kb.rds"))
if(file.exists(filename)) q('no')
# Genome annotation / sequence packages (Bioconductor).
library(GenomicRanges)
library(rtracklayer)
library(Homo.sapiens)
library(BSgenome.Hsapiens.UCSC.hg19)
library(Rsamtools)
class(Homo.sapiens)
library(devtools)
library(biovizBase)
# Loads blacklist ranges `filters.hg19` (and, presumably, `gaps.hg19` used
# below -- TODO confirm the objects contained in the .rda).
load("./filters.hg19.rda")
library(RCurl)
# Download 100 kb A/B compartment coordinates (hg19, lymphoblastoid) and
# convert them to a GRanges of genomic bins.
ABurl <- getURL('https://raw.githubusercontent.com/Jfortin1/HiC_AB_Compartments/master/data/hic_compartments_100kb_ebv_2014.txt', ssl.verifyhost=FALSE, ssl.verifypeer=FALSE)
AB <- read.table(textConnection(ABurl), header=TRUE)
AB <- makeGRangesFromDataFrame(AB, keep.extra.columns=TRUE)
# Autosomes only; subtract centromeres/telomeres to obtain chromosome arms.
chromosomes <- GRanges(paste0("chr", 1:22),
                       IRanges(0, seqlengths(Hsapiens)[1:22]))
tcmeres <- gaps.hg19[grepl("centromere|telomere", gaps.hg19$type)]
arms <- GenomicRanges::setdiff(chromosomes, tcmeres)
# Drop five short arm fragments by hard-coded index (presumably the
# acrocentric p-arms; verify against the armlevels labels below).
arms <- arms[-c(25,27,29,41,43)]
armlevels <- c("1p","1q","2p","2q","3p","3q","4p","4q","5p","5q","6p","6q",
               "7p","7q","8p","8q", "9p", "9q","10p","10q","11p","11q","12p",
               "12q","13q","14q","15q","16p","16q","17p","17q","18p","18q",
               "19p", "19q","20p","20q","21q","22q")
arms$arm <- armlevels
# Keep bins that avoid assembly gaps and lie on a named arm; annotate each
# bin with its arm label and sequence GC content.
AB <- AB[-queryHits(findOverlaps(AB, gaps.hg19))]
AB <- AB[queryHits(findOverlaps(AB, arms))]
AB$arm <- armlevels[subjectHits(findOverlaps(AB, arms))]
seqinfo(AB) <- seqinfo(Hsapiens)[seqlevels(seqinfo(AB))]
AB <- trim(AB)
AB$gc <- GCcontent(Hsapiens, AB)
## These bins had no coverage
AB <- AB[-c(8780, 13665)]
# Load this sample's fragments (GRanges saved upstream).
fragments <- readRDS(fragfile)
#
### Filters
# Remove fragments overlapping the blacklist, then keep fragment lengths in
# the 100-220 bp range.
fragments <- fragments[-queryHits(findOverlaps(fragments, filters.hg19))]
w.all <- width(fragments)
fragments <- fragments[which(w.all >= 100 & w.all <= 220)]
w <- width(fragments)
# Per-bin fragment counts, one column per distinct fragment width.
frag.list <- split(fragments, w)
counts <- sapply(frag.list, function(x) countOverlaps(AB, x))
# Pad with zero columns so the width columns always start at 100 bp.
if(min(w) > 100) {
  m0 <- matrix(0, ncol=min(w) - 100, nrow=nrow(counts),
               dimnames=list(rownames(counts), 100:(min(w)-1)))
  counts <- cbind(m0, counts)
}
# Mean fragment GC per bin. NOTE(review): bingc is allocated with
# length(bin.list) but indexed by subjectHits(olaps); R silently extends the
# vector if an index exceeds its length -- works, but worth confirming the
# intended length is length(AB).
olaps <- findOverlaps(fragments, AB)
bin.list <- split(fragments[queryHits(olaps)], subjectHits(olaps))
bingc <- rep(NA, length(bin.list))
bingc[unique(subjectHits(olaps))] <- sapply(bin.list, function(x) mean(x$gc))
### Get modes
# Most frequent value in `x`; on ties the value seen first wins.
Mode <- function(x) {
  distinct_vals <- unique(x)
  occurrence_counts <- tabulate(match(x, distinct_vals))
  distinct_vals[which.max(occurrence_counts)]
}
# Summary statistics of this sample's fragment-length distribution.
modes <- Mode(w)
medians <- median(w)
q25 <- quantile(w, 0.25)
q75 <- quantile(w, 0.75)
# Short vs long fragment counts per bin and their ratio. Columns 1:51 map to
# widths 100-150 bp and 52:121 to 151-220 bp (given the padding above).
short <- rowSums(counts[,1:51])
long <- rowSums(counts[,52:121])
ratio <- short/long
# GC-correct each signal against the per-bin mean fragment GC.
short.corrected=gc.correct(short, bingc)
long.corrected=gc.correct(long, bingc)
nfrags.corrected=gc.correct(short+long, bingc)
ratio.corrected=gc.correct(ratio, bingc)
# Attach raw and corrected signals plus distribution summaries to the bins.
AB$short <- short
AB$long <- long
AB$short.corrected <- short.corrected
AB$long.corrected <- long.corrected
AB$nfrags.corrected <- nfrags.corrected
AB$ratio.corrected <- ratio.corrected
AB$mode <- modes
AB$mean <- round(mean(w), 2)
AB$median <- medians
AB$quantile.25 <- q25
AB$quantile.75 <- q75
AB$frag.gc <- bingc
# Store the per-width counts as individual metadata columns, then persist
# the annotated bins and quit without saving the workspace.
for(i in 1:ncol(counts)) elementMetadata(AB)[,colnames(counts)[i]] <- counts[,i]
saveRDS(AB, filename)
q('no')
| /03-bin_compartments.r | no_license | Shicheng-Guo/mDELLFI | R | false | false | 3,678 | r | library(tidyverse)
tx <-as.numeric(Sys.getenv("SGE_TASK_ID"))
# GC-bias correction via double loess smoothing: fit coverage ~ GC bias,
# re-smooth the fitted trend on a fine GC grid, then subtract the predicted
# bias from each bin and re-centre on the median coverage.
#
# Args:
#   coverage: numeric vector of per-bin coverage/counts.
#   bias:     numeric vector of per-bin GC content (may contain NA).
# Returns: numeric vector of corrected coverage, same length as `coverage`.
gc.correct <- function(coverage, bias) {
  # Fine grid spanning the observed GC range for the second-pass smooth.
  i <- seq(min(bias, na.rm=TRUE), max(bias, na.rm=TRUE), by = 0.001)
  coverage.trend <- loess(coverage ~ bias)
  coverage.model <- loess(predict(coverage.trend, i) ~ i)
  coverage.pred <- predict(coverage.model, bias)
  # Return the corrected vector explicitly; the original relied on the
  # invisible value of a trailing assignment as the function's result.
  coverage - coverage.pred + median(coverage)
}
fragpath <- "../fragments"
fragfiles <- list.files(fragpath, pattern=".rds",full.name=TRUE)
fragfile <- fragfiles[tx]
id <- strsplit(basename(fragfile), "\\.")[[1]][1]
outdir <- "." ####
filename <- file.path(outdir, paste0(id, "_bin_100kb.rds"))
if(file.exists(filename)) q('no')
library(GenomicRanges)
library(rtracklayer)
library(Homo.sapiens)
library(BSgenome.Hsapiens.UCSC.hg19)
library(Rsamtools)
class(Homo.sapiens)
library(devtools)
library(biovizBase)
load("./filters.hg19.rda")
library(RCurl)
ABurl <- getURL('https://raw.githubusercontent.com/Jfortin1/HiC_AB_Compartments/master/data/hic_compartments_100kb_ebv_2014.txt', ssl.verifyhost=FALSE, ssl.verifypeer=FALSE)
AB <- read.table(textConnection(ABurl), header=TRUE)
AB <- makeGRangesFromDataFrame(AB, keep.extra.columns=TRUE)
chromosomes <- GRanges(paste0("chr", 1:22),
IRanges(0, seqlengths(Hsapiens)[1:22]))
tcmeres <- gaps.hg19[grepl("centromere|telomere", gaps.hg19$type)]
arms <- GenomicRanges::setdiff(chromosomes, tcmeres)
arms <- arms[-c(25,27,29,41,43)]
armlevels <- c("1p","1q","2p","2q","3p","3q","4p","4q","5p","5q","6p","6q",
"7p","7q","8p","8q", "9p", "9q","10p","10q","11p","11q","12p",
"12q","13q","14q","15q","16p","16q","17p","17q","18p","18q",
"19p", "19q","20p","20q","21q","22q")
arms$arm <- armlevels
AB <- AB[-queryHits(findOverlaps(AB, gaps.hg19))]
AB <- AB[queryHits(findOverlaps(AB, arms))]
AB$arm <- armlevels[subjectHits(findOverlaps(AB, arms))]
seqinfo(AB) <- seqinfo(Hsapiens)[seqlevels(seqinfo(AB))]
AB <- trim(AB)
AB$gc <- GCcontent(Hsapiens, AB)
## These bins had no coverage
AB <- AB[-c(8780, 13665)]
fragments <- readRDS(fragfile)
#
### Filters
fragments <- fragments[-queryHits(findOverlaps(fragments, filters.hg19))]
w.all <- width(fragments)
fragments <- fragments[which(w.all >= 100 & w.all <= 220)]
w <- width(fragments)
frag.list <- split(fragments, w)
counts <- sapply(frag.list, function(x) countOverlaps(AB, x))
if(min(w) > 100) {
m0 <- matrix(0, ncol=min(w) - 100, nrow=nrow(counts),
dimnames=list(rownames(counts), 100:(min(w)-1)))
counts <- cbind(m0, counts)
}
olaps <- findOverlaps(fragments, AB)
bin.list <- split(fragments[queryHits(olaps)], subjectHits(olaps))
bingc <- rep(NA, length(bin.list))
bingc[unique(subjectHits(olaps))] <- sapply(bin.list, function(x) mean(x$gc))
### Get modes
# Most frequent value in `x`; on ties the value seen first wins.
Mode <- function(x) {
  distinct_vals <- unique(x)
  occurrence_counts <- tabulate(match(x, distinct_vals))
  distinct_vals[which.max(occurrence_counts)]
}
modes <- Mode(w)
medians <- median(w)
q25 <- quantile(w, 0.25)
q75 <- quantile(w, 0.75)
short <- rowSums(counts[,1:51])
long <- rowSums(counts[,52:121])
ratio <- short/long
short.corrected=gc.correct(short, bingc)
long.corrected=gc.correct(long, bingc)
nfrags.corrected=gc.correct(short+long, bingc)
ratio.corrected=gc.correct(ratio, bingc)
AB$short <- short
AB$long <- long
AB$short.corrected <- short.corrected
AB$long.corrected <- long.corrected
AB$nfrags.corrected <- nfrags.corrected
AB$ratio.corrected <- ratio.corrected
AB$mode <- modes
AB$mean <- round(mean(w), 2)
AB$median <- medians
AB$quantile.25 <- q25
AB$quantile.75 <- q75
AB$frag.gc <- bingc
for(i in 1:ncol(counts)) elementMetadata(AB)[,colnames(counts)[i]] <- counts[,i]
saveRDS(AB, filename)
q('no')
|
# Sample from an exponential distribution and explore the sampling
# distribution of the mean (a CLT demonstration script).
num_samp = 50000
lambda = 0.2
x <- rexp(num_samp, lambda)
##Create a SCatter plot below
# NOTE(review): rate 0.2 is hard-coded here instead of reusing `lambda`.
data <- rexp(num_samp, 0.2)
q <- data.frame(X = seq(1, num_samp , 1), Y = sort(data))
plot(q)
#Sys.sleep(1)
# Split the 50,000 draws into 500 groups of 100 observations each.
collection <- split(x, ceiling(seq_along(x)/100))
some_means <- c()
sds <- c()
# For the first five groups, plot the exponential pdf evaluated at the
# sampled points and the empirical cdf of those density values.
for (i in 1:5){
hx <- dexp(collection[[i]])
some_means[[i]] <- mean(hx)
sds[[i]] <- sd(hx)
plot(collection[[i]], hx, xlab="X Values sampled from Exp-Distribution", ylab=paste("Probability Density Function for ", i, " vector"), cex=0.4)
#Sys.sleep(1)
plot.ecdf(hx, xlab="X Values sampled from Exp-Distribution", ylab="Cumulative Density Function", cex=0.4)
#Sys.sleep(1)
}
# 500 sample means of size-100 samples; by the CLT these should be roughly
# normal around the theoretical mean 1/lambda = 5.
all_means <- c()
for(i in 1:500){
all_means[[i]] <- mean(collection[[i]])
}
tab = table(round(all_means))
plot(tab, "h", xlab="Value", ylab="Frequency", xlim=c(3,7))
# Empirical pmf of the rounded means.
# NOTE(review): dividing by 100 does not normalise over the 500 means (the
# probabilities sum to ~5); 1/length(all_means) would give a true pmf. Also,
# a rounded value of 0 would index pdata[0], which is a silent no-op in R.
pdata <- rep(0, 100);
for(i in 1:500){
val=round(all_means[i], 0);
if(val <= 100){
pdata[val] = pdata[val] + 1/ 100;
}
}
# NOTE(review): xcols starts at 0 while pdata stores the mass for value
# `val` at index `val` (1-based), so the plotted x-axis is shifted by one.
xcols <- c(0:99)
str(pdata)
str(xcols)
plot(xcols, pdata, "l", xlab="X", ylab="f(X)", xlim=c(0,8))
# Cumulative distribution accumulated from the (unnormalised) pmf above.
cdata <- rep(0, 100)
cdata[1] <- pdata[1]
for(i in 2:100){
cdata[i] = cdata[i-1] + pdata[i]
}
plot(xcols, cdata, "o", col="blue", xlab="X", ylab="F(X)", xlim=c(0,10));
#Plotting Pdf and cdf and cdf and cdf and cdf
#hx <- dexp(x)
#plot(x, hx, xlab="X Values sampled from Exp-Distribution", ylab="Probability Density Function", cex=0.4)
#plot.ecdf(hx, xlab="X Values sampled from Exp-Distribution", ylab="Cumulative Density Function", cex=0.4)
# Theoretical mean and standard deviation of Exp(lambda) are both 1/lambda.
print("The mean of Exp-Dist is 1/(lambda) = 5 here. In the case of sampled values, we get the mean to be = ")
print(mean(x))
print("The standard_deviation of Exp-Dist is 1/(lambda) = 5 here. In the case of sampled values, we get the to be = ")
print(sd(x))
##num_samp = 50000
##lambda = 0.2
##x <- rexp(num_samp, lambda)
###Create a SCatter plot below
##x <- seq(0, 20, length=num_samp)
##y <- dexp(x)
##plot(x, y)
###plots the pdf of Exponential Distribution
##x <- data.frame(X = seq(1, num_samp , 1), Y = sort(data, decreasing=T))
##plot(x)
| /a10/160392.r | no_license | mayanksha/CS251 | R | false | false | 2,070 | r | num_samp = 50000
lambda = 0.2
x <- rexp(num_samp, lambda)
##Create a SCatter plot below
data <- rexp(num_samp, 0.2)
q <- data.frame(X = seq(1, num_samp , 1), Y = sort(data))
plot(q)
#Sys.sleep(1)
collection <- split(x, ceiling(seq_along(x)/100))
some_means <- c()
sds <- c()
for (i in 1:5){
hx <- dexp(collection[[i]])
some_means[[i]] <- mean(hx)
sds[[i]] <- sd(hx)
plot(collection[[i]], hx, xlab="X Values sampled from Exp-Distribution", ylab=paste("Probability Density Function for ", i, " vector"), cex=0.4)
#Sys.sleep(1)
plot.ecdf(hx, xlab="X Values sampled from Exp-Distribution", ylab="Cumulative Density Function", cex=0.4)
#Sys.sleep(1)
}
all_means <- c()
for(i in 1:500){
all_means[[i]] <- mean(collection[[i]])
}
tab = table(round(all_means))
plot(tab, "h", xlab="Value", ylab="Frequency", xlim=c(3,7))
pdata <- rep(0, 100);
for(i in 1:500){
val=round(all_means[i], 0);
if(val <= 100){
pdata[val] = pdata[val] + 1/ 100;
}
}
xcols <- c(0:99)
str(pdata)
str(xcols)
plot(xcols, pdata, "l", xlab="X", ylab="f(X)", xlim=c(0,8))
cdata <- rep(0, 100)
cdata[1] <- pdata[1]
for(i in 2:100){
cdata[i] = cdata[i-1] + pdata[i]
}
plot(xcols, cdata, "o", col="blue", xlab="X", ylab="F(X)", xlim=c(0,10));
#Plotting Pdf and cdf and cdf and cdf and cdf
#hx <- dexp(x)
#plot(x, hx, xlab="X Values sampled from Exp-Distribution", ylab="Probability Density Function", cex=0.4)
#plot.ecdf(hx, xlab="X Values sampled from Exp-Distribution", ylab="Cumulative Density Function", cex=0.4)
print("The mean of Exp-Dist is 1/(lambda) = 5 here. In the case of sampled values, we get the mean to be = ")
print(mean(x))
print("The standard_deviation of Exp-Dist is 1/(lambda) = 5 here. In the case of sampled values, we get the to be = ")
print(sd(x))
##num_samp = 50000
##lambda = 0.2
##x <- rexp(num_samp, lambda)
###Create a SCatter plot below
##x <- seq(0, 20, length=num_samp)
##y <- dexp(x)
##plot(x, y)
###plots the pdf of Exponential Distribution
##x <- data.frame(X = seq(1, num_samp , 1), Y = sort(data, decreasing=T))
##plot(x)
|
\name{mfboot-package}
\alias{mfboot-package}
\alias{mfboot}
\docType{package}
\title{
What the package does (short line)
~~ package title ~~
}
\description{
More about what it does (maybe more than one line)
~~ A concise (1-5 lines) description of the package ~~
}
\details{
\tabular{ll}{
Package: \tab mfboot\cr
Type: \tab Package\cr
Version: \tab 1.0\cr
Date: \tab 2015-02-11\cr
License: \tab What license is it under?\cr
}
~~ An overview of how to use the package, including the most important functions ~~
}
\author{
Who wrote it
Maintainer: Who to complain to <yourfault@somewhere.net>
~~ The author and/or maintainer of the package ~~
}
\references{
~~ Literature or other references for background information ~~
}
% Optionally add other standard keywords below, one per line, from the file KEYWORDS in the R documentation directory.
\keyword{ package }
\seealso{
~~ Optional links to other man pages, e.g. ~~
~~ \code{\link[<pkg>:<pkg>-package]{<pkg>}} ~~
}
\examples{
~~ simple examples of the most important functions ~~
}
| /man/mfboot-package.Rd | no_license | kristang/mfboot | R | false | false | 1,023 | rd | \name{mfboot-package}
\alias{mfboot-package}
\alias{mfboot}
\docType{package}
\title{
What the package does (short line)
~~ package title ~~
}
\description{
More about what it does (maybe more than one line)
~~ A concise (1-5 lines) description of the package ~~
}
\details{
\tabular{ll}{
Package: \tab mfboot\cr
Type: \tab Package\cr
Version: \tab 1.0\cr
Date: \tab 2015-02-11\cr
License: \tab What license is it under?\cr
}
~~ An overview of how to use the package, including the most important functions ~~
}
\author{
Who wrote it
Maintainer: Who to complain to <yourfault@somewhere.net>
~~ The author and/or maintainer of the package ~~
}
\references{
~~ Literature or other references for background information ~~
}
% Optionally add other standard keywords below, one per line, from the file KEYWORDS in the R documentation directory.
\keyword{ package }
\seealso{
~~ Optional links to other man pages, e.g. ~~
~~ \code{\link[<pkg>:<pkg>-package]{<pkg>}} ~~
}
\examples{
~~ simple examples of the most important functions ~~
}
|
#' Bootstrap confidence interval with histogram
#'
#' Resamples the data with replacement \code{iter} times, applies \code{fun}
#' to each resample, and forms an equal-tailed (1 - alpha) percentile
#' confidence interval. A histogram of the bootstrap distribution is drawn
#' with the interval endpoints and the point estimate annotated.
#'
#'@param iter number of bootstrap iterations
#'@param x numeric sample vector
#'@param fun name of the summary function to bootstrap (e.g. "mean", "sd")
#'@param alpha significance level; the interval covers 1 - alpha
#'@param cx character expansion factor for the plot annotations
#'@param ... further arguments passed to \code{hist()}
#'
#'@return (invisibly) a list with the confidence interval (\code{ci}), the
#'  function name (\code{fun}), the original sample (\code{x}) and the
#'  bootstrap statistics (\code{xstat}); a histogram is drawn as a side effect
#'
#'@examples
#'myboot2(x=rnorm(30, mean=5), alpha=0.05, iter=10000, fun = "mean")
#'myboot2(x=rnorm(30, mean=5), alpha=0.3, iter=10000, fun = "sd")
#'
#'@export
myboot2 <- function(iter=10000, x, fun="mean", alpha=0.05, cx=1.5, ...){
  n <- length(x)  # sample size
  # Draw all resamples at once and arrange them as one column per iteration.
  y <- sample(x, n*iter, replace=TRUE)
  # Spell out nrow/ncol: the original used nr=/nc=, which only worked via
  # partial argument matching.
  rs.mat <- matrix(y, nrow=n, ncol=iter, byrow=TRUE)
  xstat <- apply(rs.mat, 2, fun)  # one statistic per bootstrap resample
  # Equal-tailed percentile confidence interval.
  ci <- quantile(xstat, c(alpha/2, 1-alpha/2))
  # The object para retains the histogram parameters (used below to place
  # the point-estimate label).
  para <- hist(xstat, freq=FALSE, las=1,
               main=paste("Histogram of Bootstrap sample statistics","\n","alpha=",alpha," iter=",iter,sep=""),
               ...)
  # Point estimate of `fun` on the original sample, computed via a 1-column
  # matrix so apply() works with any summary function name.
  mat <- matrix(x, nrow=length(x), ncol=1, byrow=TRUE)
  pte <- apply(mat, 2, fun)
  abline(v=pte, lwd=3, col="Black")  # vertical line at the point estimate
  segments(ci[1], 0, ci[2], 0, lwd=4)  # CI segment along the x-axis
  text(ci[1], 0, paste("(", round(ci[1], 2), sep=""), col="Red", cex=cx)
  text(ci[2], 0, paste(round(ci[2], 2), ")", sep=""), col="Red", cex=cx)
  # Print the point estimate halfway up the tallest density bar.
  text(pte, max(para$density)/2, round(pte, 2), cex=cx)
  invisible(list(ci=ci, fun=fun, x=x, xstat=xstat))  # output if needed
}
| /R/myboot2.R | no_license | eric7chen/MATH4753chen0122 | R | false | false | 1,936 | r | #'Create confidence interval
#'
#'This function, given a sample set, applies a specified function to the set, and creates a confidence interval based on a specified alpha value. It also creates a histogram of the distribution and plots the interval.
#'
#'@param iter number of iterations
#'@param x sample
#'@param fun function to be used
#'@param alpha alpha value for confidence interval
#'@param cx graph modifier
#'
#'@return invisible list of vectors/lists in function and a histogram with interval
#'
#'@examples
#'myboot2(x=sam, alpha=0.05, iter=10000, fun = "mean")
#'myboot2(x=sam, alpha=0.3, iter=10000, fun = "sd")
#'
#'@export
# Bootstrap `fun` over `x`: resample with replacement `iter` times, compute
# the statistic per resample, and form an equal-tailed (1 - alpha) percentile
# confidence interval. Draws a histogram of the bootstrap distribution with
# the interval endpoints and the point estimate annotated; `...` is passed
# through to hist(). Returns (invisibly) list(ci, fun, x, xstat).
myboot2 <- function(iter=10000, x, fun="mean", alpha=0.05, cx=1.5, ...){
  n <- length(x)  # sample size
  # Draw all resamples at once and arrange them as one column per iteration.
  y <- sample(x, n*iter, replace=TRUE)
  # Spell out nrow/ncol: the original used nr=/nc=, which only worked via
  # partial argument matching.
  rs.mat <- matrix(y, nrow=n, ncol=iter, byrow=TRUE)
  xstat <- apply(rs.mat, 2, fun)  # one statistic per bootstrap resample
  # Equal-tailed percentile confidence interval.
  ci <- quantile(xstat, c(alpha/2, 1-alpha/2))
  # The object para retains the histogram parameters (used below to place
  # the point-estimate label).
  para <- hist(xstat, freq=FALSE, las=1,
               main=paste("Histogram of Bootstrap sample statistics","\n","alpha=",alpha," iter=",iter,sep=""),
               ...)
  # Point estimate of `fun` on the original sample, computed via a 1-column
  # matrix so apply() works with any summary function name.
  mat <- matrix(x, nrow=length(x), ncol=1, byrow=TRUE)
  pte <- apply(mat, 2, fun)
  abline(v=pte, lwd=3, col="Black")  # vertical line at the point estimate
  segments(ci[1], 0, ci[2], 0, lwd=4)  # CI segment along the x-axis
  text(ci[1], 0, paste("(", round(ci[1], 2), sep=""), col="Red", cex=cx)
  text(ci[2], 0, paste(round(ci[2], 2), ")", sep=""), col="Red", cex=cx)
  # Print the point estimate halfway up the tallest density bar.
  text(pte, max(para$density)/2, round(pte, 2), cex=cx)
  invisible(list(ci=ci, fun=fun, x=x, xstat=xstat))  # output if needed
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/adag.R
\name{HasTransformation}
\alias{HasTransformation}
\title{Check to see if transformation is in the ADAG}
\usage{
HasTransformation(adag, transformation)
}
\arguments{
\item{adag}{ADAG object}
\item{transformation}{Transformation object}
}
\value{
Logical indicating whether the ADAG contains the transformation
}
\description{
Check to see if transformation is in the ADAG
}
\seealso{
\code{\link{ADAG}}, \code{\link{Transformation}}
}
| /packages/pegasus-dax-r/Pegasus/DAX/man/HasTransformation.Rd | permissive | ryantanaka/pegasus | R | false | false | 502 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/adag.R
\name{HasTransformation}
\alias{HasTransformation}
\title{Check to see if transformation is in the ADAG}
\usage{
HasTransformation(adag, transformation)
}
\arguments{
\item{adag}{ADAG object}
\item{transformation}{Transformation object}
}
\value{
Logical indicating whether the ADAG contains the transformation
}
\description{
Check to see if transformation is in the ADAG
}
\seealso{
\code{\link{ADAG}}, \code{\link{Transformation}}
}
|
## ODE model class -------------------------------------------------------------------
#' Generate the model objects for use in Xs (models with sensitivities)
#'
#' @param f Something that can be converted to \link{eqnvec},
#' e.g. a named character vector with the ODE
#' @param deriv logical, generate sensitivities or not
#' @param forcings Character vector with the names of the forcings
#' @param events data.frame of events with columns "var" (character, the name of the state to be
#' affected), "time" (character or numeric, time point), "value" (character or numeric, value),
#' "method" (character, either
#' "replace" or "add"). See \link[deSolve]{events}. Events need to be defined here if they contain
#' parameters, like the event time or value. If both, time and value are purely numeric, they
#' can be specified in \code{\link{Xs}()}, too.
#' @param outputs Named character vector for additional output variables.
#' @param fixed Character vector with the names of parameters (initial values and dynamic) for which
#' no sensitivities are required (will speed up the integration).
#' @param estimate Character vector specifying parameters (initial values and dynamic) for which
#' sensitivities are returned. If estimate is specified, it overwrites `fixed`.
#' @param modelname Character, the name of the C file being generated.
#' @param solver Solver for which the equations are prepared.
#' @param gridpoints Integer, the minimum number of time points where the ODE is evaluated internally
#' @param verbose Print compiler output to R command line.
#' @param ... Further arguments being passed to funC.
#' @return list with \code{func} (ODE object) and \code{extended} (ODE+Sensitivities object)
#' @export
#' @example inst/examples/odemodel.R
#' @import cOde
odemodel <- function(f, deriv = TRUE, forcings=NULL, events = NULL, outputs = NULL, fixed = NULL, estimate = NULL, modelname = "odemodel", solver = c("deSolve", "Sundials"), gridpoints = NULL, verbose = FALSE, ...) {
  # Build the plain ODE object and, when requested, the extended system with
  # forward sensitivity equations appended (see roxygen header above).
  if (is.null(gridpoints)) gridpoints <- 2
  f <- as.eqnvec(f)
  modelname_s <- paste0(modelname, "_s")
  solver <- match.arg(solver)
  # Compile the plain ODE system.
  func <- cOde::funC(f, forcings = forcings, events = events, outputs = outputs, fixed = fixed, modelname = modelname , solver = solver, nGridpoints = gridpoints, ...)
  extended <- NULL
  if (solver == "Sundials") {
    # Sundials does not need "extended" by itself, but dMod relies on it, so
    # mirror `func` with sensitivity metadata attached.
    extended <- func
    attr(extended, "deriv") <- TRUE
    attr(extended, "variables") <- c(attr(extended, "variables"), attr(extended, "variablesSens"))
    attr(extended, "events") <- events
  }
  if (deriv && solver == "deSolve") {
    # Determine which states/parameters need sensitivities:
    # `estimate` (whitelist) overrides `fixed` (blacklist).
    mystates <- attr(func, "variables")
    myparameters <- attr(func, "parameters")
    if (is.null(estimate) & !is.null(fixed)) {
      mystates <- setdiff(mystates, fixed)
      myparameters <- setdiff(myparameters, fixed)
    }
    if (!is.null(estimate)) {
      mystates <- intersect(mystates, estimate)
      myparameters <- intersect(myparameters, estimate)
    }
    # Symbolic forward sensitivity equations for the selected quantities.
    s <- sensitivitiesSymb(f,
                           states = mystates,
                           parameters = myparameters,
                           inputs = attr(func, "forcings"),
                           events = attr(func, "events"),
                           reduce = TRUE)
    fs <- c(f, s)
    outputs <- c(attr(s, "outputs"), attr(func, "outputs"))
    # Merge the model's events with the events generated for the sensitivity
    # equations.
    events.sens <- attr(s, "events")
    events.func <- attr(func, "events")
    events <- NULL
    if (!is.null(events.func)) {
      if (is.data.frame(events.sens)) {
        # BUGFIX: was misspelled "straingsAsFactors"; rbind() then treated
        # the FALSE as an extra object to bind instead of as the
        # stringsAsFactors option.
        events <- rbind(events.sens, events.func, stringsAsFactors = FALSE)
      } else {
        # events.sens is a list with one entry per row of events.func;
        # interleave each sensitivity-event block with its source event.
        events <- do.call(rbind, lapply(1:nrow(events.func), function(i) {
          rbind(events.sens[[i]], events.func[i,], stringsAsFactors = FALSE)
        }))
      }
    }
    # Compile the extended (ODE + sensitivities) system.
    extended <- cOde::funC(fs, forcings = forcings, modelname = modelname_s, solver = solver, nGridpoints = gridpoints, events = events, outputs = outputs, ...)
  }
  out <- list(func = func, extended = extended)
  attr(out, "class") <- "odemodel"
  return(out)
}
## Function classes ------------------------------------------------------
#' dMod match function arguments
#'
#' Match a list of (possibly partially named) call arguments against a fixed
#' set of expected names. Unnamed entries are filled with the not-yet-supplied
#' choices, in order. The function is exported for dependency reasons.
#'
#' @param arglist list of arguments as collected from \code{...}
#' @param choices character, the expected argument names
#'
#' @return Integer vector: for each element of \code{choices}, the position of
#' the corresponding entry in \code{arglist}.
#' @export
match.fnargs <- function(arglist, choices) {

  # A fully unnamed list behaves like one with all-empty names
  if (is.null(names(arglist)))
    names(arglist) <- character(length(arglist))

  # Drop named arguments that are not among the expected choices
  arglist <- arglist[names(arglist) %in% c(choices, "")]

  # Assign the not-yet-supplied choices to the unnamed slots, in order
  missing.choices <- choices[!choices %in% names(arglist)]
  if (length(missing.choices) > 0)
    names(arglist)[!nzchar(names(arglist))] <- missing.choices

  if (anyDuplicated(names(arglist)) > 0)
    stop("duplicate arguments in prdfn/obsfn/parfn function call")

  match(choices, names(arglist))
}
## Equation classes -------------------------------------------------------
#' Generate equation vector object
#'
#' @description The eqnvec object stores explicit algebraic equations, like the
#' right-hand sides of an ODE, observation functions or parameter transformations
#' as named character vectors.
#' @param ... mathematical expressions as characters to be coerced,
#' the right-hand sides of the equations. Unnamed arguments are given
#' default names \code{eqn1}, \code{eqn2}, ...
#' @return object of class \code{eqnvec}, basically a named character.
#' Returns \code{NULL} if called without arguments.
#' @example inst/examples/eqnvec.R
#' @seealso \link{eqnlist}
#' @export
eqnvec <- function(...) {

  mylist <- list(...)

  if (length(mylist) > 0) {

    # Default names "eqn1", "eqn2", ...; keep user-supplied names where given.
    # BUGFIX: the availability check is now per element. Previously a single
    # scalar test replaced ALL default names, so unnamed equations in a
    # partially named call ended up with empty names instead of "eqnN".
    mynames <- paste0("eqn", seq_along(mylist))
    if (!is.null(names(mylist))) {
      is.available <- nzchar(names(mylist))
      mynames[is.available] <- names(mylist)[is.available]
    }
    names(mylist) <- mynames

    out <- unlist(mylist)

    return(as.eqnvec(out))

  } else {
    return(NULL)
  }
}
#' Generate eqnlist object
#'
#' @description The eqnlist object stores an ODE as a list of stoichiometric matrix,
#' rate expressions, state names and compartment volumes.
#' @export
#' @param smatrix Matrix of class numeric. The stoichiometric matrix,
#' one row per reaction/process and one column per state.
#' @param states Character vector. Names of the states.
#' @param rates Character vector. The rate expressions.
#' @param volumes Named character, volume parameters for states. Names must be a subset of the states.
#' Values can be either characters, e.g. "V1", or numeric values for the volume. If \code{volumes} is not
#' \code{NULL}, missing entries are treated as 1.
#' @param description Character vector. Description of the single processes.
#' Defaults to the row number of the process if not provided.
#' @return An object of class \code{eqnlist}, basically a list.
#' @example inst/examples/eqnlist.R
eqnlist <- function(smatrix = NULL, states = colnames(smatrix), rates = NULL, volumes = NULL, description = NULL) {

  # Dimension checks and preparations, only when smatrix/states/rates are given
  if (all(!is.null(c(smatrix, states, rates)))) {

    # Dimension checks
    d1 <- dim(smatrix)
    l2 <- length(states)
    l3 <- length(rates)
    if (l2 != d1[2]) stop("Number of states does not coincide with number of columns of stoichiometric matrix")
    if (l3 != d1[1]) stop("Number of rates does not coincide with number of rows of stoichiometric matrix")

    # Prepare variables
    smatrix <- as.matrix(smatrix)
    colnames(smatrix) <- states

    if (is.null(description)) {
      # BUGFIX: seq_len() is safe for a 0-row matrix, whereas 1:nrow()
      # would yield c(1, 0).
      description <- seq_len(nrow(smatrix))
    }
  }

  out <- list(smatrix = smatrix,
              states = as.character(states),
              rates = as.character(rates),
              volumes = volumes,
              description = as.character(description))

  class(out) <- c("eqnlist", "list")
  return(out)
}
## Parameter classes --------------------------------------------------------
#' Parameter transformation function
#'
#' Generate functions that transform one parameter vector into another
#' by means of a transformation, pushing forward the jacobian matrix
#' of the original parameter.
#' Usually, this function is called internally, e.g. by \link{P}.
#' However, you can use it to add your own specialized parameter
#' transformations to the general framework.
#' @param p2p a transformation function for one condition, i.e. a function
#' \code{p2p(p, fixed, deriv)} which translates a parameter vector \code{p}
#' and a vector of fixed parameter values \code{fixed} into a new parameter
#' vector. If \code{deriv = TRUE}, the function should return an attribute
#' \code{deriv} with the Jacobian matrix of the parameter transformation.
#' @param parameters character vector, the parameters accepted by the function
#' @param condition character, the condition for which the transformation is defined
#' @return object of class \code{parfn}, i.e. a function \code{p(..., fixed, deriv,
#' conditions, env)}. The argument \code{pars} should be passed via the \code{...}
#' argument.
#'
#' Contains attributes "mappings", a list of \code{p2p}
#' functions, "parameters", the union of parameters accepted by the mappings and
#' "conditions", the total set of conditions.
#' @seealso \link{sumfn}, \link{P}
#' @example inst/examples/prediction.R
#' @export
parfn <- function(p2p, parameters = NULL, condition = NULL) {

  force(condition)
  # A single mapping, stored under its condition name (unnamed if NULL)
  mappings <- structure(list(p2p), names = condition)

  outfn <- function(..., fixed = NULL, deriv = TRUE, conditions = condition, env = NULL) {

    dots <- list(...)
    pars <- dots[[match.fnargs(dots, "pars")]]

    # test_conditions() returns NULL if either argument is NULL,
    # character(0) if there is no overlap, the overlap otherwise.
    overlap <- test_conditions(conditions, condition)
    if (is.null(overlap)) conditions <- union(condition, conditions)

    res <- NULL
    if (is.null(overlap) || length(overlap) > 0)
      res <- p2p(pars = pars, fixed = fixed, deriv = deriv)

    # Distribute the result into the slots belonging to this condition
    n.out <- max(1, length(conditions))
    outlist <- structure(vector("list", n.out), names = conditions)
    slots <- if (is.null(condition)) seq_len(n.out) else match(condition, conditions)
    for (idx in slots[!is.na(slots)]) outlist[[idx]] <- res

    outlist
  }

  attr(outfn, "mappings") <- mappings
  attr(outfn, "parameters") <- parameters
  attr(outfn, "conditions") <- condition
  class(outfn) <- c("parfn", "fn")
  outfn
}
#' Generate a parameter frame
#'
#' @description A parameter frame is a data.frame where the rows correspond to different
#' parameter specifications. The columns are divided into three parts. (1) the meta-information
#' columns (e.g. index, value, constraint, etc.), (2) the attributes of an objective function
#' (e.g. data contribution and prior contribution) and (3) the parameters.
#' @seealso \link{profile}, \link{mstrust}
#' @param x data.frame.
#' @param parameters character vector, the names of the parameter columns.
#' @param metanames character vector, the names of the meta-information columns.
#' @param obj.attributes character vector, the names of the objective function attributes.
#' @return An object of class \code{parframe}, i.e. a data.frame with attributes for the
#' different names. Inherits from data.frame.
#' @details Parameter frames can be subsetted either by \code{[ , ]} or by \code{subset}. If
#' \code{[ , index]} is used, the names of the removed columns will also be removed from
#' the corresponding attributes, i.e. metanames, obj.attributes and parameters.
#' @example inst/examples/parlist.R
#' @export
parframe <- function(x = NULL, parameters = colnames(x), metanames = NULL, obj.attributes = NULL) {

  # Start from an empty frame; replace by x (with row names stripped) if given
  out <- data.frame()
  if (!is.null(x)) {
    rownames(x) <- NULL
    out <- as.data.frame(x)
  }

  attr(out, "parameters") <- parameters
  attr(out, "metanames") <- metanames
  attr(out, "obj.attributes") <- obj.attributes
  class(out) <- c("parframe", "data.frame")

  out
}
#' Parameter list
#'
#' @description The special use of a parameter list is to save
#' the outcome of multiple optimization runs provided by \link{mstrust},
#' into one list.
#' @param ... Objects to be coerced to parameter list.
#' @export
#' @example inst/examples/parlist.R
#' @seealso \link{load.parlist}, \link{plot.parlist}
parlist <- function(...) {
  # Collect the arguments and delegate the coercion
  as.parlist(list(...))
}
#' Parameter vector
#'
#' @description A parameter vector is a named numeric vector (the parameter values)
#' together with a "deriv" attribute
#' (the Jacobian of a parameter transformation by which the parameter vector was generated).
#' @param ... objects to be concatenated. Unnamed arguments are given default
#' names \code{par1}, \code{par2}, ...
#' @param deriv matrix with rownames (according to names of \code{...}) and colnames
#' according to the names of the parameter by which the parameter vector was generated.
#' @return An object of class \code{parvec}, i.e. a named numeric vector with attribute "deriv".
#' Returns \code{NULL} if called without arguments.
#' @example inst/examples/parvec.R
#' @export
parvec <- function(..., deriv = NULL) {

  mylist <- list(...)

  if (length(mylist) > 0) {

    # Default names "par1", "par2", ...; keep user-supplied names where given.
    # BUGFIX: the availability check is now per element. Previously a single
    # scalar test replaced ALL default names, so unnamed values in a partially
    # named call ended up with empty names instead of "parN".
    # NOTE(review): names are assigned per top-level argument; passing a
    # multi-element vector as one argument yields fewer names than values,
    # as in the original implementation.
    mynames <- paste0("par", seq_along(mylist))
    if (!is.null(names(mylist))) {
      is.available <- nzchar(names(mylist))
      mynames[is.available] <- names(mylist)[is.available]
    }

    out <- as.numeric(unlist(mylist))
    names(out) <- mynames

    return(as.parvec(out, deriv = deriv))

  } else {
    return(NULL)
  }
}
## Prediction classes ----------------------------------------------------

#' Prediction function
#'
#' @description A prediction function is a function \code{x(..., fixed, deriv, conditions)}.
#' Prediction functions are generated by \link{Xs}, \link{Xf} or \link{Xd}. For an example
#' see the last one.
#'
#' @param P2X transformation function as being produced by \link{Xs}.
#' @param parameters character vector with parameter names
#' @param condition character, the condition name
#' @details Prediction functions can be "added" by the "+" operator, see \link{sumfn}. Thereby,
#' predictions for different conditions are merged or overwritten. Prediction functions can
#' also be concatenated with other functions, e.g. observation functions (\link{obsfn}) or
#' parameter transformation functions (\link{parfn}) by the "*" operator, see \link{prodfn}.
#' @return Object of class \code{prdfn}, i.e. a function \code{x(..., fixed, deriv, conditions, env)}
#' which returns a \link{prdlist}. The arguments \code{times} and
#' \code{pars} (parameter values) should be passed via the \code{...} argument, in this order.
#' @example inst/examples/prediction.R
#' @export
prdfn <- function(P2X, parameters = NULL, condition = NULL) {

  mycondition <- condition
  # A single mapping, stored under its condition name (unnamed if NULL)
  mappings <- structure(list(P2X), names = condition)

  outfn <- function(..., fixed = NULL, deriv = TRUE, conditions = mycondition, env = NULL) {

    dots <- list(...)
    dots <- dots[match.fnargs(dots, c("times", "pars"))]
    times <- dots[[1]]
    pars <- dots[[2]]

    # Derivatives are produced for all parameters in pars except the fixed ones
    pars <- c(as.parvec(pars[setdiff(names(pars), names(fixed))]),
              fixed)

    # test_conditions() returns NULL if either argument is NULL,
    # character(0) if there is no overlap, the overlap otherwise.
    overlap <- test_conditions(conditions, condition)
    if (is.null(overlap)) conditions <- union(condition, conditions)

    res <- NULL
    if (is.null(overlap) || length(overlap) > 0)
      res <- P2X(times = times, pars = pars, deriv = deriv)

    # Distribute the result into the slots belonging to this condition
    n.out <- max(1, length(conditions))
    outlist <- structure(vector("list", n.out), names = conditions)
    slots <- if (is.null(condition)) seq_len(n.out) else match(condition, conditions)
    for (idx in slots[!is.na(slots)]) outlist[[idx]] <- res

    as.prdlist(outlist)
  }

  attr(outfn, "mappings") <- mappings
  attr(outfn, "parameters") <- parameters
  attr(outfn, "conditions") <- mycondition
  class(outfn) <- c("prdfn", "fn")
  outfn
}
#' Observation function
#'
#' @description An observation function is a function that is concatenated
#' with a prediction function via \link{prodfn} to yield a new prediction function,
#' see \link{prdfn}. Observation functions are generated by \link{Y}. Handling
#' of the conditions is then organized by the \code{obsfn} object.
#' @param X2Y the low-level observation function generated e.g. by \link{Y}.
#' @param parameters character vector with parameter names
#' @param condition character, the condition name
#' @details Observation functions can be "added" by the "+" operator, see \link{sumfn}. Thereby,
#' observations for different conditions are merged or overwritten. Observation functions can
#' also be concatenated with other functions, e.g. observation functions (\link{obsfn}) or
#' prediction functions (\link{prdfn}) by the "*" operator, see \link{prodfn}.
#' @return Object of class \code{obsfn}, i.e. a function \code{x(..., fixed, deriv, conditions, env)}
#' which returns a \link{prdlist}. The arguments \code{out} (prediction) and \code{pars} (parameter values)
#' should be passed via the \code{...} argument.
#' @example inst/examples/prediction.R
#' @export
obsfn <- function(X2Y, parameters = NULL, condition = NULL) {

  mycondition <- condition
  # A single mapping, stored under its condition name (unnamed if NULL)
  mappings <- structure(list(X2Y), names = condition)

  outfn <- function(..., fixed = NULL, deriv = TRUE, conditions = mycondition, env = NULL) {

    dots <- list(...)
    dots <- dots[match.fnargs(dots, c("out", "pars"))]
    out <- dots[[1]]
    pars <- dots[[2]]

    # Derivatives are produced for all parameters in pars except the fixed ones
    pars <- c(as.parvec(pars[setdiff(names(pars), names(fixed))]),
              fixed)

    # test_conditions() returns NULL if either argument is NULL,
    # character(0) if there is no overlap, the overlap otherwise.
    overlap <- test_conditions(conditions, condition)
    if (is.null(overlap)) conditions <- union(condition, conditions)

    res <- NULL
    if (is.null(overlap) || length(overlap) > 0)
      res <- X2Y(out = out, pars = pars)

    # Distribute the result into the slots belonging to this condition
    n.out <- max(1, length(conditions))
    outlist <- structure(vector("list", n.out), names = conditions)
    slots <- if (is.null(condition)) seq_len(n.out) else match(condition, conditions)
    for (idx in slots[!is.na(slots)]) outlist[[idx]] <- res

    as.prdlist(outlist)
  }

  attr(outfn, "mappings") <- mappings
  attr(outfn, "parameters") <- parameters
  attr(outfn, "conditions") <- mycondition
  class(outfn) <- c("obsfn", "fn")
  outfn
}
#' Prediction frame
#'
#' @description A prediction frame is used to store a model prediction in a matrix. The columns
#' of the matrix are "time" and one column per state. The prediction frame has attributes "deriv",
#' the matrix of sensitivities with respect to "outer parameters" (see \link{P}), an attribute
#' "sensitivities", the matrix of sensitivities with respect to the "inner parameters" (the model
#' parameters, left-hand-side of the parameter transformation) and an attribute "parameters", the
#' parameter vector of inner parameters to produce the prediction frame.
#'
#' Prediction frames are usually the constituents of prediction lists (\link{prdlist}). They are
#' produced by \link{Xs}, \link{Xd} or \link{Xf}. When you define your own prediction functions,
#' see \code{P2X} in \link{prdfn}, the result should be returned as a prediction frame.
#' @param prediction matrix of model prediction
#' @param deriv matrix of sensitivities wrt outer parameters
#' @param sensitivities matrix of sensitivities wrt inner parameters
#' @param parameters names of the outer parameters
#' @return Object of class \code{prdframe}, i.e. a matrix with other matrices and vectors as attributes.
#' @export
prdframe <- function(prediction = NULL, deriv = NULL, sensitivities = NULL, parameters = NULL) {

  # A missing prediction yields a 1x1 NA matrix as placeholder
  out <- if (is.null(prediction)) matrix() else as.matrix(prediction)

  structure(out,
            deriv = deriv,
            sensitivities = sensitivities,
            parameters = parameters,
            class = c("prdframe", "matrix"))
}
#' Prediction list
#'
#' @description A prediction list is used to store a list of model predictions
#' from different prediction functions or the same prediction function with different
#' parameter specifications. Each entry of the list is a \link{prdframe}.
#' @param ... objects of class \link{prdframe}. Unnamed entries are named by
#' their position.
#' @return Object of class \code{prdlist}.
#' @export
prdlist <- function(...) {
  mylist <- list(...)
  mynames <- names(mylist)
  # BUGFIX: seq_along() is safe for an empty call, whereas
  # as.character(1:length(mylist)) would yield c("1", "0").
  if (is.null(mynames)) mynames <- as.character(seq_along(mylist))
  as.prdlist(mylist, mynames)
}
## Data classes ----------------------------------------------------------------
#' Generate a datalist object
#'
#' @description The datalist object stores time-course data in a list of data.frames.
#' The names of the list serve as identifiers, e.g. of an experimental condition, etc.
#' @details Datalists can be plotted, see \link{plotData} and merged, see \link{sumdatalist}.
#' They are the basic structure when combining model prediction and data via the \link{normL2}
#' objective function.
#'
#' The standard columns of the datalist data frames are "name" (observable name),
#' "time" (time points), "value" (data value), "sigma" (uncertainty, can be NA), and
#' "lloq" (lower limit of quantification, \code{-Inf} by default).
#'
#' Datalists carry the attribute \code{condition.grid} which contains additional information about different
#' conditions, such as dosing information for the experiment. It can be conveniently accessed by the \link{covariates}-function.
#' Reassigning names to a datalist also renames the rows of the \code{condition.grid}.
#' @param ... data.frame objects to be coerced into a list and additional arguments.
#' Unnamed entries are named by their position.
#' @return Object of class \code{datalist}.
#' @export
datalist <- function(...) {
  mylist <- list(...)
  mynames <- names(mylist)
  # BUGFIX: seq_along() is safe for an empty call, whereas
  # as.character(1:length(mylist)) would yield c("1", "0").
  if (is.null(mynames)) mynames <- as.character(seq_along(mylist))
  as.datalist(mylist, mynames)
}
## Objective classes ---------------------------------------------------------

#' Generate objective list
#'
#' @description An objective list contains an objective value, a gradient, and a Hessian matrix.
#'
#' Objective lists can contain additional numeric attributes that are preserved or
#' combined with the corresponding attributes of another objective list when
#' both are added by the "+" operator, see \link{sumobjlist}.
#'
#' Objective lists are returned by objective functions as being generated
#' by \link{normL2}, \link{constraintL2}, \link{priorL2} and \link{datapointL2}.
#' @param value numeric of length 1
#' @param gradient named numeric
#' @param hessian matrix with rownames and colnames according to gradient names
#' @return Object of class \code{objlist}
#' @export
objlist <- function(value, gradient, hessian) {
  # Bundle the three components into a classed list
  structure(list(value = value, gradient = gradient, hessian = hessian),
            class = c("objlist", "list"))
}
#' Objective frame
#'
#' @description An objective frame is supposed to store the residuals of a model prediction
#' with respect to a data frame.
#' @param mydata data.frame as being generated by \link{res}.
#' @param deriv matrix of the derivatives of the residuals with respect to parameters.
#' @param deriv.err matrix of the derivatives of the error model.
#' @return An object of class \code{objframe}, i.e. a data frame with attribute "deriv".
#' @export
objframe <- function(mydata, deriv = NULL, deriv.err = NULL) {

  mydata <- as.data.frame(mydata)

  # The canonical column set, in canonical order
  required <- c("time", "name", "value", "prediction",
                "sigma", "residual", "weighted.residual", "bloq")
  if (length(setdiff(required, names(mydata))) > 0)
    stop("mydata does not have required names")

  # Keep only the canonical columns, reordered
  out <- mydata[, required]
  attr(out, "deriv") <- deriv
  attr(out, "deriv.err") <- deriv.err
  class(out) <- c("objframe", "data.frame")

  out
}
## General concatenation of functions ------------------------------------------
#' Direct sum of objective functions
#'
#' @param x1 function of class \code{objfn}
#' @param x2 function of class \code{objfn}
#' @details The objective functions are evaluated and their results are added. Sometimes,
#' the evaluation of an objective function depends on results that have been computed
#' internally in a preceding objective function. Therefore, environments are forwarded
#' and all evaluations take place in the same environment. The first objective function
#' in a sum of functions generates a new environment.
#' @return Object of class \code{objfn}.
#' @seealso \link{normL2}, \link{constraintL2}, \link{priorL2}, \link{datapointL2}
#' @aliases sumobjfn
#' @example inst/examples/objective.R
#' @export
"+.objfn" <- function(x1, x2) {
  # Adding to NULL returns the other summand unchanged (neutral element)
  if (is.null(x1)) return(x2)
  # Combine conditions, parameters, and model names of both summands
  conditions.x1 <- attr(x1, "conditions")
  conditions.x2 <- attr(x2, "conditions")
  conditions12 <- union(conditions.x1, conditions.x2)
  parameters.x1 <- attr(x1, "parameters")
  parameters.x2 <- attr(x2, "parameters")
  parameters12 <- union(parameters.x1, parameters.x2)
  modelname.x1 <- attr(x1, "modelname")
  modelname.x2 <- attr(x2, "modelname")
  modelname12 <- union(modelname.x1, modelname.x2)
  # objfn + objfn
  if (inherits(x1, "objfn") & inherits(x2, "objfn")) {
    # The returned closure captures x1, x2 and the combined attributes
    outfn <- function(..., fixed = NULL, deriv = TRUE, conditions = conditions12, env = NULL) {
      # The parameter vector is the only positional/... argument
      arglist <- list(...)
      arglist <- arglist[match.fnargs(arglist, c("pars"))]
      pars <- arglist[[1]]
      # 1. If conditions.xi is null, always evaluate xi, but only once
      # 2. If not null, evaluate at intersection with conditions
      # 3. If not null & intersection is empty, don't evaluate xi at all
      v1 <- v2 <- NULL
      if (is.null(conditions.x1)) {
        v1 <- x1(pars = pars, fixed = fixed, deriv = deriv, conditions = conditions.x1, env = env)
      } else if (any(conditions %in% conditions.x1)) {
        v1 <- x1(pars = pars, fixed = fixed, deriv = deriv, conditions = intersect(conditions, conditions.x1), env = env)
      }
      if (is.null(conditions.x2)) {
        # NOTE(review): this branch passes the outer `env`, while the branch
        # below forwards v1's environment (attr(v1, "env")). Given the
        # documented intent that all evaluations share one environment,
        # this asymmetry looks suspicious -- confirm before changing.
        v2 <- x2(pars = pars, fixed = fixed, deriv = deriv, conditions = conditions.x2, env = env)
      } else if (any(conditions %in% conditions.x2)) {
        v2 <- x2(pars = pars, fixed = fixed, deriv = deriv, conditions = intersect(conditions, conditions.x2), env = attr(v1, "env"))
      }
      # "+" dispatches on the objlist class of v1/v2 (sumobjlist);
      # adding NULL leaves the other summand unchanged
      out <- v1 + v2
      # Forward v1's evaluation environment to downstream summands
      attr(out, "env") <- attr(v1, "env")
      return(out)
    }
    class(outfn) <- c("objfn", "fn")
    attr(outfn, "conditions") <- conditions12
    attr(outfn, "parameters") <- parameters12
    attr(outfn, "modelname") <- modelname12
    return(outfn)
  }
}
#' Multiplication of objective functions with scalars
#'
#' @description The \code{\%.*\%} operator allows to multiply objects of class objlist or objfn with
#' a scalar.
#'
#' @param x1 numeric of length one (the scalar).
#' @param x2 object of class objfn or objlist, or a numeric.
#' @return An objective function or objlist object.
#'
#' @export
"%.*%" <- function(x1, x2) {

  if (inherits(x2, "objlist")) {

    # Scale value, gradient and hessian ...
    scaled <- lapply(x2, function(elem) x1 * elem)

    # ... and every numeric attribute, keeping all other attributes intact
    all.attr <- attributes(x2)
    num.attr <- names(all.attr)[vapply(all.attr, is.numeric, logical(1))]
    attributes(scaled) <- all.attr
    for (nm in num.attr) attr(scaled, nm) <- x1 * all.attr[[nm]]

    return(scaled)

  } else if (inherits(x2, "objfn")) {

    conditions12 <- attr(x2, "conditions")
    parameters12 <- attr(x2, "parameters")
    modelname12 <- attr(x2, "modelname")

    # Wrap x2 in a closure that scales its result by x1 on each call
    outfn <- function(..., fixed = NULL, deriv = TRUE, conditions = conditions12, env = NULL) {
      dots <- list(...)
      pars <- dots[[match.fnargs(dots, c("pars"))]]
      scale <- x1
      evaluated <- x2(pars = pars, fixed = fixed, deriv = deriv, conditions = conditions, env = attr(scale, "env"))
      res <- scale %.*% evaluated
      attr(res, "env") <- attr(evaluated, "env")
      res
    }
    class(outfn) <- c("objfn", "fn")
    attr(outfn, "conditions") <- conditions12
    attr(outfn, "parameters") <- parameters12
    attr(outfn, "modelname") <- modelname12
    return(outfn)

  } else {
    # Plain numerics: ordinary multiplication
    x1 * x2
  }
}
#' Direct sum of functions
#'
#' Used to add prediction functions, parameter transformation functions or observation functions.
#'
#' @param x1 function of class \code{obsfn}, \code{prdfn} or \code{parfn}
#' @param x2 function of class \code{obsfn}, \code{prdfn} or \code{parfn}
#' @details Each prediction function is associated to a number of conditions. Adding functions
#' means merging or overwriting the set of conditions.
#' @return Object of the same class as \code{x1} and \code{x2} which returns results for the
#' union of conditions.
#' @aliases sumfn
#' @seealso \link{P}, \link{Y}, \link{Xs}
#' @example inst/examples/prediction.R
#' @export
"+.fn" <- function(x1, x2) {
  # Adding to NULL returns the other summand unchanged (neutral element)
  if (is.null(x1)) return(x2)
  # Collect the per-condition mappings and condition sets of both summands
  mappings.x1 <- attr(x1, "mappings")
  mappings.x2 <- attr(x2, "mappings")
  conditions.x1 <- attr(x1, "conditions")
  conditions.x2 <- attr(x2, "conditions")
  overlap <- intersect(conditions.x1, conditions.x2)
  # Condition-less (NULL-named) mappings cannot be merged unambiguously
  if (is.null(names(mappings.x1)) || is.null(names(mappings.x2))) stop("General transformations (NULL names) cannot be coerced.")
  # On overlap, x2's mapping wins: drop the overlapping mappings from x1
  # NOTE(review): with more than one overlapping condition, paste() returns a
  # vector here and warning() concatenates it into one garbled message --
  # consider collapsing the condition names explicitly.
  if (length(overlap) > 0) {
    warning(paste("Condition", overlap, "existed and has been overwritten."))
    mappings.x1 <- mappings.x1[!conditions.x1 %in% overlap]
    conditions.x1 <- conditions.x1[!conditions.x1 %in% overlap]
  }
  # Merged condition set and mapping list (x1 first, then x2)
  conditions.x12 <- c(conditions.x1, conditions.x2)
  mappings <- c(mappings.x1, mappings.x2)
  # prdfn + prdfn
  if (inherits(x1, "prdfn") & inherits(x2, "prdfn")) {
    # Closure dispatching each requested condition to its mapping
    outfn <- function(..., fixed = NULL, deriv = TRUE, conditions = names(mappings), env = NULL) {
      # Expect times and pars via ..., in this order
      arglist <- list(...)
      arglist <- arglist[match.fnargs(arglist, c("times", "pars"))]
      times <- arglist[[1]]
      pars <- arglist[[2]]
      # Evaluate only the conditions this sum actually provides
      if (is.null(conditions)) {
        available <- names(mappings)
      } else {
        available <- intersect(names(mappings), conditions)
      }
      # One slot per requested condition; unavailable ones stay NULL
      outlist <- structure(vector("list", length(conditions)), names = conditions)
      #outpars <- structure(vector("list", length(conditions)), names = conditions)
      for (C in available) {
        outlist[[C]] <- mappings[[C]](times = times, pars = pars, deriv = deriv)
        #outpars[[C]] <- attr(outlist[[C]], "pars")
        #attr(outlist[[C]], "pars") <- NULL
      }
      out <- as.prdlist(outlist)
      #attr(out, "pars") <- outpars
      return(out)
    }
    class(outfn) <- c("prdfn", "fn")
  }
  # obsfn + obsfn
  if (inherits(x1, "obsfn") & inherits(x2, "obsfn")) {
    # Same dispatch pattern, but mappings take a prediction (out) and pars
    outfn <- function(..., fixed = NULL, deriv = TRUE, conditions = names(mappings), env = NULL) {
      arglist <- list(...)
      arglist <- arglist[match.fnargs(arglist, c("out", "pars"))]
      out <- arglist[[1]]
      pars <- arglist[[2]]
      if (is.null(conditions)) {
        available <- names(mappings)
      } else {
        available <- intersect(names(mappings), conditions)
      }
      outlist <- structure(vector("list", length(conditions)), names = conditions)
      for (C in available) {
        outlist[[C]] <- mappings[[C]](out = out, pars = pars)
      }
      out <- as.prdlist(outlist)
      return(out)
    }
    class(outfn) <- c("obsfn", "fn")
  }
  # parfn + parfn
  if (inherits(x1, "parfn") & inherits(x2, "parfn")) {
    # Same dispatch pattern; mappings are parameter transformations
    outfn <- function(..., fixed = NULL, deriv = TRUE, conditions = names(mappings), env = NULL) {
      arglist <- list(...)
      arglist <- arglist[match.fnargs(arglist, c("pars"))]
      pars <- arglist[[1]]
      if (is.null(conditions)) {
        available <- names(mappings)
      } else {
        available <- intersect(names(mappings), conditions)
      }
      outlist <- structure(vector("list", length(conditions)), names = conditions)
      for (C in available) {
        outlist[[C]] <- mappings[[C]](pars = pars, fixed = fixed, deriv = deriv)
      }
      return(outlist)
    }
    class(outfn) <- c("parfn", "fn")
  }
  # Attach merged meta information to the combined function
  attr(outfn, "mappings") <- mappings
  attr(outfn, "parameters") <- union(attr(x1, "parameters"), attr(x2, "parameters"))
  attr(outfn, "conditions") <- conditions.x12
  attr(outfn, "forcings") <- do.call(c, list(attr(x1, "forcings"), attr(x2, "forcings")))
  return(outfn)
}
#' Direct sum of datasets
#'
#' Used to merge datasets with overlapping conditions.
#'
#' @param data1 dataset of class \code{datalist}
#' @param data2 dataset of class \code{datalist}
#' @details Each data list contains data frames for a number of conditions.
#' The direct sum of datalist is meant as merging the two data lists and
#' returning the overarching datalist.
#' @return Object of class \code{datalist} for the
#' union of conditions.
#' @aliases sumdatalist
#' @example inst/examples/sumdatalist.R
#' @export
"+.datalist" <- function(data1, data2) {
  # Conditions occurring in both datalists: data2 takes precedence and the
  # corresponding entries of data1 are dropped after a warning.
  duplicated.conditions <- names(data2)[names(data2) %in% names(data1)]
  if (length(duplicated.conditions) > 0) {
    warning(paste("Condition", duplicated.conditions, "existed and has been overwritten."))
    data1 <- data1[!names(data1) %in% names(data2)]
  }
  # Stack the data frames of both inputs, condition by condition.
  all.conditions <- union(names(data1), names(data2))
  merged <- lapply(all.conditions, function(C) rbind(data1[[C]], data2[[C]]))
  names(merged) <- all.conditions
  # Merge the condition.grid attributes and remove duplicated rows.
  grid <- combine(attr(data1, "condition.grid"), attr(data2, "condition.grid"))
  if (is.data.frame(grid)) {
    grid <- grid[!duplicated(rownames(grid)), , drop = FALSE]
  }
  out <- as.datalist(merged)
  attr(out, "condition.grid") <- grid
  return(out)
}
# Internal helper for "*.fn": return the first non-NULL condition vector.
# The composed function inherits the conditions of whichever factor defines
# any; returns NULL if both are NULL.
out_conditions <- function(c1, c2) {
  if (is.null(c1)) c2 else c1
}
# Internal helper: intersection of two condition vectors, or NULL if either
# of them is NULL (i.e. condition-free).
test_conditions <- function(c1, c2) {
  if (is.null(c1) || is.null(c2)) return(NULL)
  intersect(c1, c2)
}
#' Concatenation of functions
#'
#' Used to concatenate observation functions, prediction functions and parameter transformation functions.
#'
#' @param p1 function of class \code{obsfn}, \code{prdfn}, \code{parfn} or \code{idfn}
#' @param p2 function of class \code{obsfn}, \code{prdfn}, \code{parfn} or \code{idfn}
#' @return Object of the same class as \code{x1} and \code{x2}.
#' @aliases prodfn
#' @example inst/examples/prediction.R
#' @export
"*.fn" <- function(p1, p2) {
  # Composition operator: (p1 * p2) first evaluates p2, then feeds each of
  # its condition-wise results into p1. The admissible combinations are
  # handled branch by branch below.
  # NOTE(review): if no branch matches (e.g. parfn * prdfn), the function
  # falls through all branches and returns NULL.
  # obsfn * obsfn -> obsfn
  if (inherits(p1, "obsfn") & inherits(p2, "obsfn")) {
    conditions.p1 <- attr(p1, "conditions")
    conditions.p2 <- attr(p2, "conditions")
    conditions.out <- out_conditions(conditions.p1, conditions.p2)
    outfn <- function(..., fixed = NULL, deriv = TRUE, conditions = NULL, env = NULL) {
      arglist <- list(...)
      arglist <- arglist[match.fnargs(arglist, c("out", "pars"))]
      out <- arglist[[1]]
      pars <- arglist[[2]]
      # Evaluate p2 first, then apply p1 to each condition-wise result
      step1 <- p2(out = out, pars = pars, fixed = fixed, deriv = deriv, conditions = conditions)
      step2 <- do.call(c, lapply(1:length(step1), function(i) p1(out = step1[[i]], pars = attr(step1[[i]], "parameters"), fixed = fixed, deriv = deriv, conditions = names(step1)[i])))
      out <- as.prdlist(step2)
      return(out)
    }
    # Generate mappings for observation function
    # At least one mapping is generated so that the NULL-conditions case
    # (conditions.out has length 0) is covered, too.
    l <- max(c(1, length(conditions.out)))
    mappings <- lapply(1:l, function(i) {
      mapping <- function(out, pars) {
        outfn(out = out, pars = pars, conditions = conditions.out[i])[[1]]
      }
      m1 <- modelname(p1, conditions = conditions.p1[i])
      m2 <- modelname(p2, conditions = conditions.p2[i])
      attr(mapping, "modelname") <- union(m1, m2)
      attr(mapping, "parameters") <- getParameters(p2, conditions = conditions.out[i])
      return(mapping)
    })
    names(mappings) <- conditions.out
    attr(outfn, "mappings") <- mappings
    # The composed function takes the parameters of the innermost function p2
    attr(outfn, "parameters") <- attr(p2, "parameters")
    attr(outfn, "conditions") <- conditions.out
    class(outfn) <- c("obsfn", "fn", "composed")
    return(outfn)
  }
  # obsfn * parfn -> obsfn
  if (inherits(p1, "obsfn") & inherits(p2, "parfn")) {
    conditions.p1 <- attr(p1, "conditions")
    conditions.p2 <- attr(p2, "conditions")
    conditions.out <- out_conditions(conditions.p1, conditions.p2)
    outfn <- function(..., fixed = NULL, deriv = TRUE, conditions = NULL, env = NULL) {
      arglist <- list(...)
      arglist <- arglist[match.fnargs(arglist, c("out", "pars"))]
      out <- arglist[[1]]
      pars <- arglist[[2]]
      # p2 transforms the parameters; p1 observes `out` under the
      # transformed parameters of each condition
      step1 <- p2(pars = pars, fixed = fixed, deriv = deriv, conditions = conditions)
      step2 <- do.call(c, lapply(1:length(step1), function(i) p1(out = out, pars = step1[[i]], fixed = fixed, deriv = deriv, conditions = names(step1)[i])))
      out <- as.prdlist(step2)
      return(out)
    }
    # Generate mappings for observation function
    l <- max(c(1, length(conditions.out)))
    mappings <- lapply(1:l, function(i) {
      mapping <- function(out, pars) {
        outfn(out = out, pars = pars, conditions = conditions.out[i])[[1]]
      }
      m1 <- modelname(p1, conditions = conditions.p1[i])
      m2 <- modelname(p2, conditions = conditions.p2[i])
      attr(mapping, "modelname") <- union(m1, m2)
      attr(mapping, "parameters") <- getParameters(p2, conditions = conditions.out[i])
      return(mapping)
    })
    names(mappings) <- conditions.out
    attr(outfn, "mappings") <- mappings
    attr(outfn, "parameters") <- attr(p2, "parameters")
    attr(outfn, "conditions") <- conditions.out
    class(outfn) <- c("obsfn", "fn", "composed")
    return(outfn)
  }
  # obsfn * prdfn -> prdfn
  if (inherits(p1, "obsfn") & inherits(p2, "prdfn")) {
    conditions.p1 <- attr(p1, "conditions")
    conditions.p2 <- attr(p2, "conditions")
    conditions.out <- out_conditions(conditions.p1, conditions.p2)
    outfn <- function(..., fixed = NULL, deriv = TRUE, conditions = NULL, env = NULL) {
      arglist <- list(...)
      arglist <- arglist[match.fnargs(arglist, c("times", "pars"))]
      times <- arglist[[1]]
      pars <- arglist[[2]]
      # p2 predicts the model output; p1 maps the prediction to observables
      step1 <- p2(times = times, pars = pars, fixed = fixed, deriv = deriv, conditions = conditions)
      step2 <- do.call(c, lapply(1:length(step1), function(i) p1(out = step1[[i]], pars = attr(step1[[i]], "parameters"), fixed = fixed, deriv = deriv, conditions = names(step1)[i])))
      out <- as.prdlist(step2)
      return(out)
    }
    # Generate mappings for prediction function
    l <- max(c(1, length(conditions.out)))
    mappings <- lapply(1:l, function(i) {
      mapping <- function(times, pars, deriv = TRUE) {
        outfn(times = times, pars = pars, deriv = deriv, conditions = conditions.out[i])[[1]]
      }
      m1 <- modelname(p1, conditions = conditions.p1[i])
      m2 <- modelname(p2, conditions = conditions.p2[i])
      attr(mapping, "modelname") <- union(m1, m2)
      attr(mapping, "parameters") <- getParameters(p2, conditions = conditions.out[i])
      return(mapping)
    })
    names(mappings) <- conditions.out
    attr(outfn, "mappings") <- mappings
    attr(outfn, "parameters") <- attr(p2, "parameters")
    attr(outfn, "conditions") <- conditions.out
    class(outfn) <- c("prdfn", "fn", "composed")
    return(outfn)
  }
  # prdfn * parfn -> prdfn
  if (inherits(p1, "prdfn") & inherits(p2, "parfn")) {
    conditions.p1 <- attr(p1, "conditions")
    conditions.p2 <- attr(p2, "conditions")
    conditions.out <- out_conditions(conditions.p1, conditions.p2)
    outfn <- function(..., fixed = NULL, deriv = TRUE, conditions = NULL, env = NULL) {
      arglist <- list(...)
      arglist <- arglist[match.fnargs(arglist, c("times", "pars"))]
      times <- arglist[[1]]
      pars <- arglist[[2]]
      # p2 transforms the parameters; p1 predicts under the transformed
      # parameters of each condition
      step1 <- p2(pars = pars, fixed = fixed, deriv = deriv, conditions = conditions)
      step2 <- do.call(c, lapply(1:length(step1), function(i) p1(times = times, pars = step1[[i]], deriv = deriv, conditions = names(step1)[i])))
      out <- as.prdlist(step2)
      return(out)
    }
    # Generate mappings for prediction function
    l <- max(c(1, length(conditions.out)))
    mappings <- lapply(1:l, function(i) {
      mapping <- function(times, pars, deriv = TRUE) {
        outfn(times = times, pars = pars, deriv = deriv, conditions = conditions.out[i])[[1]]
      }
      attr(mapping, "parameters") <- getParameters(p2, conditions = conditions.out[i])
      m1 <- modelname(p1, conditions = conditions.p1[i])
      m2 <- modelname(p2, conditions = conditions.p2[i])
      attr(mapping, "modelname") <- union(m1, m2)
      return(mapping)
    })
    names(mappings) <- conditions.out
    attr(outfn, "mappings") <- mappings
    attr(outfn, "conditions") <- conditions.out
    attr(outfn, "parameters") <- attr(p2, "parameters")
    class(outfn) <- c("prdfn", "fn", "composed")
    return(outfn)
  }
  # parfn * parfn -> parfn
  if (inherits(p1, "parfn") & inherits(p2, "parfn")) {
    conditions.p1 <- attr(p1, "conditions")
    conditions.p2 <- attr(p2, "conditions")
    conditions.out <- out_conditions(conditions.p1, conditions.p2)
    outfn <- function(..., fixed = NULL, deriv = TRUE, conditions = NULL, env = NULL) {
      arglist <- list(...)
      arglist <- arglist[match.fnargs(arglist, c("pars"))]
      pars <- arglist[[1]]
      # Chained parameter transformation: first p2, then p1 per condition
      step1 <- p2(pars = pars, fixed = fixed, deriv = deriv, conditions = conditions)
      step2 <- do.call(c, lapply(1:length(step1), function(i) p1(pars = step1[[i]], fixed = fixed, deriv = deriv, conditions = names(step1)[i])))
      return(step2)
    }
    # Generate mappings for parameters function
    l <- max(c(1, length(conditions.out)))
    mappings <- lapply(1:l, function(i) {
      mapping <- function(pars, fixed = NULL, deriv = TRUE) {
        outfn(pars = pars, fixed = fixed, deriv = deriv, conditions = conditions.out[i])[[1]]
      }
      m1 <- modelname(p1, conditions = conditions.p1[i])
      m2 <- modelname(p2, conditions = conditions.p2[i])
      attr(mapping, "modelname") <- union(m1, m2)
      attr(mapping, "parameters") <- getParameters(p2, conditions = conditions.out[i])
      return(mapping)
    })
    names(mappings) <- conditions.out
    attr(outfn, "mappings") <- mappings
    attr(outfn, "parameters") <- attr(p2, "parameters")
    attr(outfn, "conditions") <- conditions.out
    class(outfn) <- c("parfn", "fn", "composed")
    return(outfn)
  }
  # objfn * parfn -> objfn
  if (inherits(p1, "objfn") & inherits(p2, "parfn")) {
    conditions.p1 <- attr(p1, "conditions")
    conditions.p2 <- attr(p2, "conditions")
    conditions.out <- out_conditions(conditions.p1, conditions.p2)
    outfn <- function(..., fixed = NULL, deriv=TRUE, conditions = NULL, env = NULL) {
      arglist <- list(...)
      arglist <- arglist[match.fnargs(arglist, "pars")]
      pars <- arglist[[1]]
      # Evaluate the objective function on each condition's transformed
      # parameters and add up the contributions via "+.objlist"
      step1 <- p2(pars = pars, fixed = fixed, deriv = deriv, conditions = conditions)
      step2 <- Reduce("+", lapply(1:length(step1), function(i) p1(pars = step1[[i]], fixed = NULL, deriv = deriv, conditions = names(step1)[i], env = env)))
      return(step2)
    }
    attr(outfn, "conditions") <- conditions.out
    class(outfn) <- c("objfn", "fn", "composed")
    return(outfn)
  }
  # idfn * fn -> fn (identity is neutral with respect to composition)
  if (inherits(p1, "idfn")) {
    return(p2)
  }
  # fn * idfn -> fn
  if (inherits(p2, "idfn")) {
    return(p1)
  }
}
## General purpose functions for different dMod classes ------------------------------
#' List, get and set controls for different functions
#'
#' @description Applies to objects of class \code{objfn},
#' \code{parfn}, \code{prdfn} and \code{obsfn}. Allows to manipulate
#' different arguments that have been set when creating the
#' objects.
#' @details If called without further arguments, \code{controls(x)} lists the
#' available controls within an object. Calling \code{controls()} with \code{name}
#' and \code{condition} returns the control value. The value can be overwritten. If
#' a list or data.frame is returned, elements of those can be manipulated by the
#' \code{$}- or \code{[]}-operator.
#'
#' @param x function
#' @param ... arguments going to the appropriate S3 methods
#' @return Either a print-out or the values of the control.
#' @examples
#' ## parfn with condition
#' p <- P(eqnvec(x = "-a*x"), method = "implicit", condition = "C1")
#' controls(p)
#' controls(p, "C1", "keep.root")
#' controls(p, "C1", "keep.root") <- FALSE
#'
#' ## obsfn with NULL condition
#' g <- Y(g = eqnvec(y = "s*x"), f = NULL, states = "x", parameters = "s")
#' controls(g)
#' controls(g, NULL, "attach.input")
#' controls(g, NULL, "attach.input") <- FALSE
#' @export
controls <- function(x, ...) {
  UseMethod("controls", x)
}
# Internal: print the names of the controls stored in the closure
# environment of an objective function.
lscontrols_objfn <- function(x) {
  names(environment(x)$controls)
}
# Internal: print the control names stored in the environments of the
# condition-specific mappings of a parfn/prdfn/obsfn object. If `condition`
# is given, the print-out is restricted to matching conditions.
lscontrols_fn <- function(x, condition = NULL) {
  conditions <- attr(x, "conditions")
  mappings <- attr(x, "mappings")
  # seq_along() instead of 1:length(): an empty mappings list must be a
  # no-op rather than iterating over c(1, 0) and failing on mappings[[1]].
  for (i in seq_along(mappings)) {
    if (is.null(conditions) || is.null(condition) || conditions[i] %in% condition) {
      cat(conditions[i], ":\n", sep = "")
      print(names(environment(mappings[[i]])$controls))
    }
  }
}
#' @export
#' @rdname controls
#' @param name character, the name of the control
controls.objfn <- function(x, name = NULL, ...) {
  # Without a name: list the available controls; with a name: return its value
  if (is.null(name)) lscontrols_objfn(x) else environment(x)$controls[[name]]
}
#' @export
#' @rdname controls
#' @param condition character, the condition name
controls.fn <- function(x, condition = NULL, name = NULL, ...) {
  if (is.null(name)) {
    lscontrols_fn(x, condition)
  } else {
    # Controls live in the environment of the condition-specific mapping;
    # with condition = NULL the first mapping is used.
    mappings <- attr(x, "mappings")
    if (is.null(condition)) y <- mappings[[1]] else y <- mappings[[condition]]
    environment(y)$controls[[name]]
  }
}
#' @export
#' @rdname controls
"controls<-" <- function(x, ..., value) {
  UseMethod("controls<-", x)
}
#' @export
#' @param value the new value
#' @rdname controls
"controls<-.objfn" <- function(x, name, ..., value) {
  # Overwrite the control inside the closure environment of the objective
  # function (modification by reference: all copies of x are affected).
  environment(x)$controls[[name]] <- value
  return(x)
}
#' @export
#' @rdname controls
"controls<-.fn" <- function(x, condition = NULL, name, ..., value) {
  # Overwrite the control in the environment of the condition-specific
  # mapping; with condition = NULL the first mapping is modified.
  mappings <- attr(x, "mappings")
  if (is.null(condition)) y <- mappings[[1]] else y <- mappings[[condition]]
  environment(y)$controls[[name]] <- value
  return(x)
}
#' Extract the derivatives of an object
#'
#' @param x object from which the derivatives should be extracted
#' @param ... additional arguments (not used right now)
#' @return The derivatives in a format that depends on the class of \code{x}.
#' This is
#' \code{parvec -> matrix},
#' \code{prdframe -> prdframe},
#' \code{prdlist -> prdlist},
#' \code{objlist -> named numeric}.
#' @export
getDerivs <- function(x, ...) {
  UseMethod("getDerivs", x)
}
#' @export
#' @rdname getDerivs
getDerivs.parvec <- function(x, ...) {
  # Jacobian of the parameter transformation, stored as the "deriv" attribute
  attr(x, "deriv")
}
#' @export
#' @rdname getDerivs
getDerivs.prdframe <- function(x, ...) {
  # Wrap the sensitivities in a new prdframe, keeping the parameters
  prdframe(prediction = attr(x, "deriv"), parameters = attr(x, "parameters"))
}
#' @export
#' @rdname getDerivs
getDerivs.prdlist <- function(x, ...) {
  # Apply getDerivs() condition-wise and preserve the prdlist structure
  as.prdlist(
    lapply(x, function(myx) {
      getDerivs(myx)
    }),
    names = names(x)
  )
}
#' @export
#' @rdname getDerivs
getDerivs.list <- function(x, ...) {
  lapply(x, function(myx) getDerivs(myx))
}
#' @export
#' @rdname getDerivs
getDerivs.objlist <- function(x, ...) {
  x$gradient
}
# NOTE(review): this undocumented definition of the getEquations() generic
# (signature with `...`) is superseded by the documented definition further
# down in this file (signature with `conditions = NULL`); consider removing
# one of the two.
getEquations <- function(x, ...) {
  UseMethod("getEquations", x)
}
#' Extract the parameters of an object
#'
#' @param ... objects from which the parameters should be extracted
#' @param conditions character vector specifying the conditions to
#' which \code{getParameters} is restricted
#' @return The parameters in a format that depends on the class of \code{x}.
#' @export
getParameters <- function(..., conditions = NULL) {
  # Dispatch on each object in ... individually and return the union of all
  # parameter vectors. Note the unusual construction: UseMethod() is called
  # from within the anonymous function so that dispatch happens per object.
  Reduce("union", lapply(list(...), function(x) {
    UseMethod("getParameters", x)
  }))
}
#' @export
#' @rdname getParameters
#' @param x object from which the parameters are extracted
getParameters.odemodel <- function(x, conditions = NULL) {
  # Union of the state names and dynamic parameters of the compiled ODE
  parameters <- c(
    attr(x$func, "variables"),
    attr(x$func, "parameters")
  )
  return(parameters)
}
#' @export
#' @rdname getParameters
getParameters.fn <- function(x, conditions = NULL) {
  if (is.null(conditions)) {
    # Total parameter set as stored on the function object itself
    parameters <- attr(x, "parameters")
  } else {
    # Union of the parameters of the selected condition-specific mappings
    mappings <- attr(x, "mappings")
    mappings <- mappings[intersect(names(mappings), conditions)]
    parameters <- Reduce("union",
                         lapply(mappings, function(m) attr(m, "parameters"))
    )
  }
  return(parameters)
}
#' @export
#' @rdname getParameters
getParameters.parvec <- function(x, conditions = NULL) {
  names(x)
}
#' @export
#' @rdname getParameters
getParameters.prdframe <- function(x, conditions = NULL) {
  attr(x, "parameters")
}
#' @export
#' @rdname getParameters
getParameters.prdlist <- function(x, conditions = NULL) {
  # Extract the parameters of each prediction frame in the list, optionally
  # restricted to the given conditions.
  # seq_along() instead of 1:length(x) so that an empty prdlist yields an
  # empty result instead of indexing with c(1, 0).
  select <- seq_along(x)
  if (!is.null(conditions)) select <- intersect(names(x), conditions)
  lapply(x[select], function(myx) getParameters(myx))
}
#' @export
#' @rdname getParameters
getParameters.eqnlist <- function(x) {
  # All symbols occurring in states, rates and volumes of the equation list
  unique(c(getSymbols(x$states), getSymbols(x$rates), getSymbols(x$volumes)))
}
#' @export
#' @rdname getParameters
getParameters.eventlist <- function(x) {
  # Symbols from the first three columns of the event list
  # (presumably var/time/value, cf. the events format in odemodel() - confirm)
  Reduce(union, lapply(x[c(1:3)], getSymbols))
}
#' Extract the conditions of an object
#'
#' @param x object from which the conditions should be extracted
#' @param ... additional arguments (not used right now)
#' @return The conditions in a format that depends on the class of \code{x}.
#' @export
getConditions <- function(x, ...) {
  UseMethod("getConditions", x)
}
#' @export
#' @rdname getConditions
getConditions.list <- function(x, ...) {
  # For plain lists (e.g. datalists) the conditions are the element names
  names(x)
}
#' @export
#' @rdname getConditions
getConditions.fn <- function(x, ...) {
  # dMod function objects store their conditions as an attribute
  attr(x, "conditions")
}
#' Get and set modelname
#'
#' @description The modelname attribute refers to the name of a C file associated with
#' a dMod function object like prediction-, parameter transformation- or
#' objective functions.
#'
#' @param ... objects of type \code{prdfn}, \code{parfn}, \code{objfn}
#' @param conditions character vector of conditions
#' @return character vector of model names, corresponding to C files
#' in the local directory.
#'
#' @export
modelname <- function(..., conditions = NULL) {
  # Union of the model names of all supplied objects (see mname())
  Reduce("union", lapply(list(...), mname, conditions = conditions))
}
#' Get modelname from single object (used internally)
#'
#' @param x dMod object
#' @param conditions character vector of conditions
#' @export
mname <- function(x, conditions = NULL) {
  UseMethod("mname", x)
}
#' @export
#' @rdname mname
mname.NULL <- function(x, conditions = NULL) NULL
#' @export
#' @rdname mname
mname.character <- function(x, conditions = NULL) {
  # A character is interpreted as the name of an object; fetch the object
  # via get() and dispatch again on its class.
  mname(get(x), conditions = conditions)
}
#' @export
#' @rdname mname
mname.objfn <- function(x, conditions = NULL) {
  attr(x, "modelname")
}
#' @export
#' @rdname mname
mname.fn <- function(x, conditions = NULL) {
  # Union of the modelname attributes of the (selected) condition-specific
  # mappings of the function object.
  mappings <- attr(x, "mappings")
  # seq_along() instead of 1:length(): safe for an empty mappings list
  # (returns NULL instead of indexing with c(1, 0)).
  select <- seq_along(mappings)
  if (!is.null(conditions)) select <- intersect(names(mappings), conditions)
  modelnames <- Reduce("union",
                       lapply(mappings[select], function(m) attr(m, "modelname"))
  )
  return(modelnames)
}
#' @export
#' @rdname modelname
#' @param x dMod object for which the model name should be set
#' @param value character, the new modelname (does not change the C file)
"modelname<-" <- function(x, ..., value) {
  UseMethod("modelname<-", x)
}
#' @export
#' @rdname modelname
"modelname<-.fn" <- function(x, conditions = NULL, ..., value) {
  mappings <- attr(x, "mappings")
  select <- 1:length(mappings)
  if (!is.null(conditions)) select <- intersect(names(mappings), conditions)
  #if (length(value) > 1 && length(value) != length(mappings[select]))
  #  stop("Length of modelname vector should be either 1 or equal to the number of conditions.")
  if (length(value) == 1) {
    # Recycle a single value over all selected mappings; naming it by
    # condition makes value[i] work for character subscripts, too.
    value <- rep(value, length.out = length(mappings[select]))
    if (!is.null(conditions)) names(value) <- conditions
  }
  for (i in select) {
    attr(attr(x, "mappings")[[i]], "modelname") <- value[i]
    if (inherits(x, "prdfn")) {
      # For prediction functions, also rename the "extended" and "func"
      # objects stored in the mapping's environment (NOTE(review):
      # presumably the compiled ODE objects from odemodel() - confirm).
      extended <- environment(attr(x, "mappings")[[i]])[["extended"]]
      if (!is.null(extended)) {
        attr(environment(attr(x, "mappings")[[i]])[["extended"]], "modelname") <- value[i]
      }
      attr(environment(attr(x, "mappings")[[i]])[["func"]], "modelname") <- value[i]
    }
  }
  return(x)
}
#' @export
#' @rdname modelname
"modelname<-.objfn" <- function(x, conditions = NULL, ..., value) {
  attr(x, "modelname") <- value
  return(x)
}
#' Extract the equations of an object
#'
#' @param x object from which the equations should be extracted
#' @param conditions character or numeric vector specifying the conditions to
#' which \code{getEquations} is restricted. If \code{conditions} has length one,
#' the result is not returned as a list.
#' @return The equations as list of \code{eqnvec} objects.
#' @export
getEquations <- function(x, conditions = NULL) {
  # NOTE(review): getEquations() is also defined (without documentation and
  # with a `...` signature) earlier in this file; this later definition is
  # the one that takes effect.
  UseMethod("getEquations", x)
}
#' @export
#' @rdname getEquations
getEquations.odemodel <- function(x, conditions = NULL) {
  # The equations are stored as an attribute of the compiled ODE function
  attr(x[["func"]], "equations")
}
#' @export
#' @rdname getEquations
getEquations.prdfn <- function(x, conditions = NULL) {
  # The body of this method was a byte-identical copy of getEquations.fn();
  # delegate instead so the logic is maintained in one place only
  # (prdfn objects inherit from class "fn").
  getEquations.fn(x, conditions = conditions)
}
#' @export
#' @rdname getEquations
getEquations.fn <- function(x, conditions = NULL) {
  # Equations are stored as attributes of the condition-specific mappings
  mappings <- attr(x, "mappings")
  if (!is.null(conditions)) mappings <- mappings[conditions]
  equations <- lapply(mappings, function(m) attr(m, "equations"))
  # A single explicitly requested condition is unwrapped from the list
  if (!is.null(conditions) && length(equations) == 1) return(equations[[1]])
  equations
}
#' Extract the observables of an object
#'
#' @param x object from which the observables should be extracted
#' @param ... not used
#' @return The observable equations as a character.
#' @export
getObservables <- function(x, ...) {
  UseMethod("getObservables", x)
}
## ODE model class -------------------------------------------------------------------
#' Generate the model objects for use in Xs (models with sensitivities)
#'
#' @param f Something that can be converted to \link{eqnvec},
#' e.g. a named character vector with the ODE
#' @param deriv logical, generate sensitivities or not
#' @param forcings Character vector with the names of the forcings
#' @param events data.frame of events with columns "var" (character, the name of the state to be
#' affected), "time" (character or numeric, time point), "value" (character or numeric, value),
#' "method" (character, either
#' "replace" or "add"). See \link[deSolve]{events}. Events need to be defined here if they contain
#' parameters, like the event time or value. If both, time and value are purely numeric, they
#' can be specified in \code{\link{Xs}()}, too.
#' @param outputs Named character vector for additional output variables.
#' @param fixed Character vector with the names of parameters (initial values and dynamic) for which
#' no sensitivities are required (will speed up the integration).
#' @param estimate Character vector specifying parameters (initial values and dynamic) for which
#' sensitivities are returned. If estimate is specified, it overwrites `fixed`.
#' @param modelname Character, the name of the C file being generated.
#' @param solver Solver for which the equations are prepared.
#' @param gridpoints Integer, the minimum number of time points where the ODE is evaluated internally
#' @param verbose Print compiler output to R command line.
#' @param ... Further arguments being passed to funC.
#' @return list with \code{func} (ODE object) and \code{extended} (ODE+Sensitivities object)
#' @export
#' @example inst/examples/odemodel.R
#' @import cOde
odemodel <- function(f, deriv = TRUE, forcings=NULL, events = NULL, outputs = NULL, fixed = NULL, estimate = NULL, modelname = "odemodel", solver = c("deSolve", "Sundials"), gridpoints = NULL, verbose = FALSE, ...) {
  # The integrator needs at least two internal grid points
  if (is.null(gridpoints)) gridpoints <- 2
  f <- as.eqnvec(f)
  modelname_s <- paste0(modelname, "_s")
  solver <- match.arg(solver)
  # Compile the plain ODE (no sensitivities)
  func <- cOde::funC(f, forcings = forcings, events = events, outputs = outputs, fixed = fixed, modelname = modelname , solver = solver, nGridpoints = gridpoints, ...)
  extended <- NULL
  if (solver == "Sundials") {
    # Sundials does not need "extended" by itself, but dMod relies on it.
    extended <- func
    attr(extended, "deriv") <- TRUE
    attr(extended, "variables") <- c(attr(extended, "variables"), attr(extended, "variablesSens"))
    attr(extended, "events") <- events
  }
  if (deriv && solver == "deSolve") {
    mystates <- attr(func, "variables")
    myparameters <- attr(func, "parameters")
    # `fixed` removes parameters from the sensitivity computation unless
    # `estimate` is given, which then takes full precedence.
    if (is.null(estimate) & !is.null(fixed)) {
      mystates <- setdiff(mystates, fixed)
      myparameters <- setdiff(myparameters, fixed)
    }
    if (!is.null(estimate)) {
      mystates <- intersect(mystates, estimate)
      myparameters <- intersect(myparameters, estimate)
    }
    # Derive the symbolic sensitivity equations
    s <- sensitivitiesSymb(f,
                           states = mystates,
                           parameters = myparameters,
                           inputs = attr(func, "forcings"),
                           events = attr(func, "events"),
                           reduce = TRUE)
    fs <- c(f, s)
    outputs <- c(attr(s, "outputs"), attr(func, "outputs"))
    events.sens <- attr(s, "events")
    events.func <- attr(func, "events")
    events <- NULL
    if (!is.null(events.func)) {
      if (is.data.frame(events.sens)) {
        # BUGFIX: the argument was misspelled "straingsAsFactors" and was
        # therefore not interpreted as rbind()'s stringsAsFactors argument.
        events <- rbind(events.sens, events.func, stringsAsFactors = FALSE)
      } else {
        # events.sens is a list with one data.frame per original event row
        events <- do.call(rbind, lapply(seq_len(nrow(events.func)), function(i) {
          rbind(events.sens[[i]], events.func[i, ], stringsAsFactors = FALSE)
        }))
      }
    }
    # Compile the extended ODE (states + sensitivities)
    extended <- cOde::funC(fs, forcings = forcings, modelname = modelname_s, solver = solver, nGridpoints = gridpoints, events = events, outputs = outputs, ...)
  }
  out <- list(func = func, extended = extended)
  attr(out, "class") <- "odemodel"
  return(out)
}
## Function classes ------------------------------------------------------
#' dMod match function arguments
#'
#' The function is exported for dependency reasons
#'
#' @param arglist list
#' @param choices character
#'
#' @export
match.fnargs <- function(arglist, choices) {
  # Unnamed calls get empty names so that positional matching can kick in
  if (is.null(names(arglist))) {
    names(arglist) <- rep("", length(arglist))
  }
  # Keep only arguments that are unnamed or explicitly listed in `choices`
  arglist <- arglist[names(arglist) %in% c(choices, "")]
  # Assign the not-yet-supplied choices to the unnamed arguments, in the
  # order given by `choices` (positional matching)
  missing.choices <- !(choices %in% names(arglist))
  if (any(missing.choices)) {
    names(arglist)[names(arglist) == ""] <- choices[missing.choices]
  }
  if (anyDuplicated(names(arglist)) > 0) {
    stop("duplicate arguments in prdfn/obsfn/parfn function call")
  }
  # Position of each choice within the (renamed) argument list
  match(choices, names(arglist))
}
## Equation classes -------------------------------------------------------
#' Generate equation vector object
#'
#' @description The eqnvec object stores explicit algebraic equations, like the
#' right-hand sides of an ODE, observation functions or parameter transformations
#' as named character vectors. Unnamed equations get default names
#' \code{eqn1}, \code{eqn2}, ...
#' @param ... mathematical expressions as characters to be coerced,
#' the right-hand sides of the equations
#' @return object of class \code{eqnvec}, basically a named character.
#' @example inst/examples/eqnvec.R
#' @seealso \link{eqnlist}
#' @export
eqnvec <- function(...) {
  mylist <- list(...)
  if (length(mylist) == 0) return(NULL)
  # Default names eqn1, eqn2, ... for unnamed equations
  mynames <- paste0("eqn", seq_along(mylist))
  if (!is.null(names(mylist))) {
    # BUGFIX: the availability check was a scalar (!is.null(names(mylist))),
    # so in partially named calls the unnamed equations lost their default
    # names. Check name availability element-wise instead.
    is.available <- names(mylist) != ""
    mynames[is.available] <- names(mylist)[is.available]
  }
  names(mylist) <- mynames
  out <- unlist(mylist)
  return(as.eqnvec(out))
}
#' Generate eqnlist object
#'
#' @description The eqnlist object stores an ODE as a list of stoichiometric matrix,
#' rate expressions, state names and compartment volumes.
#' @export
#' @param smatrix Matrix of class numeric. The stoichiometric matrix,
#' one row per reaction/process and one column per state.
#' @param states Character vector. Names of the states.
#' @param rates Character vector. The rate expressions.
#' @param volumes Named character, volume parameters for states. Names must be a subset of the states.
#' Values can be either characters, e.g. "V1", or numeric values for the volume. If \code{volumes} is not
#' \code{NULL}, missing entries are treated as 1.
#' @param description Character vector. Description of the single processes.
#' @return An object of class \code{eqnlist}, basically a list.
#' @example inst/examples/eqnlist.R
eqnlist <- function(smatrix = NULL, states = colnames(smatrix), rates = NULL, volumes = NULL, description = NULL) {
  # Consistency checks and preparations apply when the core components
  # are supplied (for an empty call everything stays NULL/empty).
  if (all(!is.null(c(smatrix, states, rates)))) {
    # Dimension checks: one column per state, one row per rate
    if (length(states) != dim(smatrix)[2])
      stop("Number of states does not coincide with number of columns of stoichiometric matrix")
    if (length(rates) != dim(smatrix)[1])
      stop("Number of rates does not coincide with number of rows of stoichiometric matrix")
    # Prepare variables
    smatrix <- as.matrix(smatrix)
    colnames(smatrix) <- states
    # Default description: the row number of the process
    if (is.null(description)) description <- 1:nrow(smatrix)
  }
  out <- list(
    smatrix = smatrix,
    states = as.character(states),
    rates = as.character(rates),
    volumes = volumes,
    description = as.character(description)
  )
  class(out) <- c("eqnlist", "list")
  out
}
## Parameter classes --------------------------------------------------------
#' Parameter transformation function
#'
#' Generate functions that transform one parameter vector into another
#' by means of a transformation, pushing forward the jacobian matrix
#' of the original parameter.
#' Usually, this function is called internally, e.g. by \link{P}.
#' However, you can use it to add your own specialized parameter
#' transformations to the general framework.
#' @param p2p a transformation function for one condition, i.e. a function
#' \code{p2p(p, fixed, deriv)} which translates a parameter vector \code{p}
#' and a vector of fixed parameter values \code{fixed} into a new parameter
#' vector. If \code{deriv = TRUE}, the function should return an attribute
#' \code{deriv} with the Jacobian matrix of the parameter transformation.
#' @param parameters character vector, the parameters accepted by the function
#' @param condition character, the condition for which the transformation is defined
#' @return object of class \code{parfn}, i.e. a function \code{p(..., fixed, deriv,
#' conditions, env)}. The argument \code{pars} should be passed via the \code{...}
#' argument.
#'
#' Contains attributes "mappings", a list of \code{p2p}
#' functions, "parameters", the union of parameters acceted by the mappings and
#' "conditions", the total set of conditions.
#' @seealso \link{sumfn}, \link{P}
#' @example inst/examples/prediction.R
#' @export
parfn <- function(p2p, parameters = NULL, condition = NULL) {
  # Force evaluation so the closure captures the value, not the promise.
  force(condition)
  # Expose the single condition-wise mapping via attr(., "mappings") so
  # functions can be combined by "+" and "*".
  mappings <- list()
  mappings[[1]] <- p2p
  names(mappings) <- condition
  outfn <- function(..., fixed = NULL, deriv = TRUE, conditions = condition, env = NULL) {
    # The parameter vector is the only unnamed argument expected via ...
    arglist <- list(...)
    arglist <- arglist[match.fnargs(arglist, "pars")]
    pars <- arglist[[1]]
    overlap <- test_conditions(conditions, condition)
    # NULL if at least one argument is NULL
    # character(0) if no overlap
    # character if overlap
    if (is.null(overlap)) conditions <- union(condition, conditions)
    # || instead of |: both operands are scalar and evaluation short-circuits.
    if (is.null(overlap) || length(overlap) > 0)
      result <- p2p(pars = pars, fixed = fixed, deriv = deriv)
    else
      result <- NULL
    # Initialize output object: one slot per condition, NULL where the
    # transformation does not apply.
    length.out <- max(c(1, length(conditions)))
    outlist <- structure(vector("list", length.out), names = conditions)
    if (is.null(condition)) available <- seq_len(length.out) else available <- match(condition, conditions)
    for (C in available[!is.na(available)]) outlist[[C]] <- result
    return(outlist)
  }
  attr(outfn, "mappings") <- mappings
  attr(outfn, "parameters") <- parameters
  attr(outfn, "conditions") <- condition
  class(outfn) <- c("parfn", "fn")
  return(outfn)
}
#' Generate a parameter frame
#'
#' @description A parameter frame is a data.frame where the rows correspond to different
#' parameter specifications. The columns are divided into three parts. (1) the meta-information
#' columns (e.g. index, value, constraint, etc.), (2) the attributes of an objective function
#' (e.g. data contribution and prior contribution) and (3) the parameters.
#' @seealso \link{profile}, \link{mstrust}
#' @param x data.frame.
#' @param parameters character vector, the names of the parameter columns.
#' @param metanames character vector, the names of the meta-information columns.
#' @param obj.attributes character vector, the names of the objective function attributes.
#' @return An object of class \code{parframe}, i.e. a data.frame with attributes for the
#' different names. Inherits from data.frame.
#' @details Parameter frames can be subsetted either by \code{[ , ]} or by \code{subset}. If
#' \code{[ , index]} is used, the names of the removed columns will also be removed from
#' the corresponding attributes, i.e. metanames, obj.attributes and parameters.
#' @example inst/examples/parlist.R
#' @export
parframe <- function(x = NULL, parameters = colnames(x), metanames = NULL, obj.attributes = NULL) {
  # Coerce the input to a data.frame with default row names; an empty
  # parameter frame is represented by an empty data.frame.
  if (is.null(x)) {
    result <- data.frame()
  } else {
    result <- as.data.frame(x)
    rownames(result) <- NULL
  }
  # Record which columns are parameters, meta information and objective
  # function attributes, then tag the class.
  attr(result, "parameters") <- parameters
  attr(result, "metanames") <- metanames
  attr(result, "obj.attributes") <- obj.attributes
  class(result) <- c("parframe", "data.frame")
  result
}
#' Parameter list
#'
#' @description The special use of a parameter list is to save
#' the outcome of multiple optimization runs provided by \link{mstrust},
#' into one list.
#' @param ... Objects to be coerced to parameter list.
#' @export
#' @example inst/examples/parlist.R
#' @seealso \link{load.parlist}, \link{plot.parlist}
parlist <- function(...) {
  # Collect the arguments and delegate the coercion to as.parlist().
  as.parlist(list(...))
}
#' Parameter vector
#'
#' @description A parameter vector is a named numeric vector (the parameter values)
#' together with a "deriv" attribute
#' (the Jacobian of a parameter transformation by which the parameter vector was generated).
#' @param ... objects to be concatenated
#' @param deriv matrix with rownames (according to names of \code{...}) and colnames
#' according to the names of the parameter by which the parameter vector was generated.
#' @return An object of class \code{parvec}, i.e. a named numeric vector with attribute "deriv".
#' @example inst/examples/parvec.R
#' @export
parvec <- function(..., deriv = NULL) {
  mylist <- list(...)
  if (length(mylist) == 0) return(NULL)
  # Default names "par1", "par2", ... for values passed without a name.
  # Explicitly given names take precedence. names() of a partially named
  # list contains "" for unnamed elements, so the test must be element-wise
  # (the previous scalar !is.null() test left empty names when named and
  # unnamed arguments were mixed).
  mynames <- paste0("par", seq_along(mylist))
  given <- names(mylist)
  if (!is.null(given)) {
    has.name <- given != ""
    mynames[has.name] <- given[has.name]
  }
  out <- as.numeric(unlist(mylist))
  names(out) <- mynames
  return(as.parvec(out, deriv = deriv))
}
## Prediction classes ----------------------------------------------------
#' Prediction function
#'
#' @description A prediction function is a function \code{x(..., fixed, deriv, conditions)}.
#' Prediction functions are generated by \link{Xs}, \link{Xf} or \link{Xd}. For an example
#' see the last one.
#'
#' @param P2X transformation function as being produced by \link{Xs}.
#' @param parameters character vector with parameter names
#' @param condition character, the condition name
#' @details Prediction functions can be "added" by the "+" operator, see \link{sumfn}. Thereby,
#' predictions for different conditions are merged or overwritten. Prediction functions can
#' also be concatenated with other functions, e.g. observation functions (\link{obsfn}) or
#' parameter transformation functions (\link{parfn}) by the "*" operator, see \link{prodfn}.
#' @return Object of class \code{prdfn}, i.e. a function \code{x(..., fixed, deriv, conditions, env)}
#' which returns a \link{prdlist}. The arguments \code{times} and
#' \code{pars} (parameter values) should be passed via the \code{...} argument, in this order.
#' @example inst/examples/prediction.R
#' @export
prdfn <- function(P2X, parameters = NULL, condition = NULL) {
  mycondition <- condition
  # Expose the single condition-wise mapping via attr(., "mappings") so
  # prediction functions can be combined by "+" and "*".
  mappings <- list()
  mappings[[1]] <- P2X
  names(mappings) <- condition
  outfn <- function(..., fixed = NULL, deriv = TRUE, conditions = mycondition, env = NULL) {
    # Expect times and pars as the unnamed arguments in ..., in this order.
    arglist <- list(...)
    arglist <- arglist[match.fnargs(arglist, c("times", "pars"))]
    times <- arglist[[1]]
    pars <- arglist[[2]]
    # yields derivatives for all parameters in pars but not in fixed
    pars <- c(as.parvec(pars[setdiff(names(pars), names(fixed))]),
              fixed)
    overlap <- test_conditions(conditions, condition)
    # NULL if at least one argument is NULL
    # character(0) if no overlap
    # character if overlap
    if (is.null(overlap)) conditions <- union(condition, conditions)
    # || instead of |: both operands are scalar and evaluation short-circuits.
    if (is.null(overlap) || length(overlap) > 0)
      result <- P2X(times = times, pars = pars, deriv = deriv)
    else
      result <- NULL
    # Initialize output object: one prediction slot per condition, NULL
    # where this prediction function does not apply.
    length.out <- max(c(1, length(conditions)))
    outlist <- structure(vector("list", length.out), names = conditions)
    if (is.null(condition)) available <- seq_len(length.out) else available <- match(condition, conditions)
    for (C in available[!is.na(available)]) outlist[[C]] <- result
    outlist <- as.prdlist(outlist)
    return(outlist)
  }
  attr(outfn, "mappings") <- mappings
  attr(outfn, "parameters") <- parameters
  attr(outfn, "conditions") <- mycondition
  class(outfn) <- c("prdfn", "fn")
  return(outfn)
}
#' Observation function
#'
#' @description An observation function is a function is that is concatenated
#' with a prediction function via \link{prodfn} to yield a new prediction function,
#' see \link{prdfn}. Observation functions are generated by \link{Y}. Handling
#' of the conditions is then organized by the \code{obsfn} object.
#' @param X2Y the low-level observation function generated e.g. by \link{Y}.
#' @param parameters character vector with parameter names
#' @param condition character, the condition name
#' @details Observation functions can be "added" by the "+" operator, see \link{sumfn}. Thereby,
#' observations for different conditions are merged or overwritten. Observation functions can
#' also be concatenated with other functions, e.g. observation functions (\link{obsfn}) or
#' prediction functions (\link{prdfn}) by the "*" operator, see \link{prodfn}.
#' @return Object of class \code{obsfn}, i.e. a function \code{x(..., fixed, deriv, conditions, env)}
#' which returns a \link{prdlist}. The arguments \code{out} (prediction) and \code{pars} (parameter values)
#' should be passed via the \code{...} argument.
#' @example inst/examples/prediction.R
#' @export
obsfn <- function(X2Y, parameters = NULL, condition = NULL) {
  mycondition <- condition
  # Expose the single condition-wise mapping via attr(., "mappings") so
  # observation functions can be combined by "+" and "*".
  mappings <- list()
  mappings[[1]] <- X2Y
  names(mappings) <- condition
  outfn <- function(..., fixed = NULL, deriv = TRUE, conditions = mycondition, env = NULL) {
    # Expect out (a prediction) and pars as the unnamed arguments in ...
    arglist <- list(...)
    arglist <- arglist[match.fnargs(arglist, c("out", "pars"))]
    out <- arglist[[1]]
    pars <- arglist[[2]]
    # yields derivatives for all parameters in pars but not in fixed
    pars <- c(as.parvec(pars[setdiff(names(pars), names(fixed))]),
              fixed)
    overlap <- test_conditions(conditions, condition)
    # NULL if at least one argument is NULL
    # character(0) if no overlap
    # character if overlap
    if (is.null(overlap)) conditions <- union(condition, conditions)
    # || instead of |: both operands are scalar and evaluation short-circuits.
    if (is.null(overlap) || length(overlap) > 0)
      result <- X2Y(out = out, pars = pars)
    else
      result <- NULL
    # Initialize output object: one observation slot per condition, NULL
    # where this observation function does not apply.
    length.out <- max(c(1, length(conditions)))
    outlist <- structure(vector("list", length.out), names = conditions)
    if (is.null(condition)) available <- seq_len(length.out) else available <- match(condition, conditions)
    for (C in available[!is.na(available)]) outlist[[C]] <- result
    outlist <- as.prdlist(outlist)
    return(outlist)
  }
  attr(outfn, "mappings") <- mappings
  attr(outfn, "parameters") <- parameters
  attr(outfn, "conditions") <- mycondition
  class(outfn) <- c("obsfn", "fn")
  return(outfn)
}
#' Prediction frame
#'
#' @description A prediction frame is used to store a model prediction in a matrix. The columns
#' of the matrix are "time" and one column per state. The prediction frame has attributes "deriv",
#' the matrix of sensitivities with respect to "outer parameters" (see \link{P}), an attribute
#' "sensitivities", the matrix of sensitivities with respect to the "inner parameters" (the model
#' parameters, left-hand-side of the parameter transformation) and an attributes "parameters", the
#' parameter vector of inner parameters to produce the prediction frame.
#'
#' Prediction frames are usually the constituents of prediction lists (\link{prdlist}). They are
#' produced by \link{Xs}, \link{Xd} or \link{Xf}. When you define your own prediction functions,
#' see \code{P2X} in \link{prdfn}, the result should be returned as a prediction frame.
#' @param prediction matrix of model prediction
#' @param deriv matrix of sensitivities wrt outer parameters
#' @param sensitivities matrix of sensitivities wrt inner parameters
#' @param parameters names of the outer parameters
#' @return Object of class \code{prdframe}, i.e. a matrix with other matrices and vectors as attributes.
#' @export
prdframe <- function(prediction = NULL, deriv = NULL, sensitivities = NULL, parameters = NULL) {
  # An empty prediction frame is represented by the default 1x1 NA matrix.
  result <- if (is.null(prediction)) matrix() else as.matrix(prediction)
  # Attach sensitivity information and the generating parameters, then tag
  # the class.
  attr(result, "deriv") <- deriv
  attr(result, "sensitivities") <- sensitivities
  attr(result, "parameters") <- parameters
  class(result) <- c("prdframe", "matrix")
  result
}
#' Prediction list
#'
#' @description A prediction list is used to store a list of model predictions
#' from different prediction functions or the same prediction function with different
#' parameter specifications. Each entry of the list is a \link{prdframe}.
#' @param ... objects of class \link{prdframe}, usually corresponding to different
#' conditions.
#' @export
prdlist <- function(...) {
  mylist <- list(...)
  mynames <- names(mylist)
  # seq_along() keeps the default names well-defined for empty input;
  # 1:length() would produce c("1", "0") when no argument is given.
  if (is.null(mynames)) mynames <- as.character(seq_along(mylist))
  as.prdlist(mylist, mynames)
}
## Data classes ----------------------------------------------------------------
#' Generate a datalist object
#'
#' @description The datalist object stores time-course data in a list of data.frames.
#' The names of the list serve as identifiers, e.g. of an experimental condition, etc.
#' @details Datalists can be plotted, see \link{plotData} and merged, see \link{sumdatalist}.
#' They are the basic structure when combining model prediction and data via the \link{normL2}
#' objective function.
#'
#' The standard columns of the datalist data frames are "name" (observable name),
#' "time" (time points), "value" (data value), "sigma" (uncertainty, can be NA), and
#' "lloq" (lower limit of quantification, \code{-Inf} by default).
#'
#' Datalists carry the attribute \code{condition.grid} which contains additional information about different
#' conditions, such as dosing information for the experiment. It can be conveniently accessed by the \link{covariates}-function.
#' Reassigning names to a datalist also renames the rows of the \code{condition.grid}.
#' @param ... data.frame objects to be coerced into a list and additional arguments
#' @return Object of class \code{datalist}.
#' @export
datalist <- function(...) {
  mylist <- list(...)
  mynames <- names(mylist)
  # seq_along() keeps the default names well-defined for empty input;
  # 1:length() would produce c("1", "0") when no argument is given.
  if (is.null(mynames)) mynames <- as.character(seq_along(mylist))
  as.datalist(mylist, mynames)
}
## Objective classes ---------------------------------------------------------
#' Generate objective list
#'
#' @description An objective list contains an objective value, a gradient, and a Hessian matrix.
#'
#' Objective lists can contain additional numeric attributes that are preserved or
#' combined with the corresponding attributes of another objective list when
#' both are added by the "+" operator, see \link{sumobjlist}.
#'
#' Objective lists are returned by objective functions as being generated
#' by \link{normL2}, \link{constraintL2}, \link{priorL2} and \link{datapointL2}.
#' @param value numeric of length 1
#' @param gradient named numeric
#' @param hessian matrix with rownames and colnames according to gradient names
#' @return Object of class \code{objlist}
#' @export
objlist <- function(value, gradient, hessian) {
  # Bundle objective value, gradient and Hessian in a classed list.
  structure(
    list(value = value, gradient = gradient, hessian = hessian),
    class = c("objlist", "list")
  )
}
#' Objective frame
#'
#' @description An objective frame is supposed to store the residuals of a model prediction
#' with respect to a data frame.
#' @param mydata data.frame as being generated by \link{res}.
#' @param deriv matrix of the derivatives of the residuals with respect to parameters.
#' @param deriv.err matrix of the derivatives of the error model.
#' @return An object of class \code{objframe}, i.e. a data frame with attribute "deriv".
#' @export
objframe <- function(mydata, deriv = NULL, deriv.err = NULL) {
  # Check that all required residual columns are present.
  mydata <- as.data.frame(mydata)
  correct.names <- c("time", "name", "value", "prediction",
                     "sigma", "residual", "weighted.residual", "bloq")
  missing.names <- setdiff(correct.names, names(mydata))
  # Report exactly which columns are absent instead of a generic message.
  if (length(missing.names) > 0)
    stop("mydata is missing required columns: ",
         paste(missing.names, collapse = ", "))
  # Keep only the required columns, in canonical order.
  out <- mydata[, correct.names]
  attr(out, "deriv") <- deriv
  attr(out, "deriv.err") <- deriv.err
  class(out) <- c("objframe", "data.frame")
  return(out)
}
## General concatenation of functions ------------------------------------------
#' Direct sum of objective functions
#'
#' @param x1 function of class \code{objfn}
#' @param x2 function of class \code{objfn}
#' @details The objective functions are evaluated and their results are added. Sometimes,
#' the evaluation of an objective function depends on results that have been computed
#' internally in a preceding objective function. Therefore, environments are forwarded
#' and all evaluations take place in the same environment. The first objective function
#' in a sum of functions generates a new environment.
#' @return Object of class \code{objfn}.
#' @seealso \link{normL2}, \link{constraintL2}, \link{priorL2}, \link{datapointL2}
#' @aliases sumobjfn
#' @example inst/examples/objective.R
#' @export
"+.objfn" <- function(x1, x2) {
if (is.null(x1)) return(x2)
conditions.x1 <- attr(x1, "conditions")
conditions.x2 <- attr(x2, "conditions")
conditions12 <- union(conditions.x1, conditions.x2)
parameters.x1 <- attr(x1, "parameters")
parameters.x2 <- attr(x2, "parameters")
parameters12 <- union(parameters.x1, parameters.x2)
modelname.x1 <- attr(x1, "modelname")
modelname.x2 <- attr(x2, "modelname")
modelname12 <- union(modelname.x1, modelname.x2)
# objfn + objfn
if (inherits(x1, "objfn") & inherits(x2, "objfn")) {
outfn <- function(..., fixed = NULL, deriv = TRUE, conditions = conditions12, env = NULL) {
arglist <- list(...)
arglist <- arglist[match.fnargs(arglist, c("pars"))]
pars <- arglist[[1]]
# 1. If conditions.xi is null, always evaluate xi, but only once
# 2. If not null, evaluate at intersection with conditions
# 3. If not null & intersection is empty, don't evaluate xi at all
v1 <- v2 <- NULL
if (is.null(conditions.x1)) {
v1 <- x1(pars = pars, fixed = fixed, deriv = deriv, conditions = conditions.x1, env = env)
} else if (any(conditions %in% conditions.x1)) {
v1 <- x1(pars = pars, fixed = fixed, deriv = deriv, conditions = intersect(conditions, conditions.x1), env = env)
}
if (is.null(conditions.x2)) {
v2 <- x2(pars = pars, fixed = fixed, deriv = deriv, conditions = conditions.x2, env = env)
} else if (any(conditions %in% conditions.x2)) {
v2 <- x2(pars = pars, fixed = fixed, deriv = deriv, conditions = intersect(conditions, conditions.x2), env = attr(v1, "env"))
}
out <- v1 + v2
attr(out, "env") <- attr(v1, "env")
return(out)
}
class(outfn) <- c("objfn", "fn")
attr(outfn, "conditions") <- conditions12
attr(outfn, "parameters") <- parameters12
attr(outfn, "modelname") <- modelname12
return(outfn)
}
}
#' Multiplication of objective functions with scalars
#'
#' @description The \code{\%.*\%} operator allows to multiply objects of class objlist or objfn with
#' a scalar.
#'
#' @param x1 object of class objfn or objlist.
#' @param x2 numeric of length one.
#' @return An objective function or objlist object.
#'
#' @export
"%.*%" <- function(x1, x2) {
if (inherits(x2, "objlist")) {
out <- lapply(x2, function(x) {
x1*x
})
# Multiply attributes
out2.attributes <- attributes(x2)[sapply(attributes(x2), is.numeric)]
attr.names <- names(out2.attributes)
out.attributes <- lapply(attr.names, function(n) {
x1*attr(x2, n)
})
attributes(out) <- attributes(x2)
attributes(out)[attr.names] <- out.attributes
return(out)
} else if (inherits(x2, "objfn")) {
conditions12 <- attr(x2, "conditions")
parameters12 <- attr(x2, "parameters")
modelname12 <- attr(x2, "modelname")
outfn <- function(..., fixed = NULL, deriv = TRUE, conditions = conditions12, env = NULL) {
arglist <- list(...)
arglist <- arglist[match.fnargs(arglist, c("pars"))]
pars <- arglist[[1]]
v1 <- x1
v2 <- x2(pars = pars, fixed = fixed, deriv = deriv, conditions = conditions, env = attr(v1, "env"))
out <- v1 %.*% v2
attr(out, "env") <- attr(v2, "env")
return(out)
}
class(outfn) <- c("objfn", "fn")
attr(outfn, "conditions") <- conditions12
attr(outfn, "parameters") <- parameters12
attr(outfn, "modelname") <- modelname12
return(outfn)
} else {
x1*x2
}
}
#' Direct sum of functions
#'
#' Used to add prediction function, parameter transformation functions or observation functions.
#'
#' @param x1 function of class \code{obsfn}, \code{prdfn} or \code{parfn}
#' @param x2 function of class \code{obsfn}, \code{prdfn} or \code{parfn}
#' @details Each prediction function is associated to a number of conditions. Adding functions
#' means merging or overwriting the set of conditions.
#' @return Object of the same class as \code{x1} and \code{x2} which returns results for the
#' union of conditions.
#' @aliases sumfn
#' @seealso \link{P}, \link{Y}, \link{Xs}
#' @example inst/examples/prediction.R
#' @export
"+.fn" <- function(x1, x2) {
if (is.null(x1)) return(x2)
mappings.x1 <- attr(x1, "mappings")
mappings.x2 <- attr(x2, "mappings")
conditions.x1 <- attr(x1, "conditions")
conditions.x2 <- attr(x2, "conditions")
overlap <- intersect(conditions.x1, conditions.x2)
if (is.null(names(mappings.x1)) || is.null(names(mappings.x2))) stop("General transformations (NULL names) cannot be coerced.")
if (length(overlap) > 0) {
warning(paste("Condition", overlap, "existed and has been overwritten."))
mappings.x1 <- mappings.x1[!conditions.x1 %in% overlap]
conditions.x1 <- conditions.x1[!conditions.x1 %in% overlap]
}
conditions.x12 <- c(conditions.x1, conditions.x2)
mappings <- c(mappings.x1, mappings.x2)
# prdfn + prdfn
if (inherits(x1, "prdfn") & inherits(x2, "prdfn")) {
outfn <- function(..., fixed = NULL, deriv = TRUE, conditions = names(mappings), env = NULL) {
arglist <- list(...)
arglist <- arglist[match.fnargs(arglist, c("times", "pars"))]
times <- arglist[[1]]
pars <- arglist[[2]]
if (is.null(conditions)) {
available <- names(mappings)
} else {
available <- intersect(names(mappings), conditions)
}
outlist <- structure(vector("list", length(conditions)), names = conditions)
#outpars <- structure(vector("list", length(conditions)), names = conditions)
for (C in available) {
outlist[[C]] <- mappings[[C]](times = times, pars = pars, deriv = deriv)
#outpars[[C]] <- attr(outlist[[C]], "pars")
#attr(outlist[[C]], "pars") <- NULL
}
out <- as.prdlist(outlist)
#attr(out, "pars") <- outpars
return(out)
}
class(outfn) <- c("prdfn", "fn")
}
# obsfn + obsfn
if (inherits(x1, "obsfn") & inherits(x2, "obsfn")) {
outfn <- function(..., fixed = NULL, deriv = TRUE, conditions = names(mappings), env = NULL) {
arglist <- list(...)
arglist <- arglist[match.fnargs(arglist, c("out", "pars"))]
out <- arglist[[1]]
pars <- arglist[[2]]
if (is.null(conditions)) {
available <- names(mappings)
} else {
available <- intersect(names(mappings), conditions)
}
outlist <- structure(vector("list", length(conditions)), names = conditions)
for (C in available) {
outlist[[C]] <- mappings[[C]](out = out, pars = pars)
}
out <- as.prdlist(outlist)
return(out)
}
class(outfn) <- c("obsfn", "fn")
}
# parfn + parfn
if (inherits(x1, "parfn") & inherits(x2, "parfn")) {
outfn <- function(..., fixed = NULL, deriv = TRUE, conditions = names(mappings), env = NULL) {
arglist <- list(...)
arglist <- arglist[match.fnargs(arglist, c("pars"))]
pars <- arglist[[1]]
if (is.null(conditions)) {
available <- names(mappings)
} else {
available <- intersect(names(mappings), conditions)
}
outlist <- structure(vector("list", length(conditions)), names = conditions)
for (C in available) {
outlist[[C]] <- mappings[[C]](pars = pars, fixed = fixed, deriv = deriv)
}
return(outlist)
}
class(outfn) <- c("parfn", "fn")
}
attr(outfn, "mappings") <- mappings
attr(outfn, "parameters") <- union(attr(x1, "parameters"), attr(x2, "parameters"))
attr(outfn, "conditions") <- conditions.x12
attr(outfn, "forcings") <- do.call(c, list(attr(x1, "forcings"), attr(x2, "forcings")))
return(outfn)
}
#' Direct sum of datasets
#'
#' Used to merge datasets with overlapping conditions.
#'
#' @param data1 dataset of class \code{datalist}
#' @param data2 dataset of class \code{datalist}
#' @details Each data list contains data frames for a number of conditions.
#' The direct sum of datalist is meant as merging the two data lists and
#' returning the overarching datalist.
#' @return Object of class \code{datalist} for the
#' union of conditions.
#' @aliases sumdatalist
#' @example inst/examples/sumdatalist.R
#' @export
"+.datalist" <- function(data1, data2) {
overlap <- names(data2)[names(data2) %in% names(data1)]
if (length(overlap) > 0) {
warning(paste("Condition", overlap, "existed and has been overwritten."))
data1 <- data1[!names(data1) %in% names(data2)]
}
conditions <- union(names(data1), names(data2))
data <- lapply(conditions, function(C) rbind(data1[[C]], data2[[C]]))
names(data) <- conditions
grid1 <- attr(data1, "condition.grid")
grid2 <- attr(data2, "condition.grid")
grid <- combine(grid1, grid2)
if (is.data.frame(grid)) grid <- grid[!duplicated(rownames(grid)), , drop = FALSE]
out <- as.datalist(data)
attr(out, "condition.grid") <- grid
return(out)
}
out_conditions <- function(c1, c2) {
  # Pick the first non-NULL condition set; NULL when both are NULL.
  if (is.null(c1)) c2 else c1
}
test_conditions <- function(c1, c2) {
  # NULL signals "no restriction": the intersection is only meaningful
  # when both condition sets are given.
  if (is.null(c1) || is.null(c2)) return(NULL)
  intersect(c1, c2)
}
#' Concatenation of functions
#'
#' Used to concatenate observation functions, prediction functions and parameter transformation functions.
#'
#' @param p1 function of class \code{obsfn}, \code{prdfn}, \code{parfn} or \code{idfn}
#' @param p2 function of class \code{obsfn}, \code{prdfn}, \code{parfn} or \code{idfn}
#' @return Object of the same class as \code{p1} and \code{p2}.
#' @aliases prodfn
#' @example inst/examples/prediction.R
#' @export
"*.fn" <- function(p1, p2) {
# obsfn * obsfn -> obsfn
if (inherits(p1, "obsfn") & inherits(p2, "obsfn")) {
conditions.p1 <- attr(p1, "conditions")
conditions.p2 <- attr(p2, "conditions")
conditions.out <- out_conditions(conditions.p1, conditions.p2)
outfn <- function(..., fixed = NULL, deriv = TRUE, conditions = NULL, env = NULL) {
arglist <- list(...)
arglist <- arglist[match.fnargs(arglist, c("out", "pars"))]
out <- arglist[[1]]
pars <- arglist[[2]]
step1 <- p2(out = out, pars = pars, fixed = fixed, deriv = deriv, conditions = conditions)
step2 <- do.call(c, lapply(1:length(step1), function(i) p1(out = step1[[i]], pars = attr(step1[[i]], "parameters"), fixed = fixed, deriv = deriv, conditions = names(step1)[i])))
out <- as.prdlist(step2)
return(out)
}
# Generate mappings for observation function
l <- max(c(1, length(conditions.out)))
mappings <- lapply(1:l, function(i) {
mapping <- function(out, pars) {
outfn(out = out, pars = pars, conditions = conditions.out[i])[[1]]
}
m1 <- modelname(p1, conditions = conditions.p1[i])
m2 <- modelname(p2, conditions = conditions.p2[i])
attr(mapping, "modelname") <- union(m1, m2)
attr(mapping, "parameters") <- getParameters(p2, conditions = conditions.out[i])
return(mapping)
})
names(mappings) <- conditions.out
attr(outfn, "mappings") <- mappings
attr(outfn, "parameters") <- attr(p2, "parameters")
attr(outfn, "conditions") <- conditions.out
class(outfn) <- c("obsfn", "fn", "composed")
return(outfn)
}
# obsfn * parfn -> obsfn
if (inherits(p1, "obsfn") & inherits(p2, "parfn")) {
conditions.p1 <- attr(p1, "conditions")
conditions.p2 <- attr(p2, "conditions")
conditions.out <- out_conditions(conditions.p1, conditions.p2)
outfn <- function(..., fixed = NULL, deriv = TRUE, conditions = NULL, env = NULL) {
arglist <- list(...)
arglist <- arglist[match.fnargs(arglist, c("out", "pars"))]
out <- arglist[[1]]
pars <- arglist[[2]]
step1 <- p2(pars = pars, fixed = fixed, deriv = deriv, conditions = conditions)
step2 <- do.call(c, lapply(1:length(step1), function(i) p1(out = out, pars = step1[[i]], fixed = fixed, deriv = deriv, conditions = names(step1)[i])))
out <- as.prdlist(step2)
return(out)
}
# Generate mappings for observation function
l <- max(c(1, length(conditions.out)))
mappings <- lapply(1:l, function(i) {
mapping <- function(out, pars) {
outfn(out = out, pars = pars, conditions = conditions.out[i])[[1]]
}
m1 <- modelname(p1, conditions = conditions.p1[i])
m2 <- modelname(p2, conditions = conditions.p2[i])
attr(mapping, "modelname") <- union(m1, m2)
attr(mapping, "parameters") <- getParameters(p2, conditions = conditions.out[i])
return(mapping)
})
names(mappings) <- conditions.out
attr(outfn, "mappings") <- mappings
attr(outfn, "parameters") <- attr(p2, "parameters")
attr(outfn, "conditions") <- conditions.out
class(outfn) <- c("obsfn", "fn", "composed")
return(outfn)
}
# obsfn * prdfn -> prdfn
if (inherits(p1, "obsfn") & inherits(p2, "prdfn")) {
conditions.p1 <- attr(p1, "conditions")
conditions.p2 <- attr(p2, "conditions")
conditions.out <- out_conditions(conditions.p1, conditions.p2)
outfn <- function(..., fixed = NULL, deriv = TRUE, conditions = NULL, env = NULL) {
arglist <- list(...)
arglist <- arglist[match.fnargs(arglist, c("times", "pars"))]
times <- arglist[[1]]
pars <- arglist[[2]]
step1 <- p2(times = times, pars = pars, fixed = fixed, deriv = deriv, conditions = conditions)
step2 <- do.call(c, lapply(1:length(step1), function(i) p1(out = step1[[i]], pars = attr(step1[[i]], "parameters"), fixed = fixed, deriv = deriv, conditions = names(step1)[i])))
out <- as.prdlist(step2)
return(out)
}
# Generate mappings for prediction function
l <- max(c(1, length(conditions.out)))
mappings <- lapply(1:l, function(i) {
mapping <- function(times, pars, deriv = TRUE) {
outfn(times = times, pars = pars, deriv = deriv, conditions = conditions.out[i])[[1]]
}
m1 <- modelname(p1, conditions = conditions.p1[i])
m2 <- modelname(p2, conditions = conditions.p2[i])
attr(mapping, "modelname") <- union(m1, m2)
attr(mapping, "parameters") <- getParameters(p2, conditions = conditions.out[i])
return(mapping)
})
names(mappings) <- conditions.out
attr(outfn, "mappings") <- mappings
attr(outfn, "parameters") <- attr(p2, "parameters")
attr(outfn, "conditions") <- conditions.out
class(outfn) <- c("prdfn", "fn", "composed")
return(outfn)
}
# prdfn * parfn -> prdfn
if (inherits(p1, "prdfn") & inherits(p2, "parfn")) {
conditions.p1 <- attr(p1, "conditions")
conditions.p2 <- attr(p2, "conditions")
conditions.out <- out_conditions(conditions.p1, conditions.p2)
outfn <- function(..., fixed = NULL, deriv = TRUE, conditions = NULL, env = NULL) {
arglist <- list(...)
arglist <- arglist[match.fnargs(arglist, c("times", "pars"))]
times <- arglist[[1]]
pars <- arglist[[2]]
step1 <- p2(pars = pars, fixed = fixed, deriv = deriv, conditions = conditions)
step2 <- do.call(c, lapply(1:length(step1), function(i) p1(times = times, pars = step1[[i]], deriv = deriv, conditions = names(step1)[i])))
out <- as.prdlist(step2)
return(out)
}
# Generate mappings for prediction function
l <- max(c(1, length(conditions.out)))
mappings <- lapply(1:l, function(i) {
mapping <- function(times, pars, deriv = TRUE) {
outfn(times = times, pars = pars, deriv = deriv, conditions = conditions.out[i])[[1]]
}
attr(mapping, "parameters") <- getParameters(p2, conditions = conditions.out[i])
m1 <- modelname(p1, conditions = conditions.p1[i])
m2 <- modelname(p2, conditions = conditions.p2[i])
attr(mapping, "modelname") <- union(m1, m2)
return(mapping)
})
names(mappings) <- conditions.out
attr(outfn, "mappings") <- mappings
attr(outfn, "conditions") <- conditions.out
attr(outfn, "parameters") <- attr(p2, "parameters")
class(outfn) <- c("prdfn", "fn", "composed")
return(outfn)
}
# parfn * parfn -> parfn
if (inherits(p1, "parfn") & inherits(p2, "parfn")) {
conditions.p1 <- attr(p1, "conditions")
conditions.p2 <- attr(p2, "conditions")
conditions.out <- out_conditions(conditions.p1, conditions.p2)
outfn <- function(..., fixed = NULL, deriv = TRUE, conditions = NULL, env = NULL) {
arglist <- list(...)
arglist <- arglist[match.fnargs(arglist, c("pars"))]
pars <- arglist[[1]]
step1 <- p2(pars = pars, fixed = fixed, deriv = deriv, conditions = conditions)
step2 <- do.call(c, lapply(1:length(step1), function(i) p1(pars = step1[[i]], fixed = fixed, deriv = deriv, conditions = names(step1)[i])))
return(step2)
}
# Generate mappings for parameters function
l <- max(c(1, length(conditions.out)))
mappings <- lapply(1:l, function(i) {
mapping <- function(pars, fixed = NULL, deriv = TRUE) {
outfn(pars = pars, fixed = fixed, deriv = deriv, conditions = conditions.out[i])[[1]]
}
m1 <- modelname(p1, conditions = conditions.p1[i])
m2 <- modelname(p2, conditions = conditions.p2[i])
attr(mapping, "modelname") <- union(m1, m2)
attr(mapping, "parameters") <- getParameters(p2, conditions = conditions.out[i])
return(mapping)
})
names(mappings) <- conditions.out
attr(outfn, "mappings") <- mappings
attr(outfn, "parameters") <- attr(p2, "parameters")
attr(outfn, "conditions") <- conditions.out
class(outfn) <- c("parfn", "fn", "composed")
return(outfn)
}
# objfn * parfn -> objfn
if (inherits(p1, "objfn") & inherits(p2, "parfn")) {
conditions.p1 <- attr(p1, "conditions")
conditions.p2 <- attr(p2, "conditions")
conditions.out <- out_conditions(conditions.p1, conditions.p2)
outfn <- function(..., fixed = NULL, deriv=TRUE, conditions = NULL, env = NULL) {
arglist <- list(...)
arglist <- arglist[match.fnargs(arglist, "pars")]
pars <- arglist[[1]]
step1 <- p2(pars = pars, fixed = fixed, deriv = deriv, conditions = conditions)
step2 <- Reduce("+", lapply(1:length(step1), function(i) p1(pars = step1[[i]], fixed = NULL, deriv = deriv, conditions = names(step1)[i], env = env)))
return(step2)
}
attr(outfn, "conditions") <- conditions.out
class(outfn) <- c("objfn", "fn", "composed")
return(outfn)
}
# idfn * fn -> fn
if (inherits(p1, "idfn")) {
return(p2)
}
# fn * idfn -> fn
if (inherits(p2, "idfn")) {
return(p1)
}
}
## General purpose functions for different dMod classes ------------------------------
#' List, get and set controls for different functions
#'
#' @description Applies to objects of class \code{objfn},
#' \code{parfn}, \code{prdfn} and \code{obsfn}. Allows to manipulate
#' different arguments that have been set when creating the
#' objects.
#' @details If called without further arguments, \code{controls(x)} lists the
#' available controls within an object. Calling \code{controls()} with \code{name}
#' and \code{condition} returns the control value. The value can be overwritten. If
#' a list or data.frame ist returned, elements of those can be manipulated by the
#' \code{$}- or \code{[]}-operator.
#'
#' @param x function
#' @param ... arguments going to the appropriate S3 methods
#' @return Either a print-out or the values of the control.
#' @examples
#' ## parfn with condition
#' p <- P(eqnvec(x = "-a*x"), method = "implicit", condition = "C1")
#' controls(p)
#' controls(p, "C1", "keep.root")
#' controls(p, "C1", "keep.root") <- FALSE
#'
#' ## obsfn with NULL condition
#' g <- Y(g = eqnvec(y = "s*x"), f = NULL, states = "x", parameters = "s")
#' controls(g)
#' controls(g, NULL, "attach.input")
#' controls(g, NULL, "attach.input") <- FALSE
#' @export
controls <- function(x, ...) {
  UseMethod("controls", x)
}

# List the names of the controls stored in the closure environment of an
# objective function.
lscontrols_objfn <- function(x) {
  names(environment(x)$controls)
}

# Print, per condition, the names of the controls stored in the condition
# mappings of a dMod function object. If 'condition' is given, output is
# restricted to the matching conditions; otherwise all conditions are shown.
lscontrols_fn <- function(x, condition = NULL) {
  conditions <- attr(x, "conditions")
  mappings <- attr(x, "mappings")
  # seq_along() instead of 1:length(): avoids iterating over c(1, 0) and
  # failing with a subscript error when there are no mappings at all.
  for (i in seq_along(mappings)) {
    if (is.null(conditions) || is.null(condition) || conditions[i] %in% condition) {
      cat(conditions[i], ":\n", sep = "")
      print(names(environment(mappings[[i]])$controls))
    }
  }
}
#' @export
#' @rdname controls
#' @param name character, the name of the control
controls.objfn <- function(x, name = NULL, ...) {
  # Without a control name, just list the available controls.
  if (is.null(name)) {
    return(lscontrols_objfn(x))
  }
  environment(x)$controls[[name]]
}
#' @export
#' @rdname controls
#' @param condition character, the condition name
controls.fn <- function(x, condition = NULL, name = NULL, ...) {
  # Without a control name, just list the available controls.
  if (is.null(name)) {
    return(lscontrols_fn(x, condition))
  }
  # Look the control up in the mapping of the requested condition
  # (first mapping when no condition is given).
  mappings <- attr(x, "mappings")
  mapping <- if (is.null(condition)) mappings[[1]] else mappings[[condition]]
  environment(mapping)$controls[[name]]
}
#' @export
#' @rdname controls
"controls<-" <- function(x, ..., value) {
  UseMethod("controls<-", x)
}
#' @export
#' @param value the new value
#' @rdname controls
"controls<-.objfn" <- function(x, name, ..., value) {
  # Controls live in the function's closure environment, so the assignment
  # modifies the stored control in place.
  environment(x)$controls[[name]] <- value
  x
}
#' @export
#' @rdname controls
"controls<-.fn" <- function(x, condition = NULL, name, ..., value) {
  # Pick the mapping of the requested condition (first mapping when no
  # condition is given) and update the control in its closure environment.
  mappings <- attr(x, "mappings")
  mapping <- if (is.null(condition)) mappings[[1]] else mappings[[condition]]
  environment(mapping)$controls[[name]] <- value
  x
}
#' Extract the derivatives of an object
#'
#' @param x object from which the derivatives should be extracted
#' @param ... additional arguments (not used right now)
#' @return The derivatives in a format that depends on the class of \code{x}.
#' This is
#' \code{parvec -> matrix},
#' \code{prdframe -> prdframe},
#' \code{prdlist -> prdlist},
#' \code{objlist -> named numeric}.
#' @export
getDerivs <- function(x, ...) {
  UseMethod("getDerivs", x)
}
#' @export
#' @rdname getDerivs
getDerivs.parvec <- function(x, ...) {
  # Derivatives of a parameter vector are stored in the "deriv" attribute.
  attr(x, "deriv")
}
#' @export
#' @rdname getDerivs
getDerivs.prdframe <- function(x, ...) {
  # Wrap the sensitivities into a prediction frame of their own.
  prdframe(prediction = attr(x, "deriv"), parameters = attr(x, "parameters"))
}
#' @export
#' @rdname getDerivs
getDerivs.prdlist <- function(x, ...) {
  # Extract derivatives condition-wise and keep the condition names.
  as.prdlist(lapply(x, getDerivs), names = names(x))
}
#' @export
#' @rdname getDerivs
getDerivs.list <- function(x, ...) {
  lapply(x, getDerivs)
}
#' @export
#' @rdname getDerivs
getDerivs.objlist <- function(x, ...) {
  x$gradient
}
# NOTE(review): this generic is defined again (with full roxygen
# documentation and a 'conditions' argument) further down in this file; when
# the file is sourced top to bottom, the later definition overwrites this one.
getEquations <- function(x, ...) {
  UseMethod("getEquations", x)
}
#' Extract the parameters of an object
#'
#' @param ... objects from which the parameters should be extracted
#' @param conditions character vector specifying the conditions to
#' which \code{getParameters} is restricted
#' @return The parameters in a format that depends on the class of \code{x}.
#' @export
getParameters <- function(..., conditions = NULL) {
  # Dispatch per object and take the union over all objects. Dispatch goes
  # through the helper getParameters_() because UseMethod() inside the
  # anonymous function would forward only the anonymous function's own
  # arguments, i.e. 'conditions' would silently be dropped. 'conditions' is
  # forwarded only when non-NULL so that methods without a 'conditions'
  # formal keep working unchanged.
  Reduce("union", lapply(list(...), function(x) {
    if (is.null(conditions)) {
      getParameters_(x)
    } else {
      getParameters_(x, conditions = conditions)
    }
  }))
}

# Single-object dispatcher for getParameters (used internally).
getParameters_ <- function(x, conditions = NULL) {
  UseMethod("getParameters", x)
}

#' @export
#' @rdname getParameters
#' @param x object from which the parameters are extracted
getParameters.odemodel <- function(x, conditions = NULL) {
  # An odemodel carries its dynamic variables and its parameters as
  # attributes of the model function; both are returned.
  parameters <- c(
    attr(x$func, "variables"),
    attr(x$func, "parameters")
  )
  return(parameters)
}
#' @export
#' @rdname getParameters
getParameters.fn <- function(x, conditions = NULL) {
  # Without a condition filter, the precomputed union stored on the object
  # is returned directly.
  if (is.null(conditions)) {
    return(attr(x, "parameters"))
  }
  # Otherwise collect the parameters over the requested condition mappings.
  mappings <- attr(x, "mappings")
  selected <- mappings[intersect(names(mappings), conditions)]
  Reduce("union", lapply(selected, function(m) attr(m, "parameters")))
}
#' @export
#' @rdname getParameters
getParameters.parvec <- function(x, conditions = NULL) {
  # Parameters of a parameter vector are simply its names.
  names(x)
}
#' @export
#' @rdname getParameters
getParameters.prdframe <- function(x, conditions = NULL) {
  attr(x, "parameters")
}
#' @export
#' @rdname getParameters
getParameters.prdlist <- function(x, conditions = NULL) {
  # seq_along() instead of 1:length(): safe for an empty prdlist.
  select <- seq_along(x)
  if (!is.null(conditions)) select <- intersect(names(x), conditions)
  lapply(x[select], function(myx) getParameters(myx))
}
#' @export
#' @rdname getParameters
getParameters.eqnlist <- function(x, conditions = NULL) {
  # 'conditions' is accepted for consistency with the generic's signature
  # but has no meaning for an eqnlist.
  unique(c(getSymbols(x$states), getSymbols(x$rates), getSymbols(x$volumes)))
}
#' @export
#' @rdname getParameters
getParameters.eventlist <- function(x, conditions = NULL) {
  # Union of the symbols occurring in the first three components of the
  # event list; 'conditions' is accepted for signature consistency only.
  Reduce(union, lapply(x[1:3], getSymbols))
}
#' Extract the conditions of an object
#'
#' @param x object from which the conditions should be extracted
#' @param ... additional arguments (not used right now)
#' @return The conditions in a format that depends on the class of \code{x}.
#' @export
getConditions <- function(x, ...) {
  UseMethod("getConditions", x)
}
#' @export
#' @rdname getConditions
getConditions.list <- function(x, ...) {
  # For plain lists the condition names are the element names.
  names(x)
}
#' @export
#' @rdname getConditions
getConditions.fn <- function(x, ...) {
  # dMod function objects carry their conditions in the "conditions" attribute.
  attr(x, "conditions")
}
#' Get and set modelname
#'
#' @description The modelname attribute refers to the name of a C file associated with
#' a dMod function object like prediction-, parameter transformation- or
#' objective functions.
#'
#' @param ... objects of type \code{prdfn}, \code{parfn}, \code{objfn}
#' @param conditions character vector of conditions
#' @return character vector of model names, corresponding to C files
#' in the local directory.
#'
#' @export
modelname <- function(..., conditions = NULL) {
  # Collect the model names of all supplied objects and remove duplicates.
  Reduce("union", lapply(list(...), mname, conditions = conditions))
}
#' Get modelname from single object (used internally)
#'
#' @param x dMod object
#' @param conditions character vector of conditions
#' @export
mname <- function(x, conditions = NULL) {
  UseMethod("mname", x)
}
#' @export
#' @rdname mname
mname.NULL <- function(x, conditions = NULL) NULL
#' @export
#' @rdname mname
mname.character <- function(x, conditions = NULL) {
  # A character is taken as the name of an object; retrieve it with get()
  # and dispatch again on the actual object.
  mname(get(x), conditions = conditions)
}
#' @export
#' @rdname mname
mname.objfn <- function(x, conditions = NULL) {
  # Objective functions store a single model name as an attribute;
  # 'conditions' is ignored here.
  attr(x, "modelname")
}
#' @export
#' @rdname mname
mname.fn <- function(x, conditions = NULL) {
  # Collect the model names over the (possibly condition-restricted) mappings.
  mappings <- attr(x, "mappings")
  # seq_along() instead of 1:length(): safe when there are no mappings.
  select <- seq_along(mappings)
  if (!is.null(conditions)) select <- intersect(names(mappings), conditions)
  modelnames <- Reduce("union",
                       lapply(mappings[select], function(m) attr(m, "modelname"))
  )
  return(modelnames)
}
#' @export
#' @rdname modelname
#' @param x dMod object for which the model name should be set
#' @param value character, the new modelname (does not change the C file)
"modelname<-" <- function(x, ..., value) {
  UseMethod("modelname<-", x)
}
#' @export
#' @rdname modelname
"modelname<-.fn" <- function(x, conditions = NULL, ..., value) {
  mappings <- attr(x, "mappings")
  # seq_along() instead of 1:length(): safe when there are no mappings
  # (1:length() would iterate over c(1, 0) and fail with a subscript error).
  select <- seq_along(mappings)
  if (!is.null(conditions)) select <- intersect(names(mappings), conditions)
  #if (length(value) > 1 && length(value) != length(mappings[select]))
  #  stop("Length of modelname vector should be either 1 or equal to the number of conditions.")
  if (length(value) == 1) {
    # Recycle a single name over all selected mappings; name the vector so
    # that the character indices in 'select' (used when 'conditions' is
    # given) still address the right entries.
    value <- rep(value, length.out = length(mappings[select]))
    if (!is.null(conditions)) names(value) <- conditions
  }
  # NOTE(review): when 'value' has length > 1 and 'conditions' is given,
  # 'value' needs to be named by condition -- an unnamed vector indexed by a
  # character i yields NA below; confirm callers supply named values.
  for (i in select) {
    attr(attr(x, "mappings")[[i]], "modelname") <- value[i]
    if (inherits(x, "prdfn")) {
      # Prediction functions additionally store the model name on the
      # 'func' (and, if present, 'extended') objects kept in the mapping's
      # closure environment; keep those in sync.
      extended <- environment(attr(x, "mappings")[[i]])[["extended"]]
      if (!is.null(extended)) {
        attr(environment(attr(x, "mappings")[[i]])[["extended"]], "modelname") <- value[i]
      }
      attr(environment(attr(x, "mappings")[[i]])[["func"]], "modelname") <- value[i]
    }
  }
  return(x)
}
#' @export
#' @rdname modelname
"modelname<-.objfn" <- function(x, conditions = NULL, ..., value) {
  # Objective functions carry a single model name; 'conditions' is ignored.
  attr(x, "modelname") <- value
  return(x)
}
#' Extract the equations of an object
#'
#' @param x object from which the equations should be extracted
#' @param conditions character or numeric vector specifying the conditions to
#' which \code{getEquations} is restricted. If \code{conditions} has length one,
#' the result is not returned as a list.
#' @return The equations as list of \code{eqnvec} objects.
#' @export
getEquations <- function(x, conditions = NULL) {
  UseMethod("getEquations", x)
}
#' @export
#' @rdname getEquations
getEquations.odemodel <- function(x, conditions = NULL) {
  # The ODE equations are stored as an attribute of the model function;
  # 'conditions' has no meaning for an odemodel.
  attr(x$func, "equations")
}
#' @export
#' @rdname getEquations
getEquations.prdfn <- function(x, conditions = NULL) {
  # Gather the equations attached to the condition mappings. When specific
  # conditions are requested, restrict to those; a single requested
  # condition is unwrapped from the list.
  mappings <- attr(x, "mappings")
  if (!is.null(conditions)) {
    mappings <- mappings[conditions]
  }
  equations <- lapply(mappings, function(m) attr(m, "equations"))
  if (!is.null(conditions) && length(equations) == 1) {
    return(equations[[1]])
  }
  equations
}
#' @export
#' @rdname getEquations
getEquations.fn <- function(x, conditions = NULL) {
  # Gather the equations attached to the condition mappings. When specific
  # conditions are requested, restrict to those; a single requested
  # condition is unwrapped from the list.
  mappings <- attr(x, "mappings")
  if (!is.null(conditions)) {
    mappings <- mappings[conditions]
  }
  equations <- lapply(mappings, function(m) attr(m, "equations"))
  if (!is.null(conditions) && length(equations) == 1) {
    return(equations[[1]])
  }
  equations
}
#' Extract the observables of an object
#'
#' @param x object from which the equations should be extracted
#' @param ... not used
#' @return The equations as a character.
#' @export
getObservables <- function(x, ...) {
  # Generic only; methods are defined for the individual dMod classes.
  UseMethod("getObservables", x)
}
|
# Gibbs sampler for a Bayesian (parameter-expanded) factor analysis model.
# Draws factor loadings (Lambda), error precisions (Sigma^-1) and the implied
# covariance (Omega = Lambda Lambda' + Sigma) from their full conditionals.
# Libraries ---------------------------------------------------------------
library(mvnfast)
# Data Simulation ---------------------------------------------------------
# Supplies the objects used below; presumably Y (n x p data matrix), n, p
# and km (number of latent factors) -- confirm against data_simulation.R.
source("data_simulation.R")
# Initialization ----------------------------------------------------------
al <- 0.5 * rep(1, km) # gamma hyperparameters for latent variable precisions
bl <- 0.5 * rep(1, km) # gamma hyperparameters for latent variable precisions
cj <- 1 * rep(1, p) # gamma hyperparameters for error precisions
dj <- 0.2 * rep(1, p) # gamma hyperparameters for error precisions
pl <- km * (p - km) + km * (km + 1) / 2 # number of free parameters in factor matrix
nc <- c(1:km, rep(km, p - km)) # number of free parameters in each row of Lambda
Plam <- diag(pl) # used to specify the prior precision matrix for Lambda^*
Pe <- diag(p) # initial value for error precision matrix
Pl <- 3 * diag(km) # initial precision matrix for latent variables
Ls <- matrix(0, p, km) # initial value for Lambda^* (identity in top block)
Ls[1:km, 1:km] <- diag(km)
# MCMC parameters and arrays ----------------------------------------------
n_sim <- 25000 # total number of MCMC iterations
n_burn_in <- 5000 # burn-in for Gibbs
# Define arrays for saving output (one row per post-burn-in draw)
Lout <- matrix(0, n_sim - n_burn_in, p * km) # for Lambda
Pout <- matrix(0, n_sim - n_burn_in, p) # for Sigma^(-1)
Oout <- matrix(0, n_sim - n_burn_in, p * p) # for Omega
# Sample from full conditionals -------------------------------------------
for (iter in 1:n_sim) {
  # Step 1 - update latent factors
  # covariance matrix for factors
  Veta <- solve(Pl + t(Ls) %*% Pe %*% Ls)
  # mean vector for factors (one row per observation)
  Eeta <- t(Veta %*% t(Ls) %*% Pe %*% t(Y))
  # draw each observation's factor vector; apply() returns a km x n matrix
  eta <- apply(Eeta, 1, function(x) rmvn(1, x, Veta))
  # NOTE(review): dim<- reinterprets the km x n result column-major as n x km
  # WITHOUT transposing, which mixes draws across observations when km > 1;
  # t(apply(...)) looks like the intended operation -- confirm.
  dim(eta) <- c(n, km)
  # eta <- rmvn(nrow(Eeta), Eeta, Veta) # latent factors
  # Step 2 - update factor loadings
  for (j in 1:p) { # jth row of Lambda
    # etaS <- eta[, 1:nc[j]]
    # factors entering row j (lower-triangular constraint: row j uses the
    # first nc[j] = min(j, km) factors)
    z_j <- eta[, 1:nc[j]]
    dim(z_j) <- c(n, min(j, km)) # keep matrix shape even when nc[j] == 1
    # covariance matrix for loadings
    # NOTE(review): Plam[nc[j], nc[j]] is a single scalar (one diagonal entry
    # of the pl x pl identity), so it is added to EVERY entry of the
    # nc[j] x nc[j] matrix, not only to the diagonal; diag(nc[j]) would give
    # an identity prior precision -- confirm which prior was intended.
    Vlam <- solve(
      Plam[nc[j], nc[j]] + Pe[j, j] * t(z_j) %*% z_j
    )
    # mean vector for the loadings
    Elam <- Vlam %*% (Pe[j, j] * t(z_j) %*% Y[, j, drop = FALSE])
    # Lambda^*: factor loadings under PX model
    Ls[j, 1:nc[j]] <- rmvn(1, Elam, Vlam)
  }
  # Step 3 - update latent variable precision
  ae <- al + n / 2 # posterior shape
  be <- bl + 0.5 * t(eta ^ 2) %*% matrix(1, n, 1) # posterior rate (column sums of eta^2)
  # latent variable precision matrix
  Pl <- diag(rgamma(km, ae, be), nrow = km, ncol = km)
  # Step 4 - update residual precision
  ap <- cj + n / 2 # posterior shape
  bp <- dj + 0.5 * t((Y - eta %*% t(Ls)) ^ 2) %*% matrix(1, n, 1) # posterior rate from squared residuals
  # error precision matrix
  Pe <- diag(rgamma(p, ap, bp), nrow = p, ncol = p)
  # Step 5 - Recalculate original factor loadings and save sampled values
  L <- Ls
  # flip the sign of column j when its diagonal loading is negative
  # (sign identifiability of the loadings)
  for (j in 1:km) {
    if (Ls[j, j] < 0) {
      L[, j] <- -L[, j]
    }
  }
  # rescale by the latent standard deviations (Pl is diagonal, so sqrt()
  # acts entry-wise on its diagonal) to map Lambda^* back to Lambda
  L <- L %*% sqrt(solve(Pl))
  if (iter > n_burn_in) {
    Lout[iter - n_burn_in, ] <- c(L)
    Pout[iter - n_burn_in, ] <- diag(Pe)
    Oout[iter - n_burn_in, ] <- c(L %*% t(L) + solve(Pe))
  }
  # progress reporting every 1000 iterations
  if (iter %% 1000 == 0 & iter <= n_burn_in) {
    cat(paste0("Iteration: ", iter, " (burn in).\n"))
  } else if (iter %% 1000 == 0 & iter > n_burn_in) {
    cat(paste0("Iteration: ", iter, " (sampling).\n"))
  }
}
# persist the posterior draws for downstream analysis
readr::write_rds(
  list(
    Lout = Lout,
    Pout = Pout,
    Oout = Oout
  ),
  "data/posterior_samples.rds"
)
| /gibbs_sampler.R | no_license | Derenik-H/factor-analysis | R | false | false | 3,503 | r | # Libraries ---------------------------------------------------------------
# Gibbs sampler for a Bayesian (parameter-expanded) factor analysis model.
# Draws factor loadings (Lambda), error precisions (Sigma^-1) and the implied
# covariance (Omega = Lambda Lambda' + Sigma) from their full conditionals.
library(mvnfast)
# Data Simulation ---------------------------------------------------------
# Supplies the objects used below; presumably Y (n x p data matrix), n, p
# and km (number of latent factors) -- confirm against data_simulation.R.
source("data_simulation.R")
# Initialization ----------------------------------------------------------
al <- 0.5 * rep(1, km) # gamma hyperparameters for latent variable precisions
bl <- 0.5 * rep(1, km) # gamma hyperparameters for latent variable precisions
cj <- 1 * rep(1, p) # gamma hyperparameters for error precisions
dj <- 0.2 * rep(1, p) # gamma hyperparameters for error precisions
pl <- km * (p - km) + km * (km + 1) / 2 # number of free parameters in factor matrix
nc <- c(1:km, rep(km, p - km)) # number of free parameters in each row of Lambda
Plam <- diag(pl) # used to specify the prior precision matrix for Lambda^*
Pe <- diag(p) # initial value for error precision matrix
Pl <- 3 * diag(km) # initial precision matrix for latent variables
Ls <- matrix(0, p, km) # initial value for Lambda^* (identity in top block)
Ls[1:km, 1:km] <- diag(km)
# MCMC parameters and arrays ----------------------------------------------
n_sim <- 25000 # total number of MCMC iterations
n_burn_in <- 5000 # burn-in for Gibbs
# Define arrays for saving output (one row per post-burn-in draw)
Lout <- matrix(0, n_sim - n_burn_in, p * km) # for Lambda
Pout <- matrix(0, n_sim - n_burn_in, p) # for Sigma^(-1)
Oout <- matrix(0, n_sim - n_burn_in, p * p) # for Omega
# Sample from full conditionals -------------------------------------------
for (iter in 1:n_sim) {
  # Step 1 - update latent factors
  # covariance matrix for factors
  Veta <- solve(Pl + t(Ls) %*% Pe %*% Ls)
  # mean vector for factors (one row per observation)
  Eeta <- t(Veta %*% t(Ls) %*% Pe %*% t(Y))
  # draw each observation's factor vector; apply() returns a km x n matrix
  eta <- apply(Eeta, 1, function(x) rmvn(1, x, Veta))
  # NOTE(review): dim<- reinterprets the km x n result column-major as n x km
  # WITHOUT transposing, which mixes draws across observations when km > 1;
  # t(apply(...)) looks like the intended operation -- confirm.
  dim(eta) <- c(n, km)
  # eta <- rmvn(nrow(Eeta), Eeta, Veta) # latent factors
  # Step 2 - update factor loadings
  for (j in 1:p) { # jth row of Lambda
    # etaS <- eta[, 1:nc[j]]
    # factors entering row j (lower-triangular constraint: row j uses the
    # first nc[j] = min(j, km) factors)
    z_j <- eta[, 1:nc[j]]
    dim(z_j) <- c(n, min(j, km)) # keep matrix shape even when nc[j] == 1
    # covariance matrix for loadings
    # NOTE(review): Plam[nc[j], nc[j]] is a single scalar (one diagonal entry
    # of the pl x pl identity), so it is added to EVERY entry of the
    # nc[j] x nc[j] matrix, not only to the diagonal; diag(nc[j]) would give
    # an identity prior precision -- confirm which prior was intended.
    Vlam <- solve(
      Plam[nc[j], nc[j]] + Pe[j, j] * t(z_j) %*% z_j
    )
    # mean vector for the loadings
    Elam <- Vlam %*% (Pe[j, j] * t(z_j) %*% Y[, j, drop = FALSE])
    # Lambda^*: factor loadings under PX model
    Ls[j, 1:nc[j]] <- rmvn(1, Elam, Vlam)
  }
  # Step 3 - update latent variable precision
  ae <- al + n / 2 # posterior shape
  be <- bl + 0.5 * t(eta ^ 2) %*% matrix(1, n, 1) # posterior rate (column sums of eta^2)
  # latent variable precision matrix
  Pl <- diag(rgamma(km, ae, be), nrow = km, ncol = km)
  # Step 4 - update residual precision
  ap <- cj + n / 2 # posterior shape
  bp <- dj + 0.5 * t((Y - eta %*% t(Ls)) ^ 2) %*% matrix(1, n, 1) # posterior rate from squared residuals
  # error precision matrix
  Pe <- diag(rgamma(p, ap, bp), nrow = p, ncol = p)
  # Step 5 - Recalculate original factor loadings and save sampled values
  L <- Ls
  # flip the sign of column j when its diagonal loading is negative
  # (sign identifiability of the loadings)
  for (j in 1:km) {
    if (Ls[j, j] < 0) {
      L[, j] <- -L[, j]
    }
  }
  # rescale by the latent standard deviations (Pl is diagonal, so sqrt()
  # acts entry-wise on its diagonal) to map Lambda^* back to Lambda
  L <- L %*% sqrt(solve(Pl))
  if (iter > n_burn_in) {
    Lout[iter - n_burn_in, ] <- c(L)
    Pout[iter - n_burn_in, ] <- diag(Pe)
    Oout[iter - n_burn_in, ] <- c(L %*% t(L) + solve(Pe))
  }
  # progress reporting every 1000 iterations
  if (iter %% 1000 == 0 & iter <= n_burn_in) {
    cat(paste0("Iteration: ", iter, " (burn in).\n"))
  } else if (iter %% 1000 == 0 & iter > n_burn_in) {
    cat(paste0("Iteration: ", iter, " (sampling).\n"))
  }
}
# persist the posterior draws for downstream analysis
readr::write_rds(
  list(
    Lout = Lout,
    Pout = Pout,
    Oout = Oout
  ),
  "data/posterior_samples.rds"
)
|
\name{plotRiskscorePredrisk}
\alias{plotRiskscorePredrisk}
\title{Function to plot predicted risks against risk scores.}
\usage{plotRiskscorePredrisk(data, riskScore, predRisk, plottitle, xlabel,
ylabel, rangexaxis, rangeyaxis, filename, fileplot, plottype)}
\description{This function is used to make a plot of predicted risks against risk scores.}
\details{The function creates a plot of predicted risks against risk scores.
Predicted risks can be obtained using the functions
\code{\link{fitLogRegModel}} and \code{\link{predRisk}}
or be imported from other methods or packages.
The function \code{\link{riskScore}} can be
used to compute unweighted or weighted risk scores.}
\value{The function creates a plot of predicted risks against risk scores.}
\keyword{hplot}
\seealso{\code{\link{riskScore}}, \code{\link{predRisk}}}
\arguments{\item{data}{Data frame or matrix that includes the outcome and
predictors variables.}
\item{riskScore}{Vector of (weighted or unweighted) genetic risk scores.}
\item{predRisk}{Vector of predicted risks.}
\item{plottitle}{Title of the plot. Specification of \code{plottitle} is optional. Default is "Risk score predicted risk plot".}
\item{xlabel}{Label of x-axis. Specification of \code{xlabel} is optional. Default is "Risk score".}
\item{ylabel}{Label of y-axis. Specification of \code{ylabel} is optional. Default is "Predicted risk".}
\item{rangexaxis}{Range of the x axis. Specification of \code{rangexaxis} is optional.}
\item{rangeyaxis}{Range of the y axis. Specification of \code{rangeyaxis} is optional. Default is \code{c(0,1)}.}
\item{filename}{Name of the output file in which risk scores and
predicted risks for each individual will be saved. If no directory is
specified, the file is saved in the working directory as a txt file.
When no \code{filename} is specified, the output is not saved.}
\item{fileplot}{Name of the output file that contains the plot. The file is
saved in the working directory in the format specified under \code{plottype}. Example:
\code{fileplot="plotname"}. Note that the extension is not specified here.
When \code{fileplot} is not specified, the plot is not saved.}
\item{plottype}{The format in which the plot is saved. Available formats are
wmf, emf, png, jpg, jpeg, bmp, tif, tiff, ps,
eps or pdf. For example, \code{plottype="eps"} will save the plot in eps format.
When \code{plottype} is not specified, the plot will be saved in jpg format.}}
\examples{# specify dataset with outcome and predictor variables
data(ExampleData)
# fit a logistic regression model
# all steps needed to construct a logistic regression model are written in a function
# called 'ExampleModels', which is described on page 4-5
riskmodel <- ExampleModels()$riskModel2
# obtain predicted risks
predRisk <- predRisk(riskmodel)
# specify column numbers of genetic predictors
cGenPred <- c(11:16)
# function to compute unweighted genetic risk scores
riskScore <- riskScore(weights=riskmodel, data=ExampleData,
cGenPreds=cGenPred, Type="unweighted")
# specify range of x-axis
rangexaxis <- c(0,12)
# specify range of y-axis
rangeyaxis <- c(0,1)
# specify label of x-axis
xlabel <- "Risk score"
# specify label of y-axis
ylabel <- "Predicted risk"
# specify title for the plot
plottitle <- "Risk score versus predicted risk"
# produce risk score-predicted risk plot
plotRiskscorePredrisk(data=ExampleData, riskScore=riskScore, predRisk=predRisk,
plottitle=plottitle, xlabel=xlabel, ylabel=ylabel, rangexaxis=rangexaxis,
rangeyaxis=rangeyaxis)}
| /man/plotRiskscorePredrisk.Rd | no_license | cran/PredictABEL | R | false | false | 3,603 | rd | \name{plotRiskscorePredrisk}
\alias{plotRiskscorePredrisk}
\title{Function to plot predicted risks against risk scores.}
\usage{plotRiskscorePredrisk(data, riskScore, predRisk, plottitle, xlabel,
ylabel, rangexaxis, rangeyaxis, filename, fileplot, plottype)}
\description{This function is used to make a plot of predicted risks against risk scores.}
\details{The function creates a plot of predicted risks against risk scores.
Predicted risks can be obtained using the functions
\code{\link{fitLogRegModel}} and \code{\link{predRisk}}
or be imported from other methods or packages.
The function \code{\link{riskScore}} can be
used to compute unweighted or weighted risk scores.}
\value{The function creates a plot of predicted risks against risk scores.}
\keyword{hplot}
\seealso{\code{\link{riskScore}}, \code{\link{predRisk}}}
\arguments{\item{data}{Data frame or matrix that includes the outcome and
predictors variables.}
\item{riskScore}{Vector of (weighted or unweighted) genetic risk scores.}
\item{predRisk}{Vector of predicted risks.}
\item{plottitle}{Title of the plot. Specification of \code{plottitle} is optional. Default is "Risk score predicted risk plot".}
\item{xlabel}{Label of x-axis. Specification of \code{xlabel} is optional. Default is "Risk score".}
\item{ylabel}{Label of y-axis. Specification of \code{ylabel} is optional. Default is "Predicted risk".}
\item{rangexaxis}{Range of the x axis. Specification of \code{rangexaxis} is optional.}
\item{rangeyaxis}{Range of the y axis. Specification of \code{rangeyaxis} is optional. Default is \code{c(0,1)}.}
\item{filename}{Name of the output file in which risk scores and
predicted risks for each individual will be saved. If no directory is
specified, the file is saved in the working directory as a txt file.
When no \code{filename} is specified, the output is not saved.}
\item{fileplot}{Name of the output file that contains the plot. The file is
saved in the working directory in the format specified under \code{plottype}. Example:
\code{fileplot="plotname"}. Note that the extension is not specified here.
When \code{fileplot} is not specified, the plot is not saved.}
\item{plottype}{The format in which the plot is saved. Available formats are
wmf, emf, png, jpg, jpeg, bmp, tif, tiff, ps,
eps or pdf. For example, \code{plottype="eps"} will save the plot in eps format.
When \code{plottype} is not specified, the plot will be saved in jpg format.}}
\examples{# specify dataset with outcome and predictor variables
data(ExampleData)
# fit a logistic regression model
# all steps needed to construct a logistic regression model are written in a function
# called 'ExampleModels', which is described on page 4-5
riskmodel <- ExampleModels()$riskModel2
# obtain predicted risks
predRisk <- predRisk(riskmodel)
# specify column numbers of genetic predictors
cGenPred <- c(11:16)
# function to compute unweighted genetic risk scores
riskScore <- riskScore(weights=riskmodel, data=ExampleData,
cGenPreds=cGenPred, Type="unweighted")
# specify range of x-axis
rangexaxis <- c(0,12)
# specify range of y-axis
rangeyaxis <- c(0,1)
# specify label of x-axis
xlabel <- "Risk score"
# specify label of y-axis
ylabel <- "Predicted risk"
# specify title for the plot
plottitle <- "Risk score versus predicted risk"
# produce risk score-predicted risk plot
plotRiskscorePredrisk(data=ExampleData, riskScore=riskScore, predRisk=predRisk,
plottitle=plottitle, xlabel=xlabel, ylabel=ylabel, rangexaxis=rangexaxis,
rangeyaxis=rangeyaxis)}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/userstats.R
\name{userstats}
\alias{userstats}
\title{userstats}
\usage{
userstats(user, n = 1000, bg_col = "white", txt_col = "#1A5276",
year = 2019, x = 10)
}
\arguments{
\item{user}{username of a real Twitter account that the user wishes to analyze}
\item{n}{number of tweets to analyze from that account, Default: 1000}
\item{bg_col}{background color of output tables, Default: 'white'}
\item{txt_col}{text color of output tables, Default: '#1A5276'}
\item{year}{year during which tweets are to be analyzed and displayed in
output, Default: 2019}
\item{x}{number of recent tweets to display from that account in the output,
Default: 10}
}
\value{
an HTML output containing summary information of the specified
account(s) using input username(s) as well as the specified number of recent
tweets from that account(s)
}
\description{
userstats() allows for easy Twitter analysis of user-specified
Twitter accounts. See details for more information.
}
\details{
userstats() allows Twitter users to search the website by username,
allowing the user to search multiple usernames at one time, specify which year
they would like summary information for, specify the number of tweets they
would like to analyze (with a maximum output allowed of 3,200 tweets),
customize the background color of output tables, and customize the text color
of the output with the assumption of knowledge of HTML color codes.
}
\examples{
\dontrun{
if(interactive()){
userstats(c("taylorswift13","katyperry"), 1000, year = 2019, x=5)
userstats("taylorswift13", 1000, x=5)
userstats(c("taylorswift13","21savage","trvisxx","katyperry"), 1000, year=2019, x = 3)
}}
}
| /man/userstats.Rd | permissive | Cyanjiner/rtweetstats | R | false | true | 1,724 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/userstats.R
\name{userstats}
\alias{userstats}
\title{userstats}
\usage{
userstats(user, n = 1000, bg_col = "white", txt_col = "#1A5276",
year = 2019, x = 10)
}
\arguments{
\item{user}{username of a real Twitter account that the user wishes to analyze}
\item{n}{number of tweets to analyze from that account, Default: 1000}
\item{bg_col}{background color of output tables, Default: 'white'}
\item{txt_col}{text color of output tables, Default: '#1A5276'}
\item{year}{year during which tweets are to be analyzed and displayed in
output, Default: 2019}
\item{x}{number of recent tweets to display from that account in the output,
Default: 10}
}
\value{
an HTML output containing summary information of the specified
account(s) using input username(s) as well as the specified number of recent
tweets from that account(s)
}
\description{
userstats() allows for easy Twitter analysis of user-specified
Twitter accounts. See details for more information.
}
\details{
userstats() allows Twitter users to search the website by username,
allowing the user to search multiple usernames at one time, specify which year
they would like summary information for, specify the number of tweets they
would like to analyze (with a maximum output allowed of 3,200 tweets),
customize the background color of output tables, and customize the text color
of the output with the assumption of knowledge of HTML color codes.
}
\examples{
\dontrun{
if(interactive()){
userstats(c("taylorswift13","katyperry"), 1000, year = 2019, x=5)
userstats("taylorswift13", 1000, x=5)
userstats(c("taylorswift13","21savage","trvisxx","katyperry"), 1000, year=2019, x = 3)
}}
}
|
\name{cca}
\alias{cca}
\alias{cca.default}
\alias{cca.formula}
\alias{rda}
\alias{rda.default}
\alias{rda.formula}
\title{ [Partial] [Constrained] Correspondence Analysis and Redundancy
Analysis }
\description{
Function \code{cca} performs correspondence analysis, or optionally
constrained correspondence analysis (a.k.a. canonical correspondence
analysis), or optionally partial constrained correspondence
analysis. Function \code{rda} performs redundancy analysis, or
optionally principal components analysis.
These are all very popular ordination techniques in community ecology.
}
\usage{
\method{cca}{formula}(formula, data, na.action = na.fail, subset = NULL,
...)
\method{rda}{formula}(formula, data, scale=FALSE, na.action = na.fail,
subset = NULL, ...)
\method{cca}{default}(X, Y, Z, ...)
\method{rda}{default}(X, Y, Z, scale=FALSE, ...)
}
\arguments{
\item{formula}{Model formula, where the left hand side gives the
community data matrix, right hand side gives the constraining variables,
and conditioning variables can be given within a special function
\code{Condition}.}
\item{data}{Data frame containing the variables on the right hand side
of the model formula.}
\item{X}{ Community data matrix. }
\item{Y}{ Constraining matrix, typically of environmental variables.
Can be missing. If this is a \code{data.frame}, it will be
expanded to a \code{\link{model.matrix}} where factors are
expanded to contrasts (\dQuote{dummy variables}). It is better to
use \code{formula} instead of this argument, and some further
analyses only work when \code{formula} was used.}
\item{Z}{ Conditioning matrix, the effect of which is removed
(`partialled out') before next step. Can be missing. If this is a
\code{data.frame}, it is expanded similarly as constraining
matrix.}
\item{scale}{Scale species to unit variance (like correlations).}
\item{na.action}{Handling of missing values in constraints or
conditions. The default (\code{\link{na.fail}}) is to stop with
missing value. Choice \code{\link{na.omit}} removes all rows with
missing values. Choice \code{\link{na.exclude}} keeps all
observations but gives \code{NA} for results that cannot be
calculated. The WA scores of rows may be found also for missing
values in constraints. Missing values are never allowed in
dependent community data. }
\item{subset}{Subset of data rows. This can be a logical vector which
is \code{TRUE} for kept observations, or a logical expression which
can contain variables in the working environment, \code{data} or
species names of the community data.}
\item{...}{Other arguments for \code{print} or \code{plot} functions
(ignored in other functions).}
}
\details{
Since their introduction (ter Braak 1986), constrained, or canonical,
correspondence analysis and its spin-off, redundancy analysis, have
been the most popular ordination methods in community ecology.
Functions \code{cca} and \code{rda} are similar to popular
proprietary software \code{Canoco}, although the implementation is
completely different. The functions are based on Legendre &
Legendre's (2012) algorithm: in \code{cca}
Chi-square transformed data matrix is subjected to weighted linear
regression on constraining variables, and the fitted values are
submitted to correspondence analysis performed via singular value
decomposition (\code{\link{svd}}). Function \code{rda} is similar, but uses
ordinary, unweighted linear regression and unweighted SVD. Legendre &
Legendre (2012), Table 11.5 (p. 650) give a skeleton of the RDA
algorithm of \pkg{vegan}. The algorithm of CCA is similar, but
involves standardization by row and column weights.
The functions can be called either with matrix-like entries for
community data and constraints, or with formula interface. In
general, the formula interface is preferred, because it allows a
better control of the model and allows factor constraints. Some
analyses of ordination results are only possible if model was fitted
with formula (e.g., most cases of \code{\link{anova.cca}}, automatic
model building).
In the following sections, \code{X}, \code{Y} and \code{Z}, although
referred to as matrices, are more commonly data frames.
In the matrix interface, the
community data matrix \code{X} must be given, but the other data
matrices may be omitted, and the corresponding stage of analysis is
skipped. If matrix \code{Z} is supplied, its effects are removed from
the community matrix, and the residual matrix is submitted to the next
stage. This is called `partial' correspondence or redundancy
analysis. If matrix
\code{Y} is supplied, it is used to constrain the ordination,
resulting in constrained or canonical correspondence analysis, or
redundancy analysis.
Finally, the residual is submitted to ordinary correspondence
analysis (or principal components analysis). If both matrices
\code{Z} and \code{Y} are missing, the
data matrix is analysed by ordinary correspondence analysis (or
principal components analysis).
Instead of separate matrices, the model can be defined using a model
\code{\link{formula}}. The left hand side must be the
community data matrix (\code{X}). The right hand side defines the
constraining model.
The constraints can contain ordered or unordered factors,
interactions among variables and functions of variables. The defined
\code{\link{contrasts}} are honoured in \code{\link{factor}}
variables. The constraints can also be matrices (but not data
frames).
The formula can include a special term \code{Condition}
for conditioning variables (``covariables'') ``partialled out'' before
analysis. So the following commands are equivalent:
\code{cca(X, Y, Z)}, \code{cca(X ~ Y + Condition(Z))}, where \code{Y}
and \code{Z} refer to constraints and conditions matrices respectively.
Constrained correspondence analysis is indeed a constrained method:
CCA does not try to display all variation in the
data, but only the part that can be explained by the used constraints.
Consequently, the results are strongly dependent on the set of
constraints and their transformations or interactions among the
constraints. The shotgun method is to use all environmental variables
as constraints. However, such exploratory problems are better
analysed with
unconstrained methods such as correspondence analysis
(\code{\link{decorana}}, \code{\link[MASS]{corresp}}) or non-metric
multidimensional scaling (\code{\link{metaMDS}}) and
environmental interpretation after analysis
(\code{\link{envfit}}, \code{\link{ordisurf}}).
CCA is a good choice if the user has
clear and strong \emph{a priori} hypotheses on constraints and is not
interested in the major structure in the data set.
CCA is able to correct the curve artefact commonly found in
correspondence analysis by forcing the configuration into linear
constraints. However, the curve artefact can be avoided only with a
low number of constraints that do not have a curvilinear relation with
each other. The curve can reappear even with two badly chosen
constraints or a single factor. Although the formula interface makes it
easy to include polynomial or interaction terms, such terms often
produce curved artefacts (that are difficult to interpret); these
should probably be avoided.
According to folklore, \code{rda} should be used with ``short
gradients'' rather than \code{cca}. However, this is not based
on research which finds methods based on Euclidean metric as uniformly
weaker than those based on Chi-squared metric. However, standardized
Euclidean distance may be an appropriate measure (see Hellinger
standardization in \code{\link{decostand}} in particular).
Partial CCA (pCCA; or alternatively partial RDA) can be used to remove
the effect of some
conditioning or ``background'' or ``random'' variables or
``covariables'' before CCA proper. In fact, pCCA compares models
\code{cca(X ~ Z)} and \code{cca(X ~ Y + Z)} and attributes their
difference to the effect of \code{Y} cleansed of the effect of
\code{Z}. Some people have used the method for extracting
``components of variance'' in CCA. However, if the effect of
variables together is stronger than sum of both separately, this can
increase total Chi-square after ``partialling out'' some
variation, and give negative ``components of variance''. In general,
such components of ``variance'' are not to be trusted due to
interactions between two sets of variables.
The functions have \code{summary} and \code{plot} methods which are
documented separately (see \code{\link{plot.cca}}, \code{\link{summary.cca}}).
}
\value{
Function \code{cca} returns a huge object of class \code{cca}, which
is described separately in \code{\link{cca.object}}.
Function \code{rda} returns an object of class \code{rda} which
inherits from class \code{cca} and is described in \code{\link{cca.object}}.
The scaling used in \code{rda} scores is described in a separate
vignette with this package.
}
\references{ The original method was by ter Braak, but the current
implementation follows Legendre and Legendre.
Legendre, P. and Legendre, L. (2012) \emph{Numerical Ecology}. 3rd English
ed. Elsevier.
McCune, B. (1997) Influence of noisy environmental data on canonical
correspondence analysis. \emph{Ecology} \strong{78}, 2617-2623.
Palmer, M. W. (1993) Putting things in even better order: The
advantages of canonical correspondence analysis. \emph{Ecology}
\strong{74}, 2215-2230.
Ter Braak, C. J. F. (1986) Canonical Correspondence Analysis: a new
eigenvector technique for multivariate direct gradient
analysis. \emph{Ecology} \strong{67}, 1167-1179.
}
\author{
The responsible author was Jari Oksanen, but the code borrows heavily
from Dave Roberts (Montana State University, USA).
}
\seealso{
This help page describes two constrained ordination functions,
\code{cca} and \code{rda}. A related method, distance-based
redundancy analysis (dbRDA) is described separately
(\code{\link{capscale}}). All these functions return similar objects
(described in \code{\link{cca.object}}). There are numerous support
functions that can be used to access the result object. In the list
below, functions of type \code{cca} will handle all three constrained
ordination objects, and functions of \code{rda} only handle \code{rda}
and \code{\link{capscale}} results.
The main plotting functions are \code{\link{plot.cca}} for all
methods, and \code{\link{biplot.rda}} for RDA and dbRDA. However,
generic \pkg{vegan} plotting functions can also handle the results.
The scores can be accessed and scaled with \code{\link{scores.cca}},
and summarized with \code{\link{summary.cca}}. The eigenvalues can
be accessed with \code{\link{eigenvals.cca}} and the regression
coefficients for constraints with \code{\link{coef.cca}}. The
eigenvalues can be plotted with \code{\link{screeplot.cca}}, and the
(adjusted) \eqn{R^2}{R-squared} can be found with
\code{\link{RsquareAdj.rda}}. The scores can be also calculated for
new data sets with \code{\link{predict.cca}} which allows adding
points to ordinations. The values of constraints can be inferred
from ordination and community composition with
\code{\link{calibrate.cca}}.
Diagnostic statistics can be found with \code{\link{goodness.cca}},
\code{\link{inertcomp}}, \code{\link{spenvcor}},
\code{\link{intersetcor}}, \code{\link{tolerance.cca}}, and
\code{\link{vif.cca}}. Function \code{\link{as.mlm.cca}} refits the
result object as a multiple \code{\link{lm}} object, and this allows
finding influence statistics (\code{\link{lm.influence}},
\code{\link{cooks.distance}} etc.).
Permutation based significance for the overall model, single
constraining variables or axes can be found with
\code{\link{anova.cca}}. Automatic model building with \R{}
\code{\link{step}} function is possible with
\code{\link{deviance.cca}}, \code{\link{add1.cca}} and
\code{\link{drop1.cca}}. Functions \code{\link{ordistep}} and
\code{\link{ordiR2step}} (for RDA) are special functions for
constrained ordination. Randomized data sets can be generated with
\code{\link{simulate.cca}}.
Separate methods based on constrained ordination model are principal
response curves (\code{\link{prc}}) and variance partitioning between
several components (\code{\link{varpart}}).
Design decisions are explained in \code{\link{vignette}}
on \dQuote{Design decisions} which can be accessed with
\code{browseVignettes("vegan")}.
Package \pkg{ade4} provides alternative constrained ordination
function \code{\link[ade4]{pcaiv}}.
}
\examples{
data(varespec)
data(varechem)
## Common but bad way: use all variables you happen to have in your
## environmental data matrix
vare.cca <- cca(varespec, varechem)
vare.cca
plot(vare.cca)
## Formula interface and a better model
vare.cca <- cca(varespec ~ Al + P*(K + Baresoil), data=varechem)
vare.cca
plot(vare.cca)
## `Partialling out' and `negative components of variance'
cca(varespec ~ Ca, varechem)
cca(varespec ~ Ca + Condition(pH), varechem)
## RDA
data(dune)
data(dune.env)
dune.Manure <- rda(dune ~ Manure, dune.env)
plot(dune.Manure)
}
\keyword{ multivariate }
| /man/cca.Rd | no_license | kevinwkc/vegan | R | false | false | 13,484 | rd | \name{cca}
\alias{cca}
\alias{cca.default}
\alias{cca.formula}
\alias{rda}
\alias{rda.default}
\alias{rda.formula}
\title{ [Partial] [Constrained] Correspondence Analysis and Redundancy
Analysis }
\description{
Function \code{cca} performs correspondence analysis, or optionally
constrained correspondence analysis (a.k.a. canonical correspondence
analysis), or optionally partial constrained correspondence
analysis. Function \code{rda} performs redundancy analysis, or
optionally principal components analysis.
These are all very popular ordination techniques in community ecology.
}
\usage{
\method{cca}{formula}(formula, data, na.action = na.fail, subset = NULL,
...)
\method{rda}{formula}(formula, data, scale=FALSE, na.action = na.fail,
subset = NULL, ...)
\method{cca}{default}(X, Y, Z, ...)
\method{rda}{default}(X, Y, Z, scale=FALSE, ...)
}
\arguments{
\item{formula}{Model formula, where the left hand side gives the
community data matrix, right hand side gives the constraining variables,
and conditioning variables can be given within a special function
\code{Condition}.}
\item{data}{Data frame containing the variables on the right hand side
of the model formula.}
\item{X}{ Community data matrix. }
\item{Y}{ Constraining matrix, typically of environmental variables.
Can be missing. If this is a \code{data.frame}, it will be
expanded to a \code{\link{model.matrix}} where factors are
expanded to contrasts (\dQuote{dummy variables}). It is better to
use \code{formula} instead of this argument, and some further
analyses only work when \code{formula} was used.}
\item{Z}{ Conditioning matrix, the effect of which is removed
(`partialled out') before next step. Can be missing. If this is a
\code{data.frame}, it is expanded similarly as constraining
matrix.}
\item{scale}{Scale species to unit variance (like correlations).}
\item{na.action}{Handling of missing values in constraints or
conditions. The default (\code{\link{na.fail}}) is to stop with
missing value. Choice \code{\link{na.omit}} removes all rows with
missing values. Choice \code{\link{na.exclude}} keeps all
observations but gives \code{NA} for results that cannot be
calculated. The WA scores of rows may be found also for missing
values in constraints. Missing values are never allowed in
dependent community data. }
\item{subset}{Subset of data rows. This can be a logical vector which
is \code{TRUE} for kept observations, or a logical expression which
can contain variables in the working environment, \code{data} or
species names of the community data.}
\item{...}{Other arguments for \code{print} or \code{plot} functions
(ignored in other functions).}
}
\details{
Since their introduction (ter Braak 1986), constrained, or canonical,
correspondence analysis and its spin-off, redundancy analysis, have
been the most popular ordination methods in community ecology.
Functions \code{cca} and \code{rda} are similar to popular
proprietary software \code{Canoco}, although the implementation is
completely different. The functions are based on Legendre &
Legendre's (2012) algorithm: in \code{cca}
Chi-square transformed data matrix is subjected to weighted linear
regression on constraining variables, and the fitted values are
submitted to correspondence analysis performed via singular value
decomposition (\code{\link{svd}}). Function \code{rda} is similar, but uses
ordinary, unweighted linear regression and unweighted SVD. Legendre &
Legendre (2012), Table 11.5 (p. 650) give a skeleton of the RDA
algorithm of \pkg{vegan}. The algorithm of CCA is similar, but
involves standardization by row and column weights.
The functions can be called either with matrix-like entries for
community data and constraints, or with formula interface. In
general, the formula interface is preferred, because it allows a
better control of the model and allows factor constraints. Some
analyses of ordination results are only possible if model was fitted
with formula (e.g., most cases of \code{\link{anova.cca}}, automatic
model building).
In the following sections, \code{X}, \code{Y} and \code{Z}, although
referred to as matrices, are more commonly data frames.
In the matrix interface, the
community data matrix \code{X} must be given, but the other data
matrices may be omitted, and the corresponding stage of analysis is
skipped. If matrix \code{Z} is supplied, its effects are removed from
the community matrix, and the residual matrix is submitted to the next
stage. This is called `partial' correspondence or redundancy
analysis. If matrix
\code{Y} is supplied, it is used to constrain the ordination,
resulting in constrained or canonical correspondence analysis, or
redundancy analysis.
Finally, the residual is submitted to ordinary correspondence
analysis (or principal components analysis). If both matrices
\code{Z} and \code{Y} are missing, the
data matrix is analysed by ordinary correspondence analysis (or
principal components analysis).
Instead of separate matrices, the model can be defined using a model
\code{\link{formula}}. The left hand side must be the
community data matrix (\code{X}). The right hand side defines the
constraining model.
The constraints can contain ordered or unordered factors,
interactions among variables and functions of variables. The defined
\code{\link{contrasts}} are honoured in \code{\link{factor}}
variables. The constraints can also be matrices (but not data
frames).
The formula can include a special term \code{Condition}
for conditioning variables (``covariables'') ``partialled out'' before
analysis. So the following commands are equivalent:
\code{cca(X, Y, Z)}, \code{cca(X ~ Y + Condition(Z))}, where \code{Y}
and \code{Z} refer to constraints and conditions matrices respectively.
Constrained correspondence analysis is indeed a constrained method:
CCA does not try to display all variation in the
data, but only the part that can be explained by the used constraints.
Consequently, the results are strongly dependent on the set of
constraints and their transformations or interactions among the
constraints. The shotgun method is to use all environmental variables
as constraints. However, such exploratory problems are better
analysed with
unconstrained methods such as correspondence analysis
(\code{\link{decorana}}, \code{\link[MASS]{corresp}}) or non-metric
multidimensional scaling (\code{\link{metaMDS}}) and
environmental interpretation after analysis
(\code{\link{envfit}}, \code{\link{ordisurf}}).
CCA is a good choice if the user has
clear and strong \emph{a priori} hypotheses on constraints and is not
interested in the major structure in the data set.
CCA is able to correct the curve artefact commonly found in
correspondence analysis by forcing the configuration into linear
constraints. However, the curve artefact can be avoided only with a
low number of constraints that do not have a curvilinear relation with
each other. The curve can reappear even with two badly chosen
constraints or a single factor. Although the formula interface makes it
easy to include polynomial or interaction terms, such terms often
produce curved artefacts (that are difficult to interpret); these
should probably be avoided.
According to folklore, \code{rda} should be used with ``short
gradients'' rather than \code{cca}. However, this is not based
on research which finds methods based on Euclidean metric as uniformly
weaker than those based on Chi-squared metric. However, standardized
Euclidean distance may be an appropriate measure (see Hellinger
standardization in \code{\link{decostand}} in particular).
Partial CCA (pCCA; or alternatively partial RDA) can be used to remove
the effect of some
conditioning or ``background'' or ``random'' variables or
``covariables'' before CCA proper. In fact, pCCA compares models
\code{cca(X ~ Z)} and \code{cca(X ~ Y + Z)} and attributes their
difference to the effect of \code{Y} cleansed of the effect of
\code{Z}. Some people have used the method for extracting
``components of variance'' in CCA. However, if the effect of
variables together is stronger than sum of both separately, this can
increase total Chi-square after ``partialling out'' some
variation, and give negative ``components of variance''. In general,
such components of ``variance'' are not to be trusted due to
interactions between two sets of variables.
The functions have \code{summary} and \code{plot} methods which are
documented separately (see \code{\link{plot.cca}}, \code{\link{summary.cca}}).
}
\value{
Function \code{cca} returns a huge object of class \code{cca}, which
is described separately in \code{\link{cca.object}}.
Function \code{rda} returns an object of class \code{rda} which
inherits from class \code{cca} and is described in \code{\link{cca.object}}.
The scaling used in \code{rda} scores is described in a separate
vignette with this package.
}
\references{ The original method was by ter Braak, but the current
implementation follows Legendre and Legendre.
Legendre, P. and Legendre, L. (2012) \emph{Numerical Ecology}. 3rd English
ed. Elsevier.
McCune, B. (1997) Influence of noisy environmental data on canonical
correspondence analysis. \emph{Ecology} \strong{78}, 2617-2623.
Palmer, M. W. (1993) Putting things in even better order: The
advantages of canonical correspondence analysis. \emph{Ecology}
\strong{74}, 2215-2230.
Ter Braak, C. J. F. (1986) Canonical Correspondence Analysis: a new
eigenvector technique for multivariate direct gradient
analysis. \emph{Ecology} \strong{67}, 1167-1179.
}
\author{
The responsible author was Jari Oksanen, but the code borrows heavily
from Dave Roberts (Montana State University, USA).
}
\seealso{
This help page describes two constrained ordination functions,
\code{cca} and \code{rda}. A related method, distance-based
redundancy analysis (dbRDA) is described separately
(\code{\link{capscale}}). All these functions return similar objects
(described in \code{\link{cca.object}}). There are numerous support
functions that can be used to access the result object. In the list
below, functions of type \code{cca} will handle all three constrained
ordination objects, and functions of \code{rda} only handle \code{rda}
and \code{\link{capscale}} results.
The main plotting functions are \code{\link{plot.cca}} for all
methods, and \code{\link{biplot.rda}} for RDA and dbRDA. However,
generic \pkg{vegan} plotting functions can also handle the results.
The scores can be accessed and scaled with \code{\link{scores.cca}},
and summarized with \code{\link{summary.cca}}. The eigenvalues can
be accessed with \code{\link{eigenvals.cca}} and the regression
coefficients for constraints with \code{\link{coef.cca}}. The
eigenvalues can be plotted with \code{\link{screeplot.cca}}, and the
(adjusted) \eqn{R^2}{R-squared} can be found with
\code{\link{RsquareAdj.rda}}. The scores can be also calculated for
new data sets with \code{\link{predict.cca}} which allows adding
points to ordinations. The values of constraints can be inferred
from ordination and community composition with
\code{\link{calibrate.cca}}.
Diagnostic statistics can be found with \code{\link{goodness.cca}},
\code{\link{inertcomp}}, \code{\link{spenvcor}},
\code{\link{intersetcor}}, \code{\link{tolerance.cca}}, and
\code{\link{vif.cca}}. Function \code{\link{as.mlm.cca}} refits the
result object as a multiple \code{\link{lm}} object, and this allows
finding influence statistics (\code{\link{lm.influence}},
\code{\link{cooks.distance}} etc.).
Permutation based significance for the overall model, single
constraining variables or axes can be found with
\code{\link{anova.cca}}. Automatic model building with \R{}
\code{\link{step}} function is possible with
\code{\link{deviance.cca}}, \code{\link{add1.cca}} and
\code{\link{drop1.cca}}. Functions \code{\link{ordistep}} and
\code{\link{ordiR2step}} (for RDA) are special functions for
constrained ordination. Randomized data sets can be generated with
\code{\link{simulate.cca}}.
Separate methods based on constrained ordination model are principal
response curves (\code{\link{prc}}) and variance partitioning between
several components (\code{\link{varpart}}).
Design decisions are explained in \code{\link{vignette}}
on \dQuote{Design decisions} which can be accessed with
\code{browseVignettes("vegan")}.
Package \pkg{ade4} provides alternative constrained ordination
function \code{\link[ade4]{pcaiv}}.
}
\examples{
data(varespec)
data(varechem)
## Common but bad way: use all variables you happen to have in your
## environmental data matrix
vare.cca <- cca(varespec, varechem)
vare.cca
plot(vare.cca)
## Formula interface and a better model
vare.cca <- cca(varespec ~ Al + P*(K + Baresoil), data=varechem)
vare.cca
plot(vare.cca)
## `Partialling out' and `negative components of variance'
cca(varespec ~ Ca, varechem)
cca(varespec ~ Ca + Condition(pH), varechem)
## RDA
data(dune)
data(dune.env)
dune.Manure <- rda(dune ~ Manure, dune.env)
plot(dune.Manure)
}
\keyword{ multivariate }
|
\name{agreementplot}
\alias{agreementplot}
\alias{agreementplot.default}
\alias{agreementplot.formula}
\title{Bangdiwala's Observer Agreement Chart}
\description{
Representation of a \eqn{k \times k}{k by k} confusion matrix,
where the observed and expected diagonal elements are represented by
superposed black and white rectangles, respectively. The function
also computes a statistic measuring the strength of agreement
(relation of respective area sums).
}
\usage{
\method{agreementplot}{default}(x, reverse_y = TRUE, main = NULL,
weights = c(1, 1 - 1/(ncol(x) - 1)^2), margins = par("mar"),
newpage = TRUE, pop = TRUE,
xlab = names(dimnames(x))[2],
ylab = names(dimnames(x))[1],
xlab_rot = 0, xlab_just = "center",
ylab_rot = 90, ylab_just = "center",
fill_col = function(j) gray((1 - (weights[j]) ^ 2) ^ 0.5),
line_col = "red", xscale = TRUE, yscale = TRUE,
return_grob = FALSE,
prefix = "", \dots)
\method{agreementplot}{formula}(formula, data = NULL, ..., subset)
}
\arguments{
\item{x}{a confusion matrix, i.e., a table with equal-sized dimensions.}
\item{reverse_y}{if \code{TRUE}, the y axis is reversed (i.e., the
rectangles' positions correspond to the contingency table).}
\item{main}{user-specified main title.}
\item{weights}{vector of weights for successive larger observed areas,
used in the agreement strength statistic, and also for the
shading. The first element should be 1.}
\item{margins}{vector of margins (see \code{\link[graphics]{par}}).}
\item{newpage}{logical; if \code{TRUE}, the plot is drawn on a new page.}
\item{pop}{logical; if \code{TRUE}, all newly generated viewports are popped after plotting.}
\item{return_grob}{logical. Should a snapshot of the display be
returned as a grid grob?}
\item{xlab, ylab}{labels of x- and y-axis.}
\item{xlab_rot, ylab_rot}{rotation angle for the category labels.}
\item{xlab_just, ylab_just}{justification for the category labels.}
\item{fill_col}{a function, giving the fill colors used for exact and partial agreement}
\item{line_col}{color used for the diagonal reference line}
\item{formula}{a formula, such as \code{y ~ x}.
For details, see \code{\link{xtabs}}.}
\item{data}{a data frame (or list), or a contingency table from which
the variables in \code{formula} should be taken.}
\item{subset}{an optional vector specifying a subset of the rows in
the data frame to be used for plotting.}
\item{xscale, yscale}{logicals indicating whether the marginals should
be added on the x-axis/y-axis, respectively.}
\item{prefix}{character string used as prefix for the viewport name}
\item{\dots}{further graphics parameters (see \code{\link{par}}).}
}
\details{
Weights can be specified to allow for partial agreement, taking into
account contributions from off-diagonal cells. Partial agreement
is typically represented in the display by lighter shading, as given by
\code{fill_col(j)}, corresponding to \code{weights[j]}.
A weight vector of
length 1 means strict agreement only, each additional element
increases the maximum number of disagreement steps.
\code{\link{cotabplot}} can be used for stratified analyses (see examples).
}
\value{
Invisibly returned, a list with components
\item{Bangdiwala}{the unweighted agreement strength statistic.}
\item{Bangdiwala_Weighted}{the weighted statistic.}
\item{weights}{the weight vector used.}
}
\references{
Bangdiwala, S. I. (1988). The Agreement Chart. Department of Biostatistics,
University of North Carolina at Chapel Hill,
Institute of Statistics Mimeo Series No. 1859,
\url{https://repository.lib.ncsu.edu/bitstream/handle/1840.4/3827/ISMS_1988_1859.pdf}
Bangdiwala, S. I., Ana S. Haedo, Marcela L. Natal, and Andres
Villaveces (2008). The agreement chart as an alternative to the
receiver-operating characteristic curve for diagnostic tests.
\emph{Journal of Clinical Epidemiology}, 61 (9), 866-874.
Michael Friendly (2000),
\emph{Visualizing Categorical Data}.
SAS Institute, Cary, NC.
}
\author{
David Meyer \email{David.Meyer@R-project.org}
}
\examples{
data("SexualFun")
agreementplot(t(SexualFun))
data("MSPatients")
\dontrun{
## best visualized using a resized device, e.g. using:
## get(getOption("device"))(width = 12)
pushViewport(viewport(layout = grid.layout(ncol = 2)))
pushViewport(viewport(layout.pos.col = 1))
agreementplot(t(MSPatients[,,1]), main = "Winnipeg Patients",
newpage = FALSE)
popViewport()
pushViewport(viewport(layout.pos.col = 2))
agreementplot(t(MSPatients[,,2]), main = "New Orleans Patients",
newpage = FALSE)
popViewport(2)
dev.off()
}
## alternatively, use cotabplot:
cotabplot(MSPatients, panel = cotab_agreementplot)
}
\keyword{category}
\keyword{hplot}
| /man/agreementplot.Rd | no_license | cran/vcd | R | false | false | 4,905 | rd | \name{agreementplot}
\alias{agreementplot}
\alias{agreementplot.default}
\alias{agreementplot.formula}
\title{Bangdiwala's Observer Agreement Chart}
\description{
Representation of a \eqn{k \times k}{k by k} confusion matrix,
where the observed and expected diagonal elements are represented by
superposed black and white rectangles, respectively. The function
also computes a statistic measuring the strength of agreement
(relation of respective area sums).
}
\usage{
\method{agreementplot}{default}(x, reverse_y = TRUE, main = NULL,
weights = c(1, 1 - 1/(ncol(x) - 1)^2), margins = par("mar"),
newpage = TRUE, pop = TRUE,
xlab = names(dimnames(x))[2],
ylab = names(dimnames(x))[1],
xlab_rot = 0, xlab_just = "center",
ylab_rot = 90, ylab_just = "center",
fill_col = function(j) gray((1 - (weights[j]) ^ 2) ^ 0.5),
line_col = "red", xscale = TRUE, yscale = TRUE,
return_grob = FALSE,
prefix = "", \dots)
\method{agreementplot}{formula}(formula, data = NULL, ..., subset)
}
\arguments{
\item{x}{a confusion matrix, i.e., a table with equal-sized dimensions.}
\item{reverse_y}{if \code{TRUE}, the y axis is reversed (i.e., the
rectangles' positions correspond to the contingency table).}
\item{main}{user-specified main title.}
\item{weights}{vector of weights for successive larger observed areas,
used in the agreement strength statistic, and also for the
shading. The first element should be 1.}
\item{margins}{vector of margins (see \code{\link[graphics]{par}}).}
\item{newpage}{logical; if \code{TRUE}, the plot is drawn on a new page.}
\item{pop}{logical; if \code{TRUE}, all newly generated viewports are popped after plotting.}
\item{return_grob}{logical. Should a snapshot of the display be
returned as a grid grob?}
\item{xlab, ylab}{labels of x- and y-axis.}
\item{xlab_rot, ylab_rot}{rotation angle for the category labels.}
\item{xlab_just, ylab_just}{justification for the category labels.}
\item{fill_col}{a function, giving the fill colors used for exact and partial agreement}
\item{line_col}{color used for the diagonal reference line}
\item{formula}{a formula, such as \code{y ~ x}.
For details, see \code{\link{xtabs}}.}
\item{data}{a data frame (or list), or a contingency table from which
the variables in \code{formula} should be taken.}
\item{subset}{an optional vector specifying a subset of the rows in
the data frame to be used for plotting.}
\item{xscale, yscale}{logicals indicating whether the marginals should
be added on the x-axis/y-axis, respectively.}
\item{prefix}{character string used as prefix for the viewport name}
\item{\dots}{further graphics parameters (see \code{\link{par}}).}
}
\details{
Weights can be specified to allow for partial agreement, taking into
account contributions from off-diagonal cells. Partial agreement
is typically represented in the display by lighter shading, as given by
\code{fill_col(j)}, corresponding to \code{weights[j]}.
A weight vector of
length 1 means strict agreement only, each additional element
increases the maximum number of disagreement steps.
\code{\link{cotabplot}} can be used for stratified analyses (see examples).
}
\value{
Invisibly returned, a list with components
\item{Bangdiwala}{the unweighted agreement strength statistic.}
\item{Bangdiwala_Weighted}{the weighted statistic.}
\item{weights}{the weight vector used.}
}
\references{
Bangdiwala, S. I. (1988). The Agreement Chart. Department of Biostatistics,
University of North Carolina at Chapel Hill,
Institute of Statistics Mimeo Series No. 1859,
\url{https://repository.lib.ncsu.edu/bitstream/handle/1840.4/3827/ISMS_1988_1859.pdf}
Bangdiwala, S. I., Ana S. Haedo, Marcela L. Natal, and Andres
Villaveces. The agreement chart as an alternative to the
receiver-operating characteristic curve for diagnostic tests.
\emph{Journal of Clinical Epidemiology}, 61 (9), 866-874.
Michael Friendly (2000),
\emph{Visualizing Categorical Data}.
SAS Institute, Cary, NC.
}
\author{
David Meyer \email{David.Meyer@R-project.org}
}
\examples{
data("SexualFun")
agreementplot(t(SexualFun))
data("MSPatients")
\dontrun{
## best visualized using a resized device, e.g. using:
## get(getOption("device"))(width = 12)
pushViewport(viewport(layout = grid.layout(ncol = 2)))
pushViewport(viewport(layout.pos.col = 1))
agreementplot(t(MSPatients[,,1]), main = "Winnipeg Patients",
newpage = FALSE)
popViewport()
pushViewport(viewport(layout.pos.col = 2))
agreementplot(t(MSPatients[,,2]), main = "New Orleans Patients",
newpage = FALSE)
popViewport(2)
dev.off()
}
## alternatively, use cotabplot:
cotabplot(MSPatients, panel = cotab_agreementplot)
}
\keyword{category}
\keyword{hplot}
|
testlist <- list(c = -1499027802L, r = 178693798L)
result <- do.call(landscapemetrics:::triangular_index,testlist)
str(result) | /landscapemetrics/inst/testfiles/triangular_index/libFuzzer_triangular_index/triangular_index_valgrind_files/1609955215-test.R | no_license | akhikolla/newtestfiles-2 | R | false | false | 126 | r | testlist <- list(c = -1499027802L, r = 178693798L)
result <- do.call(landscapemetrics:::triangular_index,testlist)
str(result) |
# this script serves no other purpose other then generating multidimensional tables for genomic repeat contant
rm(list = ls())
setwd("~/Documents/phd/Desktop analyses/new_PCA/script")
# new PCA on chr 16 repeats
require(GenomicRanges)
bin <- read.table("../bins/H_bin.txt", header=T)
bin.gr <- GRanges( seqnames = Rle(bin[,1]),
ranges = IRanges(start = bin[,2], end = bin[,3])
)
rep <- read.table("../repeat_files/hg19/hg19_all_chr")
# process the repeat names so they are readable
mamsum <- read.table("../repeat_libraries/human/summary_human2", sep = "\t")
colnames(mamsum) <- "V4"
R <- merge(rep, mamsum)
# set up colnames to make the table subsetable
name <- c("fam", "chrom", "start", "end", "r_start", "r_end", "strand", "score1", "score2", "score3", "zero", "score4", "type", "species")
colnames(R) <- name
# get rid of spaces
for(i in seq(length(R))){
if(class(R[,i]) == "factor"){
R[,i] <- gsub(" ", "_", R[,i])
}
}
R$type_species <- paste(R$type, R$species,sep ="__")
R$fam_species <- paste(R$fam, R$species,sep ="__")
R.gr <- GRanges(seqnames= Rle( R$chrom),
ranges = IRanges( start = R$start, end = R$end))
# Produce an overlap table: each row pairs a bin index (column 1) with the
# index of a repeat from R (column 2) that overlaps that bin.
bin_Rep_Ol <- as.matrix(findOverlaps(bin.gr, R.gr))
# Count, for every bin, how many overlapping repeats fall into each
# type__species category.
identifier <- unique(R$type_species)
Counts <- as.data.frame(matrix(0, nrow = nrow(bin), ncol = length(identifier)))
colnames(Counts) <- identifier
for (i in seq_len(nrow(bin))) {
  b <- R$type_species[bin_Rep_Ol[bin_Rep_Ol[, 1] == i, 2]]
  # Tabulate against the fixed category set in a single pass instead of
  # scanning every column separately; factor(levels = identifier) guarantees
  # the counts come back in column order, so the result is identical.
  Counts[i, ] <- as.vector(table(factor(b, levels = identifier)))
}
# One row per bin, one column per type__species identifier.
# NOTE(review): extra columns for bin chr/start/stop would make uploads easier.
# BUG FIX: the original wrote the undefined object `C` (which resolves to
# base R's c() function); the counts table is `Counts`.
write.table(Counts, file = "../../human-sort_p_gene,species,family", quote = FALSE, sep = "\t", row.names = FALSE, col.names= TRUE)
system.time(R$type_species[bin_Rep_Ol[bin_Rep_Ol[,1] == i,2]])
system.time(Counts[i,z] <- length(b[b == colnames(Counts)[z] ]))
# this thing should take around 14 hours
# there is probably that other table package that can do it much quicker
# on the data sci workshop
b <- R$type_species[bin_Rep_Ol[bin_Rep_Ol[,1] == i,2]]
for(z in seq(dim(Counts)[2])){
Counts[i,z] <- length(b[b == colnames(Counts)[z] ])
}
| /sort_repeats.R | no_license | ReubenBuck/Repeat_Distributions | R | false | false | 2,357 | r |
# this script serves no other purpose other then generating multidimensional tables for genomic repeat contant
rm(list = ls())
setwd("~/Documents/phd/Desktop analyses/new_PCA/script")
# new PCA on chr 16 repeats
require(GenomicRanges)
bin <- read.table("../bins/H_bin.txt", header=T)
bin.gr <- GRanges( seqnames = Rle(bin[,1]),
ranges = IRanges(start = bin[,2], end = bin[,3])
)
rep <- read.table("../repeat_files/hg19/hg19_all_chr")
# process the repeat names so they are readable
mamsum <- read.table("../repeat_libraries/human/summary_human2", sep = "\t")
colnames(mamsum) <- "V4"
R <- merge(rep, mamsum)
# set up colnames to make the table subsetable
name <- c("fam", "chrom", "start", "end", "r_start", "r_end", "strand", "score1", "score2", "score3", "zero", "score4", "type", "species")
colnames(R) <- name
# get rid of spaces
for(i in seq(length(R))){
if(class(R[,i]) == "factor"){
R[,i] <- gsub(" ", "_", R[,i])
}
}
R$type_species <- paste(R$type, R$species,sep ="__")
R$fam_species <- paste(R$fam, R$species,sep ="__")
R.gr <- GRanges(seqnames= Rle( R$chrom),
ranges = IRanges( start = R$start, end = R$end))
# produce an overlap table that will show all the R entries that will overlap with each bin
bin_Rep_Ol <- as.matrix(findOverlaps(bin.gr, R.gr))
# Sort repeats into their types and their bins
identifier <- unique(R$type_species)
Counts <- data.frame(rep(data.frame(rep(0, dim(bin)[1])), length(identifier)))
colnames(Counts) <- identifier
for(i in 1:dim(bin)[1]){
b <- R$type_species[bin_Rep_Ol[bin_Rep_Ol[,1] == i,2]]
for(z in seq(dim(Counts)[2])){
Counts[i,z] <- length(b[b == colnames(Counts)[z] ])
}
}
# Repeats are in according to an identiffier
# maybe add extra columns for bin chr start stop
# data will then be easier to upload
write.table(C, file = "../../human-sort_p_gene,species,family", quote = FALSE, sep = "\t", row.names = FALSE, col.names= TRUE)
system.time(R$type_species[bin_Rep_Ol[bin_Rep_Ol[,1] == i,2]])
system.time(Counts[i,z] <- length(b[b == colnames(Counts)[z] ]))
# this thing should take around 14 hours
# there is probably that other table package that can do it much quicker
# on the data sci workshop
b <- R$type_species[bin_Rep_Ol[bin_Rep_Ol[,1] == i,2]]
for(z in seq(dim(Counts)[2])){
Counts[i,z] <- length(b[b == colnames(Counts)[z] ])
}
|
##################################################
## Script to get sequence (not SNV/SNP masked)
## around SNV or indels to design primers for
## Targeted resequencing on the MiSeq
## Aparicio Lab WSOP 2013-001 developed by
## Dr Damian Yap , Research Associate
## dyap@bccrc.ca Version 3.0 (Sep 2013)
## Pipeline use gets parse args from html form
##################################################
# These commands must be specifed in order for this script to work
# source("http://www.bioconductor.org/biocLite.R");
# source("http://www.bioconductor.org/biocLite.R"); biocLite("BSgenome");
# biocLite("BSgenome.Hsapiens.UCSC.hg19"); library('BSgenome.Hsapiens.UCSC.hg19')
library('BSgenome.Hsapiens.UCSC.hg19')
# if run directly uncomment the sample name
# Command line `Rscript ~/Scripts/GetSeq.R --no-save --no-restore --args $dir/$sample/$file`
# This takes the 4th argument (see str above) which is sample name
args <- commandArgs(trailingOnly = TRUE)
input <- args[4]
# To test this programme in R using source
# commandArgs <- function() "TEST/123/20130926214630"
# source(file="~/Scripts/v3.1_pipeline/GetSeq.R")
# For testing only uncomment for production
# input <- "Tumour_Xenograft/SA494/SA494_p3_positions.txt"
Project <- strsplit(input, split="/")[[1]][1]
name <- strsplit(input, split="/")[[1]][2]
posfile <- strsplit(input, split="/")[[1]][3]
print("Directory")
print(Project)
print("Sample_ID")
print(name)
print("File")
print(posfile)
# all files from this point should be hg19
infile=paste(name, "p3_positions.txt", sep="_")
homebase="/home/dyap/Projects/PrimerDesign"
setwd(homebase)
basedir=paste(homebase,Project,sep="/")
setwd(basedir)
#system('mkdir positions')
system('mkdir Annotate')
#system('mkdir primer3')
#############################################
# Save input files under $homebase/positions#
#############################################
##############################################
###### User defined variables ######
# Directory and file references
sourcedir=paste(basedir,"positions", sep="/")
p3dir=paste(basedir,"primer3", sep="/")
annpath=paste(basedir,"Annotate", sep="/")
############ name processing #################
######################
# These are the input files
input=paste(sourcedir,posfile,sep="/")
#######################################
# This is the name of the primer3 design file
p3file=paste(name,"p3_design.txt",sep="_")
outfile=paste(p3dir,p3file,sep="/")
###############################################
file1 = paste(annpath, paste(name, "Annotate.csv", sep="_") ,sep="/")
###############################################
file2 = paste(sourcedir, paste(name, "positions.txt", sep="_") ,sep="/")
# offsets (sequences on either side of SNV,indel for matching only)
WToffset=5
snpdf <- read.csv(file=input, stringsAsFactors = FALSE, header= FALSE)
# For positions
posdf <- data.frame(Chr = rep("", nrow(snpdf)),
Pos1 = rep(0, nrow(snpdf)),
ID = rep("", nrow(snpdf)),
stringsAsFactors = FALSE)
# For annotation files
andf <- data.frame(Chr = rep("", nrow(snpdf)),
Pos1 = rep(0, nrow(snpdf)),
Pos2 = rep(0, nrow(snpdf)),
WT = rep("", nrow(snpdf)),
SNV = rep("", nrow(snpdf)),
stringsAsFactors = FALSE)
# For SNV matching
outdf <- data.frame(ID = rep("", nrow(snpdf)),
Chr = rep("", nrow(snpdf)),
Pos1 = rep(0, nrow(snpdf)),
Pos2 = rep(0, nrow(snpdf)),
SNV = rep("", nrow(snpdf)),
Cxt = rep("", nrow(snpdf)),
Seq = rep("", nrow(snpdf)),
stringsAsFactors = FALSE)
offset <- 5
# Build one output row per variant: parse "chrom:start-end" coordinates from
# column 2 of snpdf, pull the reference base and flanking context from the
# hg19 genome (BSgenome Hsapiens), and fill the primer-design (outdf),
# position (posdf) and ANNOVAR annotation (andf) tables in parallel.
for (ri in seq(nrow(snpdf))) {
# Column 2 holds "chrom:start-end"; prefix with "chr" for BSgenome lookups.
chr <- paste("chr",strsplit(snpdf[ri,2],split=":")[[1]][1],sep="")
position1 <- as.numeric(strsplit(strsplit(snpdf[ri,2],split=":")[[1]][2], split="-")[[1]][1])
# for SNV the position is the same for both
position2 <- as.numeric(strsplit(strsplit(snpdf[ri,2],split=":")[[1]][2], split="-")[[1]][2])
# Sample ID is the token before the first underscore in column 1.
sample <- strsplit(snpdf[ri,1],split="_")[[1]][1]
sequence <- snpdf[ri,3]
# Reference (wild-type) base at the variant position.
wt <- as.character(getSeq(Hsapiens,chr,position1,position1))
# Context: `offset` bases upstream through position1, concatenated with
# `offset` bases downstream of position2 (used for sequence matching only).
cxt <- as.character(paste(getSeq(Hsapiens,chr,position1-offset,position1),
getSeq(Hsapiens,chr,position2+1,position2+offset),
sep=''))
# ID of the form <sample>_chr<N>_<position1>.
outdf$ID[ri] <- paste(paste(sample, chr, sep="_"), position1, sep="_")
outdf$Chr[ri] <- chr
outdf$Pos1[ri] <- position1
outdf$Pos2[ri] <- position2
outdf$SNV[ri] <- wt
outdf$Cxt[ri] <-cxt
outdf$Seq[ri] <- sequence
print(outdf$ID[ri])
posdf$ID[ri] <- outdf$ID[ri]
posdf$Chr[ri] <- outdf$Chr[ri]
posdf$Pos1[ri] <- outdf$Pos1[ri]
# Fake the SNV to be just the complement of WT position (as SNV allele is not known)
if (wt=="A") snv <- "T"
if (wt=="C") snv <- "G"
if (wt=="G") snv <- "C"
if (wt=="T") snv <- "A"
# ANNOVAR expects chromosome names without the "chr" prefix.
andf$Chr[ri] <- gsub("chr","", outdf$Chr[ri])
andf$Pos1[ri] <- outdf$Pos1[ri]
andf$Pos2[ri] <- outdf$Pos2[ri]
andf$WT[ri] <- outdf$SNV[ri]
andf$SNV[ri] <-snv
}
# Output file design.csv
print(outdf)
write.csv(outdf, file = outfile )
# Output file positions.txt
print(posdf)
write.csv(posdf, file = file2 )
# Format for ANNOVAR <15 43762161 43762161 T C>
print(andf)
write.csv(andf, file = file1)
print("GetSeq.R complete...")
| /beast_scripts/v3.1_pipeline/v3.1_GetSeq.R | no_license | oncoapop/data_reporting | R | false | false | 5,507 | r | ##################################################
## Script to get sequence (not SNV/SNP masked)
## around SNV or indels to design primers for
## Targeted resequencing on the MiSeq
## Aparicio Lab WSOP 2013-001 developed by
## Dr Damian Yap , Research Associate
## dyap@bccrc.ca Version 3.0 (Sep 2013)
## Pipeline use gets parse args from html form
##################################################
# These commands must be specifed in order for this script to work
# source("http://www.bioconductor.org/biocLite.R");
# source("http://www.bioconductor.org/biocLite.R"); biocLite("BSgenome");
# biocLite("BSgenome.Hsapiens.UCSC.hg19"); library('BSgenome.Hsapiens.UCSC.hg19')
library('BSgenome.Hsapiens.UCSC.hg19')
# if run directly uncomment the sample name
# Command line `Rscript ~/Scripts/GetSeq.R --no-save --no-restore --args $dir/$sample/$file`
# This takes the 4th argument (see str above) which is sample name
args <- commandArgs(trailingOnly = TRUE)
input <- args[4]
# To test this programme in R using source
# commandArgs <- function() "TEST/123/20130926214630"
# source(file="~/Scripts/v3.1_pipeline/GetSeq.R")
# For testing only uncomment for production
# input <- "Tumour_Xenograft/SA494/SA494_p3_positions.txt"
Project <- strsplit(input, split="/")[[1]][1]
name <- strsplit(input, split="/")[[1]][2]
posfile <- strsplit(input, split="/")[[1]][3]
print("Directory")
print(Project)
print("Sample_ID")
print(name)
print("File")
print(posfile)
# all files from this point should be hg19
infile=paste(name, "p3_positions.txt", sep="_")
homebase="/home/dyap/Projects/PrimerDesign"
setwd(homebase)
basedir=paste(homebase,Project,sep="/")
setwd(basedir)
#system('mkdir positions')
system('mkdir Annotate')
#system('mkdir primer3')
#############################################
# Save input files under $homebase/positions#
#############################################
##############################################
###### User defined variables ######
# Directory and file references
sourcedir=paste(basedir,"positions", sep="/")
p3dir=paste(basedir,"primer3", sep="/")
annpath=paste(basedir,"Annotate", sep="/")
############ name processing #################
######################
# These are the input files
input=paste(sourcedir,posfile,sep="/")
#######################################
# This is the name of the primer3 design file
p3file=paste(name,"p3_design.txt",sep="_")
outfile=paste(p3dir,p3file,sep="/")
###############################################
file1 = paste(annpath, paste(name, "Annotate.csv", sep="_") ,sep="/")
###############################################
file2 = paste(sourcedir, paste(name, "positions.txt", sep="_") ,sep="/")
# offsets (sequences on either side of SNV,indel for matching only)
WToffset=5
snpdf <- read.csv(file=input, stringsAsFactors = FALSE, header= FALSE)
# For positions
posdf <- data.frame(Chr = rep("", nrow(snpdf)),
Pos1 = rep(0, nrow(snpdf)),
ID = rep("", nrow(snpdf)),
stringsAsFactors = FALSE)
# For annotation files
andf <- data.frame(Chr = rep("", nrow(snpdf)),
Pos1 = rep(0, nrow(snpdf)),
Pos2 = rep(0, nrow(snpdf)),
WT = rep("", nrow(snpdf)),
SNV = rep("", nrow(snpdf)),
stringsAsFactors = FALSE)
# For SNV matching
outdf <- data.frame(ID = rep("", nrow(snpdf)),
Chr = rep("", nrow(snpdf)),
Pos1 = rep(0, nrow(snpdf)),
Pos2 = rep(0, nrow(snpdf)),
SNV = rep("", nrow(snpdf)),
Cxt = rep("", nrow(snpdf)),
Seq = rep("", nrow(snpdf)),
stringsAsFactors = FALSE)
offset <- 5
for (ri in seq(nrow(snpdf))) {
chr <- paste("chr",strsplit(snpdf[ri,2],split=":")[[1]][1],sep="")
position1 <- as.numeric(strsplit(strsplit(snpdf[ri,2],split=":")[[1]][2], split="-")[[1]][1])
# for SNV the position is the same for both
position2 <- as.numeric(strsplit(strsplit(snpdf[ri,2],split=":")[[1]][2], split="-")[[1]][2])
sample <- strsplit(snpdf[ri,1],split="_")[[1]][1]
sequence <- snpdf[ri,3]
wt <- as.character(getSeq(Hsapiens,chr,position1,position1))
cxt <- as.character(paste(getSeq(Hsapiens,chr,position1-offset,position1),
getSeq(Hsapiens,chr,position2+1,position2+offset),
sep=''))
outdf$ID[ri] <- paste(paste(sample, chr, sep="_"), position1, sep="_")
outdf$Chr[ri] <- chr
outdf$Pos1[ri] <- position1
outdf$Pos2[ri] <- position2
outdf$SNV[ri] <- wt
outdf$Cxt[ri] <-cxt
outdf$Seq[ri] <- sequence
print(outdf$ID[ri])
posdf$ID[ri] <- outdf$ID[ri]
posdf$Chr[ri] <- outdf$Chr[ri]
posdf$Pos1[ri] <- outdf$Pos1[ri]
# Fake the SNV to be just the complement of WT position (as SNV allele is not known)
if (wt=="A") snv <- "T"
if (wt=="C") snv <- "G"
if (wt=="G") snv <- "C"
if (wt=="T") snv <- "A"
andf$Chr[ri] <- gsub("chr","", outdf$Chr[ri])
andf$Pos1[ri] <- outdf$Pos1[ri]
andf$Pos2[ri] <- outdf$Pos2[ri]
andf$WT[ri] <- outdf$SNV[ri]
andf$SNV[ri] <-snv
}
# Output file design.csv
print(outdf)
write.csv(outdf, file = outfile )
# Output file positions.txt
print(posdf)
write.csv(posdf, file = file2 )
# Format for ANNOVAR <15 43762161 43762161 T C>
print(andf)
write.csv(andf, file = file1)
print("GetSeq.R complete...")
|
library(mapdeck)
library(sf)
library(Hmisc)
key <- "pk.eyJ1IjoiaWZlbGxvd3MiLCJhIjoiY2tmNDd3dXZrMGFqOTJzb2V2azB3YnZ5aCJ9.nG777E-EH37e5wAJdsykug"
load("shiny_app/data/data.RData")
df_plot_sub <- df_raw[df_raw$time_ind==
max(df_raw$time_ind)#14
,]
df_plot_sub <- as(df_plot_sub,"sf")
#df_plot_sub <- df_plot_sub
mapdeck(
token = key,
#pitch = 35,
style = 'mapbox://styles/mapbox/light-v10'
) %>%
add_geojson(
data = df_plot_sub,
#tooltip = "popup_html",
fill_colour = "index_tsts_per_non_index_pos",
legend=TRUE,
#update_view=FALSE,
#auto_highlight = TRUE,
palette="reds",
layer_id="poly"
)
#df_site_plot <- sf::st_as_sf(df_site_plot, coords = c("longitude", "latitude"))
df_plot_sub <- df_site_plot[df_site_plot$time_ind==
max(df_site_plot$time_ind) &
!is.na(df_site_plot$fitted_tsts_per_non_pos)
,]
df_plot_sub$fills <-pmin(df_plot_sub$fitted_tsts_per_non_pos, 5)
df_plot_sub$fills <-pmin(df_plot_sub$fitted_pos_per_non_pos, 2)
df_plot_sub$fills <-Hmisc::cut2(round(df_plot_sub$fitted_pos_per_non_pos,2), g=6)
df_plot_sub$fills <-Hmisc::cut2(round(df_plot_sub$fitted_tsts_per_non_pos,2), g=6)
mapdeck(
token = key,
#pitch = 35,
style = 'mapbox://styles/mapbox/dark-v10'
) %>%
add_scatterplot(
data = df_plot_sub,
lat = "latitude",
lon = "longitude",
fill_colour = "fills",
#stroke_width=4,
#stroke_colour = "fill_color",
tooltip = "popup_html",
radius = 1500,
radius_min_pixels = 3,
legend=TRUE,
palette = "reds",
#legend=js,
#update_view=FALSE,
#auto_highlight = FALSE,
layer_id="scatter"
)
(
ggplot(
data=df,
aes(
y=index_yield,
x=tsts_per_non_index_pos,
text = facility
)) +
geom_point() +
scale_x_log10() #+
# scale_y_log10()
) %>%
plotly::ggplotly(tooltip = "text")
(
ggplot(
data=df,
aes(
y=index_yield,
x=non_index,
text = facility
)) +
geom_point() +
scale_x_log10() +
ylab("Index Yield") +
xlab("# Non-Index Pos.") +
theme_bw()
) %>%
plotly::ggplotly(tooltip = "text")
"Hopital du Personnell du Kolowezi"
"kz eThekwini Metropolitan Municipality Sub" | /index_test_mapping/R/scratch.R | no_license | ICPI/Denominators | R | false | false | 2,323 | r | library(mapdeck)
library(sf)
library(Hmisc)
key <- "pk.eyJ1IjoiaWZlbGxvd3MiLCJhIjoiY2tmNDd3dXZrMGFqOTJzb2V2azB3YnZ5aCJ9.nG777E-EH37e5wAJdsykug"
load("shiny_app/data/data.RData")
df_plot_sub <- df_raw[df_raw$time_ind==
max(df_raw$time_ind)#14
,]
df_plot_sub <- as(df_plot_sub,"sf")
#df_plot_sub <- df_plot_sub
mapdeck(
token = key,
#pitch = 35,
style = 'mapbox://styles/mapbox/light-v10'
) %>%
add_geojson(
data = df_plot_sub,
#tooltip = "popup_html",
fill_colour = "index_tsts_per_non_index_pos",
legend=TRUE,
#update_view=FALSE,
#auto_highlight = TRUE,
palette="reds",
layer_id="poly"
)
#df_site_plot <- sf::st_as_sf(df_site_plot, coords = c("longitude", "latitude"))
df_plot_sub <- df_site_plot[df_site_plot$time_ind==
max(df_site_plot$time_ind) &
!is.na(df_site_plot$fitted_tsts_per_non_pos)
,]
df_plot_sub$fills <-pmin(df_plot_sub$fitted_tsts_per_non_pos, 5)
df_plot_sub$fills <-pmin(df_plot_sub$fitted_pos_per_non_pos, 2)
df_plot_sub$fills <-Hmisc::cut2(round(df_plot_sub$fitted_pos_per_non_pos,2), g=6)
df_plot_sub$fills <-Hmisc::cut2(round(df_plot_sub$fitted_tsts_per_non_pos,2), g=6)
mapdeck(
token = key,
#pitch = 35,
style = 'mapbox://styles/mapbox/dark-v10'
) %>%
add_scatterplot(
data = df_plot_sub,
lat = "latitude",
lon = "longitude",
fill_colour = "fills",
#stroke_width=4,
#stroke_colour = "fill_color",
tooltip = "popup_html",
radius = 1500,
radius_min_pixels = 3,
legend=TRUE,
palette = "reds",
#legend=js,
#update_view=FALSE,
#auto_highlight = FALSE,
layer_id="scatter"
)
(
ggplot(
data=df,
aes(
y=index_yield,
x=tsts_per_non_index_pos,
text = facility
)) +
geom_point() +
scale_x_log10() #+
# scale_y_log10()
) %>%
plotly::ggplotly(tooltip = "text")
(
ggplot(
data=df,
aes(
y=index_yield,
x=non_index,
text = facility
)) +
geom_point() +
scale_x_log10() +
ylab("Index Yield") +
xlab("# Non-Index Pos.") +
theme_bw()
) %>%
plotly::ggplotly(tooltip = "text")
"Hopital du Personnell du Kolowezi"
"kz eThekwini Metropolitan Municipality Sub" |
library(rich)
### Name: c2cv
### Title: Comparison of 2 values of species richness using a randomization
### procedure
### Aliases: c2cv
### ** Examples
## Not run:
##D data(efeb)
##D c2cv(com1=efeb$ef,com2=efeb$eb,nrandom=100,verbose=FALSE)
## End(Not run)
| /data/genthat_extracted_code/rich/examples/c2cv.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 267 | r | library(rich)
### Name: c2cv
### Title: Comparison of 2 values of species richness using a randomization
### procedure
### Aliases: c2cv
### ** Examples
## Not run:
##D data(efeb)
##D c2cv(com1=efeb$ef,com2=efeb$eb,nrandom=100,verbose=FALSE)
## End(Not run)
|
source("bootstrap_stability_investigation.R")
source("bootlasso.R")
f <- function(bootsamp=a2.i, model= model2.imp, start.end=NULL, maxit=100){
  # For each bootstrap replicate, refit the model formula with rms::lrm and
  # with glm(method = "detect_separation"), and record per replicate whether
  # the glm fit detected separation and whether lrm failed to converge.
  #
  # bootsamp  : matrix/list of bootstrap resamples, indexed by column.
  # model     : fitted object whose $model$global_model$formula is refitted.
  # start.end : replicate indices to process (default: all columns).
  # maxit     : iteration cap passed to lrm().
  # Returns a data.frame with columns model / glmm_separation /
  # lrm_nonconvergence, one row per processed replicate.
  fmla <- model$model$global_model$formula
  if (is.null(start.end)) {
    # seq_len() (not 1:ncol) so a zero-column input yields an empty loop.
    start.end <- seq_len(ncol(bootsamp))
  }
  cat("\n\n\n")
  # Preallocate one slot per replicate instead of growing a data.frame with
  # rbind() inside the loop (which recopies the whole table every iteration).
  rows <- vector("list", length(start.end))
  for (k in seq_along(start.end)) {
    i <- start.end[k]
    # Position in iteration order; simpler and correct for any start.end,
    # unlike the original order(start.end)[start.end == i] expression.
    svMisc::progress(100 * k / length(start.end))
    model_2 <- lrm(data = as.data.frame(bootsamp[, i]), formula = fmla, maxit = maxit)
    model_2_glm <- glm(data = as.data.frame(bootsamp[, i]), formula = fmla,
                       family = binomial(), method = "detect_separation")
    rows[[k]] <- data.frame(model = i,
                            glmm_separation = model_2_glm$separation,
                            lrm_nonconvergence = model_2$fail)
  }
  d <- do.call(rbind, rows)
  return(d)
}
table(k$glmm_separation, k$lrm_nonconvergence)
set.seed(2020.10)
# example: model 1
a.i <- bootsamp(data=model1.imp$data, R=2000)
#bsi2.bwp <- bootglm(bootsamp = a2, model=model2, method = "bwP", p=0.05)
bsi.i.aic <- bootglm(bootsamp = a.i, model=model1.imp, method = "bwAIC", p=0.05)
bsi.i.p <- bootglm(bootsamp = a.i, model=model1.imp, method = "p", p=0.05)
bsi.i.lasso <- bootlasso(a.i, model = model1.imp)
a2.i <- bootsamp(data=model2.imp$data, R=2000)
#b2 <- bootsamp(data=model2$data, R=200)
#bsi2.bwp <- bootglm(bootsamp = a2, model=model2, method = "bwP", p=0.05)
bsi2.i.aic <- bootglm(bootsamp = a2.i, model=model2.imp, method = "bwAIC", p=0.05)
bsi2.i.p <- bootglm(bootsamp = a2.i, model=model2.imp, method = "p", p=0.05)
bsi2.i.lasso <- bootlasso(a2.i, model = model2.imp)
#example: model 3
act3 <- bootsamp(data=model3.act$data, R=2000)
bsiact3.aic <- bootglm(bootsamp = act3, model=model3.act, method = "bwAIC", p=0.05)
bsiact3.p <- bootglm(bootsamp = act3, model=model3.act, method = "p", p=0.05)
bsiact3.lasso <- bootlasso(bootsamp = act3, model=model3.act)
summary.bsi <- function(bsi="bsi",n=NULL, aic=T ){
  # Combine the bsi_summary tables of previously fitted bootstrap objects
  # named <bsi><n>.p, <bsi><n>.aic and <bsi><n>.lasso (looked up in the
  # calling scope) into one wide table, aligned on the p-table's variable
  # order. The lasso table has no intercept row, so its block is padded with
  # a leading NA row.
  #
  # bsi/n : name prefix/suffix of the fitted objects, e.g. bsi="bsi2", n=".i".
  # aic   : include the backward-AIC columns?
  # NOTE(review): the name summary.bsi looks like an S3 summary() method for
  # class "bsi"; it is actually a free-standing helper.
  fetch <- function(suffix) {
    # get() replaces the original eval(parse(text = ...)) — same lexical
    # lookup, without re-parsing code from strings.
    get(paste(bsi, n, suffix, sep = ""))$bsi_summary
  }
  bsip <- fetch(".p")
  lv_order <- bsip$var
  if (aic) {
    bsiaic <- fetch(".aic")
    lv <- match(lv_order, bsiaic$var)
  }
  bsilasso <- fetch(".lasso")
  # Drop the intercept: the lasso summary has no intercept row.
  lvlasso <- match(lv_order[-1], bsilasso$var)
  if (aic) {
    bsi1 <- cbind(bsip[, c(1:9)],
                  bsiaic[lv, ][, 4:11],
                  rbind(rep(NA, length.out = 5), bsilasso[lvlasso, ][, 4:8]))
  } else {
    bsi1 <- cbind(bsip[, c(1:9)],
                  rbind(rep(NA, length.out = 5), bsilasso[lvlasso, ][, 4:8]))
  }
  return(bsi1)
}
bsi1 <- summary.bsi(n=NULL, aic = T)
write.table(bsi1, "bsi1.csv",sep = ",")
bsi1i <-summary.bsi(n=".i", aic = T)
write.table(bsi1i, "bsi1i.csv",sep = ",")
bsi2 <- summary.bsi(n="2", aic = F)
write.table(bsi2, "bsi2.csv",sep = ",")
bsi2i <- summary.bsi(n="2.i", aic = F)
write.table(bsi2i, "bsi2i.csv",sep = ",")
bsiact3 <- summary.bsi(n="act3", aic = T)
write.table(bsiact3, "bsi3.csv",sep = ",")
bsi3i <- summary.bsi(n="3.i", aic = T)
write.table(bsi3i, "bsi3i.csv",sep = ",")
level.name <- c("Intercept", "Active victimization: 1-2 times", "Active victimization: >1-2 times",
"Relational victimization: 1-2 times", "Relational victimization: >1-2 times","Impulsivity: BIS",
"AUDIT-C: >=4", "PHQ-9: 6-10","PHQ-9: 11-15",
"PHQ-9: >15", "MDSS","Gender: male",
"Smoking: yes", "Father's unemployment", "School PR",
"Mother's unemployment", "Living with parents: No", "Self-esteem: RSES",
"Needy family: yes"
)
names(level.name) <- names(bsiact3.p$boot_inclusion_freq)
names(bsiact3.p$boot_inclusion_freq) <- names(level.name)
a.test <- bootsamp(data=model1.imp$data, R=1000, subsampling = T, m=0.5)
bsi.test <- bootglm(bootsamp = a.test, model=model1.imp, method = "bwAIC", p=0.05)
| /bsi_mypaper.R | no_license | jasonliao2jesus/bully_inv | R | false | false | 4,147 | r | source("bootstrap_stability_investigation.R")
source("bootlasso.R")
f <- function(bootsamp=a2.i, model= model2.imp, start.end=NULL, maxit=100){
d <- data.frame(i=NULL, glmm=NULL, lrmm=NULL)
fmla <- model$model$global_model$formula
if(is.null(start.end)){
start.end <- 1:ncol(bootsamp)
}
cat("\n\n\n")
for(i in start.end){
svMisc::progress(100*(order(start.end)[start.end==i])/length(start.end))
#assign("last.warning", NULL, envir=baseenv())
model_2 <- lrm(data= as.data.frame(bootsamp[,i]), formula = fmla, maxit=maxit)
model_2_glm <- glm(data= as.data.frame( bootsamp[,i]), formula = fmla, family = binomial(), method="detect_separation")
a <- c(i, glmm= model_2_glm$separation,lrmm=model_2$fail )
d<- rbind(d, a)
#if(is.null(model_2$Design)){ print(paste( "lrm",i))}
#if(length(warnings())>0){print(i)}
#if(model_2_glm$separation){print(paste("glm", i))}
}
colnames(d) <- c("model", "glmm_separation", "lrm_nonconvergence")
return(d)
}
table(k$glmm_separation, k$lrm_nonconvergence)
set.seed(2020.10)
# example: model 1
a.i <- bootsamp(data=model1.imp$data, R=2000)
#bsi2.bwp <- bootglm(bootsamp = a2, model=model2, method = "bwP", p=0.05)
bsi.i.aic <- bootglm(bootsamp = a.i, model=model1.imp, method = "bwAIC", p=0.05)
bsi.i.p <- bootglm(bootsamp = a.i, model=model1.imp, method = "p", p=0.05)
bsi.i.lasso <- bootlasso(a.i, model = model1.imp)
a2.i <- bootsamp(data=model2.imp$data, R=2000)
#b2 <- bootsamp(data=model2$data, R=200)
#bsi2.bwp <- bootglm(bootsamp = a2, model=model2, method = "bwP", p=0.05)
bsi2.i.aic <- bootglm(bootsamp = a2.i, model=model2.imp, method = "bwAIC", p=0.05)
bsi2.i.p <- bootglm(bootsamp = a2.i, model=model2.imp, method = "p", p=0.05)
bsi2.i.lasso <- bootlasso(a2.i, model = model2.imp)
#example: model 3
act3 <- bootsamp(data=model3.act$data, R=2000)
bsiact3.aic <- bootglm(bootsamp = act3, model=model3.act, method = "bwAIC", p=0.05)
bsiact3.p <- bootglm(bootsamp = act3, model=model3.act, method = "p", p=0.05)
bsiact3.lasso <- bootlasso(bootsamp = act3, model=model3.act)
summary.bsi <- function(bsi="bsi",n=NULL, aic=T ){
bsip <- eval(parse(text=paste(bsi,n, ".p$bsi_summary", sep = "")))
lv_order <- bsip$var
if(aic){
bsiaic <- eval(parse(text=paste(bsi,n, ".aic", "$bsi_summary", sep = "")))
lv <- match(lv_order, bsiaic$var)
}
bsilasso <- eval(parse(text=paste(bsi,n, ".lasso", "$bsi_summary", sep = "")))
lvlasso <- match(lv_order[-1], bsilasso$var)
if(aic){
bsi1 <- cbind(bsip[,c(1:9)],
bsiaic[lv,][,4:11],
rbind(rep(NA,length.out=5), bsilasso[lvlasso,][,4:8])
)
}else{
bsi1 <- cbind(bsip[,c(1:9)],
rbind(rep(NA,length.out=5), bsilasso[lvlasso,][,4:8])
)
}
return(bsi1)
}
bsi1 <- summary.bsi(n=NULL, aic = T)
write.table(bsi1, "bsi1.csv",sep = ",")
bsi1i <-summary.bsi(n=".i", aic = T)
write.table(bsi1i, "bsi1i.csv",sep = ",")
bsi2 <- summary.bsi(n="2", aic = F)
write.table(bsi2, "bsi2.csv",sep = ",")
bsi2i <- summary.bsi(n="2.i", aic = F)
write.table(bsi2i, "bsi2i.csv",sep = ",")
bsiact3 <- summary.bsi(n="act3", aic = T)
write.table(bsiact3, "bsi3.csv",sep = ",")
bsi3i <- summary.bsi(n="3.i", aic = T)
write.table(bsi3i, "bsi3i.csv",sep = ",")
level.name <- c("Intercept", "Active victimization: 1-2 times", "Active victimization: >1-2 times",
"Relational victimization: 1-2 times", "Relational victimization: >1-2 times","Impulsivity: BIS",
"AUDIT-C: >=4", "PHQ-9: 6-10","PHQ-9: 11-15",
"PHQ-9: >15", "MDSS","Gender: male",
"Smoking: yes", "Father's unemployment", "School PR",
"Mother's unemployment", "Living with parents: No", "Self-esteem: RSES",
"Needy family: yes"
)
names(level.name) <- names(bsiact3.p$boot_inclusion_freq)
names(bsiact3.p$boot_inclusion_freq) <- names(level.name)
a.test <- bootsamp(data=model1.imp$data, R=1000, subsampling = T, m=0.5)
bsi.test <- bootglm(bootsamp = a.test, model=model1.imp, method = "bwAIC", p=0.05)
|
library(magrittr)
library(ggplot2)
library(patchwork)
library(warbleR)
setwd("data-raw/wav_testes/")
# "Strix-hylophila-1953532"
id = list.files("./", "Strix-hylophila") %>%
sample(1) %>%
stringr::str_replace(".rds", "")
##
wav_orig <- tuneR::readWave(glue::glue("{id}"))
wav_bd <- data.frame(
sound.files = glue::glue("{id}.wav"),
start = 0,
end = length(wav_orig@left)/wav_orig@samp.rate
)
lspec(ovlp = 50, sxrow = 3, rows = 12, flim = c(0,10))
spec_an()
ad <- auto_detec(wl = 200, threshold = 10, ssmooth = 1000,
bp = c(1.2, 1.8), mindur = 0.1, flim = c(0,5),
maxdur = 3, img = TRUE, redo = TRUE)
| /inst/analises/z_warbler.R | permissive | Athospd/mestrado | R | false | false | 649 | r | library(magrittr)
library(ggplot2)
library(patchwork)
library(warbleR)
setwd("data-raw/wav_testes/")
# "Strix-hylophila-1953532"
id = list.files("./", "Strix-hylophila") %>%
sample(1) %>%
stringr::str_replace(".rds", "")
##
wav_orig <- tuneR::readWave(glue::glue("{id}"))
wav_bd <- data.frame(
sound.files = glue::glue("{id}.wav"),
start = 0,
end = length(wav_orig@left)/wav_orig@samp.rate
)
lspec(ovlp = 50, sxrow = 3, rows = 12, flim = c(0,10))
spec_an()
ad <- auto_detec(wl = 200, threshold = 10, ssmooth = 1000,
bp = c(1.2, 1.8), mindur = 0.1, flim = c(0,5),
maxdur = 3, img = TRUE, redo = TRUE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_traits.R
\name{get_traits_by_pubmed_id}
\alias{get_traits_by_pubmed_id}
\title{Get GWAS Catalog traits by PubMed identifiers}
\usage{
get_traits_by_pubmed_id(
pubmed_id = NULL,
verbose = FALSE,
warnings = TRUE,
page_size = 20L
)
}
\arguments{
\item{pubmed_id}{An \code{integer} vector of
\href{https://en.wikipedia.org/wiki/PubMed}{PubMed} identifiers.}
\item{verbose}{A \code{logical} indicating whether the function should be
verbose about the different queries or not.}
\item{warnings}{A \code{logical} indicating whether to print warnings, if any.}
\item{page_size}{An \code{integer} scalar indicating the
\href{https://www.ebi.ac.uk/gwas/rest/docs/api#_paging_resources}{page}
value to be used in the JSON requests, can be between \code{1} and
\code{1000}.}
}
\value{
A \linkS4class{traits} object.
}
\description{
Gets traits whose associated publications match
\href{https://en.wikipedia.org/wiki/PubMed}{PubMed} identifiers.
}
\keyword{internal}
| /man/get_traits_by_pubmed_id.Rd | permissive | ramiromagno/gwasrapidd | R | false | true | 1,048 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_traits.R
\name{get_traits_by_pubmed_id}
\alias{get_traits_by_pubmed_id}
\title{Get GWAS Catalog traits by PubMed identifiers}
\usage{
get_traits_by_pubmed_id(
pubmed_id = NULL,
verbose = FALSE,
warnings = TRUE,
page_size = 20L
)
}
\arguments{
\item{pubmed_id}{An \code{integer} vector of
\href{https://en.wikipedia.org/wiki/PubMed}{PubMed} identifiers.}
\item{verbose}{A \code{logical} indicating whether the function should be
verbose about the different queries or not.}
\item{warnings}{A \code{logical} indicating whether to print warnings, if any.}
\item{page_size}{An \code{integer} scalar indicating the
\href{https://www.ebi.ac.uk/gwas/rest/docs/api#_paging_resources}{page}
value to be used in the JSON requests, can be between \code{1} and
\code{1000}.}
}
\value{
A \linkS4class{traits} object.
}
\description{
Gets traits whose associated publications match
\href{https://en.wikipedia.org/wiki/PubMed}{PubMed} identifiers.
}
\keyword{internal}
|
# install a loads needed libraries
installif <- function(p) {
  # Ensure package `p` is installed, installing it from CRAN on demand.
  #
  # requireNamespace() is the documented, cheap way to test availability;
  # the original scanned rownames(installed.packages()), which is slow and
  # discouraged by its own help page.
  if (!requireNamespace(p, quietly = TRUE)) {
    install.packages(p)
  }
  # Report whether the package is actually usable now, instead of the
  # original's unconditional TRUE (which hid failed installations).
  requireNamespace(p, quietly = TRUE)
}
sapply(c("dplyr", "lubridate"), installif)
library(dplyr)
library(lubridate)
# fetch the zip file from internet and extract content (if needed)
fileUrl <-
"https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
destZipFile <- './EPC.ZIP'
if (!file.exists(destZipFile)) {
download.file(fileUrl, destfile = destZipFile, method = "curl")
}
if (!file.exists("./household_power_consumption.txt"))
unzip(destZipFile)
energy <- read.table(
"./household_power_consumption.txt",
sep = ";",
col.names = c(
"Date",
"Time",
"Global_active_power",
"Global_reactive_power",
"Voltage",
"Global_intensity",
"Sub_metering_1",
"Sub_metering_2",
"Sub_metering_3"
),
colClasses = c(
"character",
"character",
"double",
"double",
"double",
"double",
"double",
"double",
"double"
),
header = FALSE,
skip = 21000,
nrows = 54000,
na.strings = c("?")
)
parsed <- energy %>%
## skipping to read dates from 2007/02/01 to 2007/02/02 aprox.
filter(grepl("0?[1|2]/0?2/2007", Date)) %>%
mutate(dt = dmy_hms(paste(Date, Time))) %>%
select(-c(Date, Time))
dim(parsed) ## 2880 observations = 1 sample x min for 2 days
limits <-
with(parsed, range(c(
Sub_metering_1, Sub_metering_2, Sub_metering_3
)))
# Render the four-panel figure to plot4.png.
png(file = "plot4.png",
    width = 480,
    height = 480)
par(mfrow = c(2, 2)) # creates a grid of 2x2
###### chart 1: global active power over time
with(
  parsed,
  plot(
    dt,
    Global_active_power,
    xlab = "",
    ylab = "Global Active Power (kilowatts)",
    type = "l"
  )
)
###### chart 2: voltage over time
with(parsed,
     plot(
       dt,
       Voltage,
       xlab = "datetime",
       ylab = "Voltage",
       type = "l"
     ))
###### chart 3: three sub-metering series overlaid in one panel
# submetering 1
# Literal FALSE/TRUE below: the original passed the string "F" and the
# reassignable alias T, both of which only work through coercion.
par(new = FALSE, xaxt = "s", yaxt = "s")
with(
  parsed,
  plot(
    dt,
    Sub_metering_1,
    ylim = limits,
    col = "black",
    xlab = "",
    ylab = "Energy sub metering",
    type = "l"
  )
)
# submetering 2 (new = TRUE overlays the next plot on the same panel;
# plot.new() resets it to FALSE afterwards)
par(new = TRUE)
with(parsed,
     plot(
       dt,
       Sub_metering_2,
       ylim = limits,
       type = "l",
       col = "red",
       xlab = "",
       ylab = ""
     ))
# submetering 3
par(new = TRUE)
with(parsed,
     plot(
       dt,
       Sub_metering_3,
       ylim = limits,
       type = "l",
       col = "blue",
       xlab = "",
       ylab = ""
     ))
# add legends
legend(
  x = "topright",
  col = c("black", "red", "blue"),
  legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
  lwd = 1
)
###### chart 4: global reactive power over time
with(
  parsed,
  plot(
    dt,
    Global_reactive_power,
    xlab = "datetime",
    ylab = "Global_reactive_power",
    type = "l"
  )
)
dev.off() | /plot4.R | no_license | dwerbam/ExData_Plotting1 | R | false | false | 3,059 | r | # install a loads needed libraries
# Ensure that package `p` is installed, fetching it from CRAN when absent.
# Always returns TRUE (convenient with sapply over several package names).
installif <- function(p) {
  already_installed <- p %in% rownames(installed.packages())
  if (!already_installed) {
    install.packages(p)
  }
  TRUE
}
# Install (if needed) and attach the packages used below.
sapply(c("dplyr", "lubridate"), installif)
library(dplyr)
library(lubridate)
# fetch the zip file from internet and extract content (if needed)
fileUrl <-
  "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
# Cached locally so the download runs at most once.
destZipFile <- './EPC.ZIP'
if (!file.exists(destZipFile)) {
  download.file(fileUrl, destfile = destZipFile, method = "curl")
}
# Extract only if the data file is not already on disk.
if (!file.exists("./household_power_consumption.txt"))
  unzip(destZipFile)
# Load the raw power-consumption data (";"-separated, "?" = missing).
# Column names and types are supplied explicitly because `skip` also
# skips the file's header row; the skip/nrows window limits the read
# to the samples surrounding the two target February-2007 dates
# filtered below.
energy <- read.table(
  "./household_power_consumption.txt",
  sep = ";",
  col.names = c(
    "Date",
    "Time",
    "Global_active_power",
    "Global_reactive_power",
    "Voltage",
    "Global_intensity",
    "Sub_metering_1",
    "Sub_metering_2",
    "Sub_metering_3"
  ),
  colClasses = c(
    "character",
    "character",
    "double",
    "double",
    "double",
    "double",
    "double",
    "double",
    "double"
  ),
  header = FALSE,
  skip = 21000,
  nrows = 54000,
  na.strings = c("?")
)
parsed <- energy %>%
  ## restrict to 1 Feb 2007 and 2 Feb 2007.
  ## The pattern is anchored and uses [12]; the previous
  ## "0?[1|2]/0?2/2007" put a literal "|" inside the character class
  ## and, unanchored, could also match e.g. "21/2/2007".
  filter(grepl("^0?[12]/0?2/2007$", Date)) %>%
  mutate(dt = dmy_hms(paste(Date, Time))) %>%  # POSIXct timestamp column
  select(-c(Date, Time))
dim(parsed) ## 2880 observations = 1 sample x min for 2 days
## common y-axis limits for the overlaid sub-metering lines (chart 3)
limits <-
  with(parsed, range(c(
    Sub_metering_1, Sub_metering_2, Sub_metering_3
  )))
# Draw the 2x2 panel figure and write it to plot4.png.
png(file = "plot4.png",
    width = 480,
    height = 480)
par(mfrow = c(2, 2)) # creates a grid of 2x2
###### chart 1: global active power
with(
  parsed,
  plot(
    dt,
    Global_active_power,
    xlab = "",
    ylab = "Global Active Power (kilowatts)",
    type = "l"
  )
)
###### chart 2: voltage
with(parsed,
     plot(
       dt,
       Voltage,
       xlab = "datetime",
       ylab = "Voltage",
       type = "l"
     ))
###### chart 3: overlaid sub-metering series
# submetering 1
# Spelled-out FALSE/TRUE replace the original string "F" and alias T,
# which depended on implicit coercion / T being unshadowed.
par(new = FALSE, xaxt = "s", yaxt = "s")
with(
  parsed,
  plot(
    dt,
    Sub_metering_1,
    ylim = limits,
    col = "black",
    xlab = "",
    ylab = "Energy sub metering",
    type = "l"
  )
)
# submetering 2 (overlay; plot.new() clears `new` again afterwards)
par(new = TRUE)
with(parsed,
     plot(
       dt,
       Sub_metering_2,
       ylim = limits,
       type = "l",
       col = "red",
       xlab = "",
       ylab = ""
     ))
# submetering 3
par(new = TRUE)
with(parsed,
     plot(
       dt,
       Sub_metering_3,
       ylim = limits,
       type = "l",
       col = "blue",
       xlab = "",
       ylab = ""
     ))
# add legends
legend(
  x = "topright",
  col = c("black", "red", "blue"),
  legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
  lwd = 1
)
###### chart 4: global reactive power
with(
  parsed,
  plot(
    dt,
    Global_reactive_power,
    xlab = "datetime",
    ylab = "Global_reactive_power",
    type = "l"
  )
)
# close the device so plot4.png is flushed to disk
dev.off()
# Project KLF4-perturbation topic models (TITAN) onto the Chung et al.
# single-cell breast-cancer dataset and visualise the imputed topics.
library(Seurat)
library(TITAN)
#library(devtools)
#install_github("JuliusCampbell/TITAN")
# Pre-processed Seurat object of the Chung et al. scRNA-seq data.
scBC_SO <- readRDS(file = "C:/Users/jainar/Documents/Arjun High School/High School Science Research/OHSU Internship - CEDAR/KLF4 Analysis of Chung et al dataset/scBC_SO.rds")
# Sanity-check the stored UMAP clustering and the current identities.
DimPlot(scBC_SO, reduction = "umap", label = TRUE)
Idents(scBC_SO)
# Top-50 genes per topic for the MCF7 and T47D KLF4 LDA models.
# header = TRUE instead of T (T is a reassignable alias).
MCF7_KLF4_var_genes <- read.table(file = "C:/Users/jainar/Documents/Arjun High School/High School Science Research/OHSU Internship - CEDAR/KLF4 Analysis of Chung et al dataset/Model_MCF7_KLF4_5000Variable_top50_genes_topics.txt", header = TRUE)
T47D_KLF4_var_genes <- read.table(file = "C:/Users/jainar/Documents/Arjun High School/High School Science Research/OHSU Internship - CEDAR/KLF4 Analysis of Chung et al dataset/Model_T47D_KLF4_20T_CLR_5000Variable_M10_top50_genes_topics.txt", header = TRUE)
# Impute T47D topic scores on the RNA assay and plot them per timepoint.
DefaultAssay(scBC_SO) <- "RNA"
Model_T47D_KLF4 <- readRDS(file = "C:/Users/jainar/Documents/Arjun High School/High School Science Research/OHSU Internship - CEDAR/KLF4 Analysis of Chung et al dataset/Model_T47D_KLF4_20T_CLR_5000Variable_M10.rds")
Model_MCF7_KLF4 <- readRDS(file = "C:/Users/jainar/Documents/Arjun High School/High School Science Research/OHSU Internship - CEDAR/KLF4 Analysis of Chung et al dataset/Model_MCF7_KLF4_20T_CLR_5000Variable_M10.rds")
scBC_SO <- ImputeAndAddTopics(scBC_SO, Model_T47D_KLF4, TopicPrefix = "T47D_KLF4_Topics")
HeatmapTopic(Object = scBC_SO, topics = Embeddings(scBC_SO, "imputedLDA"), AnnoVector = scBC_SO@meta.data$orig.ident, AnnoName = "Timepoint")
| /KLF4 Analysis of Chung et al data.R | no_license | arjun0502/CEDAR | R | false | false | 1,549 | r | library(Seurat)
library(TITAN)
#library(devtools)
#install_github("JuliusCampbell/TITAN")
scBC_SO <- readRDS(file = "C:/Users/jainar/Documents/Arjun High School/High School Science Research/OHSU Internship - CEDAR/KLF4 Analysis of Chung et al dataset/scBC_SO.rds")
DimPlot(scBC_SO, reduction = "umap", label = TRUE)
Idents(scBC_SO)
MCF7_KLF4_var_genes <- read.table(file = "C:/Users/jainar/Documents/Arjun High School/High School Science Research/OHSU Internship - CEDAR/KLF4 Analysis of Chung et al dataset/Model_MCF7_KLF4_5000Variable_top50_genes_topics.txt", header = T)
T47D_KLF4_var_genes <- read.table(file = "C:/Users/jainar/Documents/Arjun High School/High School Science Research/OHSU Internship - CEDAR/KLF4 Analysis of Chung et al dataset/Model_T47D_KLF4_20T_CLR_5000Variable_M10_top50_genes_topics.txt", header = T)
DefaultAssay(scBC_SO) <- "RNA"
Model_T47D_KLF4 <- readRDS(file = "C:/Users/jainar/Documents/Arjun High School/High School Science Research/OHSU Internship - CEDAR/KLF4 Analysis of Chung et al dataset/Model_T47D_KLF4_20T_CLR_5000Variable_M10.rds")
Model_MCF7_KLF4 <- readRDS(file = "C:/Users/jainar/Documents/Arjun High School/High School Science Research/OHSU Internship - CEDAR/KLF4 Analysis of Chung et al dataset/Model_MCF7_KLF4_20T_CLR_5000Variable_M10.rds")
scBC_SO <- ImputeAndAddTopics(scBC_SO, Model_T47D_KLF4, TopicPrefix = "T47D_KLF4_Topics")
HeatmapTopic(Object = scBC_SO, topics = Embeddings(scBC_SO, "imputedLDA"), AnnoVector = scBC_SO@meta.data$orig.ident, AnnoName = "Timepoint")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/var_dif_fis.R
\name{var_dif_fis}
\alias{var_dif_fis}
\title{var_dif_fis}
\usage{
var_dif_fis(n, r, rho)
}
\arguments{
\item{n}{A numerical value specifying the total sample size of a primary study}
\item{r}{A numerical value specifying the Pearson correlation coefficient
between variables h and m (see Details)}
\item{rho}{A numerical value specifying the Pearson correlation coefficient
between variables l and h and variables l and m (see Details)}
}
\value{
The \code{var_dif_fis} function returns a numerical value that is the
variance of the difference of two overlapping Fisher-z transformed correlations
given n, r, and rho.
}
\description{
Function for computing the variance of the difference between two overlapping
Fisher-z transformed correlation coefficients.
}
\details{
In case of three variables (l, h, and m), overlapping Fisher-z
transformed correlation coefficients can be computed between variables l and h
and variables l and m. The function computes the variance of the difference
between these two overlapping Fisher-z transformed correlations. For a derivation
of this variance see van Aert & Wicherts (2020).
The variance that is computed with this function can be used to correct for
outcome reporting bias by including the variance as a moderator in a
(multivariate) meta-analysis. Please see van Aert & Wicherts (2020) for
more information.
}
\examples{
### Compute variance for an artificial example
var_dif_fis(n = 100, r = 0.3, rho = 0.5)
}
\references{
van Aert, R.C.M. & Wicherts, J.M. (2020). Correcting for outcome
reporting bias in a meta-analysis: A meta-regression approach. Manuscript
submitted for publication.
}
\author{
Robbie C.M. van Aert \email{R.C.M.vanAert@tilburguniversity.edu}
}
| /puniform/man/var_dif_fis.Rd | no_license | akhikolla/TestedPackages-NoIssues | R | false | true | 1,877 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/var_dif_fis.R
\name{var_dif_fis}
\alias{var_dif_fis}
\title{var_dif_fis}
\usage{
var_dif_fis(n, r, rho)
}
\arguments{
\item{n}{A numerical value specifying the total sample size of a primary study}
\item{r}{A numerical value specifying the Pearson correlation coefficient
between variables h and m (see Details)}
\item{rho}{A numerical value specifying the Pearson correlation coefficient
between variables l and h and variables l and m (see Details)}
}
\value{
The \code{var_dif_fis} function returns a numerical value that is the
variance of the difference of two overlapping Fisher-z transformed correlations
given n, r, and rho.
}
\description{
Function for computing the variance of the difference between two overlapping
Fisher-z transformed correlation coefficients.
}
\details{
In case of three variables (l, h, and m), overlapping Fisher-z
transformed correlation coefficients can be computed between variables l and h
and variables l and m. The function computes the variance of the difference
between these two overlapping Fisher-z transformed correlations. For a derivation
of this variance see van Aert & Wicherts (2020).
The variance that is computed with this function can be used to correct for
outcome reporting bias by including the variance as a moderator in a
(multivariate) meta-analysis. Please see van Aert & Wicherts (2020) for
more information.
}
\examples{
### Compute variance for an artificial example
var_dif_fis(n = 100, r = 0.3, rho = 0.5)
}
\references{
van Aert, R.C.M. & Wicherts, J.M. (2020). Correcting for outcome
reporting bias in a meta-analysis: A meta-regression approach. Manuscript
submitted for publication.
}
\author{
Robbie C.M. van Aert \email{R.C.M.vanAert@tilburguniversity.edu}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/py_func.R
\name{py_func}
\alias{py_func}
\title{Wrap an R function in a Python function with the same signature.}
\usage{
py_func(f)
}
\arguments{
\item{f}{An R function}
}
\value{
A Python function that calls the R function \code{f} with the same signature.
}
\description{
This function wraps an R function in a Python function with
the same signature. Note that the signature of the R function
must not contain esoteric Python-incompatible constructs.
}
| /man/py_func.Rd | permissive | rstudio/reticulate | R | false | true | 540 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/py_func.R
\name{py_func}
\alias{py_func}
\title{Wrap an R function in a Python function with the same signature.}
\usage{
py_func(f)
}
\arguments{
\item{f}{An R function}
}
\value{
A Python function that calls the R function \code{f} with the same signature.
}
\description{
This function wraps an R function in a Python function with
the same signature. Note that the signature of the R function
must not contain esoteric Python-incompatible constructs.
}
|
# library(readr)
# library(magrittr)
library(pins)
# Register a file from the project S3 bucket as a pin and return the
# file name.
#
# filename : object name inside the bucket prefix
# prefix   : bucket "folder" to read from (validated via match.arg)
# pin_name : local name for the cached pin (defaults to the file name)
pin_from_bucket <- function(
  filename,
  prefix = c("practice-level", "ONS-postcodes"),
  pin_name = filename
) {
  prefix <- match.arg(prefix)
  base_url <- "https://nhs-prescription-data.s3-us-west-2.amazonaws.com"
  bucket_url <- paste(base_url, prefix, filename, sep = "/")
  # Bug fix: the pin was previously always registered under `filename`,
  # silently ignoring the `pin_name` argument.  The default
  # pin_name = filename preserves the old behaviour for existing callers.
  pins::pin(bucket_url, name = pin_name)
  filename
}
| /R/pin_data.R | no_license | andrie/nhs_prescriptions | R | false | false | 390 | r | # library(readr)
# library(magrittr)
library(pins)
# Pin a bucket object locally and hand back its file name.
# `prefix` selects the bucket sub-path; `pin_name` controls the name of
# the local pin cache entry and defaults to `filename`.
pin_from_bucket <- function(
  filename,
  prefix = c("practice-level", "ONS-postcodes"),
  pin_name = filename
) {
  prefix <- match.arg(prefix)
  base_url <- "https://nhs-prescription-data.s3-us-west-2.amazonaws.com"
  bucket_url <- paste(base_url, prefix, filename, sep = "/")
  # Previously `name = filename` ignored the pin_name argument entirely;
  # honouring it is backward compatible because of its default value.
  pins::pin(bucket_url, name = pin_name)
  filename
}
|
\name{gts.hierarchy}
\alias{gts.hierarchy}
\title{
Get the broader and narrower concept of one geological time concept in database
}
\description{Get the broader and narrower concept of one geological time concept in database
}
\usage{
gts.hierarchy(geoConcept, region = NULL, iscVersion = NULL, prefix = NULL, graph = NULL)
}
\arguments{
\item{geoConcept}{
[character] Geological time concept, eg. "Cambrian"
}
\item{region}{
[character] region of the geological time concept. The options are: "International",
"North America", "South China", "North China",
"West Europe", "Britain", "New Zealand",
"Japan", "Baltoscania", "Australia". [If no input of this, treat it as all regions including the global one]
}
\item{iscVersion}{
[character] Geological time concept, eg. "isc2018-08". See gts.iscSchemes() for all ISC versions.
}
\item{prefix}{
[character] prefix for SPARQL querying. [Optional, default is NULL]
}
\item{graph}{
[character] GRAPH for SPARQL querying. [Optional, default is NULL]
}
}
\references{
}
\examples{
gts.hierarchy("Jurassic")
gts.hierarchy("Harju")
gts.hierarchy("Wordian") # no narrowerConcept
gts.hierarchy("Precambrian") # no broaderConcept
}
| /R_Functions/man/gts.hierarchy.Rd | permissive | xgmachina/DeepTimeKB | R | false | false | 1,358 | rd | \name{gts.hierarchy}
\alias{gts.hierarchy}
\title{
Get the broader and narrower concept of one geological time concept in database
}
\description{Get the broader and narrower concept of one geological time concept in database
}
\usage{
gts.hierarchy(geoConcept, region = NULL, iscVersion = NULL, prefix = NULL, graph = NULL)
}
\arguments{
\item{geoConcept}{
[character] Geological time concept, eg. "Cambrian"
}
\item{region}{
[character] region of the geological time concept. The options are: "International",
"North America", "South China", "North China",
"West Europe", "Britain", "New Zealand",
"Japan", "Baltoscania", "Australia". [If no input of this, treat it as all regions including the global one]
}
\item{iscVersion}{
[character] Geological time concept, eg. "isc2018-08". See gts.iscSchemes() for all ISC versions.
}
\item{prefix}{
[character] prefix for SPARQL querying. [Optional, default is NULL]
}
\item{graph}{
[character] GRAPH for SPARQL querying. [Optional, default is NULL]
}
}
\references{
}
\examples{
gts.hierarchy("Jurassic")
gts.hierarchy("Harju")
gts.hierarchy("Wordian") # no narrowerConcept
gts.hierarchy("Precambrian") # no broaderConcept
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/trimFilter.R
\name{filterWidth}
\alias{filterWidth}
\title{Trim reads containing too few bases}
\usage{
filterWidth(threshold = 14L, .name = "WidthFilter")
}
\description{
Trim reads containing too few bases
}
| /man/filterWidth.Rd | no_license | jliu678/SeqWins | R | false | true | 288 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/trimFilter.R
\name{filterWidth}
\alias{filterWidth}
\title{Trim reads containing too few bases}
\usage{
filterWidth(threshold = 14L, .name = "WidthFilter")
}
\description{
Trim reads containing too few bases
}
|
# Find Inter Class Correlation between factor and continuous covariates
# Inspired from http://stats.stackexchange.com/questions/108007/correlations-with-categorical-variables
# Intraclass correlation (ICC) and one-way ANOVA p-value for a pair of
# covariates (typically one factor-coded and one continuous column).
#
# factorContNames : character vector of length 2 with the two column
#                   names; the first is the ANOVA response.
# COVARIATES      : data frame holding (at least) those two columns.
# na.action       : "remove" drops rows with missing values in either
#                   column; any other value leaves COVARIATES untouched.
# alpha           : confidence level forwarded to psych::ICC().
#
# Returns a named numeric vector c(Estimate = <ICC>, Pval = <p-value>).
getFactorContAssociationStatistics <- function(factorContNames, COVARIATES, na.action = 'remove',
                                               alpha = 0.05) {
  # Fail loudly if psych is missing instead of letting require() return
  # FALSE and then erroring later on an undefined ICC().
  if (!requireNamespace("psych", quietly = TRUE)) {
    stop("Package 'psych' is required for getFactorContAssociationStatistics()")
  }
  if (na.action == "remove") {
    COVARIATES <- na.omit(COVARIATES[, factorContNames])
  }
  stats <- psych::ICC(COVARIATES[, factorContNames], alpha = alpha)
  Pval <- summary(aov(COVARIATES[, factorContNames[1]] ~ COVARIATES[, factorContNames[2]]))[[1]][["Pr(>F)"]][1]
  c(Estimate = stats$results['Single_raters_absolute', 'ICC'],
    Pval = Pval)
}
| /code/R/lib/getFactorContAssociationStatistics.R | permissive | kdaily/pcbc_c4_analysis | R | false | false | 715 | r | # Find Inter Class Correlation between factor and continuous covariates
# Inspired from http://stats.stackexchange.com/questions/108007/correlations-with-categorical-variables
# Compute the intraclass correlation and a one-way ANOVA p-value for two
# covariate columns of COVARIATES; returns c(Estimate = ..., Pval = ...).
getFactorContAssociationStatistics <- function(factorContNames, COVARIATES, na.action = 'remove',
                                               alpha = 0.05) {
  require(psych)
  # Optionally drop incomplete rows before computing the statistics.
  if (na.action == "remove") {
    COVARIATES <- na.omit(COVARIATES[, factorContNames])
  }
  icc_fit <- ICC(COVARIATES[, factorContNames], alpha = alpha)
  anova_fit <- aov(COVARIATES[, factorContNames[1]] ~ COVARIATES[, factorContNames[2]])
  p_value <- summary(anova_fit)[[1]][["Pr(>F)"]][1]
  c(Estimate = icc_fit$results['Single_raters_absolute', 'ICC'],
    Pval = p_value)
}
|
\docType{data}
\name{capitanes}
\alias{capitanes}
\title{Tabla de capitanes}
\format{Un data frame con 3.504 filas y 10 columnas
\describe{
\item{id_jugador}{ID del jugador}
\item{anio}{Año}
\item{id_equipo}{ID equipo (factor)}
\item{id_liga}{ID liga (factor)}
\item{en_temporada}{Cero si fue capitán del equipo al equipo toda la temporada. En otro caso denota el orden de entrada en la temporada (uno si fue el primer capitán del equipo que entró esa temporada, dos si fue el segundo capitán del equipo que entró esa temporada, etc)}
\item{juegos}{Juegos dirigidos}
\item{juegos_ganados}{Juegos ganados}
\item{juegos_perdidos}{Juegos perdidos}
\item{posicion}{Posición del equipo en la clasificación final del año}
\item{jugador_representado}{Tiene valor "S" (sí) para los jugadores estuvieron de capitán del equipo durante la temporada y "N" (no) en caso contrario (factor).}
}}
\description{Información de los equipos que dirigieron y algunas estadísticas básicas de los equipos en cada año.}
\keyword{datasets}
| /man/capitanes.rd | permissive | cienciadedatos/datos | R | false | false | 1,030 | rd | \docType{data}
\name{capitanes}
\alias{capitanes}
\title{Tabla de capitanes}
\format{Un data frame con 3.504 filas y 10 columnas
\describe{
\item{id_jugador}{ID del jugador}
\item{anio}{Año}
\item{id_equipo}{ID equipo (factor)}
\item{id_liga}{ID liga (factor)}
\item{en_temporada}{Cero si fue capitán del equipo al equipo toda la temporada. En otro caso denota el orden de entrada en la temporada (uno si fue el primer capitán del equipo que entró esa temporada, dos si fue el segundo capitán del equipo que entró esa temporada, etc)}
\item{juegos}{Juegos dirigidos}
\item{juegos_ganados}{Juegos ganados}
\item{juegos_perdidos}{Juegos perdidos}
\item{posicion}{Posición del equipo en la clasificación final del año}
\item{jugador_representado}{Tiene valor "S" (sí) para los jugadores estuvieron de capitán del equipo durante la temporada y "N" (no) en caso contrario (factor).}
}}
\description{Información de los equipos que dirigieron y algunas estadísticas básicas de los equipos en cada año.}
\keyword{datasets}
|
\name{imageanalysisBrain-package}
\alias{imageanalysisBrain-package}
\alias{imageanalysisBrain}
\docType{package}
\title{
\packageTitle{imageanalysisBrain}
}
\description{
\packageDescription{imageanalysisBrain}
}
\details{
The DESCRIPTION file:
\packageDESCRIPTION{imageanalysisBrain}
\packageIndices{imageanalysisBrain}
}
\author{
\packageAuthor{imageanalysisBrain}
Maintainer: \packageMaintainer{imageanalysisBrain}
}
\keyword{ package }
| /man/imageanalysisBrain-package.Rd | no_license | mknoll/imageanalysisBrain | R | false | false | 443 | rd | \name{imageanalysisBrain-package}
\alias{imageanalysisBrain-package}
\alias{imageanalysisBrain}
\docType{package}
\title{
\packageTitle{imageanalysisBrain}
}
\description{
\packageDescription{imageanalysisBrain}
}
\details{
The DESCRIPTION file:
\packageDESCRIPTION{imageanalysisBrain}
\packageIndices{imageanalysisBrain}
}
\author{
\packageAuthor{imageanalysisBrain}
Maintainer: \packageMaintainer{imageanalysisBrain}
}
\keyword{ package }
|
de9a6782f378655603c4fbc189c93495 incrementer-enc07-nonuniform-depth-6.qdimacs 4964 12846 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/A1/Database/Miller-Marin/incrementer-encoder/incrementer-enc07-nonuniform-depth-6/incrementer-enc07-nonuniform-depth-6.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 88 | r | de9a6782f378655603c4fbc189c93495 incrementer-enc07-nonuniform-depth-6.qdimacs 4964 12846 |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SmokePlot.R
\name{SmokePlot}
\alias{SmokePlot}
\title{Make smoke plot}
\usage{
SmokePlot(
x.PlaceboPool.full = SCUL.inference$y.placebo.StandardizedDifference.Full,
x.PlaceboPool.CohensD = SCUL.inference$y.placebo.CohensD,
TreatmentBeginsAt = SCUL.input$TreatmentBeginsAt,
OutputFilePath = SCUL.input$OutputFilePath,
CohensD = SCUL.input$CohensDThreshold,
y.actual = SCUL.output$y.actual,
y.scul = SCUL.output$y.scul,
fig.title = "Standardized differences of target compared\\n and each placebo",
custom.alpha = 0.33,
save.figure = FALSE
)
}
\arguments{
\item{x.PlaceboPool.full}{A (T by L), where L<=J) data frame containing all products that are included in the placebo distribution
Default is SCUL.inference$y.placebo.StandardizedDifference.Full}
\item{x.PlaceboPool.CohensD}{A (1 by L) data frame containing all pre-period Cohen's D fit statistic for each placebo unit.
Default is SCUL.inference$y.placebo.CohensD,}
\item{TreatmentBeginsAt}{An integer indicating which row begins treatment. Default is SCUL.output$TreatmentBeginsAt.}
\item{OutputFilePath}{Output file path. Default is SCUL.input$OutputFilePath.}
\item{CohensD}{A real number greater than 0, indicating the Cohen's D threshold at which
fit is determined to be "poor". The difference is in standard deviation units. Default is SCUL.input$CohensDThreshold.}
\item{y.actual}{The actual (target) data. Default is SCUL.output$y.actual.}
\item{y.scul}{Synthetic data created by SCUL procedure. Default is SCUL.output$y.scul.}
\item{fig.title}{Title of smoke-plot. Default is "Standardized difference for target variable compared to standardized difference for each placebo"}
\item{custom.alpha}{Choose transparency of placebo pool lines. Default is .33.}
\item{save.figure}{Boolean, set to TRUE if you want output saved as figure to OutputFilePath automatically. Default is FALSE}
}
\value{
graph A smoke plot of the standardized effect size compared to placbos.
}
\description{
Plot standardized differences of all placebo goods and target good.
}
| /man/SmokePlot.Rd | permissive | hollina/scul | R | false | true | 2,124 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SmokePlot.R
\name{SmokePlot}
\alias{SmokePlot}
\title{Make smoke plot}
\usage{
SmokePlot(
x.PlaceboPool.full = SCUL.inference$y.placebo.StandardizedDifference.Full,
x.PlaceboPool.CohensD = SCUL.inference$y.placebo.CohensD,
TreatmentBeginsAt = SCUL.input$TreatmentBeginsAt,
OutputFilePath = SCUL.input$OutputFilePath,
CohensD = SCUL.input$CohensDThreshold,
y.actual = SCUL.output$y.actual,
y.scul = SCUL.output$y.scul,
fig.title = "Standardized differences of target compared\\n and each placebo",
custom.alpha = 0.33,
save.figure = FALSE
)
}
\arguments{
\item{x.PlaceboPool.full}{A (T by L), where L<=J) data frame containing all products that are included in the placebo distribution
Default is SCUL.inference$y.placebo.StandardizedDifference.Full}
\item{x.PlaceboPool.CohensD}{A (1 by L) data frame containing all pre-period Cohen's D fit statistic for each placebo unit.
Default is SCUL.inference$y.placebo.CohensD,}
\item{TreatmentBeginsAt}{An integer indicating which row begins treatment. Default is SCUL.output$TreatmentBeginsAt.}
\item{OutputFilePath}{Output file path. Default is SCUL.input$OutputFilePath.}
\item{CohensD}{A real number greater than 0, indicating the Cohen's D threshold at which
fit is determined to be "poor". The difference is in standard deviation units. Default is SCUL.input$CohensDThreshold.}
\item{y.actual}{The actual (target) data. Default is SCUL.output$y.actual.}
\item{y.scul}{Synthetic data created by SCUL procedure. Default is SCUL.output$y.scul.}
\item{fig.title}{Title of smoke-plot. Default is "Standardized difference for target variable compared to standardized difference for each placebo"}
\item{custom.alpha}{Choose transparancy of placebo pool lines. Default is .33.}
\item{save.figure}{Boolean, set to TRUE if you want output saved as figure to OutputFilePath automatically. Default is FALSE}
}
\value{
graph A smoke plot of the standardized effect size compared to placbos.
}
\description{
Plot standardized differences of all placebo goods and target good.
}
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 1440
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 1440
c
c Input Parameter (command line, file):
c input filename QBFLIB/Letombe/renHorn/renHorn_400CNF1440_2aQBF_95.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 400
c no.of clauses 1440
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 1440
c
c QBFLIB/Letombe/renHorn/renHorn_400CNF1440_2aQBF_95.qdimacs 400 1440 E1 [] 0 220 180 1440 NONE
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Letombe/renHorn/renHorn_400CNF1440_2aQBF_95/renHorn_400CNF1440_2aQBF_95.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 645 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 1440
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 1440
c
c Input Parameter (command line, file):
c input filename QBFLIB/Letombe/renHorn/renHorn_400CNF1440_2aQBF_95.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 400
c no.of clauses 1440
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 1440
c
c QBFLIB/Letombe/renHorn/renHorn_400CNF1440_2aQBF_95.qdimacs 400 1440 E1 [] 0 220 180 1440 NONE
|
\name{cox.mcmc}
\alias{cox.mcmc}
\title{
Cox Model Markov Chain Monte Carlo
}
\description{
This sampler function implements a derivative based MCMC algorithm for
flexible Cox models with structured additive predictors.
}
\usage{
cox.mcmc(x, y, family, start, weights, offset,
n.iter = 1200, burnin = 200, thin = 1,
verbose = TRUE, digits = 4, step = 20, ...)
}
\arguments{
\item{x}{The \code{x} list, as returned from function
\code{\link{bamlss.frame}} and transformed by function \code{\link{surv.transform}},
holding all model matrices and other information that is used for
fitting the model.}
\item{y}{The model response, as returned from function \code{\link{bamlss.frame}}.}
\item{family}{A \pkg{bamlss} family object, see \code{\link{family.bamlss}}.
In this case this is the \code{\link{cox_bamlss}} family object.}
\item{start}{A named numeric vector containing possible starting values, the names are based on
function \code{\link{parameters}}.}
\item{weights}{Prior weights on the data, as returned from function \code{\link{bamlss.frame}}.}
\item{offset}{Can be used to supply model offsets for use in fitting,
returned from function \code{\link{bamlss.frame}}.}
\item{n.iter}{Sets the number of MCMC iterations.}
\item{burnin}{Sets the burn-in phase of the sampler, i.e., the number of starting samples that
should be removed.}
\item{thin}{Defines the thinning parameter for MCMC simulation. E.g., \code{thin = 10} means,
that only every 10th sampled parameter will be stored.}
\item{verbose}{Print information during runtime of the algorithm.}
\item{digits}{Set the digits for printing when \code{verbose = TRUE}.}
\item{step}{How many times should algorithm runtime information be printed, divides \code{n.iter}.}
\item{\dots}{Currently not used.}
}
\details{
The sampler uses derivative based proposal functions to create samples of parameters.
For time-dependent functions the proposals are based on one Newton-Raphson iteration centered
at the last state, while for the time-constant functions proposals can be based
on iteratively reweighted least squares (IWLS), see also function \code{\link{GMCMC}}.
The integrals that are part of the time-dependent function updates are solved numerically.
In addition, smoothing variances are sampled using slice sampling.
}
\value{
The function returns samples of parameters. The samples are provided as a
\code{\link[coda]{mcmc}} matrix.
}
\references{
Umlauf N, Klein N, Zeileis A (2016). Bayesian Additive Models for Location
Scale and Shape (and Beyond). \emph{(to appear)}
}
\seealso{
\code{\link{cox.mcmc}}, \code{\link{cox_bamlss}}, \code{\link{surv.transform}},
\code{\link{simSurv}}, \code{\link{bamlss}}
}
\examples{
\dontrun{library("survival")
set.seed(123)
## Simulate survival data.
d <- simSurv(n = 500)
## Formula of the survival model, note
## that the baseline is given in the first formula by s(time).
f <- list(
Surv(time, event) ~ s(time) + s(time, by = x3),
gamma ~ s(x1) + s(x2)
)
## Cox model with continuous time.
## Note the the family object cox_bamlss() sets
## the default optimizer and sampler function!
## First, posterior mode estimates are computed
## using function cox.mode(), afterwards the
## sampler cox.mcmc() is started.
b <- bamlss(f, family = "cox", data = d)
## Plot estimated effects.
plot(b)
}
}
\keyword{regression}
\keyword{survival}
| /man/cox.mcmc.Rd | no_license | baydoganm/bamlss | R | false | false | 3,549 | rd | \name{cox.mcmc}
\alias{cox.mcmc}
\title{
Cox Model Markov Chain Monte Carlo
}
\description{
This sampler function implements a derivative based MCMC algorithm for
flexible Cox models with structured additive predictors.
}
\usage{
cox.mcmc(x, y, family, start, weights, offset,
n.iter = 1200, burnin = 200, thin = 1,
verbose = TRUE, digits = 4, step = 20, ...)
}
\arguments{
\item{x}{The \code{x} list, as returned from function
\code{\link{bamlss.frame}} and transformed by function \code{\link{surv.transform}},
holding all model matrices and other information that is used for
fitting the model.}
\item{y}{The model response, as returned from function \code{\link{bamlss.frame}}.}
\item{family}{A \pkg{bamlss} family object, see \code{\link{family.bamlss}}.
In this case this is the \code{\link{cox_bamlss}} family object.}
\item{start}{A named numeric vector containing possible starting values, the names are based on
function \code{\link{parameters}}.}
\item{weights}{Prior weights on the data, as returned from function \code{\link{bamlss.frame}}.}
\item{offset}{Can be used to supply model offsets for use in fitting,
returned from function \code{\link{bamlss.frame}}.}
\item{n.iter}{Sets the number of MCMC iterations.}
\item{burnin}{Sets the burn-in phase of the sampler, i.e., the number of starting samples that
should be removed.}
\item{thin}{Defines the thinning parameter for MCMC simulation. E.g., \code{thin = 10} means,
that only every 10th sampled parameter will be stored.}
\item{verbose}{Print information during runtime of the algorithm.}
\item{digits}{Set the digits for printing when \code{verbose = TRUE}.}
\item{step}{How many times should algorithm runtime information be printed, divides \code{n.iter}.}
\item{\dots}{Currently not used.}
}
\details{
The sampler uses derivative based proposal functions to create samples of parameters.
For time-dependent functions the proposals are based on one Newton-Raphson iteration centered
at the last state, while for the time-constant functions proposals can be based
on iteratively reweighted least squares (IWLS), see also function \code{\link{GMCMC}}.
The integrals that are part of the time-dependent function updates are solved numerically.
In addition, smoothing variances are sampled using slice sampling.
}
\value{
The function returns samples of parameters. The samples are provided as a
\code{\link[coda]{mcmc}} matrix.
}
\references{
Umlauf N, Klein N, Zeileis A (2016). Bayesian Additive Models for Location
Scale and Shape (and Beyond). \emph{(to appear)}
}
\seealso{
\code{\link{cox.mcmc}}, \code{\link{cox_bamlss}}, \code{\link{surv.transform}},
\code{\link{simSurv}}, \code{\link{bamlss}}
}
\examples{
\dontrun{library("survival")
set.seed(123)
## Simulate survival data.
d <- simSurv(n = 500)
## Formula of the survival model, note
## that the baseline is given in the first formula by s(time).
f <- list(
Surv(time, event) ~ s(time) + s(time, by = x3),
gamma ~ s(x1) + s(x2)
)
## Cox model with continuous time.
## Note the the family object cox_bamlss() sets
## the default optimizer and sampler function!
## First, posterior mode estimates are computed
## using function cox.mode(), afterwards the
## sampler cox.mcmc() is started.
b <- bamlss(f, family = "cox", data = d)
## Plot estimated effects.
plot(b)
}
}
\keyword{regression}
\keyword{survival}
|
\alias{gFileMountMountableFinish}
\name{gFileMountMountableFinish}
\title{gFileMountMountableFinish}
\description{Finishes a mount operation. See \code{\link{gFileMountMountable}} for details.}
\usage{gFileMountMountableFinish(object, result, .errwarn = TRUE)}
\arguments{
\item{\verb{object}}{input \code{\link{GFile}}.}
\item{\verb{result}}{a \code{\link{GAsyncResult}}.}
\item{.errwarn}{Whether to issue a warning on error or fail silently}
}
\details{Finish an asynchronous mount operation that was started
with \code{\link{gFileMountMountable}}.}
\value{
A list containing the following elements:
\item{retval}{[\code{\link{GFile}}] a \code{\link{GFile}} or \code{NULL} on error.}
\item{\verb{error}}{a \code{\link{GError}}, or \code{NULL}}
}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
| /RGtk2/man/gFileMountMountableFinish.Rd | no_license | lawremi/RGtk2 | R | false | false | 820 | rd | \alias{gFileMountMountableFinish}
\name{gFileMountMountableFinish}
\title{gFileMountMountableFinish}
\description{Finishes a mount operation. See \code{\link{gFileMountMountable}} for details.}
\usage{gFileMountMountableFinish(object, result, .errwarn = TRUE)}
\arguments{
\item{\verb{object}}{input \code{\link{GFile}}.}
\item{\verb{result}}{a \code{\link{GAsyncResult}}.}
\item{.errwarn}{Whether to issue a warning on error or fail silently}
}
\details{Finish an asynchronous mount operation that was started
with \code{\link{gFileMountMountable}}.}
\value{
A list containing the following elements:
\item{retval}{[\code{\link{GFile}}] a \code{\link{GFile}} or \code{NULL} on error.}
\item{\verb{error}}{a \code{\link{GError}}, or \code{NULL}}
}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
## ----setup, echo=FALSE, results="asis"----------------------------------------
library(rebook)
chapterPreamble()
## ---- include = FALSE---------------------------------------------------------
library(ggplot2)
theme_set(theme_classic())
library(mia)
library(scater)
library(patchwork)
library(miaViz)
library(sechm)
library(reshape2)
library(pheatmap)
library(ape)
library(ggtree)
# essential data
data("GlobalPatterns", package = "mia")
tse <- GlobalPatterns
## -----------------------------------------------------------------------------
# list row meta data
names(rowData(tse))
# list column meta data
names(colData(tse))
## ---- warning = FALSE---------------------------------------------------------
# obtain QC data
tse <- addPerCellQC(tse)
tse <- addPerFeatureQC(tse)
# plot QC Mean against Species
plotRowData(tse, "mean", "Species") +
theme(axis.text.x = element_blank()) +
labs(x = "Species", y = "QC Mean")
# plot QC Sum against Sample ID, colour-labeled by Sample Type
plotColData(tse, "sum", "X.SampleID", colour_by = "SampleType") +
theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
labs(x = "Sample ID", y = "QC Sum")
## -----------------------------------------------------------------------------
# store colData into a data frame
coldata <- as.data.frame(colData(tse))
# plot Number of Samples against Sampling Site
ggplot(coldata, aes(x = SampleType)) +
geom_bar(width = 0.5) +
theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
labs(x = "Sampling Site",
y = "Number of Samples")
## -----------------------------------------------------------------------------
# estimate shannon diversity index
tse <- mia::estimateDiversity(tse,
assay.type = "counts",
index = "shannon",
name = "shannon")
# plot shannon diversity index, colour-labeled by Sample Type
plotColData(tse, "shannon", colour_by = "SampleType")
## -----------------------------------------------------------------------------
# estimate faith diversity index
tse <- mia::estimateFaith(tse,
assay.type = "counts")
# store colData into a data frame
coldata <- as.data.frame(colData(tse))
# generate plots for shannon and faith indices
# and store them into a list
plots <- lapply(c("shannon", "faith"),
function(i) ggplot(coldata, aes_string(y = i)) +
geom_boxplot() +
theme(axis.text.x = element_blank(),
axis.ticks.x = element_blank()))
# combine plots with patchwork
plots[[1]] + plots[[2]]
## -----------------------------------------------------------------------------
# perform NMDS coordination method
tse <- runNMDS(tse,
FUN = vegan::vegdist,
name = "NMDS")
# plot results of a 2-component NMDS on tse,
# coloured-scaled by shannon diversity index
plotReducedDim(tse, "NMDS", colour_by = "shannon")
## -----------------------------------------------------------------------------
# perform MDS coordination method
tse <- runMDS(tse,
FUN = vegan::vegdist,
method = "bray",
name = "MDS",
assay.type = "counts",
ncomponents = 3)
# plot results of a 3-component MDS on tse,
# coloured-scaled by faith diversity index
plotReducedDim(tse, "MDS", ncomponents = c(1:3), colour_by = "faith")
## -----------------------------------------------------------------------------
# generate plots for MDS and NMDS methods
# and store them into a list
plots <- lapply(c("MDS", "NMDS"),
plotReducedDim,
object = tse,
colour_by = "shannon")
# combine plots with patchwork
plots[[1]] + plots[[2]] +
plot_layout(guides = "collect")
## ----plotAbundance1-----------------------------------------------------------
# agglomerate tse by Order
tse_order <- mergeFeaturesByRank(tse,
rank = "Order",
onRankOnly = TRUE)
# transform counts into relative abundance
tse_order <- transformAssay(tse_order,
assay.type = "counts",
method = "relabundance")
# get top orders
top_taxa <- getTopFeatures(tse_order,
top = 10,
assay.type = "relabundance")
# leave only names for top 10 orders and label the rest with "Other"
order_renamed <- lapply(rowData(tse_order)$Order,
function(x){if (x %in% top_taxa) {x} else {"Other"}})
rowData(tse_order)$Order <- as.character(order_renamed)
# plot composition as a bar plot
plotAbundance(tse_order,
assay.type = "relabundance",
rank = "Order",
order_rank_by = "abund",
order_sample_by = "Clostridiales")
## ----plotAbundance2-----------------------------------------------------------
# Create plots
plots <- plotAbundance(tse_order,
assay.type = "relabundance",
rank = "Order",
order_rank_by = "abund",
order_sample_by = "Clostridiales",
features = "SampleType")
# Modify the legend of the first plot to be smaller
plots[[1]] <- plots[[1]] +
theme(legend.key.size = unit(0.3, 'cm'),
legend.text = element_text(size = 6),
legend.title = element_text(size = 8))
# Modify the legend of the second plot to be smaller
plots[[2]] <- plots[[2]] +
theme(legend.key.height = unit(0.3, 'cm'),
legend.key.width = unit(0.3, 'cm'),
legend.text = element_text(size = 6),
legend.title = element_text(size = 8),
legend.direction = "vertical")
# Load required packages
if( !require("ggpubr") ){
install.packages("ggpubr")
library("ggpubr")
}
# Load required packages
if( !require("patchwork") ){
install.packages("patchwork")
library("patchwork")
}
# Combine legends
legend <- wrap_plots(as_ggplot(get_legend(plots[[1]])), as_ggplot(get_legend(plots[[2]])), ncol = 1)
# Remove legends from the plots
plots[[1]] <- plots[[1]] + theme(legend.position = "none")
plots[[2]] <- plots[[2]] + theme(legend.position = "none", axis.title.x=element_blank())
# Combine plots
plot <- wrap_plots(plots[[2]], plots[[1]], ncol = 1, heights = c(2, 10))
# Combine the plot with the legend
wrap_plots(plot, legend, nrow = 1, widths = c(2, 1))
## ----pheatmap1----------------------------------------------------------------
# Agglomerate tse by phylum
tse_phylum <- mergeFeaturesByRank(tse,
rank = "Phylum",
onRankOnly = TRUE)
# Add clr-transformation on samples
tse_phylum <- transformAssay(tse_phylum, MARGIN = "samples", method = "clr", assay.type = "counts", pseudocount=1)
# Add z-transformation on features (taxa)
tse_phylum <- transformAssay(tse_phylum, assay.type = "clr",
MARGIN = "features",
method = "z", name = "clr_z")
# Take subset: only samples from feces, skin, or tongue
tse_phylum_subset <- tse_phylum[ , tse_phylum$SampleType %in% c("Feces", "Skin", "Tongue") ]
# Add clr-transformation
tse_phylum_subset <- transformAssay(tse_phylum_subset, method = "clr",
MARGIN="samples",
assay.type = "counts", pseudocount=1)
# Does z-transformation
tse_phylum_subset <- transformAssay(tse_phylum_subset, assay.type = "clr",
MARGIN = "features",
method = "z", name = "clr_z")
# Get n most abundant taxa, and subsets the data by them
top_taxa <- getTopFeatures(tse_phylum_subset, top = 20)
tse_phylum_subset <- tse_phylum_subset[top_taxa, ]
# Gets the assay table
mat <- assay(tse_phylum_subset, "clr_z")
# Creates the heatmap
pheatmap(mat)
## ----pheatmap2----------------------------------------------------------------
# Hierarchical clustering
taxa_hclust <- hclust(dist(mat), method = "complete")
# Creates a phylogenetic tree
taxa_tree <- as.phylo(taxa_hclust)
# Plot taxa tree
taxa_tree <- ggtree(taxa_tree) +
theme(plot.margin=margin(0,0,0,0)) # removes margins
# Get order of taxa in plot
taxa_ordered <- get_taxa_name(taxa_tree)
# to view the tree, run
# taxa_tree
## ----pheatmap3----------------------------------------------------------------
# Creates clusters
taxa_clusters <- cutree(tree = taxa_hclust, k = 3)
# Converts into data frame
taxa_clusters <- data.frame(clusters = taxa_clusters)
taxa_clusters$clusters <- factor(taxa_clusters$clusters)
# Order data so that it's same as in phylo tree
taxa_clusters <- taxa_clusters[taxa_ordered, , drop = FALSE]
# Prints taxa and their clusters
taxa_clusters
## ----pheatmap4----------------------------------------------------------------
# Adds information to rowData
rowData(tse_phylum_subset)$clusters <- taxa_clusters[order(match(rownames(taxa_clusters), rownames(tse_phylum_subset))), ]
# Prints taxa and their clusters
rowData(tse_phylum_subset)$clusters
## ----pheatmap5----------------------------------------------------------------
# Hierarchical clustering
sample_hclust <- hclust(dist(t(mat)), method = "complete")
# Creates a phylogenetic tree
sample_tree <- as.phylo(sample_hclust)
# Plot sample tree
sample_tree <- ggtree(sample_tree) + layout_dendrogram() +
theme(plot.margin=margin(0,0,0,0)) # removes margins
# Get order of samples in plot
samples_ordered <- rev(get_taxa_name(sample_tree))
# to view the tree, run
# sample_tree
# Creates clusters
sample_clusters <- factor(cutree(tree = sample_hclust, k = 3))
# Converts into data frame
sample_data <- data.frame(clusters = sample_clusters)
# Order data so that it's same as in phylo tree
sample_data <- sample_data[samples_ordered, , drop = FALSE]
# Order data based on
tse_phylum_subset <- tse_phylum_subset[ , rownames(sample_data)]
# Add sample type data
sample_data$sample_types <- unfactor(colData(tse_phylum_subset)$SampleType)
sample_data
## ----pheatmap6----------------------------------------------------------------
# Determines the scaling of colorss
# Scale colors
breaks <- seq(-ceiling(max(abs(mat))), ceiling(max(abs(mat))),
length.out = ifelse( max(abs(mat))>5, 2*ceiling(max(abs(mat))), 10 ) )
colors <- colorRampPalette(c("darkblue", "blue", "white", "red", "darkred"))(length(breaks)-1)
pheatmap(mat, annotation_row = taxa_clusters,
annotation_col = sample_data,
breaks = breaks,
color = colors)
## ----sechm--------------------------------------------------------------------
# Stores annotation colros to metadata
metadata(tse_phylum_subset)$anno_colors$SampleType <- c(Feces = "blue",
Skin = "red",
Tongue = "gray")
# Create a plot
sechm(tse_phylum_subset,
features = rownames(tse_phylum_subset),
assayName = "clr",
do.scale = TRUE,
top_annotation = c("SampleType"),
gaps_at = "SampleType",
cluster_cols = TRUE, cluster_rows = TRUE)
## ----more_complex_heatmap-----------------------------------------------------
# Add feature names to column as a factor
taxa_clusters$Feature <- rownames(taxa_clusters)
taxa_clusters$Feature <- factor(taxa_clusters$Feature, levels = taxa_clusters$Feature)
# Create annotation plot
row_annotation <- ggplot(taxa_clusters) +
geom_tile(aes(x = NA, y = Feature, fill = clusters)) +
coord_equal(ratio = 1) +
theme(
axis.text.x=element_blank(),
axis.text.y=element_blank(),
axis.ticks.y=element_blank(),
axis.title.y=element_blank(),
axis.title.x = element_text(angle = 90, vjust = 0.5, hjust=1),
plot.margin=margin(0,0,0,0),
) +
labs(fill = "Clusters", x = "Clusters")
# to view the notation, run
# row_annotation
# Add sample names to one of the columns
sample_data$sample <- factor(rownames(sample_data), levels = rownames(sample_data))
# Create annotation plot
sample_types_annotation <- ggplot(sample_data) +
scale_y_discrete(position = "right", expand = c(0,0)) +
geom_tile(aes(y = NA, x = sample, fill = sample_types)) +
coord_equal(ratio = 1) +
theme(
axis.text.x=element_blank(),
axis.text.y=element_blank(),
axis.title.x=element_blank(),
axis.ticks.x=element_blank(),
plot.margin=margin(0,0,0,0),
axis.title.y.right = element_text(angle=0, vjust = 0.5)
) +
labs(fill = "Sample types", y = "Sample types")
# to view the notation, run
# sample_types_annotation
# Create annotation plot
sample_clusters_annotation <- ggplot(sample_data) +
scale_y_discrete(position = "right", expand = c(0,0)) +
geom_tile(aes(y = NA, x = sample, fill = clusters)) +
coord_equal(ratio = 1) +
theme(
axis.text.x=element_blank(),
axis.text.y=element_blank(),
axis.title.x=element_blank(),
axis.ticks.x=element_blank(),
plot.margin=margin(0,0,0,0),
axis.title.y.right = element_text(angle=0, vjust = 0.5)
) +
labs(fill = "Clusters", y = "Clusters")
# to view the notation, run
# sample_clusters_annotation
# Order data based on clusters and sample types
mat <- mat[unfactor(taxa_clusters$Feature), unfactor(sample_data$sample)]
# ggplot requires data in melted format
melted_mat <- melt(mat)
colnames(melted_mat) <- c("Taxa", "Sample", "clr_z")
# Determines the scaling of colorss
maxval <- round(max(abs(melted_mat$clr_z)))
limits <- c(-maxval, maxval)
breaks <- seq(from = min(limits), to = max(limits), by = 0.5)
colours <- c("darkblue", "blue", "white", "red", "darkred")
heatmap <- ggplot(melted_mat) +
geom_tile(aes(x = Sample, y = Taxa, fill = clr_z)) +
theme(
axis.title.y=element_blank(),
axis.title.x=element_blank(),
axis.ticks.y=element_blank(),
axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1),
plot.margin=margin(0,0,0,0), # removes margins
legend.key.height= unit(1, 'cm')
) +
scale_fill_gradientn(name = "CLR + Z transform",
breaks = breaks,
limits = limits,
colours = colours) +
scale_y_discrete(position = "right")
heatmap
## ----more_complex_heatmap2, fig.width = 10, fig.height = 8, eval=FALSE--------
## library(patchwork)
##
## # Create layout
## design <- c(
## patchwork::area(3, 1, 4, 1),
## patchwork::area(1, 2, 1, 3),
## patchwork::area(2, 2, 2, 3),
## patchwork::area(3, 2, 4, 3)
## )
## # to view the design, run
## # plot(design)
##
## # Combine plots
## plot <- row_annotation + sample_clusters_annotation +
## sample_types_annotation +
## heatmap +
## plot_layout(design = design, guides = "collect",
## # Specify layout, collect legends
##
## # Adjust widths and heights to align plots.
## # When annotation plot is larger, it might not fit into
## # its column/row.
## # Then you need to make column/row larger.
##
## # Relative widths and heights of each column and row:
## # Currently, the width of the first column is 15 % and the height of
## # first two rows are 30 % the size of others
##
## # To get this work most of the times, you can adjust all sizes to be 1, i.e. equal,
## # but then the gaps between plots are larger.
## widths = c(0.15, 1, 1),
## heights = c(0.3, 0.3, 1, 1))
##
## # plot
## ----more_complex_heatmap3, fig.width = 10, fig.height = 8, eval=FALSE--------
## # Create layout
## design <- c(
## patchwork::area(4, 1, 5, 1),
## patchwork::area(4, 2, 5, 2),
## patchwork::area(1, 3, 1, 4),
## patchwork::area(2, 3, 2, 4),
## patchwork::area(3, 3, 3, 4),
## patchwork::area(4, 3, 5, 4)
## )
##
## # to view the design, run
## # plot(design)
##
## # Combine plots
## plot <- taxa_tree +
## row_annotation +
## sample_tree +
## sample_clusters_annotation +
## sample_types_annotation +
## heatmap +
## plot_layout(design = design, guides = "collect", # Specify layout, collect legends
## widths = c(0.2, 0.15, 1, 1, 1),
## heights = c(0.1, 0.15, 0.15, 0.25, 1, 1))
##
## plot
## ----sessionInfo, echo = FALSE, results = "asis"------------------------------
prettySessionInfo()
| /R/19_visualization_techniques.R | no_license | microbiome/OMA | R | false | false | 16,526 | r | ## ----setup, echo=FALSE, results="asis"----------------------------------------
library(rebook)
chapterPreamble()
## ---- include = FALSE---------------------------------------------------------
library(ggplot2)
theme_set(theme_classic())
library(mia)
library(scater)
library(patchwork)
library(miaViz)
library(sechm)
library(reshape2)
library(pheatmap)
library(ape)
library(ggtree)
# essential data
data("GlobalPatterns", package = "mia")
tse <- GlobalPatterns
## -----------------------------------------------------------------------------
# list row meta data
names(rowData(tse))
# list column meta data
names(colData(tse))
## ---- warning = FALSE---------------------------------------------------------
# obtain QC data
tse <- addPerCellQC(tse)
tse <- addPerFeatureQC(tse)
# plot QC Mean against Species
plotRowData(tse, "mean", "Species") +
theme(axis.text.x = element_blank()) +
labs(x = "Species", y = "QC Mean")
# plot QC Sum against Sample ID, colour-labeled by Sample Type
plotColData(tse, "sum", "X.SampleID", colour_by = "SampleType") +
theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
labs(x = "Sample ID", y = "QC Sum")
## -----------------------------------------------------------------------------
# store colData into a data frame
coldata <- as.data.frame(colData(tse))
# plot Number of Samples against Sampling Site
ggplot(coldata, aes(x = SampleType)) +
geom_bar(width = 0.5) +
theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
labs(x = "Sampling Site",
y = "Number of Samples")
## -----------------------------------------------------------------------------
# estimate shannon diversity index
tse <- mia::estimateDiversity(tse,
assay.type = "counts",
index = "shannon",
name = "shannon")
# plot shannon diversity index, colour-labeled by Sample Type
plotColData(tse, "shannon", colour_by = "SampleType")
## -----------------------------------------------------------------------------
# estimate faith diversity index
tse <- mia::estimateFaith(tse,
assay.type = "counts")
# store colData into a data frame
coldata <- as.data.frame(colData(tse))
# generate plots for shannon and faith indices
# and store them into a list
plots <- lapply(c("shannon", "faith"),
function(i) ggplot(coldata, aes_string(y = i)) +
geom_boxplot() +
theme(axis.text.x = element_blank(),
axis.ticks.x = element_blank()))
# combine plots with patchwork
plots[[1]] + plots[[2]]
## -----------------------------------------------------------------------------
# perform NMDS coordination method
tse <- runNMDS(tse,
FUN = vegan::vegdist,
name = "NMDS")
# plot results of a 2-component NMDS on tse,
# coloured-scaled by shannon diversity index
plotReducedDim(tse, "NMDS", colour_by = "shannon")
## -----------------------------------------------------------------------------
# perform MDS coordination method
tse <- runMDS(tse,
FUN = vegan::vegdist,
method = "bray",
name = "MDS",
assay.type = "counts",
ncomponents = 3)
# plot results of a 3-component MDS on tse,
# coloured-scaled by faith diversity index
plotReducedDim(tse, "MDS", ncomponents = c(1:3), colour_by = "faith")
## -----------------------------------------------------------------------------
# generate plots for MDS and NMDS methods
# and store them into a list
plots <- lapply(c("MDS", "NMDS"),
plotReducedDim,
object = tse,
colour_by = "shannon")
# combine plots with patchwork
plots[[1]] + plots[[2]] +
plot_layout(guides = "collect")
## ----plotAbundance1-----------------------------------------------------------
# agglomerate tse by Order
tse_order <- mergeFeaturesByRank(tse,
rank = "Order",
onRankOnly = TRUE)
# transform counts into relative abundance
tse_order <- transformAssay(tse_order,
assay.type = "counts",
method = "relabundance")
# get top orders
top_taxa <- getTopFeatures(tse_order,
top = 10,
assay.type = "relabundance")
# leave only names for top 10 orders and label the rest with "Other"
order_renamed <- lapply(rowData(tse_order)$Order,
function(x){if (x %in% top_taxa) {x} else {"Other"}})
rowData(tse_order)$Order <- as.character(order_renamed)
# plot composition as a bar plot
plotAbundance(tse_order,
assay.type = "relabundance",
rank = "Order",
order_rank_by = "abund",
order_sample_by = "Clostridiales")
## ----plotAbundance2-----------------------------------------------------------
# Create plots
plots <- plotAbundance(tse_order,
assay.type = "relabundance",
rank = "Order",
order_rank_by = "abund",
order_sample_by = "Clostridiales",
features = "SampleType")
# Modify the legend of the first plot to be smaller
plots[[1]] <- plots[[1]] +
theme(legend.key.size = unit(0.3, 'cm'),
legend.text = element_text(size = 6),
legend.title = element_text(size = 8))
# Modify the legend of the second plot to be smaller
plots[[2]] <- plots[[2]] +
theme(legend.key.height = unit(0.3, 'cm'),
legend.key.width = unit(0.3, 'cm'),
legend.text = element_text(size = 6),
legend.title = element_text(size = 8),
legend.direction = "vertical")
# Load required packages
if( !require("ggpubr") ){
install.packages("ggpubr")
library("ggpubr")
}
# Load required packages
if( !require("patchwork") ){
install.packages("patchwork")
library("patchwork")
}
# Combine legends
legend <- wrap_plots(as_ggplot(get_legend(plots[[1]])), as_ggplot(get_legend(plots[[2]])), ncol = 1)
# Remove legends from the plots
plots[[1]] <- plots[[1]] + theme(legend.position = "none")
plots[[2]] <- plots[[2]] + theme(legend.position = "none", axis.title.x=element_blank())
# Combine plots
plot <- wrap_plots(plots[[2]], plots[[1]], ncol = 1, heights = c(2, 10))
# Combine the plot with the legend
wrap_plots(plot, legend, nrow = 1, widths = c(2, 1))
## ----pheatmap1----------------------------------------------------------------
# Agglomerate tse by phylum
tse_phylum <- mergeFeaturesByRank(tse,
rank = "Phylum",
onRankOnly = TRUE)
# Add clr-transformation on samples
tse_phylum <- transformAssay(tse_phylum, MARGIN = "samples", method = "clr", assay.type = "counts", pseudocount=1)
# Add z-transformation on features (taxa)
tse_phylum <- transformAssay(tse_phylum, assay.type = "clr",
MARGIN = "features",
method = "z", name = "clr_z")
# Take subset: only samples from feces, skin, or tongue
tse_phylum_subset <- tse_phylum[ , tse_phylum$SampleType %in% c("Feces", "Skin", "Tongue") ]
# Add clr-transformation
tse_phylum_subset <- transformAssay(tse_phylum_subset, method = "clr",
MARGIN="samples",
assay.type = "counts", pseudocount=1)
# Does z-transformation
tse_phylum_subset <- transformAssay(tse_phylum_subset, assay.type = "clr",
MARGIN = "features",
method = "z", name = "clr_z")
# Get n most abundant taxa, and subsets the data by them
top_taxa <- getTopFeatures(tse_phylum_subset, top = 20)
tse_phylum_subset <- tse_phylum_subset[top_taxa, ]
# Gets the assay table
mat <- assay(tse_phylum_subset, "clr_z")
# Creates the heatmap
pheatmap(mat)
## ----pheatmap2----------------------------------------------------------------
# Hierarchical clustering
taxa_hclust <- hclust(dist(mat), method = "complete")
# Creates a phylogenetic tree
taxa_tree <- as.phylo(taxa_hclust)
# Plot taxa tree
taxa_tree <- ggtree(taxa_tree) +
theme(plot.margin=margin(0,0,0,0)) # removes margins
# Get order of taxa in plot
taxa_ordered <- get_taxa_name(taxa_tree)
# to view the tree, run
# taxa_tree
## ----pheatmap3----------------------------------------------------------------
# Creates clusters
taxa_clusters <- cutree(tree = taxa_hclust, k = 3)
# Converts into data frame
taxa_clusters <- data.frame(clusters = taxa_clusters)
taxa_clusters$clusters <- factor(taxa_clusters$clusters)
# Order data so that it's same as in phylo tree
taxa_clusters <- taxa_clusters[taxa_ordered, , drop = FALSE]
# Prints taxa and their clusters
taxa_clusters
## ----pheatmap4----------------------------------------------------------------
# Adds information to rowData
rowData(tse_phylum_subset)$clusters <- taxa_clusters[order(match(rownames(taxa_clusters), rownames(tse_phylum_subset))), ]
# Prints taxa and their clusters
rowData(tse_phylum_subset)$clusters
## ----pheatmap5----------------------------------------------------------------
# Hierarchical clustering
sample_hclust <- hclust(dist(t(mat)), method = "complete")
# Creates a phylogenetic tree
sample_tree <- as.phylo(sample_hclust)
# Plot sample tree
sample_tree <- ggtree(sample_tree) + layout_dendrogram() +
theme(plot.margin=margin(0,0,0,0)) # removes margins
# Get order of samples in plot
samples_ordered <- rev(get_taxa_name(sample_tree))
# to view the tree, run
# sample_tree
# Creates clusters
sample_clusters <- factor(cutree(tree = sample_hclust, k = 3))
# Converts into data frame
sample_data <- data.frame(clusters = sample_clusters)
# Order data so that it's same as in phylo tree
sample_data <- sample_data[samples_ordered, , drop = FALSE]
# Order data based on
tse_phylum_subset <- tse_phylum_subset[ , rownames(sample_data)]
# Add sample type data
sample_data$sample_types <- unfactor(colData(tse_phylum_subset)$SampleType)
sample_data
## ----pheatmap6----------------------------------------------------------------
# Determines the scaling of colorss
# Scale colors
breaks <- seq(-ceiling(max(abs(mat))), ceiling(max(abs(mat))),
length.out = ifelse( max(abs(mat))>5, 2*ceiling(max(abs(mat))), 10 ) )
colors <- colorRampPalette(c("darkblue", "blue", "white", "red", "darkred"))(length(breaks)-1)
pheatmap(mat, annotation_row = taxa_clusters,
annotation_col = sample_data,
breaks = breaks,
color = colors)
## ----sechm--------------------------------------------------------------------
# Stores annotation colros to metadata
metadata(tse_phylum_subset)$anno_colors$SampleType <- c(Feces = "blue",
Skin = "red",
Tongue = "gray")
# Create a plot
sechm(tse_phylum_subset,
features = rownames(tse_phylum_subset),
assayName = "clr",
do.scale = TRUE,
top_annotation = c("SampleType"),
gaps_at = "SampleType",
cluster_cols = TRUE, cluster_rows = TRUE)
## ----more_complex_heatmap-----------------------------------------------------
# Add feature names to column as a factor
taxa_clusters$Feature <- rownames(taxa_clusters)
taxa_clusters$Feature <- factor(taxa_clusters$Feature, levels = taxa_clusters$Feature)
# Create annotation plot
row_annotation <- ggplot(taxa_clusters) +
geom_tile(aes(x = NA, y = Feature, fill = clusters)) +
coord_equal(ratio = 1) +
theme(
axis.text.x=element_blank(),
axis.text.y=element_blank(),
axis.ticks.y=element_blank(),
axis.title.y=element_blank(),
axis.title.x = element_text(angle = 90, vjust = 0.5, hjust=1),
plot.margin=margin(0,0,0,0),
) +
labs(fill = "Clusters", x = "Clusters")
# to view the notation, run
# row_annotation
# Add sample names to one of the columns
sample_data$sample <- factor(rownames(sample_data), levels = rownames(sample_data))
# Create annotation plot
sample_types_annotation <- ggplot(sample_data) +
scale_y_discrete(position = "right", expand = c(0,0)) +
geom_tile(aes(y = NA, x = sample, fill = sample_types)) +
coord_equal(ratio = 1) +
theme(
axis.text.x=element_blank(),
axis.text.y=element_blank(),
axis.title.x=element_blank(),
axis.ticks.x=element_blank(),
plot.margin=margin(0,0,0,0),
axis.title.y.right = element_text(angle=0, vjust = 0.5)
) +
labs(fill = "Sample types", y = "Sample types")
# to view the notation, run
# sample_types_annotation
# Create annotation plot
sample_clusters_annotation <- ggplot(sample_data) +
scale_y_discrete(position = "right", expand = c(0,0)) +
geom_tile(aes(y = NA, x = sample, fill = clusters)) +
coord_equal(ratio = 1) +
theme(
axis.text.x=element_blank(),
axis.text.y=element_blank(),
axis.title.x=element_blank(),
axis.ticks.x=element_blank(),
plot.margin=margin(0,0,0,0),
axis.title.y.right = element_text(angle=0, vjust = 0.5)
) +
labs(fill = "Clusters", y = "Clusters")
# to view the notation, run
# sample_clusters_annotation
# Order data based on clusters and sample types
mat <- mat[unfactor(taxa_clusters$Feature), unfactor(sample_data$sample)]
# ggplot requires data in melted format
melted_mat <- melt(mat)
colnames(melted_mat) <- c("Taxa", "Sample", "clr_z")
# Determines the scaling of colorss
maxval <- round(max(abs(melted_mat$clr_z)))
limits <- c(-maxval, maxval)
breaks <- seq(from = min(limits), to = max(limits), by = 0.5)
colours <- c("darkblue", "blue", "white", "red", "darkred")
heatmap <- ggplot(melted_mat) +
geom_tile(aes(x = Sample, y = Taxa, fill = clr_z)) +
theme(
axis.title.y=element_blank(),
axis.title.x=element_blank(),
axis.ticks.y=element_blank(),
axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1),
plot.margin=margin(0,0,0,0), # removes margins
legend.key.height= unit(1, 'cm')
) +
scale_fill_gradientn(name = "CLR + Z transform",
breaks = breaks,
limits = limits,
colours = colours) +
scale_y_discrete(position = "right")
heatmap
## ----more_complex_heatmap2, fig.width = 10, fig.height = 8, eval=FALSE--------
## library(patchwork)
##
## # Create layout
## design <- c(
## patchwork::area(3, 1, 4, 1),
## patchwork::area(1, 2, 1, 3),
## patchwork::area(2, 2, 2, 3),
## patchwork::area(3, 2, 4, 3)
## )
## # to view the design, run
## # plot(design)
##
## # Combine plots
## plot <- row_annotation + sample_clusters_annotation +
## sample_types_annotation +
## heatmap +
## plot_layout(design = design, guides = "collect",
## # Specify layout, collect legends
##
## # Adjust widths and heights to align plots.
## # When annotation plot is larger, it might not fit into
## # its column/row.
## # Then you need to make column/row larger.
##
## # Relative widths and heights of each column and row:
## # Currently, the width of the first column is 15 % and the height of
## # first two rows are 30 % the size of others
##
## # To get this work most of the times, you can adjust all sizes to be 1, i.e. equal,
## # but then the gaps between plots are larger.
## widths = c(0.15, 1, 1),
## heights = c(0.3, 0.3, 1, 1))
##
## # plot
## ----more_complex_heatmap3, fig.width = 10, fig.height = 8, eval=FALSE--------
## # Create layout
## design <- c(
## patchwork::area(4, 1, 5, 1),
## patchwork::area(4, 2, 5, 2),
## patchwork::area(1, 3, 1, 4),
## patchwork::area(2, 3, 2, 4),
## patchwork::area(3, 3, 3, 4),
## patchwork::area(4, 3, 5, 4)
## )
##
## # to view the design, run
## # plot(design)
##
## # Combine plots
## plot <- taxa_tree +
## row_annotation +
## sample_tree +
## sample_clusters_annotation +
## sample_types_annotation +
## heatmap +
## plot_layout(design = design, guides = "collect", # Specify layout, collect legends
## widths = c(0.2, 0.15, 1, 1, 1),
## heights = c(0.1, 0.15, 0.15, 0.25, 1, 1))
##
## plot
## ----sessionInfo, echo = FALSE, results = "asis"------------------------------
# Emit session information (R version, attached packages) for reproducibility.
prettySessionInfo()
|
# Script-01.r: density histogram of the depth profile stored in column 1 of
# Depths.csv, overlaid with a fitted normal curve, a kernel-density estimate
# and mean/median markers.
MDepths <- read.csv("Depths.csv", header=TRUE)
# Column 1 holds the profile-01 depths; drop missing values.
# (The original also called as.data.frame(X01) and discarded the result --
# dead code, removed.)
X01 <- MDepths[, 1]
X01 <- X01[!is.na(X01)]
dat01 <- data.frame(X01)
p01 <- ggplot(dat01, aes(X01)) +
labs(title = "Profile Nr.01", x = "Depths, m", y = "Density") +
theme(
plot.title = element_text(family = "Skia", face = 2, size = 10),
panel.background=ggplot2::element_rect(fill = "gray91"),
legend.position = c(.90, .90),
legend.justification = c("right", "top"),
legend.box.just = "right",
legend.margin = margin(6, 6, 6, 6),
legend.direction = "vertical",
legend.background = element_blank(),
legend.key.width = unit(.5,"cm"),
legend.key.height = unit(.3,"cm"),
legend.spacing = unit(.3,"cm"),
legend.box.background = element_rect(colour = "honeydew4",size=0.2),
legend.text = element_text(family = "Arial", colour="black", size=6, face=1),
legend.title = element_blank(),
strip.text.x = element_text(colour = "white"),
panel.grid.major = element_line("white", size = 0.3),
panel.grid.minor = element_line("white", size = 0.3, linetype = "dotted"),
axis.text.x = element_text(family = "Arial", face = 3, color = "gray24",size = 6, angle = 15),
axis.ticks.length=unit(.2,"cm"),
axis.text.y = element_text(family = "Arial", face = 3, color = "gray24",size = 6, angle = 15),
axis.line = element_line(colour = "darkblue", size = .3, linetype = "solid"),
axis.title.y = element_text(margin = margin(t = 20, r = .3), family = "Times New Roman", face = 1, size = 6),
axis.title.x = element_text(family = "Times New Roman", face = 1, size = 6,margin = margin(t = .2))) +
scale_x_continuous(breaks = pretty(dat01$X01, n = 4), minor_breaks = seq(min(dat01$X01), max(dat01$X01), by = 500)) +
scale_y_continuous(breaks = scales::pretty_breaks(n = 4), labels = scales::percent) +
scale_fill_distiller(palette = "RdGy") +
scale_color_manual(name = "Statistics:", values = c(median = "purple", mean = "green4", density = "blue", norm_dist = "black")) +
# Refer to columns by bare name inside aes(); `dat01$X01` bypasses the plot's
# data slot and breaks faceting/subsetting.
geom_histogram(binwidth = 200, aes(x = X01, y = ..density.., fill = ..density..), color = "blue", size = .1) +
# Map the fitted-normal curve onto the "norm_dist" legend key (black in the
# manual colour scale above). The original hard-coded color = 'black' here
# and additionally drew geom_vline(xintercept = dnorm(X01)), which placed a
# vertical line at every density VALUE -- a bug; that layer is removed.
stat_function(aes(color = "norm_dist"), fun = dnorm, args = list(mean = mean(dat01$X01), sd = sd(dat01$X01)), lwd = 0.2) +
stat_density(geom = "line", size = .3, aes(color = "density")) +
geom_vline(aes(color = "mean", xintercept = mean(X01)), lty = 4, size = .3) +
geom_vline(aes(color = "median", xintercept = median(X01)), lty = 2, size = .3)
p01
| /Script-01.r | permissive | paulinelemenkova/R-1-Histograms | R | false | false | 2,570 | r | MDepths <- read.csv("Depths.csv", header=TRUE)
# Duplicate copy of the Script-01.r histogram code earlier in this file
# (dataset-dump residue). Review notes are added as comments only.
X01<- MDepths[,01]
X01<-X01[!is.na(X01)]
# NOTE(review): the result of this call is discarded -- dead code.
as.data.frame(X01)
dat01<- data.frame(X01)
p01<-ggplot(dat01, aes(X01)) +
labs(title = "Profile Nr.01", x = "Depths, m", y = "Density") +
theme(
plot.title = element_text(family = "Skia", face = 2, size = 10),
panel.background=ggplot2::element_rect(fill = "gray91"),
legend.position = c(.90, .90),
legend.justification = c("right", "top"),
legend.box.just = "right",
legend.margin = margin(6, 6, 6, 6),
legend.direction = "vertical",
legend.background = element_blank(),
legend.key.width = unit(.5,"cm"),
legend.key.height = unit(.3,"cm"),
legend.spacing = unit(.3,"cm"),
legend.box.background = element_rect(colour = "honeydew4",size=0.2),
legend.text = element_text(family = "Arial", colour="black", size=6, face=1),
legend.title = element_blank(),
strip.text.x = element_text(colour = "white"),
panel.grid.major = element_line("white", size = 0.3),
panel.grid.minor = element_line("white", size = 0.3, linetype = "dotted"),
axis.text.x = element_text(family = "Arial", face = 3, color = "gray24",size = 6, angle = 15),
axis.ticks.length=unit(.2,"cm"),
axis.text.y = element_text(family = "Arial", face = 3, color = "gray24",size = 6, angle = 15),
axis.line = element_line(colour = "darkblue", size = .3, linetype = "solid"),
axis.title.y = element_text(margin = margin(t = 20, r = .3), family = "Times New Roman", face = 1, size = 6),
axis.title.x = element_text(family = "Times New Roman", face = 1, size = 6,margin = margin(t = .2))) +
scale_x_continuous(breaks = pretty(dat01$X01, n = 4), minor_breaks = seq(min(dat01$X01), max(dat01$X01), by = 500)) +
scale_y_continuous(breaks = scales::pretty_breaks(n = 4),labels = scales :: percent) +
scale_fill_distiller(palette = "RdGy") +
scale_color_manual(name = "Statistics:", values = c(median = "purple", mean = "green4",density = "blue", norm_dist = "black")) +
# NOTE(review): prefer bare column names inside aes(); `dat01$X01` bypasses
# the plot's data slot.
geom_histogram(binwidth = 200,aes(fill = ..density..,x = dat01$X01,y = ..density..),color = "blue",size = .1) +
stat_function(fun = dnorm, args = list(mean = mean(dat01$X01), sd = sd(dat01$X01)), lwd = 0.2, color = 'black') +
stat_density(geom = "line", size = .3, aes(color = "density")) +
geom_vline(aes(color = "mean", xintercept = mean(X01)), lty = 4, size = .3) +
geom_vline(aes(color = "median", xintercept = median(X01)), lty = 2, size = .3) +
# FIXME: dnorm(X01) is a vector of density values, so this draws a vertical
# line at every one of them -- almost certainly not intended.
geom_vline(aes(color = "norm_dist", xintercept = dnorm(X01)), lty = 2, size = .3)
p01
|
# testthat unit tests for fitSemiParametric(): input validation of endpoint,
# subgroup and arm/event coverage.
source("setupFunctions.R")
context("semiParametricFitting")
test_that("using_endpoint_not_in_SurvivalData_object_gives_error",{
survivalData <- createSurvivalDataObject()
expect_error(fitSemiParametric(survivalData,endPoint="nonsense"))
# The defined endpoints are not vector-valued
expect_error(fitSemiParametric(survivalData,endPoint=c("relapse","relapse")))
})
test_that("using_subgroup_not_in_SurvivalData_object_gives_error",{
survivalData <- createSurvivalDataObject()
expect_error(fitSemiParametric(survivalData,endPoint="relapse",subgroup="mysubgroup"))
})
test_that("error_if_an_arm_contains_no_data", {
data("sibylData")
for (s in c("patchOnly", "combination")){
# Create subgroup that is just an indicator for arm membership, so that
# subsetting by it results in no data in any other arm
sibylData$sub.isMale <- sibylData$grp == s
# NOTE(review): the helper name carries a "Constuctor" typo; it is defined
# in setupFunctions.R, so any rename must start there.
inputs <- survivalDataConstuctorTestSetUp()
survivalData <- SurvivalData(data = sibylData,
armDef = inputs[["arm"]],
covDef = inputs[["cov"]],
subgroupDef = inputs[["sub"]],
subjectCol = "ID",
endPointNames = c("relapse", "newEndpoint"),
censorCol = c("ttr.cens", "cens.2"),
timeCol = c("ttr", "end.2"))
expect_error(fitSemiParametric(survivalData, endPoint="relapse", subgroup = "sub.isMale"))
}
})
# Validation continued: arms with zero events, unknown covariates/strata,
# and an invalid survfit conf.type must all raise errors.
test_that("error_if_arm_has_no_events", {
data("sibylData")
for (a in c("patchOnly", "combination")){
# Censor all subjects on one arm
sibylData$ttr.cens <- sibylData$grp == a
inputs <- survivalDataConstuctorTestSetUp()
survivalData <- SurvivalData(data = sibylData,
armDef = inputs[["arm"]],
covDef = inputs[["cov"]],
subgroupDef = inputs[["sub"]],
subjectCol = "ID",
endPointNames = c("relapse", "newEndpoint"),
censorCol = c("ttr.cens", "cens.2"),
timeCol = c("ttr", "end.2"))
# Check both the "whole population" (NA subgroup) and a named subgroup.
for (s in list(as.character(NA), "sub.isMale")){
expect_error(fitSemiParametric(survivalData, endPoint="relapse", subgroup = s))
}
}
})
test_that("using_covariate_or_strata_not_in_SurvivalData_gives_error",{
survivalData <- createSurvivalDataObject()
expect_error(fitSemiParametric(survivalData,endPoint="relapse",covariates=c("age","otherCovar")))
expect_error(fitSemiParametric(survivalData,endPoint="relapse",strata="otherCovar"))
})
test_that("invalid_conf.type_throws_error",{
survivalData <- createSurvivalDataObject()
expect_error(fitSemiParametric(survivalData,endPoint="relapse", conf.type="invalid"))
})
# Happy-path construction: the KM and Cox fits stored on the returned object
# must equal independently fitted survfit()/coxph() models (the `call` slots
# are equalised first, since they legitimately differ).
# NOTE(review): "approrpriately" in the description below is a typo, but the
# description is a runtime string and is left untouched here.
test_that("SemiParametricModelObjects_can_be_created_with_KM_and_Cox_fitted_approrpriately",{
survivalData <- createSurvivalDataObject()
sP <- fitSemiParametric(survivalData,endPoint="relapse")
expect_equal(class(sP)[1],"SemiParametricModel")
#km:
km <- survfit(Surv(ttr,!ttr.cens) ~ arm, data=survivalData@subject.data)
#set calls to be the same
km$call <- sP@km$call
expect_equal(sP@km, km)
#Cox:
cox <- coxph(Surv(ttr,!ttr.cens) ~ arm, data=survivalData@subject.data, ties="breslow", model=TRUE)
cox$call <- sP@cox$call
expect_equal(sP@cox, cox)
})
test_that("conf.type_argument_is_passed_to_survfit",{
survivalData <- createSurvivalDataObject()
sP <- fitSemiParametric(survivalData,endPoint="relapse", conf.type="log-log")
#km:
km <- survfit(Surv(ttr,!ttr.cens) ~ arm, data=survivalData@subject.data, conf.type="log-log")
# Compare via quantile CIs, which depend on conf.type.
expect_equal(quantile(sP@km, prob=0.5, conf.int=TRUE), quantile(km, prob=0.5, conf.int=TRUE) )
})
test_that("SemiParametricModelObjects_can_be_created_with_covariates",{
survivalData <- createSurvivalDataObject()
sP <- fitSemiParametric(survivalData,endPoint="relapse",covariates=c("age","race"))
#km:
km <- survfit(Surv(ttr,!ttr.cens) ~ arm, data=survivalData@subject.data)
#set calls to be the same
km$call <- sP@km$call
expect_equal(sP@km, km)
#Cox: covariates enter the coxWithStrata slot
cox <- coxph(Surv(ttr,!ttr.cens) ~ arm+age+race, data=survivalData@subject.data, ties="breslow", model=TRUE)
cox$call <- sP@coxWithStrata$call
expect_equal(sP@coxWithStrata, cox)
})
test_that("SemiParametricModelObjects_can_be_created_with_subgroups_and_strata",{
survivalData <- createSurvivalDataObject()
sP <- fitSemiParametric(survivalData,endPoint="relapse",strata="race",subgroup="sub.isMale")
# Fits must be computed on the subgroup subset only.
df <- survivalData@subject.data[survivalData@subject.data$sub.isMale,]
#km:
km <- survfit(Surv(ttr,!ttr.cens) ~ arm, data=df)
#set calls to be the same
km$call <- sP@km$call
expect_equal(sP@km, km)
#Cox:
cox <- coxph(Surv(ttr,!ttr.cens) ~ arm+strata(race), data=df, ties="breslow", model=TRUE)
cox$call <- sP@coxWithStrata$call
expect_equal(sP@coxWithStrata, cox)
})
# The survData slot must contain exactly the rows used for fitting:
# subgroup-filtered, with missing-endpoint subjects removed.
test_that("only_appropriate_subgroup_data_is_added_to_survdata_slot",{
survivalData <- createSurvivalDataObject()
sP <- fitSemiParametric(survivalData,endPoint="relapse",strata="race",subgroup="sub.isMale")
expect_true(all(sP@survData@subject.data$sub.isMale))
expect_equal(nrow(sP@survData@subject.data),
nrow(survivalData@subject.data[survivalData@subject.data$sub.isMale,]))
})
test_that("subjects_with_missing_endpoint_data_are_not_added_to_survdata_slot",{
survivalData <- createSurvivalDataObject()
# Blank out the first subject's time and censor indicator for this endpoint.
survivalData@subject.data$ttr[1] <- NA
survivalData@subject.data$ttr.cens[1] <- NA
sP <- fitSemiParametric(survivalData,endPoint="relapse",strata="race")
expect_equal(sP@survData@subject.data, survivalData@subject.data[2:nrow(survivalData@subject.data),])
})
test_that("all_data_is_added_to_survdata_slot_if_no_subgroups",{
survivalData <- createSurvivalDataObject()
sP <- fitSemiParametric(survivalData,endPoint="relapse")
expect_equal(sP@survData,survivalData)
})
test_that("isSingleArm_is_FALSE_if_created_from_SurvivalData_object_with_more_than_one_arm",{
survivalData <- createSurvivalDataObject()
sP <- fitSemiParametric(survivalData,endPoint="relapse")
expect_false(isSingleArm(sP))
})
# Output checks: the coxphLogRankTest() table must reproduce the score test
# (sctest) of independently fitted coxph models, with and without strata.
context("semiParametricFittingOutput")
test_that("logrank_test_matches_independentCoxFit_with_strata",{
survivalData <- createSurvivalDataObject()
sP <- fitSemiParametric(survivalData,endPoint="relapse",strata="race")
logrankOutput <- coxphLogRankTest(sP)
coxWithStrata <- coxph(Surv(ttr,!ttr.cens)~ arm + strata(race),
data=survivalData@subject.data)
summStrata <- summary(coxWithStrata)$sctest
names(summStrata) <- NULL
# Row 2 of the output holds the stratified test (statistic, df, p-value).
expect_equal(logrankOutput[2,1],summStrata[1])
expect_equal(logrankOutput[2,2],summStrata[2])
expect_equal(logrankOutput[2,3],summStrata[3])
})
test_that("logrank_test_with_no_strata_matches_even_strata_also_used",{
survivalData <- createSurvivalDataObject()
sP <- fitSemiParametric(survivalData,endPoint="relapse")
logrankOutput <- coxphLogRankTest(sP)
cox <- coxph(Surv(ttr,!ttr.cens)~ arm ,
data=survivalData@subject.data)
summ <- summary(cox)$sctest
names(summ) <- NULL
# Row 1 of the output holds the unstratified test.
expect_equal(logrankOutput[1,1],summ[1])
expect_equal(logrankOutput[1,2],summ[2])
expect_equal(logrankOutput[1,3],summ[3])
})
test_that("number_of_events_is_correctly_calculated",{
survivalData <- createSurvivalDataObject()
sP <- fitSemiParametric(survivalData,endPoint="relapse",subgroup="sub.isMale")
summarysP <- summary(sP, class="data.frame")
subgroupData <- survivalData@subject.data[survivalData@subject.data$sub.isMale,]
# NOTE(review): the names look swapped -- "combination" labels the count of
# arm == "patchOnly" and vice versa. If this mirrors the summary's column
# order it is coincidentally correct; confirm against summary()'s layout.
numEvents <- c(combination=nrow(subgroupData[subgroupData$arm=="patchOnly" & !subgroupData$ttr.cens,]),
patchOnly=nrow(subgroupData[subgroupData$arm=="combination" & !subgroupData$ttr.cens,]))
expect_equal(summarysP[1,1:2],numEvents)
})
# extractCumHazData(): shape and content of the per-arm survival data frames.
# NOTE(review): the description "t0_S1_row_is added..." below contains a
# stray space; it is a runtime string and is left untouched here.
context("extractCumHazData")
test_that("outputs_one_dataframe_per_arm",{
data("sibylData")
km <- survfit(Surv(ttr,!ttr.cens)~grp, data=sibylData)
results <- extractCumHazData(km,armNames=c("B","A"), isSingleArm=FALSE)
expect_equal(length(results),2)
expect_true(is.data.frame(results[[1]]))
})
test_that("adds_given_armnames_to_output_dataframe",{
data("sibylData")
km <- survfit(Surv(ttr,!ttr.cens)~grp, data=sibylData)
results <- extractCumHazData(km,armNames=c("B","A"), isSingleArm=FALSE)
expect_true(all(results[[1]]$Arm=="B"))
expect_true(all(results[[2]]$Arm=="A"))
})
test_that("outputs_confidence_intervals_when_requested",{
data("sibylData")
km <- survfit(Surv(ttr,!ttr.cens)~grp, data=sibylData)
results <- extractCumHazData(km,armNames=c("B","A"),outputCI = TRUE, isSingleArm=FALSE)
expect_equal(colnames(results[[1]]),c("t","S","Arm","lower","upper"))
})
test_that("t0_S1_row_is added_to_dataframes",{
data("sibylData")
km <- survfit(Surv(ttr,!ttr.cens)~grp, data=sibylData)
results <- extractCumHazData(km,armNames=c("B","A"), isSingleArm=FALSE)
# The synthetic origin row (t = 0, S = 1) must be prepended.
expect_equal(results[[1]][1,1],0) #t
expect_equal(results[[1]][1,2],1) #S
})
| /tests/testthat/test-semiParametric.R | no_license | scientific-computing-solutions/sibyl | R | false | false | 9,292 | r | source("setupFunctions.R")
context("semiParametricFitting")
test_that("using_endpoint_not_in_SurvivalData_object_gives_error",{
survivalData <- createSurvivalDataObject()
expect_error(fitSemiParametric(survivalData,endPoint="nonsense"))
# The defined endpoints are not vector-valued
expect_error(fitSemiParametric(survivalData,endPoint=c("relapse","relapse")))
})
test_that("using_subgroup_not_in_SurvivalData_object_gives_error",{
survivalData <- createSurvivalDataObject()
expect_error(fitSemiParametric(survivalData,endPoint="relapse",subgroup="mysubgroup"))
})
test_that("error_if_an_arm_contains_no_data", {
data("sibylData")
for (s in c("patchOnly", "combination")){
# Create subgroup that is just an indicator for arm membership, so that
# subsetting by it results in no data in any other arm
sibylData$sub.isMale <- sibylData$grp == s
inputs <- survivalDataConstuctorTestSetUp()
survivalData <- SurvivalData(data = sibylData,
armDef = inputs[["arm"]],
covDef = inputs[["cov"]],
subgroupDef = inputs[["sub"]],
subjectCol = "ID",
endPointNames = c("relapse", "newEndpoint"),
censorCol = c("ttr.cens", "cens.2"),
timeCol = c("ttr", "end.2"))
expect_error(fitSemiParametric(survivalData, endPoint="relapse", subgroup = "sub.isMale"))
}
})
test_that("error_if_arm_has_no_events", {
data("sibylData")
for (a in c("patchOnly", "combination")){
# Censor all subjects on one arm
sibylData$ttr.cens <- sibylData$grp == a
inputs <- survivalDataConstuctorTestSetUp()
survivalData <- SurvivalData(data = sibylData,
armDef = inputs[["arm"]],
covDef = inputs[["cov"]],
subgroupDef = inputs[["sub"]],
subjectCol = "ID",
endPointNames = c("relapse", "newEndpoint"),
censorCol = c("ttr.cens", "cens.2"),
timeCol = c("ttr", "end.2"))
for (s in list(as.character(NA), "sub.isMale")){
expect_error(fitSemiParametric(survivalData, endPoint="relapse", subgroup = s))
}
}
})
test_that("using_covariate_or_strata_not_in_SurvivalData_gives_error",{
survivalData <- createSurvivalDataObject()
expect_error(fitSemiParametric(survivalData,endPoint="relapse",covariates=c("age","otherCovar")))
expect_error(fitSemiParametric(survivalData,endPoint="relapse",strata="otherCovar"))
})
test_that("invalid_conf.type_throws_error",{
survivalData <- createSurvivalDataObject()
expect_error(fitSemiParametric(survivalData,endPoint="relapse", conf.type="invalid"))
})
test_that("SemiParametricModelObjects_can_be_created_with_KM_and_Cox_fitted_approrpriately",{
survivalData <- createSurvivalDataObject()
sP <- fitSemiParametric(survivalData,endPoint="relapse")
expect_equal(class(sP)[1],"SemiParametricModel")
#km:
km <- survfit(Surv(ttr,!ttr.cens) ~ arm, data=survivalData@subject.data)
#set calls to be the same
km$call <- sP@km$call
expect_equal(sP@km, km)
#Cox:
cox <- coxph(Surv(ttr,!ttr.cens) ~ arm, data=survivalData@subject.data, ties="breslow", model=TRUE)
cox$call <- sP@cox$call
expect_equal(sP@cox, cox)
})
test_that("conf.type_argument_is_passed_to_survfit",{
survivalData <- createSurvivalDataObject()
sP <- fitSemiParametric(survivalData,endPoint="relapse", conf.type="log-log")
#km:
km <- survfit(Surv(ttr,!ttr.cens) ~ arm, data=survivalData@subject.data, conf.type="log-log")
expect_equal(quantile(sP@km, prob=0.5, conf.int=TRUE), quantile(km, prob=0.5, conf.int=TRUE) )
})
test_that("SemiParametricModelObjects_can_be_created_with_covariates",{
survivalData <- createSurvivalDataObject()
sP <- fitSemiParametric(survivalData,endPoint="relapse",covariates=c("age","race"))
#km:
km <- survfit(Surv(ttr,!ttr.cens) ~ arm, data=survivalData@subject.data)
#set calls to be the same
km$call <- sP@km$call
expect_equal(sP@km, km)
#Cox:
cox <- coxph(Surv(ttr,!ttr.cens) ~ arm+age+race, data=survivalData@subject.data, ties="breslow", model=TRUE)
cox$call <- sP@coxWithStrata$call
expect_equal(sP@coxWithStrata, cox)
})
test_that("SemiParametricModelObjects_can_be_created_with_subgroups_and_strata",{
survivalData <- createSurvivalDataObject()
sP <- fitSemiParametric(survivalData,endPoint="relapse",strata="race",subgroup="sub.isMale")
df <- survivalData@subject.data[survivalData@subject.data$sub.isMale,]
#km:
km <- survfit(Surv(ttr,!ttr.cens) ~ arm, data=df)
#set calls to be the same
km$call <- sP@km$call
expect_equal(sP@km, km)
#Cox:
cox <- coxph(Surv(ttr,!ttr.cens) ~ arm+strata(race), data=df, ties="breslow", model=TRUE)
cox$call <- sP@coxWithStrata$call
expect_equal(sP@coxWithStrata, cox)
})
test_that("only_appropriate_subgroup_data_is_added_to_survdata_slot",{
survivalData <- createSurvivalDataObject()
sP <- fitSemiParametric(survivalData,endPoint="relapse",strata="race",subgroup="sub.isMale")
expect_true(all(sP@survData@subject.data$sub.isMale))
expect_equal(nrow(sP@survData@subject.data),
nrow(survivalData@subject.data[survivalData@subject.data$sub.isMale,]))
})
test_that("subjects_with_missing_endpoint_data_are_not_added_to_survdata_slot",{
survivalData <- createSurvivalDataObject()
survivalData@subject.data$ttr[1] <- NA
survivalData@subject.data$ttr.cens[1] <- NA
sP <- fitSemiParametric(survivalData,endPoint="relapse",strata="race")
expect_equal(sP@survData@subject.data, survivalData@subject.data[2:nrow(survivalData@subject.data),])
})
test_that("all_data_is_added_to_survdata_slot_if_no_subgroups",{
survivalData <- createSurvivalDataObject()
sP <- fitSemiParametric(survivalData,endPoint="relapse")
expect_equal(sP@survData,survivalData)
})
test_that("isSingleArm_is_FALSE_if_created_from_SurvivalData_object_with_more_than_one_arm",{
survivalData <- createSurvivalDataObject()
sP <- fitSemiParametric(survivalData,endPoint="relapse")
expect_false(isSingleArm(sP))
})
context("semiParametricFittingOutput")
test_that("logrank_test_matches_independentCoxFit_with_strata",{
survivalData <- createSurvivalDataObject()
sP <- fitSemiParametric(survivalData,endPoint="relapse",strata="race")
logrankOutput <- coxphLogRankTest(sP)
coxWithStrata <- coxph(Surv(ttr,!ttr.cens)~ arm + strata(race),
data=survivalData@subject.data)
summStrata <- summary(coxWithStrata)$sctest
names(summStrata) <- NULL
expect_equal(logrankOutput[2,1],summStrata[1])
expect_equal(logrankOutput[2,2],summStrata[2])
expect_equal(logrankOutput[2,3],summStrata[3])
})
test_that("logrank_test_with_no_strata_matches_even_strata_also_used",{
survivalData <- createSurvivalDataObject()
sP <- fitSemiParametric(survivalData,endPoint="relapse")
logrankOutput <- coxphLogRankTest(sP)
cox <- coxph(Surv(ttr,!ttr.cens)~ arm ,
data=survivalData@subject.data)
summ <- summary(cox)$sctest
names(summ) <- NULL
expect_equal(logrankOutput[1,1],summ[1])
expect_equal(logrankOutput[1,2],summ[2])
expect_equal(logrankOutput[1,3],summ[3])
})
test_that("number_of_events_is_correctly_calculated",{
survivalData <- createSurvivalDataObject()
sP <- fitSemiParametric(survivalData,endPoint="relapse",subgroup="sub.isMale")
summarysP <- summary(sP, class="data.frame")
subgroupData <- survivalData@subject.data[survivalData@subject.data$sub.isMale,]
numEvents <- c(combination=nrow(subgroupData[subgroupData$arm=="patchOnly" & !subgroupData$ttr.cens,]),
patchOnly=nrow(subgroupData[subgroupData$arm=="combination" & !subgroupData$ttr.cens,]))
expect_equal(summarysP[1,1:2],numEvents)
})
context("extractCumHazData")
test_that("outputs_one_dataframe_per_arm",{
data("sibylData")
km <- survfit(Surv(ttr,!ttr.cens)~grp, data=sibylData)
results <- extractCumHazData(km,armNames=c("B","A"), isSingleArm=FALSE)
expect_equal(length(results),2)
expect_true(is.data.frame(results[[1]]))
})
test_that("adds_given_armnames_to_output_dataframe",{
data("sibylData")
km <- survfit(Surv(ttr,!ttr.cens)~grp, data=sibylData)
results <- extractCumHazData(km,armNames=c("B","A"), isSingleArm=FALSE)
expect_true(all(results[[1]]$Arm=="B"))
expect_true(all(results[[2]]$Arm=="A"))
})
test_that("outputs_confidence_intervals_when_requested",{
data("sibylData")
km <- survfit(Surv(ttr,!ttr.cens)~grp, data=sibylData)
results <- extractCumHazData(km,armNames=c("B","A"),outputCI = TRUE, isSingleArm=FALSE)
expect_equal(colnames(results[[1]]),c("t","S","Arm","lower","upper"))
})
test_that("t0_S1_row_is added_to_dataframes",{
data("sibylData")
km <- survfit(Surv(ttr,!ttr.cens)~grp, data=sibylData)
results <- extractCumHazData(km,armNames=c("B","A"), isSingleArm=FALSE)
expect_equal(results[[1]][1,1],0) #t
expect_equal(results[[1]][1,2],1) #S
})
|
### Boosted Sparse Nonlinear Metric Learning
### Auxiliary function: Compute nearest neighbor orders based on distance with metric W
### Author: Yuting Ma
### Date: 04/14/2015
compute_dist <- function(X, y, W){
  # Rank, for every observation, its nearest neighbours within the positive
  # (y == 1) and negative (y == -1) classes under the metric
  # d(a, b)^2 = (a - b)' W (a - b).
  #
  # Args:
  #   X: n x p numeric matrix of observations (rows).
  #   y: length-n label vector coded +1 / -1.
  #   W: p x p positive-definite metric matrix.
  #
  # Returns:
  #   list(pos_order, neg_order): n x (n_pos - 1) and n x (n_neg - 1)
  #   integer matrices; row i holds the column indices of i's nearest
  #   positive / negative neighbours in increasing distance, self excluded.
  n <- nrow(X)
  n_pos <- sum(y == 1)
  n_neg <- sum(y == -1)
  # W = U'U (Cholesky), so ||U (a - b)||^2 = (a - b)' W (a - b): a plain
  # Euclidean dist() on the transformed data yields the metric distances.
  U <- chol(W)
  dist.X <- as.matrix(dist(X %*% t(U), diag = TRUE, upper = TRUE))
  # S[i, j] = 1 when column j belongs to the positive class, else 0.
  S <- matrix(rep((0.5*y + 0.5), n), n, n, byrow = TRUE)
  # Mask the opposite class (and self) with Inf. The original used the
  # finite sentinel 99999, which silently corrupts the ordering as soon as
  # a genuine distance exceeds it. Assignment (not arithmetic) is used so
  # that 0 * Inf = NaN never arises.
  S.pos <- dist.X
  S.pos[S == 0] <- Inf   # hide negative-class columns
  S.neg <- dist.X
  S.neg[S == 1] <- Inf   # hide positive-class columns
  diag(S.pos) <- diag(S.neg) <- Inf   # exclude self
  # order() sorts Inf last, so the first k entries are the true neighbours.
  pos_order <- t(matrix(apply(S.pos, 1, function(x) order(x)[1:(n_pos-1)]), n_pos-1, n))
  neg_order <- t(matrix(apply(S.neg, 1, function(x) order(x)[1:(n_neg-1)]), n_neg-1, n))
  list(pos_order = pos_order, neg_order = neg_order)
}
} | /lib/sDist_compute_dist.R | no_license | yuting27/sDist | R | false | false | 960 | r | ### Boosted Sparse Nonlinear Metric Learning
### Auxiliary function: Compute nearest neighbor orders based on distance with metric W
### Author: Yuting Ma
### Date: 04/14/2015
compute_dist <- function(X, y, W){
# Rank each observation's nearest neighbours within the positive (y == 1)
# and negative (y == -1) classes under d(a,b)^2 = (a-b)' W (a-b).
# Returns list(pos_order, neg_order): n x (n_pos-1) and n x (n_neg-1)
# matrices of neighbour column indices in increasing distance, self excluded.
n <- nrow(X)
n_pos <- sum(y == 1)
n_neg <- sum(y == -1)
# W = L'L (Cholesky), so Euclidean dist() on X %*% t(L) gives metric dists.
L <- chol(W)
dist.X <- as.matrix(dist(X%*%t(L), diag=T, upper=T))
S <- matrix(rep((0.5*y + 0.5), n),n,n,byrow=T) #pos.class=1, neg.class=0
# NOTE(review): 99999 is a finite sentinel; the masking silently breaks if
# a genuine distance ever reaches it -- Inf would be safer.
S.pos <- (1-S)*99999 + S*dist.X #neg.class=999, pos.class=original dist
S.neg <- S*99999 + (1-S)*dist.X #pos.class=999, neg.class=original dist
diag(S.pos) <- diag(S.neg) <- rep(99999,n) # set self-to-self dist to 999
pos_order <- t(matrix(apply(S.pos, 1, function(x) order(x)[1:(n_pos-1)]),n_pos-1,n)) # each row indicates the index of positive nearest neighbors of X[i,] (in order)
neg_order <- t(matrix(apply(S.neg, 1, function(x) order(x)[1:(n_neg-1)]),n_neg-1,n))
return(list(pos_order=pos_order, neg_order=neg_order))
}
} |
# Inspect the current repository settings.
options()$repos
options()$BioC_mirror
# Point Bioconductor and CRAN at fast mirrors.
# FIX: the option Bioconductor consults is "BioC_mirror"; the original set
# "bio_mirror" (wrong name), which is why the transcript below still shows
# options()$BioC_mirror as NULL after the call.
options(BioC_mirror="https://mirrors.ustc.edu.cn/bioc/")
options("repos"=c(CRAN="https://mirrors.tuna.tsinghua.edu.cn/CRAN/"))
# > options()$repos
#                           CRAN                            CRANextra
#     "http://cran.rstudio.com/" "http://www.stats.ox.ac.uk/pub/RWin"
# attr(,"RStudio")
# [1] TRUE
# > options()$BioC_mirror
# NULL
# > options(bio_mirror="https://mirrors.ustc.edu.cn/bioc/")   # <- the old, buggy call
# > options("repos"=c(CRAN="https://mirrors.tuna.tsinghua.edu.cn/CRAN/"))
| /Homework/Homework_1_options.R | no_license | LucasZhengrui/R_Lauguage_Study | R | false | false | 540 | r | options()$repos
options()$BioC_mirror
options(bio_mirror="https://mirrors.ustc.edu.cn/bioc/")
options("repos"=c(CRAN="https://mirrors.tuna.tsinghua.edu.cn/CRAN/"))
# > options()$repos
# CRAN CRANextra
# "http://cran.rstudio.com/" "http://www.stats.ox.ac.uk/pub/RWin"
# attr(,"RStudio")
# [1] TRUE
# > options()$BioC_mirror
# NULL
# > options(bio_mirror="https://mirrors.ustc.edu.cn/bioc/")
# > options("repos"=c(CRAN="https://mirrors.tuna.tsinghua.edu.cn/CRAN/"))
|
# Build script: allocate an aroma UGC (unit GC-content) annotation file for
# the GenomeWideSNP_6 chip type and populate it from Affymetrix NetAffx CSVs.
if (interactive()) savehistory();
library("aroma.affymetrix");
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Allocate UGC file
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
cdf <- AffymetrixCdfFile$byChipType("GenomeWideSNP_6", tags="Full");
ugc <- AromaUnitGcContentFile$allocateFromCdf(cdf, tags="na27,h=500kb,HB20090322");
print(ugc);
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Import GC contents from NetAffx files
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
csvList <- list(
  AffymetrixNetAffxCsvFile$byChipType("GenomeWideSNP_6", tags=".cn.na27"),
  AffymetrixNetAffxCsvFile$byChipType("GenomeWideSNP_6", tags=".na27.1")
);
# Read only the unit-name and GC-percentage columns, as character.
colClasses <- c("^(probeSetID|%GC)$"="character");
for (csv in csvList) {
  data <- readDataFrame(csv, colClasses=colClasses);
  units <- indexOf(cdf, names=data$probeSetID);
  ugc[units,1] <- as.double(data[["%GC"]]);
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Update the file footer
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Record provenance (name, size, checksum) of every source file.
srcFiles <- c(list(cdf), csvList);
srcFileTags <- vector("list", length(srcFiles));  # preallocate; do not grow in the loop
for (kk in seq_along(srcFiles)) {  # seq_along() instead of partially matched seq(along=)
  srcFile <- srcFiles[[kk]];
  srcFileTags[[kk]] <- list(
    filename=getFilename(srcFile),
    filesize=getFileSize(srcFile),
    checksum=getChecksum(srcFile)
  );
}
print(srcFileTags);
footer <- readFooter(ugc);
footer$createdOn <- format(Sys.time(), "%Y%m%d %H:%M:%S", usetz=TRUE);
footer$createdBy <- list(  # '<-' for assignment (original used '=')
  fullname = "Henrik Bengtsson",
  email = sprintf("%s@%s", "henrik.bengtsson", "aroma-project.org")
);
names(srcFileTags) <- sprintf("srcFile%d", seq_along(srcFileTags));
footer$srcFiles <- srcFileTags;
footer$gcBinWidth <- as.integer(500e3);
writeFooter(ugc, footer);
print(ugc);
print(summary(ugc));
print(range(ugc[,1]));
| /inst/buildScripts/chipTypes/GenomeWideSNP_6/na27/GenomeWideSNP_6,UGC,na27.R | no_license | microarray/aroma.affymetrix | R | false | false | 1,911 | r | if (interactive()) savehistory();
library("aroma.affymetrix");
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Allocate UGC file
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
cdf <- AffymetrixCdfFile$byChipType("GenomeWideSNP_6", tags="Full");
ugc <- AromaUnitGcContentFile$allocateFromCdf(cdf, tags="na27,h=500kb,HB20090322");
print(ugc);
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Import GC contents from NetAffx files
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
csvList <- list(
AffymetrixNetAffxCsvFile$byChipType("GenomeWideSNP_6", tags=".cn.na27"),
AffymetrixNetAffxCsvFile$byChipType("GenomeWideSNP_6", tags=".na27.1")
);
colClasses <- c("^(probeSetID|%GC)$"="character");
for (csv in csvList) {
data <- readDataFrame(csv, colClasses=colClasses);
units <- indexOf(cdf, names=data$probeSetID);
ugc[units,1] <- as.double(data[["%GC"]]);
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Update the file footer
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
srcFileTags <- list();
srcFiles <- c(list(cdf), csvList);
for (kk in seq(along=srcFiles)) {
srcFile <- srcFiles[[kk]];
tags <- list(
filename=getFilename(srcFile),
filesize=getFileSize(srcFile),
checksum=getChecksum(srcFile)
);
srcFileTags[[kk]] <- tags;
}
print(srcFileTags);
footer <- readFooter(ugc);
footer$createdOn <- format(Sys.time(), "%Y%m%d %H:%M:%S", usetz=TRUE);
footer$createdBy = list(
fullname = "Henrik Bengtsson",
email = sprintf("%s@%s", "henrik.bengtsson", "aroma-project.org")
);
names(srcFileTags) <- sprintf("srcFile%d", seq(along=srcFileTags));
footer$srcFiles <- srcFileTags;
footer$gcBinWidth <- as.integer(500e3);
writeFooter(ugc, footer);
print(ugc);
print(summary(ugc));
print(range(ugc[,1]));
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getRdFileNames.R
\name{getRdFileNames}
\alias{getRdFileNames}
\title{Track Rd file names at which 'topic' is documented}
\usage{
getRdFileNames(topic, package = NULL)
}
\arguments{
\item{topic}{A length-one character vector specifying the topic (alias).}
\item{package}{A character vector giving the packages to search for Rd file
names that document the \code{topic}, or 'NULL'. By default, all the
packages in the search path are used.}
}
\description{
Tracks the Rd file names at which a given 'topic' (alias) is documented.
}
\examples{
getRdFileNames("rbind")
isInstalled <- function(pkg)
inherits(suppressWarnings(packageDescription(pkg)), "packageDescription")
if (isInstalled("IRanges"))
getRdFileNames("rbind", package=c("base", "IRanges"))
if (isInstalled("Biobase"))
getRdFileNames("ExpressionSet", "Biobase")
}
\author{
Chao-Jen Wong \email{cwon2@fhcrc.org}
}
\keyword{programming}
| /man/getRdFileNames.Rd | no_license | federicomarini/codetoolsBioC | R | false | true | 984 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getRdFileNames.R
\name{getRdFileNames}
\alias{getRdFileNames}
\title{Track Rd file names at which 'topic' is documented}
\usage{
getRdFileNames(topic, package = NULL)
}
\arguments{
\item{topic}{A length-one character vector specifying the topic (alias).}
\item{package}{A character vector giving the packages to search for Rd file
names that document the \code{topic}, or 'NULL'. By default, all the
packages in the search path are used.}
}
\description{
Tracks the Rd file names at which a given 'topic' (alias) is documented.
}
\examples{
getRdFileNames("rbind")
isInstalled <- function(pkg)
inherits(suppressWarnings(packageDescription(pkg)), "packageDescription")
if (isInstalled("IRanges"))
getRdFileNames("rbind", package=c("base", "IRanges"))
if (isInstalled("Biobase"))
getRdFileNames("ExpressionSet", "Biobase")
}
\author{
Chao-Jen Wong \email{cwon2@fhcrc.org}
}
\keyword{programming}
|
# Exploratory scratch script: NA handling, duplicate-name detection and
# discretisation checks on the `complete` FIFA-18 data set (loaded elsewhere).
# Requires caret::findCorrelation and a discretize() implementation in scope.
row.has.na <- apply(complete, 1, function(x){any(is.na(x))})
sum(row.has.na)
column.has.na <- apply(complete, 2, function(x){any(is.na(x))})
sum(column.has.na)
# Keep only columns without any NA values.
# NOTE(review): the trailing comma makes the third (drop) argument of
# `[.data.frame` an empty/missing arg; it works, but
# `complete[, !column.has.na]` would be clearer.
complete.filtered <- complete[,!column.has.na,]
findCorrelation(complete.filtered) # will not work: it complains that the frame contains non-numeric (string) columns
#Error in Math.data.frame(x) :
# non-numeric variable in data frame: name, full_name, birth_date, ... (long list of character/trait columns; truncated)
# drop the columns with the given names
complete.filtered <- complete.filtered[ , !names(complete.filtered) %in% c("flag","club_logo","photo")]
# for testing, trim the data set to the first 50 rows
complete <- head(complete,50)
# get full names
nameData = complete[,3]
# count them
nameFreq = as.data.frame(table(nameData))
# get repeated
repeatedNames = subset(nameFreq, Freq > 1)[,1]
nrow(subset(nameFreq, Freq > 1))
# erase repeating values from data
complete.filtered <- subset(complete, !full_name %in% repeatedNames)
max(complete$eur_value)
discretize(complete$overall)
table(discretize(complete$overall, method="frequency", breaks = 10))
# list duplicated short names
complete$name[duplicated(complete$name)]
# count duplicated entries
sum(duplicated(complete$name))
| /testing.R | no_license | Krysol11111/MOW_fifa18 | R | false | false | 2,094 | r | row.has.na <- apply(complete, 1, function(x){any(is.na(x))})
sum(row.has.na)
column.has.na <- apply(complete, 2, function(x){any(is.na(x))})
sum(column.has.na)
complete.filtered <- complete[,!column.has.na,]
findCorrelation(complete.filtered) # nie będzie działać, bo pluje się, że są stringowe wartości z tego co widzę
#Error in Math.data.frame(x) :
# non-numeric variable in data frame: namefull_namebirth_datebody_typereal_faceflagnationalityphotowork_rate_attwork_rate_defpreferred_foot1_on_1_rush_traitacrobatic_clearance_traitargues_with_officials_traitavoids_using_weaker_foot_traitbacks_into_player_traitbicycle_kicks_traitcautious_with_crosses_traitchip_shot_traitchipped_penalty_traitcomes_for_crosses_traitcorner_specialist_traitdiver_traitdives_into_tackles_traitdiving_header_traitdriven_pass_traitearly_crosser_traitfan's_favourite_traitfancy_flicks_traitfinesse_shot_traitflair_traitflair_passes_traitgk_flat_kick_traitgk_long_throw_traitgk_up_for_corners_traitgiant_throw_in_traitinflexible_traitinjury_free_traitinjury_prone_traitleadership_traitlong_passer_traitlong_shot_taker_traitlong_throw_in_traitone_club_player_traitoutside_foot_shot_traitplaymaker_traitpower_free_kick_traitpower_header_traitpuncher_traitrushes_out_of_goal_traitsaves_with_feet_traitsecond_wind_traitselfish_traitskilled_dribbling_traitstutter_penalt
# wyrzucenie kolumn o podanych nazwach
complete.filtered <- complete.filtered[ , !names(complete.filtered) %in% c("flag","club_logo","photo")]
# dla testów okroić zestaw danych
complete <- head(complete,50)
# get full names
nameData = complete[,3]
# count them
nameFreq = as.data.frame(table(nameData))
# get repeated
repeatedNames = subset(nameFreq, Freq > 1)[,1]
nrow(subset(nameFreq, Freq > 1))
# erase repeating values from data
complete.filtered <- subset(complete, !full_name %in% repeatedNames)
max(complete$eur_value)
discretize(complete$overall)
table(discretize(complete$overall, method="frequency", breaks = 10))
complete$name[duplicated(complete$name)]
# count duplicated entries
sum(duplicated(complete$name))
|
`LLTM` <-
function(X, W, mpoints = 1, groupvec = 1, se = TRUE, sum0 = TRUE, etaStart)
{
#...X: person*(item*times) matrix (T1|T2|...)
model <- "LLTM"
call<-match.call()
if (missing(W)) W <- NA
else W <- as.matrix(W)
if (missing(etaStart)) etaStart <- NA
else etaStart <- as.vector(etaStart)
XWcheck <- datcheck(X,W,mpoints,groupvec,model) #inital check of X and W
X <- XWcheck$X
lres <- likLR(X,W,mpoints,groupvec,model,st.err=se,sum0,etaStart)
parest <- lres$parest #full groups for parameter estimation
loglik <- -parest$minimum #log-likelihood value
iter <- parest$iterations #number of iterations
convergence <- parest$code
etapar <- parest$estimate #eta estimates
betapar <- as.vector(lres$W%*% etapar) #beta estimates
if (se) {
se.eta <- sqrt(diag(solve(parest$hessian))) #standard errors
se.beta <- sqrt(diag(lres$W%*%solve(parest$hessian)%*%t(lres$W))) #se beta
} else {
se.eta <- rep(NA,length(etapar))
se.beta <- rep(NA,length(betapar))
}
X01 <- lres$X01
labs <- labeling.internal(model,X,X01,lres$W,etapar,betapar,mpoints,max(groupvec)) #labeling for L-models
W <- labs$W
etapar <- labs$etapar
betapar <- labs$betapar
npar <- dim(lres$W)[2] #number of parameters
result <- list(X=X,X01=X01,model=model,loglik=loglik,npar=npar,iter=iter,convergence=convergence,
etapar=etapar,se.eta=se.eta,hessian=parest$hessian,betapar=betapar,
se.beta=se.beta,W=W,mpoints=mpoints,ngroups=max(groupvec),groupvec=groupvec,call=call)
class(result) <- "eRm" #classes: simple RM and extended RM
result
}
| /R/LLTM.R | no_license | cran/eRm | R | false | false | 1,797 | r | `LLTM` <-
function(X, W, mpoints = 1, groupvec = 1, se = TRUE, sum0 = TRUE, etaStart)
{
#...X: person*(item*times) matrix (T1|T2|...)
model <- "LLTM"
call<-match.call()
if (missing(W)) W <- NA
else W <- as.matrix(W)
if (missing(etaStart)) etaStart <- NA
else etaStart <- as.vector(etaStart)
XWcheck <- datcheck(X,W,mpoints,groupvec,model) #inital check of X and W
X <- XWcheck$X
lres <- likLR(X,W,mpoints,groupvec,model,st.err=se,sum0,etaStart)
parest <- lres$parest #full groups for parameter estimation
loglik <- -parest$minimum #log-likelihood value
iter <- parest$iterations #number of iterations
convergence <- parest$code
etapar <- parest$estimate #eta estimates
betapar <- as.vector(lres$W%*% etapar) #beta estimates
if (se) {
se.eta <- sqrt(diag(solve(parest$hessian))) #standard errors
se.beta <- sqrt(diag(lres$W%*%solve(parest$hessian)%*%t(lres$W))) #se beta
} else {
se.eta <- rep(NA,length(etapar))
se.beta <- rep(NA,length(betapar))
}
X01 <- lres$X01
labs <- labeling.internal(model,X,X01,lres$W,etapar,betapar,mpoints,max(groupvec)) #labeling for L-models
W <- labs$W
etapar <- labs$etapar
betapar <- labs$betapar
npar <- dim(lres$W)[2] #number of parameters
result <- list(X=X,X01=X01,model=model,loglik=loglik,npar=npar,iter=iter,convergence=convergence,
etapar=etapar,se.eta=se.eta,hessian=parest$hessian,betapar=betapar,
se.beta=se.beta,W=W,mpoints=mpoints,ngroups=max(groupvec),groupvec=groupvec,call=call)
class(result) <- "eRm" #classes: simple RM and extended RM
result
}
|
testlist <- list(Rs = c(NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, -2.24711641857789e+307, 7.2911220195564e-304, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), atmp = NaN, relh = NaN, temp = NaN)
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result) | /meteor/inst/testfiles/ET0_Makkink/libFuzzer_ET0_Makkink/ET0_Makkink_valgrind_files/1612737879-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 414 | r | testlist <- list(Rs = c(NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, -2.24711641857789e+307, 7.2911220195564e-304, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), atmp = NaN, relh = NaN, temp = NaN)
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result) |
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/render.R
\name{as.htmlwidget.formattable}
\alias{as.htmlwidget.formattable}
\title{Convert formattable to an htmlwidget}
\usage{
\method{as.htmlwidget}{formattable}(x, width = "100\%", height = NULL, ...)
}
\arguments{
\item{x}{a \code{formattable} object to convert}
\item{width}{a valid \code{CSS} width}
\item{height}{a valid \code{CSS} height}
\item{...}{reserved for more parameters}
}
\value{
a \code{htmlwidget} object
}
\description{
formattable was originally designed to work in \code{rmarkdown} environments.
Conversion of a formattable to a htmlwidget will allow use in other contexts
such as console, RStudio Viewer, and Shiny.
}
\examples{
\dontrun{
library(formattable)
# mtcars (mpg background in gradient: the higher, the redder)
as.htmlwidget(
formattable(mtcars, list(mpg = formatter("span",
style = x ~ style(display = "block",
"border-radius" = "4px",
"padding-right" = "4px",
color = "white",
"background-color" = rgb(x/max(x), 0, 0))))
)
)
# since an htmlwidget, composes well with other tags
library(htmltools)
browsable(
tagList(
tags$div( class="jumbotron"
,tags$h1( class = "text-center"
,tags$span(class = "glyphicon glyphicon-fire")
,"experimental as.htmlwidget at work"
)
)
,tags$div( class = "row"
,tags$div( class = "col-sm-2"
,tags$p(class="bg-primary", "Hi, I am formattable htmlwidget.")
)
,tags$div( class = "col-sm-6"
,as.htmlwidget( formattable( mtcars ) )
)
)
)
)
}
}
| /man/as.htmlwidget.formattable.Rd | permissive | githubfun/formattable | R | false | false | 1,726 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/render.R
\name{as.htmlwidget.formattable}
\alias{as.htmlwidget.formattable}
\title{Convert formattable to an htmlwidget}
\usage{
\method{as.htmlwidget}{formattable}(x, width = "100\%", height = NULL, ...)
}
\arguments{
\item{x}{a \code{formattable} object to convert}
\item{width}{a valid \code{CSS} width}
\item{height}{a valid \code{CSS} height}
\item{...}{reserved for more parameters}
}
\value{
a \code{htmlwidget} object
}
\description{
formattable was originally designed to work in \code{rmarkdown} environments.
Conversion of a formattable to a htmlwidget will allow use in other contexts
such as console, RStudio Viewer, and Shiny.
}
\examples{
\dontrun{
library(formattable)
# mtcars (mpg background in gradient: the higher, the redder)
as.htmlwidget(
formattable(mtcars, list(mpg = formatter("span",
style = x ~ style(display = "block",
"border-radius" = "4px",
"padding-right" = "4px",
color = "white",
"background-color" = rgb(x/max(x), 0, 0))))
)
)
# since an htmlwidget, composes well with other tags
library(htmltools)
browsable(
tagList(
tags$div( class="jumbotron"
,tags$h1( class = "text-center"
,tags$span(class = "glyphicon glyphicon-fire")
,"experimental as.htmlwidget at work"
)
)
,tags$div( class = "row"
,tags$div( class = "col-sm-2"
,tags$p(class="bg-primary", "Hi, I am formattable htmlwidget.")
)
,tags$div( class = "col-sm-6"
,as.htmlwidget( formattable( mtcars ) )
)
)
)
)
}
}
|
## Leading species Raster Derivation##
#needed libraries
library(rgdal)
library(sp)
library(raster)
library(snow)
startTime <- Sys.time()
setwd("C:/Users/bsmiley/Documents/Sask_work/Sask/species_prop")
beginCluster(30)
#Extract list of file names from working directory
filenames <- list.files(pattern=".tif$", full.names=FALSE)
#create raster stack of raster in 'filenames' list
species <- stack(filenames)
#create two rasters 1) the leading species proportion raster (% compostion for each cell (<1)) and
# 2) leading species layer where cell value equals raster with highest proportion according to:
#[1] "predictBETUPAP2_PROJ_CLIP.tif"
#[2] "predictLARILAR3_PROJ_CLIP.tif"
#[3] "predictPICEGLA2_PROJ_CLIP.tif"
#[4] "predictPICEMAR2_PROJ_CLIP.tif"
#[5] "predictPINUBAN2_PROJ_CLIP.tif"
#[6] "predictPOPUTRE2_PROJ_CLIP.tif"
leadSp_prop <- stackApply(species, indices=c(1,1,1,1,1,1), max)
leadSp_layer <- which.max(species)
##Add in unknwn species and non forest classes
setwd("C:/Users/bsmiley/Documents/Sask_work/Sask/SKmask")
mask2 <- raster("mask2_reproj.tif") # add forest-nonforest mask (projection=species projection)
leadSp_layer_newExtent <- extend(leadSp_layer, mask2, value=0) #make species extent=to mask
species_classes<- data.frame(oldclass=c(NA,0:6), newclass=c(7,7,1:6)) # create reclassify table
leadSp_reclass0 <- subs(leadSp_layer_newExtent, species_classes, by="oldclass", which="newclass") #reclassify
# "0" values (unknown species) to "7"
leadSp_laterwMask3 <- mask(leadSp_reclass0, mask2, maskvalue=0, updatevalue=0) # mask out non-forest
# areas with species to equal "0" non-forest
Sask30new <- raster("Sask30_new.tif")
leadSp_wMask_reproj <- projectRaster(leadSp_laterwMask3, crs="+proj=lcc +lat_1=49 +lat_2=77
+lat_0=0 +lon_0=-95 +x_0=0 +y_0=0 +ellps=GRS80
+towgs84=0,0,0,0,0,0,0 +units=m +no_defs")
leadSp_crop <- leadSp_wMask_reproj*Sask30new # crop to Saskatchewan borders
#export Rasters (proportion raster = signed floating, leading species raster = unsigned integer)
setwd("C:/Users/bsmiley/Documents/Sask_work/Sask/Leading_species")
writeRaster(leadSp_prop, "leadSp_prop.tif", format= "GTiff", datatype="FLT4S")
writeRaster(leadSp_crop, "leadSp_layer_reclass_v2.tif", format= "GTiff", datatype="INT1U")
endTime <- Sys.time()
elapsedTime <- endTime - startTime
#####WORKING#####################################################################################
leadSp_wMask2 <- cover(leadSp_layer, mask2)
writeRaster(mask2, "mask2_reproj.tif", format= "GTiff", datatype="INT1U")
leadSp_layer2 <- crop(leadSp_layer, mask2)
leadSp_wMask <- mask(leadSp_layer, mask3, maskvalue=0)
leadSp_2 <- merge(leadSp_layer2,mask2, overlap=TRUE)
leadSp_2_crop <- crop(leadSp_2,leadSp_layer)
leadSp_3_crop <- crop(leadSp_2_crop,leadSp_layer)
beginCluster(30)
setExtent(, mask, keepres=TRUE, snap=TRUE)
setExtent(mask2, leadSp_layer)
writeRaster(leadSp_layer_newExtent, "leadSp_newExtent.tif", format= "GTiff", datatype="INT1U")
extend(leadSp_layer, mask,value=NA)
##OTHER########################################################################################
# CONTROL READ/WRITE BLOCKSIZE
( tr <- blockSize(rb1000) )
s <- writeStart(rb1000[[1]], filename="test.tif", format="GTiff", overwrite=TRUE)
for (i in 1:tr$n) {
v <- getValuesBlock(rb1000, row=tr$row[i], nrows=tr$nrows)
writeValues(s, apply(v, MARGIN=1, FUN=which.max), tr$row[i])
}
s <- writeStop(s)
| /Byron_leading_species_derivation_v2.r | no_license | cboisvenue/RCodeSK | R | false | false | 3,489 | r | ## Leading species Raster Derivation##
#needed libraries
library(rgdal)
library(sp)
library(raster)
library(snow)
startTime <- Sys.time()
setwd("C:/Users/bsmiley/Documents/Sask_work/Sask/species_prop")
beginCluster(30)
#Extract list of file names from working directory
filenames <- list.files(pattern=".tif$", full.names=FALSE)
#create raster stack of raster in 'filenames' list
species <- stack(filenames)
#create two rasters 1) the leading species proportion raster (% compostion for each cell (<1)) and
# 2) leading species layer where cell value equals raster with highest proportion according to:
#[1] "predictBETUPAP2_PROJ_CLIP.tif"
#[2] "predictLARILAR3_PROJ_CLIP.tif"
#[3] "predictPICEGLA2_PROJ_CLIP.tif"
#[4] "predictPICEMAR2_PROJ_CLIP.tif"
#[5] "predictPINUBAN2_PROJ_CLIP.tif"
#[6] "predictPOPUTRE2_PROJ_CLIP.tif"
leadSp_prop <- stackApply(species, indices=c(1,1,1,1,1,1), max)
leadSp_layer <- which.max(species)
##Add in unknwn species and non forest classes
setwd("C:/Users/bsmiley/Documents/Sask_work/Sask/SKmask")
mask2 <- raster("mask2_reproj.tif") # add forest-nonforest mask (projection=species projection)
leadSp_layer_newExtent <- extend(leadSp_layer, mask2, value=0) #make species extent=to mask
species_classes<- data.frame(oldclass=c(NA,0:6), newclass=c(7,7,1:6)) # create reclassify table
leadSp_reclass0 <- subs(leadSp_layer_newExtent, species_classes, by="oldclass", which="newclass") #reclassify
# "0" values (unknown species) to "7"
leadSp_laterwMask3 <- mask(leadSp_reclass0, mask2, maskvalue=0, updatevalue=0) # mask out non-forest
# areas with species to equal "0" non-forest
Sask30new <- raster("Sask30_new.tif")
leadSp_wMask_reproj <- projectRaster(leadSp_laterwMask3, crs="+proj=lcc +lat_1=49 +lat_2=77
+lat_0=0 +lon_0=-95 +x_0=0 +y_0=0 +ellps=GRS80
+towgs84=0,0,0,0,0,0,0 +units=m +no_defs")
leadSp_crop <- leadSp_wMask_reproj*Sask30new # crop to Saskatchewan borders
#export Rasters (proportion raster = signed floating, leading species raster = unsigned integer)
setwd("C:/Users/bsmiley/Documents/Sask_work/Sask/Leading_species")
writeRaster(leadSp_prop, "leadSp_prop.tif", format= "GTiff", datatype="FLT4S")
writeRaster(leadSp_crop, "leadSp_layer_reclass_v2.tif", format= "GTiff", datatype="INT1U")
endTime <- Sys.time()
elapsedTime <- endTime - startTime
#####WORKING#####################################################################################
leadSp_wMask2 <- cover(leadSp_layer, mask2)
writeRaster(mask2, "mask2_reproj.tif", format= "GTiff", datatype="INT1U")
leadSp_layer2 <- crop(leadSp_layer, mask2)
leadSp_wMask <- mask(leadSp_layer, mask3, maskvalue=0)
leadSp_2 <- merge(leadSp_layer2,mask2, overlap=TRUE)
leadSp_2_crop <- crop(leadSp_2,leadSp_layer)
leadSp_3_crop <- crop(leadSp_2_crop,leadSp_layer)
beginCluster(30)
setExtent(, mask, keepres=TRUE, snap=TRUE)
setExtent(mask2, leadSp_layer)
writeRaster(leadSp_layer_newExtent, "leadSp_newExtent.tif", format= "GTiff", datatype="INT1U")
extend(leadSp_layer, mask,value=NA)
##OTHER########################################################################################
# CONTROL READ/WRITE BLOCKSIZE
( tr <- blockSize(rb1000) )
s <- writeStart(rb1000[[1]], filename="test.tif", format="GTiff", overwrite=TRUE)
for (i in 1:tr$n) {
v <- getValuesBlock(rb1000, row=tr$row[i], nrows=tr$nrows)
writeValues(s, apply(v, MARGIN=1, FUN=which.max), tr$row[i])
}
s <- writeStop(s)
|
#Set working directory
#workingDir = args[1];
workingDir="~/PfrenderLab/WGCNA_PA42_v4.1"
setwd(workingDir);
# Load libraries
library(ggplot2)
# Load the expression and trait data saved in the first part
lnames1 = load(file = "PA42_v4.1_dataInputTol.RData");
# Load network data saved in the second part.
lnames2 = load(file = "PA42_v4.1_networkConstructionTol_auto_threshold8_signedNowick.RData");
ddr <- read.csv(file="~/PfrenderLab/PA42_v4.1/DDRGOTF_Dmel_PA42_v4.1_combined_geneIDs_uniq.csv")
SETDDR <- ddr[,1]
#Get module color list
colorList = unique(moduleColors)
numRow = length(colorList)
colorSets <- data.frame(matrix(ncol = 2, nrow = numRow))
#Retrieve the percent of genes in each module
for(var in 1:length(colorList))
{
#Print the color of the module
#print(colorList[var])
#Add DDR data for the current module
numDDR <- which(names(datExprTol)[moduleColors==colorList[var]] %in% SETDDR)
colorSets[var,1] <- colorList[var]
colorSets[var,2] <- length(numDDR)
#colorSets[var,2] <- length(numDDR)/length(names(datExprInter)[moduleColors==colorList[var]])
#Print the number of DDR genes in the current module
#print(length(numDDR))
}
#Set column names
names(colorSets) = c("Color","Genes")
#Create stacked bar plot
jpeg("barPlotTol_numberDDR_signedNowick.jpg", width = 844, height = 596)
colorPlot <- ggplot(colorSets, aes(y=Genes, x=Color)) +
geom_bar(position="stack", stat="identity", fill="steelblue")
colorPlot + theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1))
dev.off()
| /Archived/NetworkAnalysis/barPlot_effectSubsets_DDR_tolSubset.R | no_license | ElizabethBrooks/TranscriptomeAnalysisPipeline_DaphniaUVTolerance | R | false | false | 1,535 | r | #Set working directory
#workingDir = args[1];
workingDir="~/PfrenderLab/WGCNA_PA42_v4.1"
setwd(workingDir);
# Load libraries
library(ggplot2)
# Load the expression and trait data saved in the first part
lnames1 = load(file = "PA42_v4.1_dataInputTol.RData");
# Load network data saved in the second part.
lnames2 = load(file = "PA42_v4.1_networkConstructionTol_auto_threshold8_signedNowick.RData");
ddr <- read.csv(file="~/PfrenderLab/PA42_v4.1/DDRGOTF_Dmel_PA42_v4.1_combined_geneIDs_uniq.csv")
SETDDR <- ddr[,1]
#Get module color list
colorList = unique(moduleColors)
numRow = length(colorList)
colorSets <- data.frame(matrix(ncol = 2, nrow = numRow))
#Retrieve the percent of genes in each module
for(var in 1:length(colorList))
{
#Print the color of the module
#print(colorList[var])
#Add DDR data for the current module
numDDR <- which(names(datExprTol)[moduleColors==colorList[var]] %in% SETDDR)
colorSets[var,1] <- colorList[var]
colorSets[var,2] <- length(numDDR)
#colorSets[var,2] <- length(numDDR)/length(names(datExprInter)[moduleColors==colorList[var]])
#Print the number of DDR genes in the current module
#print(length(numDDR))
}
#Set column names
names(colorSets) = c("Color","Genes")
#Create stacked bar plot
jpeg("barPlotTol_numberDDR_signedNowick.jpg", width = 844, height = 596)
colorPlot <- ggplot(colorSets, aes(y=Genes, x=Color)) +
geom_bar(position="stack", stat="identity", fill="steelblue")
colorPlot + theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1))
dev.off()
|
# Import helper function to download file
source("get_file.R")
file <- get_file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", "./data", "data.zip", unzip = TRUE)
# Extract the file
unzip(file)
data_file_path <- file.path("./household_power_consumption.txt")
# Read the raw data into data
data <- read.table(data_file_path, header = TRUE, sep = ";", colClasses = c(rep("character",2), rep("numeric",7)), na.strings = "?")
# Use dplyr for data manipulation
library(dplyr)
# Use lubridate for date parsing
library(lubridate)
d <- tbl_df(data)
rm("data")
# Filter only the specified date range
# then process the date and time column into one new column called DateTime
# Select the sub_metering data along with DateTime
selected_data <- filter(d, Date == "2/2/2007" | Date == '1/2/2007') %>%
mutate(DateTime = dmy_hms(paste(Date, Time))) %>%
select(Sub_metering_1, Sub_metering_2, Sub_metering_3, DateTime)
# Plot the image and save it as plot3.png
png('./plot3.png')
with(selected_data, {
plot(DateTime, Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering")
lines(DateTime, Sub_metering_2, col = "red")
lines(DateTime, Sub_metering_3, col = "blue")
legend("topright", lty = 1, col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
})
dev.off() | /plot3.R | no_license | kylase-learning/ExData_Plotting1 | R | false | false | 1,368 | r | # Import helper function to download file
source("get_file.R")
file <- get_file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", "./data", "data.zip", unzip = TRUE)
# Extract the file
unzip(file)
data_file_path <- file.path("./household_power_consumption.txt")
# Read the raw data into data
data <- read.table(data_file_path, header = TRUE, sep = ";", colClasses = c(rep("character",2), rep("numeric",7)), na.strings = "?")
# Use dplyr for data manipulation
library(dplyr)
# Use lubridate for date parsing
library(lubridate)
d <- tbl_df(data)
rm("data")
# Filter only the specified date range
# then process the date and time column into one new column called DateTime
# Select the sub_metering data along with DateTime
selected_data <- filter(d, Date == "2/2/2007" | Date == '1/2/2007') %>%
mutate(DateTime = dmy_hms(paste(Date, Time))) %>%
select(Sub_metering_1, Sub_metering_2, Sub_metering_3, DateTime)
# Plot the image and save it as plot3.png
png('./plot3.png')
with(selected_data, {
plot(DateTime, Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering")
lines(DateTime, Sub_metering_2, col = "red")
lines(DateTime, Sub_metering_3, col = "blue")
legend("topright", lty = 1, col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
})
dev.off() |
library(magrittr)
library(readr)
loadCovid = function(pth)
paste0('extdata/covid_data/', pth) %>%
system.file(package = "readtv", mustWork = TRUE) %>%
read_csv
covid_global = loadCovid('global/owid-covid-data.csv')
usethis::use_data(covid_global, overwrite = TRUE)
covid_usa = loadCovid('states/us-states.csv')
usethis::use_data(covid_usa, overwrite = TRUE)
| /data-raw/covid.R | permissive | JDMusc/READ-TV | R | false | false | 367 | r | library(magrittr)
library(readr)
loadCovid = function(pth)
paste0('extdata/covid_data/', pth) %>%
system.file(package = "readtv", mustWork = TRUE) %>%
read_csv
covid_global = loadCovid('global/owid-covid-data.csv')
usethis::use_data(covid_global, overwrite = TRUE)
covid_usa = loadCovid('states/us-states.csv')
usethis::use_data(covid_usa, overwrite = TRUE)
|
## This file is a hack to remove false flags in R CMD check when using foreach
## iterations.
utils::globalVariables(c("iter", "k"))
| /R/zzz.R | no_license | cran/monoClust | R | false | false | 136 | r | ## This file is a hack to remove false flags in R CMD check when using foreach
## iterations.
utils::globalVariables(c("iter", "k"))
|
/bit64/R/sortuse64.R | no_license | ingted/R-Examples | R | false | false | 20,478 | r | ||
#!/usr/bin/Rscript
library(RdbiPgSQL)
conn <- dbConnect(PgSQL(), host="localhost", dbname="ruby", user="dmg", password="patito32")
#res <- dbSendQuery(conn, "select date, avg(churn) from (
#select extract(year from datecomm) * 12 + extract(month from datecomm) as date, sumadd-sumrem as churn from commitsum natural join metadata
#) as rip group by date;");
#
res <- dbSendQuery(conn, "select extract(year from datecomm) * 12 + extract(month from datecomm) as date, sumadd-sumrem as churn from commitsum natural join metadata ;");
bydate <- dbGetResult(res)
| /softChange/pg/size.r | no_license | hackerlank/hacking | R | false | false | 570 | r | #!/usr/bin/Rscript
library(RdbiPgSQL)
conn <- dbConnect(PgSQL(), host="localhost", dbname="ruby", user="dmg", password="patito32")
#res <- dbSendQuery(conn, "select date, avg(churn) from (
#select extract(year from datecomm) * 12 + extract(month from datecomm) as date, sumadd-sumrem as churn from commitsum natural join metadata
#) as rip group by date;");
#
res <- dbSendQuery(conn, "select extract(year from datecomm) * 12 + extract(month from datecomm) as date, sumadd-sumrem as churn from commitsum natural join metadata ;");
bydate <- dbGetResult(res)
|
# Calculate daily respiration rates for roots, wood and foliage
# Calculate daily below ground root respiration rates
# Coarse root respiration rates
branch.resp = read.csv("raw_data/WTC_TEMP_CM_GX-RBRANCH_20140513-20140522_L1_v1.csv") # Rbranch: branch respiration (nmol CO2 g-1 s-1)
branch.resp$date = as.Date(branch.resp$date)
branch.resp = subset(branch.resp, date %in% as.Date("2014-05-13")) # Only consider the pre-girdling data
branch.resp$chamber_type = as.factor( ifelse(branch.resp$chamber %in% drought.chamb, "drought", "watered") )
branch.resp$Treatment <- as.factor(paste(branch.resp$T_treatment, branch.resp$chamber_type))
# # Test for any significant difference between the treatment groups
# boxplot(branch.resp$Rbranch ~ branch.resp$Treatment, xlab="Treatment", ylab=(expression("Branch wood respiration"~"(nmol CO2 "*g^"-1"*" "*s^"-1"*")")))
summary(aov(Rbranch ~ T_treatment * chamber_type, data = branch.resp)) # YES, there is significant difference only accross the temperature treatments
# summary(aov(Rbranch ~ Treatment, data = branch.resp)) # YES, there is significant difference accross the treatments
# t.test(branch.resp$Rbranch ~ branch.resp$T_treatment) # YES, there is significant difference accross temperatures
# t.test(branch.resp$Rbranch ~ branch.resp$chamber_type) # NO, there is no significant difference accross drought/watered treatments
############ So how to group the treatments????????
rd15.root <- summaryBy(Rbranch ~ T_treatment, data=branch.resp, FUN=c(mean))
names(rd15.root)[ncol(rd15.root)] = c("rd15.coarseroot")
rd15.root$rd15.coarseroot = rd15.root$rd15.coarseroot * (10^-9 * 12) * (3600 * 24) # unit conversion from nmolCO2 g-1 s-1 to gC gC-1 d-1
# rd15.root$rd15.coarseroot = rd15.root$rd15.coarseroot * (10^-9 * 12) * (3600 * 24) * (1/c1) # unit conversion from nmolCO2 g-1 s-1 to gC gC-1 d-1
# rd15.coarseroot$rd15.coarseroot_SE = rd15.coarseroot$rd15.coarseroot_SE * (10^-9 * 12) * (3600 * 24) * (1/c1) # unit conversion from nmolCO2 g-1 s-1 to gC gC-1 d-1
# Bole and big tap root respiration rates
bole.resp = read.csv("raw_data/WTC_TEMP_CM_WTCFLUX-STEM_20140528_L1_v1.csv") # Bole root respiration (nmol CO2 g-1 s-1)
bole.resp$chamber_type = as.factor( ifelse(bole.resp$chamber %in% drought.chamb, "drought", "watered") )
bole.resp$Treatment <- as.factor(paste(bole.resp$T_treatment, bole.resp$chamber_type))
# # Test for any significant difference between the treatment groups
# boxplot(bole.resp$R_stem_nmol ~ bole.resp$Treatment, xlab="Treatment", ylab=(expression("Bole wood respiration"~"(nmol CO2 "*g^"-1"*" "*s^"-1"*")")))
summary(aov(R_stem_nmol ~ T_treatment * chamber_type, data = bole.resp)) # NO, there is no significant difference accross the treatments
# summary(aov(R_stem_nmol ~ Treatment, data = bole.resp)) # NO, there is no significant difference accross the treatments
# t.test(bole.resp$R_stem_nmol ~ bole.resp$T_treatment) # NO, there is no significant difference accross tepmeratures
# t.test(bole.resp$R_stem_nmol ~ bole.resp$chamber_type) # NO, there is no significant difference accross drought/watered treatments
rd15.root$rd15.boleroot = mean(bole.resp$R_stem_nmol)
rd15.root$rd15.boleroot = rd15.root$rd15.boleroot * (10^-9 * 12) * (3600 * 24) # unit conversion from nmolCO2 g-1 s-1 to gC gC-1 d-1
# rd15.root$rd15.boleroot = rd15.root$rd15.boleroot * (10^-9 * 12) * (3600 * 24) * (1/c1) # unit conversion from nmolCO2 g-1 s-1 to gC gC-1 d-1
# Fine root respiration rates (Constant)
# Fine root respiration rate = 10 nmolCO2 g-1 s-1 (Ref: Drake et al. 2017: GREAT exp data; Mark's Email)
rd25.fineroot = 10 * (10^-9 * 12) * (3600 * 24) # unit conversion from nmolCO2 g-1 s-1 to gC gC-1 d-1
# rd25.fineroot = 10 * (10^-9 * 12) * (3600 * 24) * (1/c1) # unit conversion from nmolCO2 g-1 s-1 to gC gC-1 d-1
rd15.root$rd15.fineroot = rd25.fineroot * q10^((15-25)/10)
# Intermediate root respiration rates
rd15.root$rd15.intermediateroot = exp ((log(rd15.root$rd15.coarseroot) + log(rd15.root$rd15.fineroot))/2 ) # unit = gC gC-1 d-1
#----------------------------------------------------------------------------------------------------------------
# import site weather data, take only soil temperatures at 10 cm depth, format date stuff
files <- list.files(path = "raw_data/WTC_TEMP_CM_WTCMET", pattern = ".csv", full.names = TRUE)
temp <- lapply(files, fread, sep=",")
met.data <- rbindlist( temp )
met.data <- met.data[ , c("chamber","DateTime","Tair_al","SoilTemp_Avg.1.","SoilTemp_Avg.2.")]
met.data$SoilTemp <- rowMeans(met.data[,c("SoilTemp_Avg.1.","SoilTemp_Avg.2.")], na.rm=TRUE)
met.data$Date <- as.Date(met.data$DateTime)
# need to turn the datetime into hms
met.data$DateTime <- ymd_hms(met.data$DateTime)
met.data$time <- format(met.data$DateTime, format='%H:%M:%S')
# subset by Date range of experiment
met.data <- subset(met.data[, c("chamber","Date","time","Tair_al","SoilTemp")], Date >= "2013-09-17" & Date <= "2014-05-26")
met.data$chamber = as.factor(met.data$chamber)
met.data = merge(met.data, unique(height.dia[,c("chamber","T_treatment")]), by="chamber")
# Remove the data with missing air and soil temperatures from met data
# met.data = met.data[complete.cases(met.data$SoilTemp),]
# met.data = met.data[complete.cases(met.data$Tair_al),]
# met.data.na1 = met.data[is.na(met.data$SoilTemp),] # Check any NA values for soil temperature
# met.data.na2 = met.data[is.na(met.data$Tair_al),] # Check any NA values for air temperature
# met.data[Date == as.Date("2013-10-06")]
# need to calculate Rdark through time using rdarkq10 equation by treatment
met.data <- merge(met.data, rd15.root, by=c("T_treatment"))
met.data <- merge(met.data, Tair.final[,c("Date","T_treatment","rd25.foliage")], by=c("Date","T_treatment"))
met.data[,c("Rd.fineroot","Rd.intermediateroot","Rd.coarseroot","Rd.boleroot")] =
with(met.data, met.data[,c("rd15.fineroot","rd15.intermediateroot","rd15.coarseroot","rd15.boleroot")] *
q10^((SoilTemp-15)/10)) # unit (gC per gC root per day)
# calculate daily stem and branch respiration rates
met.data[,c("Rd.stem","Rd.branch")] =
with(met.data, met.data[,c("rd15.boleroot","rd15.coarseroot")] * q10^((Tair_al-15)/10)) # unit (gC per gC wood per day)
# calculate foliage respiration rates in 15-mins interval
met.data[,"Rd.foliage"] = with(met.data, met.data[,"rd25.foliage"] * q10^((Tair_al-25)/10)) # unit (gC per gC foliage per day)
# Calculate daily mean respiration rates for all tree components by summing all 15-mins data for each day
Rd <- summaryBy(Rd.foliage+Rd.stem+Rd.branch+Rd.fineroot+Rd.intermediateroot+Rd.coarseroot+Rd.boleroot ~ Date+T_treatment, data=met.data, FUN=mean, na.rm=TRUE) # Sum of all same day Rd
# colSums(is.na(Rd)) # Check any NA values for Rd
# Rd.na = Rd[is.na(Rd$Rd.foliage.mean),]
# Fill missing values due to atmospheric data gaps
Rd.sub1 = subset(Rd, T_treatment %in% as.factor("ambient"))
for (i in 3:ncol(Rd.sub1)) {
Rd.sub1[,i] = na.approx(Rd.sub1[,..i])
}
Rd.sub2 = subset(Rd, T_treatment %in% as.factor("elevated"))
for (i in 3:ncol(Rd.sub2)) {
Rd.sub2[,i] = na.approx(Rd.sub2[,..i])
}
Rd = rbind(Rd.sub1,Rd.sub2)
# names(Rd)[3:ncol(Rd)] = c("Rd.foliage","Rd.stem","Rd.branch","Rd.fineroot","Rd.intermediateroot","Rd.coarseroot","Rd.boleroot")
# colSums(is.na(Rd.fill)) # Check any NA values for Rd
# Merge respiration rates with daily woodmass, rootmass partitioning, GPP, Ra, LA, mass pool data
data.all = merge(data.all, Rd, by=c("Date","T_treatment"), all=TRUE)
data.all$Treatment <- as.factor(paste(data.all$T_treatment, data.all$chamber_type))
# #----------------------------------------------------------------------------------------------------------------
# # write csv file with daily respiration rates for roots, wood and foliage
# write.csv(Rd, "processed_data/Rd.csv", row.names=FALSE) # unit: gC per gC plant per day
#----------------------------------------------------------------------------------------------------------------
# Plot daily respiration rates for roots, wood and foliage
Rd.melt <- melt(Rd, id.vars = c("Date","T_treatment"))
i = 0
font.size = 10
plot = list()
meas = as.factor(c("Rd.foliage.mean","Rd.stem.mean","Rd.branch.mean","Rd.fineroot.mean","Rd.intermediateroot.mean","Rd.coarseroot.mean","Rd.boleroot.mean"))
# title = as.character(c("A","B","C","D"))
pd <- position_dodge(0) # move the overlapped errorbars horizontally
for (p in 1:length(meas)) {
Rd.melt.sub = subset(Rd.melt,variable %in% meas[p])
i = i + 1
plot[[i]] = ggplot(Rd.melt.sub, aes(x=Date, y=value, group = T_treatment, colour=T_treatment)) +
geom_point(position=pd) +
geom_line(position=pd,data = Rd.melt.sub, aes(x = Date, y = value, group = T_treatment, colour=T_treatment)) +
ylab(expression(R[foliage]~"(g C "*g^"-1"*" C "*d^"-1"*")")) + xlab("") +
scale_x_date(date_labels="%b %y",date_breaks ="1 month",limits = c(min(Rd$Date)-2, max(Rd$Date)+2)) +
labs(colour="Temperature") +
scale_color_manual(labels = c("ambient", "elevated"), values = c("blue", "red")) +
theme_bw() +
theme(legend.title = element_text(colour="black", size=font.size)) +
theme(legend.text = element_text(colour="black", size=font.size)) +
theme(legend.position = c(0.9,0.75), legend.box = "horizontal") + theme(legend.key.height=unit(0.9,"line")) +
theme(legend.key = element_blank()) +
theme(text = element_text(size=font.size)) +
theme(axis.title.x = element_blank()) +
theme(axis.title.y = element_text(size = font.size, vjust=0.3)) +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
if (p==2) {
plot[[i]] = plot[[i]] + ylab(expression(R[bolewood]~"(g C "*g^"-1"*" C "*d^"-1"*")"))
# plot[[i]] = plot[[i]] + theme(plot.margin=unit(c(0.4, 0.4, 0.4, 0.75), units="line"))
}
if (p==3) {
plot[[i]] = plot[[i]] + ylab(expression(R[branchwood]~"(g C "*g^"-1"*" C "*d^"-1"*")"))
# plot[[i]] = plot[[i]] + theme(plot.margin=unit(c(0.4, 0.4, 0.4, 0.75), units="line"))
}
if (p==4) {
plot[[i]] = plot[[i]] + ylab(expression(R[fineroot]~"(g C "*g^"-1"*" C "*d^"-1"*")"))
# plot[[i]] = plot[[i]] + theme(plot.margin=unit(c(0.4, 0.4, 0.4, 0.75), units="line"))
}
if (p==5) {
plot[[i]] = plot[[i]] + ylab(expression(R[intermediateroot]~"(g C "*g^"-1"*" C "*d^"-1"*")"))
# plot[[i]] = plot[[i]] + theme(plot.margin=unit(c(0.4, 0.4, 0.4, 0.75), units="line"))
}
if (p==6) {
plot[[i]] = plot[[i]] + ylab(expression(R[coarseroot]~"(g C "*g^"-1"*" C "*d^"-1"*")"))
}
if (p==7) {
plot[[i]] = plot[[i]] + ylab(expression(R[boleroot]~"(g C "*g^"-1"*" C "*d^"-1"*")"))
}
}
png("output/3.Rd.png", units="px", width=3000, height=1500, res=130)
# do.call(grid.arrange, plot)
ncols = 1
do.call("grid.arrange", c(plot, ncol=ncols))
dev.off()
do.call("grid.arrange", c(plot, ncol=ncols))
| /R/daily_R_rates.R | no_license | DataFusion18/DA_WTC3 | R | false | false | 10,845 | r | # Calculate daily respiration rates for roots, wood and foliage
# Calculate daily below ground root respiration rates
# Coarse root respiration rates
branch.resp = read.csv("raw_data/WTC_TEMP_CM_GX-RBRANCH_20140513-20140522_L1_v1.csv") # Rbranch: branch respiration (nmol CO2 g-1 s-1)
branch.resp$date = as.Date(branch.resp$date)
branch.resp = subset(branch.resp, date %in% as.Date("2014-05-13")) # Only consider the pre-girdling data
branch.resp$chamber_type = as.factor( ifelse(branch.resp$chamber %in% drought.chamb, "drought", "watered") )
branch.resp$Treatment <- as.factor(paste(branch.resp$T_treatment, branch.resp$chamber_type))
# # Test for any significant difference between the treatment groups
# boxplot(branch.resp$Rbranch ~ branch.resp$Treatment, xlab="Treatment", ylab=(expression("Branch wood respiration"~"(nmol CO2 "*g^"-1"*" "*s^"-1"*")")))
summary(aov(Rbranch ~ T_treatment * chamber_type, data = branch.resp)) # YES, there is significant difference only accross the temperature treatments
# summary(aov(Rbranch ~ Treatment, data = branch.resp)) # YES, there is significant difference accross the treatments
# t.test(branch.resp$Rbranch ~ branch.resp$T_treatment) # YES, there is significant difference accross temperatures
# t.test(branch.resp$Rbranch ~ branch.resp$chamber_type) # NO, there is no significant difference accross drought/watered treatments
############ So how to group the treatments????????
rd15.root <- summaryBy(Rbranch ~ T_treatment, data=branch.resp, FUN=c(mean))
names(rd15.root)[ncol(rd15.root)] = c("rd15.coarseroot")
rd15.root$rd15.coarseroot = rd15.root$rd15.coarseroot * (10^-9 * 12) * (3600 * 24) # unit conversion from nmolCO2 g-1 s-1 to gC gC-1 d-1
# rd15.root$rd15.coarseroot = rd15.root$rd15.coarseroot * (10^-9 * 12) * (3600 * 24) * (1/c1) # unit conversion from nmolCO2 g-1 s-1 to gC gC-1 d-1
# rd15.coarseroot$rd15.coarseroot_SE = rd15.coarseroot$rd15.coarseroot_SE * (10^-9 * 12) * (3600 * 24) * (1/c1) # unit conversion from nmolCO2 g-1 s-1 to gC gC-1 d-1
# Bole and big tap root respiration rates
bole.resp = read.csv("raw_data/WTC_TEMP_CM_WTCFLUX-STEM_20140528_L1_v1.csv") # Bole root respiration (nmol CO2 g-1 s-1)
bole.resp$chamber_type = as.factor( ifelse(bole.resp$chamber %in% drought.chamb, "drought", "watered") )
bole.resp$Treatment <- as.factor(paste(bole.resp$T_treatment, bole.resp$chamber_type))
# # Test for any significant difference between the treatment groups
# boxplot(bole.resp$R_stem_nmol ~ bole.resp$Treatment, xlab="Treatment", ylab=(expression("Bole wood respiration"~"(nmol CO2 "*g^"-1"*" "*s^"-1"*")")))
summary(aov(R_stem_nmol ~ T_treatment * chamber_type, data = bole.resp)) # NO, there is no significant difference accross the treatments
# summary(aov(R_stem_nmol ~ Treatment, data = bole.resp)) # NO, there is no significant difference accross the treatments
# t.test(bole.resp$R_stem_nmol ~ bole.resp$T_treatment) # NO, there is no significant difference accross tepmeratures
# t.test(bole.resp$R_stem_nmol ~ bole.resp$chamber_type) # NO, there is no significant difference accross drought/watered treatments
rd15.root$rd15.boleroot = mean(bole.resp$R_stem_nmol)
rd15.root$rd15.boleroot = rd15.root$rd15.boleroot * (10^-9 * 12) * (3600 * 24) # unit conversion from nmolCO2 g-1 s-1 to gC gC-1 d-1
# rd15.root$rd15.boleroot = rd15.root$rd15.boleroot * (10^-9 * 12) * (3600 * 24) * (1/c1) # unit conversion from nmolCO2 g-1 s-1 to gC gC-1 d-1
# Fine root respiration rates (Constant)
# Fine root respiration rate = 10 nmolCO2 g-1 s-1 (Ref: Drake et al. 2017: GREAT exp data; Mark's Email)
rd25.fineroot = 10 * (10^-9 * 12) * (3600 * 24) # unit conversion from nmolCO2 g-1 s-1 to gC gC-1 d-1
# rd25.fineroot = 10 * (10^-9 * 12) * (3600 * 24) * (1/c1) # unit conversion from nmolCO2 g-1 s-1 to gC gC-1 d-1
rd15.root$rd15.fineroot = rd25.fineroot * q10^((15-25)/10)
# Intermediate root respiration rates
rd15.root$rd15.intermediateroot = exp ((log(rd15.root$rd15.coarseroot) + log(rd15.root$rd15.fineroot))/2 ) # unit = gC gC-1 d-1
#----------------------------------------------------------------------------------------------------------------
# import site weather data, take only soil temperatures at 10 cm depth, format date stuff
files <- list.files(path = "raw_data/WTC_TEMP_CM_WTCMET", pattern = ".csv", full.names = TRUE)
temp <- lapply(files, fread, sep=",")
met.data <- rbindlist( temp )
met.data <- met.data[ , c("chamber","DateTime","Tair_al","SoilTemp_Avg.1.","SoilTemp_Avg.2.")]
met.data$SoilTemp <- rowMeans(met.data[,c("SoilTemp_Avg.1.","SoilTemp_Avg.2.")], na.rm=TRUE)
met.data$Date <- as.Date(met.data$DateTime)
# need to turn the datetime into hms
met.data$DateTime <- ymd_hms(met.data$DateTime)
met.data$time <- format(met.data$DateTime, format='%H:%M:%S')
# subset by Date range of experiment
met.data <- subset(met.data[, c("chamber","Date","time","Tair_al","SoilTemp")], Date >= "2013-09-17" & Date <= "2014-05-26")
met.data$chamber = as.factor(met.data$chamber)
met.data = merge(met.data, unique(height.dia[,c("chamber","T_treatment")]), by="chamber")
# Remove the data with missing air and soil temperatures from met data
# met.data = met.data[complete.cases(met.data$SoilTemp),]
# met.data = met.data[complete.cases(met.data$Tair_al),]
# met.data.na1 = met.data[is.na(met.data$SoilTemp),] # Check any NA values for soil temperature
# met.data.na2 = met.data[is.na(met.data$Tair_al),] # Check any NA values for air temperature
# met.data[Date == as.Date("2013-10-06")]
# need to calculate Rdark through time using rdarkq10 equation by treatment
met.data <- merge(met.data, rd15.root, by=c("T_treatment"))
met.data <- merge(met.data, Tair.final[,c("Date","T_treatment","rd25.foliage")], by=c("Date","T_treatment"))
met.data[,c("Rd.fineroot","Rd.intermediateroot","Rd.coarseroot","Rd.boleroot")] =
with(met.data, met.data[,c("rd15.fineroot","rd15.intermediateroot","rd15.coarseroot","rd15.boleroot")] *
q10^((SoilTemp-15)/10)) # unit (gC per gC root per day)
# calculate daily stem and branch respiration rates
met.data[,c("Rd.stem","Rd.branch")] =
with(met.data, met.data[,c("rd15.boleroot","rd15.coarseroot")] * q10^((Tair_al-15)/10)) # unit (gC per gC wood per day)
# calculate foliage respiration rates in 15-mins interval
met.data[,"Rd.foliage"] = with(met.data, met.data[,"rd25.foliage"] * q10^((Tair_al-25)/10)) # unit (gC per gC foliage per day)
# Calculate daily mean respiration rates for all tree components by summing all 15-mins data for each day
Rd <- summaryBy(Rd.foliage+Rd.stem+Rd.branch+Rd.fineroot+Rd.intermediateroot+Rd.coarseroot+Rd.boleroot ~ Date+T_treatment, data=met.data, FUN=mean, na.rm=TRUE) # Sum of all same day Rd
# colSums(is.na(Rd)) # Check any NA values for Rd
# Rd.na = Rd[is.na(Rd$Rd.foliage.mean),]
# Fill missing values due to atmospheric data gaps
Rd.sub1 = subset(Rd, T_treatment %in% as.factor("ambient"))
for (i in 3:ncol(Rd.sub1)) {
Rd.sub1[,i] = na.approx(Rd.sub1[,..i])
}
Rd.sub2 = subset(Rd, T_treatment %in% as.factor("elevated"))
for (i in 3:ncol(Rd.sub2)) {
Rd.sub2[,i] = na.approx(Rd.sub2[,..i])
}
Rd = rbind(Rd.sub1,Rd.sub2)
# names(Rd)[3:ncol(Rd)] = c("Rd.foliage","Rd.stem","Rd.branch","Rd.fineroot","Rd.intermediateroot","Rd.coarseroot","Rd.boleroot")
# colSums(is.na(Rd.fill)) # Check any NA values for Rd
# Merge respiration rates with daily woodmass, rootmass partitioning, GPP, Ra, LA, mass pool data
data.all = merge(data.all, Rd, by=c("Date","T_treatment"), all=TRUE)
data.all$Treatment <- as.factor(paste(data.all$T_treatment, data.all$chamber_type))
# #----------------------------------------------------------------------------------------------------------------
# # write csv file with daily respiration rates for roots, wood and foliage
# write.csv(Rd, "processed_data/Rd.csv", row.names=FALSE) # unit: gC per gC plant per day
#----------------------------------------------------------------------------------------------------------------
# Plot daily respiration rates for roots, wood and foliage
Rd.melt <- melt(Rd, id.vars = c("Date","T_treatment"))
i = 0
font.size = 10
plot = list()
meas = as.factor(c("Rd.foliage.mean","Rd.stem.mean","Rd.branch.mean","Rd.fineroot.mean","Rd.intermediateroot.mean","Rd.coarseroot.mean","Rd.boleroot.mean"))
# title = as.character(c("A","B","C","D"))
pd <- position_dodge(0) # move the overlapped errorbars horizontally
for (p in 1:length(meas)) {
Rd.melt.sub = subset(Rd.melt,variable %in% meas[p])
i = i + 1
plot[[i]] = ggplot(Rd.melt.sub, aes(x=Date, y=value, group = T_treatment, colour=T_treatment)) +
geom_point(position=pd) +
geom_line(position=pd,data = Rd.melt.sub, aes(x = Date, y = value, group = T_treatment, colour=T_treatment)) +
ylab(expression(R[foliage]~"(g C "*g^"-1"*" C "*d^"-1"*")")) + xlab("") +
scale_x_date(date_labels="%b %y",date_breaks ="1 month",limits = c(min(Rd$Date)-2, max(Rd$Date)+2)) +
labs(colour="Temperature") +
scale_color_manual(labels = c("ambient", "elevated"), values = c("blue", "red")) +
theme_bw() +
theme(legend.title = element_text(colour="black", size=font.size)) +
theme(legend.text = element_text(colour="black", size=font.size)) +
theme(legend.position = c(0.9,0.75), legend.box = "horizontal") + theme(legend.key.height=unit(0.9,"line")) +
theme(legend.key = element_blank()) +
theme(text = element_text(size=font.size)) +
theme(axis.title.x = element_blank()) +
theme(axis.title.y = element_text(size = font.size, vjust=0.3)) +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
if (p==2) {
plot[[i]] = plot[[i]] + ylab(expression(R[bolewood]~"(g C "*g^"-1"*" C "*d^"-1"*")"))
# plot[[i]] = plot[[i]] + theme(plot.margin=unit(c(0.4, 0.4, 0.4, 0.75), units="line"))
}
if (p==3) {
plot[[i]] = plot[[i]] + ylab(expression(R[branchwood]~"(g C "*g^"-1"*" C "*d^"-1"*")"))
# plot[[i]] = plot[[i]] + theme(plot.margin=unit(c(0.4, 0.4, 0.4, 0.75), units="line"))
}
if (p==4) {
plot[[i]] = plot[[i]] + ylab(expression(R[fineroot]~"(g C "*g^"-1"*" C "*d^"-1"*")"))
# plot[[i]] = plot[[i]] + theme(plot.margin=unit(c(0.4, 0.4, 0.4, 0.75), units="line"))
}
if (p==5) {
plot[[i]] = plot[[i]] + ylab(expression(R[intermediateroot]~"(g C "*g^"-1"*" C "*d^"-1"*")"))
# plot[[i]] = plot[[i]] + theme(plot.margin=unit(c(0.4, 0.4, 0.4, 0.75), units="line"))
}
if (p==6) {
plot[[i]] = plot[[i]] + ylab(expression(R[coarseroot]~"(g C "*g^"-1"*" C "*d^"-1"*")"))
}
if (p==7) {
plot[[i]] = plot[[i]] + ylab(expression(R[boleroot]~"(g C "*g^"-1"*" C "*d^"-1"*")"))
}
}
png("output/3.Rd.png", units="px", width=3000, height=1500, res=130)
# do.call(grid.arrange, plot)
ncols = 1
do.call("grid.arrange", c(plot, ncol=ncols))
dev.off()
do.call("grid.arrange", c(plot, ncol=ncols))
|
testlist <- list(doy = -1.72131968218895e+83, latitude = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.33179089256876e-93, 2.16562581831091e+161))
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result) | /meteor/inst/testfiles/ET0_ThornthwaiteWilmott/AFL_ET0_ThornthwaiteWilmott/ET0_ThornthwaiteWilmott_valgrind_files/1615828077-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 735 | r | testlist <- list(doy = -1.72131968218895e+83, latitude = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.33179089256876e-93, 2.16562581831091e+161))
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result) |
# plot metro level data
# =============================================================================
# load data
metro_data <- read_csv("data/MO_HEALTH_Covid_Tracking/data/metro_all/metro_full.csv")
# =============================================================================
# define colors
cols <- c("Cape Girardeau" = values$pal[6], "Columbia" = values$pal[3],
"Jefferson City" = values$pal[4], "Joplin" = values$pal[7],
"Kansas City" = values$pal[2], "Springfield" = values$pal[5],
"St. Joseph" = values$pal[8], "St. Louis" = values$pal[1])
# =============================================================================
# subset data
## create end points
metro_points <- filter(metro_data, report_date == values$date)
# =============================================================================
# plot confirmed rate
## subset data
metro_subset <- filter(metro_data, report_date >= values$plot_date)
## define top_val
top_val <- round_any(x = max(metro_subset$case_rate), accuracy = 20, f = ceiling)
## create factors
metro_subset <- mutate(metro_subset, factor_var = fct_reorder2(short_name, report_date, case_rate))
metro_points <- mutate(metro_points, factor_var = fct_reorder2(short_name, report_date, case_rate))
## create plot
p <- cumulative_rate(metro_subset,
point_data = metro_points,
type = "metro",
plot_values = values,
highlight = unique(metro_subset$geoid),
y_upper_limit = top_val,
pal = cols,
title = "Reported COVID-19 Cases by Metro Area",
caption = values$caption_text_census)
## save plot
save_plots(filename = "results/high_res/metro/b_case_rate.png", plot = p, preset = "lg")
save_plots(filename = "results/low_res/metro/b_case_rate.png", plot = p, preset = "lg", dpi = 72)
#===# #===# #===# #===# #===# #===# #===# #===# #===# #===# #===# #===# #===# #===#
# create days from 10th confirmed infection data
## subset data
metro_data %>%
calculate_days(group_var = "geoid", stat_var = "cases", val = 5) %>%
select(day, report_date, short_name, cases) %>%
arrange(short_name, day) -> metro_subset
## define top_val
top_val <- round_any(x = max(metro_subset$day), accuracy = 5, f = ceiling)
## identify max day
metro_subset %>%
group_by(short_name) %>%
summarise(day = max(day), .groups = "drop_last") %>%
left_join(metro_points, ., by = "short_name") -> metro_day_points
## create factors
metro_subset <- mutate(metro_subset, factor_var = fct_reorder2(short_name, day, cases))
metro_day_points <- mutate(metro_day_points, factor_var = fct_reorder2(short_name, day, cases))
## create plot
p <- ggplot(data = metro_subset) +
geom_line(mapping = aes(x = day, y = cases, color = factor_var), size = 2) +
geom_point(metro_day_points, mapping = aes(x = day, y = cases, color = factor_var),
size = 4, show.legend = FALSE) +
scale_colour_manual(values = cols, name = "Metro Area") +
scale_y_log10(
limits = c(5, 1000000),
breaks = c(5,10,30,100,300,1000,3000,10000,30000,100000,300000,1000000),
labels = comma_format(accuracy = 1)
) +
scale_x_continuous(limits = c(0, top_val), breaks = seq(0, top_val, by = values$date_breaks_log)) +
labs(
title = "Pace of COVID-19 Cases by Metro Area",
subtitle = paste0("Current as of ", as.character(values$date)),
caption = values$caption_text,
x = "Days Since Fifth Case Reported",
y = "Count of Reported Cases (Log)"
) +
sequoia_theme(base_size = 22, background = "white")
## save plots
# save_plots(filename = "results/high_res/metro/c_case_log.png", plot = p, preset = "lg")
# save_plots(filename = "results/low_res/metro/c_case_log.png", plot = p, preset = "lg", dpi = 72)
#===# #===# #===# #===# #===# #===# #===# #===# #===# #===# #===# #===# #===# #===#
# per-capita 7-day average ####
## subset data
metro_subset <- filter(metro_data, report_date >= values$plot_date) %>%
filter(report_date < as.Date("2021-01-11") | report_date >= as.Date("2021-01-18")) %>%
filter(report_date < as.Date("2021-03-08") | report_date >= as.Date("2021-03-15")) %>%
filter(report_date < as.Date("2021-04-17") | report_date >= as.Date("2021-04-24")) %>%
filter(report_date < as.Date("2021-11-17") | report_date >= as.Date("2021-12-06")) %>%
filter(report_date < as.Date("2021-12-24") | report_date >= as.Date("2021-12-27"))
## address negative values
metro_subset <- mutate(metro_subset, case_avg_rate = ifelse(case_avg_rate < 0, 0, case_avg_rate))
## modify Cape Girardeau
# metro_subset %>%
# mutate(case_avg_rate = ifelse(short_name == "Cape Girardeau" &
# (report_date == "2020-11-20" | report_date == "2020-11-22"), 160, case_avg_rate),
# case_avg_rate = ifelse(short_name == "Cape Girardeau" & report_date == "2020-11-21", NA, case_avg_rate)
# ) -> metro_subset
## define top_val
top_val <- round_any(x = max(metro_subset$case_avg_rate, na.rm = TRUE), accuracy = 50, f = ceiling)
## create factors
metro_subset <- mutate(metro_subset, factor_var = fct_reorder2(short_name, report_date, case_avg_rate))
## create plot
p <- facet_rate(metro_subset,
type = "metro",
pal = cols,
x_breaks = values$date_breaks_facet,
y_breaks = 50,
y_upper_limit = top_val,
highlight = unique(metro_subset$geoid),
plot_date = values$plot_date,
date = values$date,
title = "Pace of New COVID-19 Cases by Metro Area",
caption = values$caption_text_census)
# paste0(values$caption_text_census,"\nValues above 160 for Cape Girardeau truncated to increase readability")
# values$caption_text_census
## save plot
save_plots(filename = "results/high_res/metro/e_new_case.png", plot = p, preset = "lg")
save_plots(filename = "results/low_res/metro/e_new_case.png", plot = p, preset = "lg", dpi = 72)
#===# #===# #===# #===# #===# #===# #===# #===# #===# #===# #===# #===# #===# #===#
# per-capita 7-day average ####
## subset data
metro_subset <- filter(metro_data, report_date >= values$date-20)
## address negative values
metro_subset <- mutate(metro_subset, case_avg_rate = ifelse(case_avg_rate < 0, 0, case_avg_rate))
## define top_val
top_val <- round_any(x = max(metro_subset$case_avg_rate, na.rm = TRUE), accuracy = 10, f = ceiling)
## create factors
metro_subset <- mutate(metro_subset, factor_var = fct_reorder2(short_name, report_date, case_avg_rate))
## create plot
p <- facet_rate(metro_subset,
type = "metro",
pal = cols,
x_breaks = values$date_breaks_facet,
y_breaks = 10,
y_upper_limit = top_val,
highlight = unique(metro_subset$geoid),
plot_date = values$plot_date,
date = values$date,
title = "Pace of New COVID-19 Cases by Metro Area",
caption = values$caption_text_census,
last3 = TRUE)
# values$caption_text_census
## save plot
# save_plots(filename = "results/high_res/metro/e_new_case_last21.png", plot = p, preset = "lg")
# save_plots(filename = "results/low_res/metro/e_new_case_last21.png", plot = p, preset = "lg", dpi = 72)
#===# #===# #===# #===# #===# #===# #===# #===# #===# #===# #===# #===# #===# #===#
# create days from first day where average confirmed infections were at least 5
## subset data
metro_data %>%
calculate_days(group_var = "geoid", stat_var = "case_avg", val = 5) %>%
select(day, report_date, short_name, case_avg) %>%
arrange(short_name, day) %>%
mutate(case_avg = ifelse(case_avg < .1, .1, case_avg)) -> metro_subset
# define top_val
top_val <- round_any(x = max(metro_subset$day), accuracy = 5, f = ceiling)
## identify max day
metro_subset %>%
group_by(short_name) %>%
summarise(day = max(day), .groups = "drop_last") %>%
left_join(metro_points, ., by = "short_name") %>%
filter(short_name %in% metro_subset$short_name) %>%
mutate(case_avg = ifelse(case_avg < .1, .1, case_avg)) -> metro_day_points
## create factors
metro_subset <- mutate(metro_subset, factor_var = fct_reorder2(short_name, day, case_avg))
metro_day_points <- mutate(metro_day_points, factor_var = fct_reorder2(short_name, day, case_avg))
## create plot
p <- ggplot(data = metro_subset) +
geom_line(mapping = aes(x = day, y = case_avg, color = factor_var), size = 2) +
geom_point(metro_day_points, mapping = aes(x = day, y = case_avg, color = factor_var),
size = 4, show.legend = FALSE) +
scale_colour_manual(values = cols, name = "Metro Area") +
scale_y_log10(limits = c(.1, 3000), breaks = c(.1, .3, 1, 3, 10, 30, 100, 300, 1000, 3000),
labels = comma_format(accuracy = .2)) +
scale_x_continuous(limits = c(0, top_val), breaks = seq(0, top_val, by = values$date_breaks_log)) +
labs(
title = "Pace of New COVID-19 Cases by Metro Area",
subtitle = paste0("Current as of ", as.character(values$date)),
caption = values$caption_text,
x = "Days Since Average of Five Cases Reached",
y = "7-day Average of Reported Cases (Log)"
) +
sequoia_theme(base_size = 22, background = "white")
## save plots
# save_plots(filename = "results/high_res/metro/f_new_case_log.png", preset = "lg")
# save_plots(filename = "results/low_res/metro/f_new_case_log.png", preset = "lg", dpi = 72)
#===# #===# #===# #===# #===# #===# #===# #===# #===# #===# #===# #===# #===# #===#
# plot mortality rate
## subset data
metro_subset <- filter(metro_data, report_date >= values$plot_date)
## define top_val
## na.rm = TRUE added for consistency with the other top_val calculations in
## this script; without it a single missing mortality_rate makes top_val NA
## and the y-axis limits built from it fail
top_val <- round_any(x = max(metro_subset$mortality_rate, na.rm = TRUE), accuracy = .25, f = ceiling)
## create factors
metro_subset <- mutate(metro_subset, factor_var = fct_reorder2(short_name, report_date, mortality_rate))
metro_points <- mutate(metro_points, factor_var = fct_reorder2(short_name, report_date, mortality_rate))
## create plot
p <- ggplot() +
geom_line(metro_subset, mapping = aes(x = report_date, y = mortality_rate, color = factor_var), size = 2) +
geom_point(metro_points, mapping = aes(x = report_date, y = mortality_rate, color = factor_var),
size = 4, show.legend = FALSE) +
scale_colour_manual(values = cols, name = "Metro Area") +
scale_x_date(date_breaks = values$date_breaks, date_labels = "%b") +
scale_y_continuous(limits = c(0,top_val), breaks = seq(0, top_val, by = .25)) +
labs(
title = "Reported COVID-19 Mortality by Metro Area",
subtitle = paste0(as.character(values$plot_date), " through ", as.character(values$date)),
x = "Date",
y = "Mortality Rate per 1,000",
caption = values$caption_text_census
) +
sequoia_theme(base_size = 22, background = "white") +
theme(axis.text.x = element_text(angle = values$x_angle))
## save plot
save_plots(filename = "results/high_res/metro/h_mortality_rate.png", plot = p, preset = "lg")
save_plots(filename = "results/low_res/metro/h_mortality_rate.png", plot = p, preset = "lg", dpi = 72)
# =============================================================================
# plot case fatality rate
## re-subset data
metro_subset <- filter(metro_data, report_date >= values$plot_date)
## create factors
metro_subset <- mutate(metro_subset, factor_var = fct_reorder2(short_name, report_date, case_fatality_rate))
metro_points <- mutate(metro_points, factor_var = fct_reorder2(short_name, report_date, case_fatality_rate))
## create plot
p <- ggplot() +
geom_line(metro_subset, mapping = aes(x = report_date, y = case_fatality_rate, color = factor_var), size = 2) +
geom_point(metro_points, mapping = aes(x = report_date, y = case_fatality_rate, color = factor_var),
size = 4, show.legend = FALSE) +
geom_vline(xintercept = as.Date("2021-03-08"), lwd = .8) +
scale_colour_manual(values = cols, name = "Metro Area") +
scale_x_date(date_breaks = values$date_breaks, date_labels = "%b") +
scale_y_continuous(limits = c(0,12), breaks = seq(0, 12, by = 1)) +
labs(
title = "COVID-19 Case Fatality by Metro Area",
subtitle = paste0(as.character(values$plot_date), " through ", as.character(values$date)),
x = "Date",
y = "Case Fatality (%)",
caption = paste0(values$caption_text,"\nVertical line represents addition of antigen test data for most Missouri counties on 2021-03-08")
) +
sequoia_theme(base_size = 22, background = "white") +
theme(axis.text.x = element_text(angle = values$x_angle))
## save plot
save_plots(filename = "results/high_res/metro/m_case_fatality_rate.png", plot = p, preset = "lg")
save_plots(filename = "results/low_res/metro/m_case_fatality_rate.png", plot = p, preset = "lg", dpi = 72)
# =============================================================================
# clean-up
rm(metro_data, metro_subset, metro_points, metro_day_points)
rm(top_val, p)
| /source/workflow/05_metro_plots.R | permissive | slu-openGIS/covid_daily_viz | R | false | false | 12,990 | r | # plot metro level data
# =============================================================================
# load data
metro_data <- read_csv("data/MO_HEALTH_Covid_Tracking/data/metro_all/metro_full.csv")
# =============================================================================
# define colors
cols <- c("Cape Girardeau" = values$pal[6], "Columbia" = values$pal[3],
"Jefferson City" = values$pal[4], "Joplin" = values$pal[7],
"Kansas City" = values$pal[2], "Springfield" = values$pal[5],
"St. Joseph" = values$pal[8], "St. Louis" = values$pal[1])
# =============================================================================
# subset data
## create end points
metro_points <- filter(metro_data, report_date == values$date)
# =============================================================================
# plot confirmed rate
## subset data
metro_subset <- filter(metro_data, report_date >= values$plot_date)
## define top_val
top_val <- round_any(x = max(metro_subset$case_rate), accuracy = 20, f = ceiling)
## create factors
metro_subset <- mutate(metro_subset, factor_var = fct_reorder2(short_name, report_date, case_rate))
metro_points <- mutate(metro_points, factor_var = fct_reorder2(short_name, report_date, case_rate))
## create plot
p <- cumulative_rate(metro_subset,
point_data = metro_points,
type = "metro",
plot_values = values,
highlight = unique(metro_subset$geoid),
y_upper_limit = top_val,
pal = cols,
title = "Reported COVID-19 Cases by Metro Area",
caption = values$caption_text_census)
## save plot
save_plots(filename = "results/high_res/metro/b_case_rate.png", plot = p, preset = "lg")
save_plots(filename = "results/low_res/metro/b_case_rate.png", plot = p, preset = "lg", dpi = 72)
#===# #===# #===# #===# #===# #===# #===# #===# #===# #===# #===# #===# #===# #===#
# create days from 10th confirmed infection data
## subset data
metro_data %>%
calculate_days(group_var = "geoid", stat_var = "cases", val = 5) %>%
select(day, report_date, short_name, cases) %>%
arrange(short_name, day) -> metro_subset
## define top_val
top_val <- round_any(x = max(metro_subset$day), accuracy = 5, f = ceiling)
## identify max day
metro_subset %>%
group_by(short_name) %>%
summarise(day = max(day), .groups = "drop_last") %>%
left_join(metro_points, ., by = "short_name") -> metro_day_points
## create factors
metro_subset <- mutate(metro_subset, factor_var = fct_reorder2(short_name, day, cases))
metro_day_points <- mutate(metro_day_points, factor_var = fct_reorder2(short_name, day, cases))
## create plot
p <- ggplot(data = metro_subset) +
geom_line(mapping = aes(x = day, y = cases, color = factor_var), size = 2) +
geom_point(metro_day_points, mapping = aes(x = day, y = cases, color = factor_var),
size = 4, show.legend = FALSE) +
scale_colour_manual(values = cols, name = "Metro Area") +
scale_y_log10(
limits = c(5, 1000000),
breaks = c(5,10,30,100,300,1000,3000,10000,30000,100000,300000,1000000),
labels = comma_format(accuracy = 1)
) +
scale_x_continuous(limits = c(0, top_val), breaks = seq(0, top_val, by = values$date_breaks_log)) +
labs(
title = "Pace of COVID-19 Cases by Metro Area",
subtitle = paste0("Current as of ", as.character(values$date)),
caption = values$caption_text,
x = "Days Since Fifth Case Reported",
y = "Count of Reported Cases (Log)"
) +
sequoia_theme(base_size = 22, background = "white")
## save plots
# save_plots(filename = "results/high_res/metro/c_case_log.png", plot = p, preset = "lg")
# save_plots(filename = "results/low_res/metro/c_case_log.png", plot = p, preset = "lg", dpi = 72)
#===# #===# #===# #===# #===# #===# #===# #===# #===# #===# #===# #===# #===# #===#
# per-capita 7-day average ####
## subset data
## the extra date filters drop several fixed windows; NOTE(review): presumably
## reporting outages / holiday catch-up dumps that distort the 7-day average -
## confirm against the data source before changing
metro_subset <- filter(metro_data, report_date >= values$plot_date) %>%
  filter(report_date < as.Date("2021-01-11") | report_date >= as.Date("2021-01-18")) %>%
  filter(report_date < as.Date("2021-03-08") | report_date >= as.Date("2021-03-15")) %>%
  filter(report_date < as.Date("2021-04-17") | report_date >= as.Date("2021-04-24")) %>%
  filter(report_date < as.Date("2021-11-17") | report_date >= as.Date("2021-12-06")) %>%
  filter(report_date < as.Date("2021-12-24") | report_date >= as.Date("2021-12-27"))
## address negative values - clamp downward corrections to zero
metro_subset <- mutate(metro_subset, case_avg_rate = ifelse(case_avg_rate < 0, 0, case_avg_rate))
## modify Cape Girardeau - disabled truncation of an extreme reporting spike
# metro_subset %>%
#   mutate(case_avg_rate = ifelse(short_name == "Cape Girardeau" &
#                                   (report_date == "2020-11-20" | report_date == "2020-11-22"), 160, case_avg_rate),
#          case_avg_rate = ifelse(short_name == "Cape Girardeau" & report_date == "2020-11-21", NA, case_avg_rate)
#   ) -> metro_subset
## define top_val - shared y limit, rounded up to the nearest 50
top_val <- round_any(x = max(metro_subset$case_avg_rate, na.rm = TRUE), accuracy = 50, f = ceiling)
## create factors
metro_subset <- mutate(metro_subset, factor_var = fct_reorder2(short_name, report_date, case_avg_rate))
## create plot - small multiples, one panel per metro
p <- facet_rate(metro_subset,
                type = "metro",
                pal = cols,
                x_breaks = values$date_breaks_facet,
                y_breaks = 50,
                y_upper_limit = top_val,
                highlight = unique(metro_subset$geoid),
                plot_date = values$plot_date,
                date = values$date,
                title = "Pace of New COVID-19 Cases by Metro Area",
                caption = values$caption_text_census)
# alternative caption kept for when the Cape Girardeau truncation is active:
# paste0(values$caption_text_census,"\nValues above 160 for Cape Girardeau truncated to increase readability")
# values$caption_text_census
## save plot
save_plots(filename = "results/high_res/metro/e_new_case.png", plot = p, preset = "lg")
save_plots(filename = "results/low_res/metro/e_new_case.png", plot = p, preset = "lg", dpi = 72)
#===# #===# #===# #===# #===# #===# #===# #===# #===# #===# #===# #===# #===# #===#
# per-capita 7-day average #### - last-three-weeks close-up of the plot above
## subset data - only the most recent 21 days (values$date minus 20)
metro_subset <- filter(metro_data, report_date >= values$date-20)
## address negative values - clamp downward corrections to zero
metro_subset <- mutate(metro_subset, case_avg_rate = ifelse(case_avg_rate < 0, 0, case_avg_rate))
## define top_val - finer rounding (nearest 10) for the shorter window
top_val <- round_any(x = max(metro_subset$case_avg_rate, na.rm = TRUE), accuracy = 10, f = ceiling)
## create factors
metro_subset <- mutate(metro_subset, factor_var = fct_reorder2(short_name, report_date, case_avg_rate))
## create plot
p <- facet_rate(metro_subset,
                type = "metro",
                pal = cols,
                x_breaks = values$date_breaks_facet,
                y_breaks = 10,
                y_upper_limit = top_val,
                highlight = unique(metro_subset$geoid),
                plot_date = values$plot_date,
                date = values$date,
                title = "Pace of New COVID-19 Cases by Metro Area",
                caption = values$caption_text_census,
                last3 = TRUE)
# values$caption_text_census
## save plot - currently disabled for this figure
# save_plots(filename = "results/high_res/metro/e_new_case_last21.png", plot = p, preset = "lg")
# save_plots(filename = "results/low_res/metro/e_new_case_last21.png", plot = p, preset = "lg", dpi = 72)
#===# #===# #===# #===# #===# #===# #===# #===# #===# #===# #===# #===# #===# #===#
# create days from first day where average confirmed infections were at least 5
## subset data
metro_data %>%
  calculate_days(group_var = "geoid", stat_var = "case_avg", val = 5) %>%
  select(day, report_date, short_name, case_avg) %>%
  arrange(short_name, day) %>%
  mutate(case_avg = ifelse(case_avg < .1, .1, case_avg)) -> metro_subset  # floor at .1 so log10 axis is defined
# define top_val - x-axis upper limit, rounded up to the nearest 5 days
top_val <- round_any(x = max(metro_subset$day), accuracy = 5, f = ceiling)
## identify max day - latest day number per metro, used to place the end dots
metro_subset %>%
  group_by(short_name) %>%
  summarise(day = max(day), .groups = "drop_last") %>%
  left_join(metro_points, ., by = "short_name") %>%
  filter(short_name %in% metro_subset$short_name) %>%
  mutate(case_avg = ifelse(case_avg < .1, .1, case_avg)) -> metro_day_points
## create factors
metro_subset <- mutate(metro_subset, factor_var = fct_reorder2(short_name, day, case_avg))
metro_day_points <- mutate(metro_day_points, factor_var = fct_reorder2(short_name, day, case_avg))
## create plot - log-scale 7-day averages aligned by when each metro hit avg 5
p <- ggplot(data = metro_subset) +
  geom_line(mapping = aes(x = day, y = case_avg, color = factor_var), size = 2) +
  geom_point(metro_day_points, mapping = aes(x = day, y = case_avg, color = factor_var),
             size = 4, show.legend = FALSE) +
  scale_colour_manual(values = cols, name = "Metro Area") +
  scale_y_log10(limits = c(.1, 3000), breaks = c(.1, .3, 1, 3, 10, 30, 100, 300, 1000, 3000),
                labels = comma_format(accuracy = .2)) +
  scale_x_continuous(limits = c(0, top_val), breaks = seq(0, top_val, by = values$date_breaks_log)) +
  labs(
    title = "Pace of New COVID-19 Cases by Metro Area",
    subtitle = paste0("Current as of ", as.character(values$date)),
    caption = values$caption_text,
    x = "Days Since Average of Five Cases Reached",
    y = "7-day Average of Reported Cases (Log)"
  ) +
  sequoia_theme(base_size = 22, background = "white")
## save plots - currently disabled; NOTE(review): these calls omit plot = p and
## would save the last-displayed plot if re-enabled as-is
# save_plots(filename = "results/high_res/metro/f_new_case_log.png", preset = "lg")
# save_plots(filename = "results/low_res/metro/f_new_case_log.png", preset = "lg", dpi = 72)
#===# #===# #===# #===# #===# #===# #===# #===# #===# #===# #===# #===# #===# #===#
# plot mortality rate
## subset data
metro_subset <- filter(metro_data, report_date >= values$plot_date)
## define top_val
## na.rm = TRUE added for consistency with the other top_val calculations in
## this script; without it a single missing mortality_rate makes top_val NA
## and the y-axis limits below fail
top_val <- round_any(x = max(metro_subset$mortality_rate, na.rm = TRUE), accuracy = .25, f = ceiling)
## create factors - order legend by the most recent mortality rates
metro_subset <- mutate(metro_subset, factor_var = fct_reorder2(short_name, report_date, mortality_rate))
metro_points <- mutate(metro_points, factor_var = fct_reorder2(short_name, report_date, mortality_rate))
## create plot - cumulative deaths per 1,000 residents, one line per metro
p <- ggplot() +
  geom_line(metro_subset, mapping = aes(x = report_date, y = mortality_rate, color = factor_var), size = 2) +
  geom_point(metro_points, mapping = aes(x = report_date, y = mortality_rate, color = factor_var),
             size = 4, show.legend = FALSE) +
  scale_colour_manual(values = cols, name = "Metro Area") +
  scale_x_date(date_breaks = values$date_breaks, date_labels = "%b") +
  scale_y_continuous(limits = c(0,top_val), breaks = seq(0, top_val, by = .25)) +
  labs(
    title = "Reported COVID-19 Mortality by Metro Area",
    subtitle = paste0(as.character(values$plot_date), " through ", as.character(values$date)),
    x = "Date",
    y = "Mortality Rate per 1,000",
    caption = values$caption_text_census
  ) +
  sequoia_theme(base_size = 22, background = "white") +
  theme(axis.text.x = element_text(angle = values$x_angle))
## save plot
save_plots(filename = "results/high_res/metro/h_mortality_rate.png", plot = p, preset = "lg")
save_plots(filename = "results/low_res/metro/h_mortality_rate.png", plot = p, preset = "lg", dpi = 72)
# =============================================================================
# plot case fatality rate
## re-subset data
metro_subset <- filter(metro_data, report_date >= values$plot_date)
## create factors
metro_subset <- mutate(metro_subset, factor_var = fct_reorder2(short_name, report_date, case_fatality_rate))
metro_points <- mutate(metro_points, factor_var = fct_reorder2(short_name, report_date, case_fatality_rate))
## create plot - y-axis fixed at 0-12% rather than data-driven
p <- ggplot() +
  geom_line(metro_subset, mapping = aes(x = report_date, y = case_fatality_rate, color = factor_var), size = 2) +
  geom_point(metro_points, mapping = aes(x = report_date, y = case_fatality_rate, color = factor_var),
             size = 4, show.legend = FALSE) +
  geom_vline(xintercept = as.Date("2021-03-08"), lwd = .8) +  # antigen-data changeover marker (see caption)
  scale_colour_manual(values = cols, name = "Metro Area") +
  scale_x_date(date_breaks = values$date_breaks, date_labels = "%b") +
  scale_y_continuous(limits = c(0,12), breaks = seq(0, 12, by = 1)) +
  labs(
    title = "COVID-19 Case Fatality by Metro Area",
    subtitle = paste0(as.character(values$plot_date), " through ", as.character(values$date)),
    x = "Date",
    y = "Case Fatality (%)",
    caption = paste0(values$caption_text,"\nVertical line represents addition of antigen test data for most Missouri counties on 2021-03-08")
  ) +
  sequoia_theme(base_size = 22, background = "white") +
  theme(axis.text.x = element_text(angle = values$x_angle))
## save plot
save_plots(filename = "results/high_res/metro/m_case_fatality_rate.png", plot = p, preset = "lg")
save_plots(filename = "results/low_res/metro/m_case_fatality_rate.png", plot = p, preset = "lg", dpi = 72)
# =============================================================================
# clean-up: remove every object this script created so subsequent workflow
# scripts start from a tidy global environment
rm(metro_data, metro_subset, metro_points, metro_day_points, top_val, p)
|
server <- function(input, output, clientData, session) {

  # ---------------------------------------------------------------------------
  # local helpers (the original duplicated these pipelines four and two times)
  # ---------------------------------------------------------------------------

  # Convert a "YYYY-MM-DD" date input to the integer month-day code (MMDD)
  # used by weather$month_day, e.g. "2020-03-07" -> 307.
  to_month_day <- function(d) {
    dt <- strptime(as.character(d), format = "%Y-%m-%d")
    month(dt) * 100 + day(dt)
  }

  # Score every station for one user over a (possibly year-wrapping) window.
  # Returns a list with:
  #   weather_date - weather rows inside the window, joined with the user's
  #                  per-weather-type ratings
  #   scores       - data.frame(station, score): mean rating per station,
  #                  sorted by descending score
  # Relies on the globals `weather` and `user_data` loaded outside server().
  station_scores <- function(states, date_from, date_to, user_id) {
    weather_state <- weather %>%
      filter(state %in% states)
    date_begin <- to_month_day(date_from)
    date_end <- to_month_day(date_to)
    if (date_begin <= date_end) {
      # window falls within a single calendar year
      weather_date <- weather_state[which(weather_state$month_day >= date_begin & weather_state$month_day <= date_end), ]
    } else {
      # window wraps over New Year: [begin, Dec 31] plus [Jan 1, end]
      weather_date <- weather_state[which((weather_state$month_day >= date_begin & weather_state$month_day <= 1231) | (weather_state$month_day >= 101 & weather_state$month_day <= date_end)), ]
    }
    this.user <- user_data %>%
      filter(userID == as.character(user_id))
    weather_date <- merge(weather_date, this.user[, c("weather_type", "rate")], by = "weather_type")
    my_count <- aggregate(weather_date$rate, list(weather_date$station), mean)
    names(my_count) <- c("station", "score")
    my_count <- my_count[order(my_count$score, decreasing = TRUE), ]
    list(weather_date = weather_date, scores = my_count)
  }

  # Bar chart of one user's rating (1-5) for each weather type, colored by
  # whether the rating was supplied by the user or predicted.
  user_taste_plot <- function(user_id) {
    this.user <- user_data[which(user_data$userID == as.character(user_id)), ]
    this.user$is_self <- as.factor(this.user$is_self)
    ggplot(data = this.user, aes(x = as.factor(weather_type), y = as.numeric(rate), fill = is_self)) +
      ggtitle(sprintf("%s's Weather Taste", this.user$userID)) +
      geom_bar(stat = "identity") +
      scale_fill_manual(values = c("#999999", "#56B4E9"), name = "Data Source",
                        breaks = c(0, 1), labels = c("Predicted", "User Rated")) +
      geom_text(aes(label = round(rate, 1)), vjust = 1.6, color = "white", size = 3.5) +
      ylim(0, 5) + theme(plot.title = element_text(size = rel(2.5), hjust = 0.5))
  }

  ############ homepage tab ###########
  # Static landing page rendered from a bundled HTML file.
  output$hp <- renderUI({
    tags$iframe(
      srcdoc = paste(readLines('try.html'), collapse = '\n'),
      width = "100%",
      height = "1000px"
    )
  })

  ############## table tab ##############
  # Ranked table of stations by the user's mean predicted enjoyment score.
  output$ziptable <- DT::renderDataTable({
    my_count <- station_scores(input$states, input$data1, input$data2, input$text)$scores
    rownames(my_count) <- NULL
    action <- DT::dataTableAjax(session, my_count)
    DT::datatable(my_count, options = list(ajax = list(url = action)), escape = FALSE)
  }, rownames = FALSE)

  ############# graph plots1 ##########
  output$user_graph <- renderPlot({
    user_taste_plot(input$text)
  })

  ############# graph plots2 ##########
  # Weather-type frequency distribution at the station ranked input$rank.
  output$user_station <- renderPlot({
    res <- station_scores(input$states, input$data1, input$data2, input$text)
    chosen_station <- as.character(res$scores$station[as.integer(input$rank)])
    this.city <- res$weather_date[which(res$weather_date$station == chosen_station), ]
    this.city.count <- ddply(this.city, .(weather_type), nrow)
    create_table <- data.frame(weather_type = c(1:10))
    create_table <- merge(create_table, this.city.count, by = "weather_type", all.x = TRUE)
    names(create_table)[2] <- "Occurence"
    create_table$Occurence <- create_table$Occurence / sum(create_table$Occurence, na.rm = TRUE)
    # (a stray base-graphics barplot() call was removed here: renderPlot()
    # displays the ggplot object returned below, so the barplot was dead work)
    ggplot(data = create_table, aes(x = as.factor(weather_type), y = as.numeric(Occurence))) +
      ggtitle(sprintf("Weather type occurrence in %s", chosen_station)) +
      geom_bar(stat = "identity", fill = c("#56B4E9")) +
      geom_text(aes(label = round(Occurence, 3)), vjust = 1.6, color = "white", size = 3.5) +
      ylim(0, 1) + theme(plot.title = element_text(size = rel(2.5), hjust = 0.5))
  })

  ############## map tab ##############
  output$analysis1 <- renderPlot({
    user_taste_plot(input$text1)
  })

  # The user's pseudo-observation history, sorted chronologically.
  output$table <- DT::renderDataTable({
    this.record <- pseudo_record[which(pseudo_record$userID == as.character(input$text1)), ]
    this.record <- arrange(this.record, Year, Month, Day)
    action <- DT::dataTableAjax(session, this.record)
    DT::datatable(this.record, options = list(ajax = list(url = action)), escape = FALSE)
  })

  # Station metadata for all scored stations.
  # NOTE(review): this output reads input$text while analysis1 above reads
  # input$text1 - confirm which user ID field the map tab should use.
  output$table1 <- DT::renderDataTable({
    my_count <- station_scores(input$states, input$data1, input$data2, input$text)$scores
    chosen_place <- station_list[which(station_list$airportCode %in% my_count$station), ]
    action <- DT::dataTableAjax(session, chosen_place)
    DT::datatable(chosen_place, options = list(ajax = list(url = action)), escape = FALSE)
  })

  # Leaflet map of the top input$num stations for the user.
  output$map <- renderLeaflet({
    my_count <- station_scores(input$states, input$data1, input$data2, input$text)$scores
    chosen_number <- my_count[1:input$num, ]
    chosen_place <- station_url[which(station_url$airportCode %in% chosen_number$station), ]
    leaflet(data = chosen_place) %>%
      addTiles(
        urlTemplate = "//{s}.tiles.mapbox.com/v3/jcheng.map-5ebohr46/{z}/{x}/{y}.png",
        attribution = 'Maps by <a href="http://www.mapbox.com/">Mapbox</a>'
      ) %>%
      addMarkers(~Lon, ~Lat, popup = paste("Station:", chosen_place$Station, ",State:", chosen_place$State,
                                           ",Elevation:", chosen_place$Elevation, ",Url:", chosen_place[, 8]))
  })
}
| /City_Recommendation_Tool/server.R | no_license | jianitian/Weather_Taste_city_recommendation_tool | R | false | false | 9,014 | r | server <- function(input, output, clientData, session) {
############ homepage tab ###########
output$hp <- renderUI({
tags$iframe(
srcdoc = paste(readLines('try.html'), collapse = '\n'),
width = "100%",
height = "1000px"
)
})
############## table tab ##############
output$ziptable <- DT::renderDataTable({
weather_state <- weather %>%
filter(state %in% input$states)
date_begin <- month(strptime(as.character(input$data1),format = "%Y-%m-%d")) * 100 + day(strptime(as.character(input$data1),format = "%Y-%m-%d"))
date_end <- month(strptime(as.character(input$data2),format = "%Y-%m-%d")) * 100 + day(strptime(as.character(input$data2),format = "%Y-%m-%d"))
if (date_begin <= date_end){
weather_date <- weather_state[which(weather_state$month_day>=date_begin & weather_state$month_day<=date_end),]
}else{
weather_date <- weather_state[which((weather_state$month_day>=date_begin & weather_state$month_day<=1231)|(weather_state$month_day>=101 & weather_state$month_day<=date_end)),]
}
this.user <- user_data %>%
filter(userID==as.character(input$text))
weather_date <- merge(weather_date, this.user[,c("weather_type","rate")], by = "weather_type")
my_count <- aggregate(weather_date$rate, list(weather_date$station), mean)
names(my_count) <- c("station", "score")
my_count <- my_count[order(my_count$score, decreasing = TRUE),]
rownames(my_count) <- NULL
action <- DT::dataTableAjax(session, my_count)
DT::datatable(my_count, options = list(ajax = list(url = action)),escape = FALSE)
},rownames=FALSE)
############# graph plots1 ##########
output$user_graph <- renderPlot({
this.user <- user_data[which(user_data$userID==as.character(input$text)),]
this.user$is_self <- as.factor(this.user$is_self)
ggplot(data=this.user, aes(x=as.factor(weather_type), y=as.numeric(rate),fill=is_self)) +
ggtitle(sprintf("%s's Weather Taste", this.user$userID)) +
#geom_bar(stat="identity", fill=c("#999999", "#56B4E9")[this.user$is_self+1]) +
geom_bar(stat="identity") +
scale_fill_manual(values = c("#999999", "#56B4E9"),name="Data Scource",breaks =c(0,1),labels = c("Predicted","User Rated"))+
geom_text(aes(label=round(rate,1)), vjust=1.6, color="white", size=3.5) +
#scale_fill_manual(values = c("#999999", "#56B4E9")[this.user$is_self+1])+
ylim(0,5) + theme(plot.title = element_text(size = rel(2.5),hjust = 0.5))
})
############# graph plots2 ##########
output$user_station<- renderPlot({
weather_state <- weather %>%
filter(state %in% input$states)
date_begin <- month(strptime(as.character(input$data1),format = "%Y-%m-%d")) * 100 + day(strptime(as.character(input$data1),format = "%Y-%m-%d"))
date_end <- month(strptime(as.character(input$data2),format = "%Y-%m-%d")) * 100 + day(strptime(as.character(input$data2),format = "%Y-%m-%d"))
if (date_begin <= date_end){
weather_date <- weather_state[which(weather_state$month_day>=date_begin & weather_state$month_day<=date_end),]
}else{
weather_date <- weather_state[which((weather_state$month_day>=date_begin & weather_state$month_day<=1231)|(weather_state$month_day>=101 & weather_state$month_day<=date_end)),]
}
this.user <- user_data %>%
filter(userID==as.character(input$text))
weather_date <- merge(weather_date, this.user[,c("weather_type","rate")], by = "weather_type")
my_count <- aggregate(weather_date$rate, list(weather_date$station), mean)
names(my_count) <- c("station", "score")
my_count <- my_count[order(my_count$score, decreasing = TRUE),]
chosen_station <- as.character(my_count$station[as.integer(input$rank)])
this.city <- weather_date[which(weather_date$station==chosen_station),]
this.city.count <- ddply(this.city, .(weather_type),nrow)
create_table <- data.frame(weather_type = c(1:10))
create_table <- merge(create_table, this.city.count, by = "weather_type", all.x = TRUE)
names(create_table)[2] <- "Occurence"
create_table$Occurence <- create_table$Occurence / sum(create_table$Occurence, na.rm = TRUE)
barplot(create_table$Occurence,ylim = c(0,1))
#ggplot(create_table)
ggplot(data=create_table, aes(x=as.factor(weather_type), y=as.numeric(Occurence))) +
ggtitle(sprintf("Weather type occurence in %s", chosen_station)) +
geom_bar(stat="identity", fill=c("#56B4E9")) +
geom_text(aes(label=round(Occurence,3)), vjust=1.6, color="white", size=3.5) +
ylim(0,1) + theme(plot.title = element_text(size = rel(2.5),hjust = 0.5))
})
############## map tab ##############
  # Bar chart of one user's rating (1-5) for each of the 10 weather types,
  # colored by whether the rating was actually given by the user
  # (is_self == 1) or predicted/imputed (is_self == 0).
  # Reads the global user_data table; user is selected via input$text1.
  output$analysis1 <- renderPlot({
    # subset to the rows belonging to the requested user ID
    this.user <- user_data[which(user_data$userID==as.character(input$text1)),]
    # factor so ggplot maps the two sources to discrete fill colors
    this.user$is_self <- as.factor(this.user$is_self)
    ggplot(data=this.user, aes(x=as.factor(weather_type), y=as.numeric(rate),fill=is_self)) +
      ggtitle(sprintf("%s's Weather Taste", this.user$userID)) +
      #geom_bar(stat="identity", fill=c("#999999", "#56B4E9")[this.user$is_self+1]) +
      geom_bar(stat = "identity") +
      # NOTE(review): "Data Scource" is a typo ("Source") in the legend title
      # shown to users -- fix in a behavior-changing edit if desired.
      scale_fill_manual(values = c("#999999", "#56B4E9"),name="Data Scource",breaks =c(0,1),labels = c("Predicted","User Rated"))+
      # print the numeric rating inside each bar
      geom_text(aes(label=round(rate,1)), vjust=1.6, color="white", size=3.5) +
      ylim(0,5) + theme(plot.title = element_text(size = rel(2.5),hjust = 0.5))
  })
  # Data table of the selected user's (pseudo) weather records, sorted
  # chronologically. Uses server-side Ajax so large tables page lazily.
  output$table <-DT::renderDataTable({
    # records for the user typed into input$text1
    this.record <- pseudo_record[which(pseudo_record$userID==as.character(input$text1)),]
    # chronological order (arrange comes from dplyr/plyr loaded at app start)
    this.record <- arrange(this.record, Year, Month, Day)
    action <- DT::dataTableAjax(session, this.record)
    DT::datatable(this.record, options = list(ajax = list(url = action)),escape = FALSE)
  })
  # Table of candidate stations for the map tab: stations in the selected
  # states, ranked by the mean of the user's weather-type ratings over the
  # chosen date window.
  output$table1 <- DT::renderDataTable({
    weather_state <- weather %>%
      filter(state %in% input$states)
    # Encode month/day as an integer MMDD so date windows can be compared
    # independent of year (e.g. March 5 -> 305, Dec 31 -> 1231).
    date_begin <- month(strptime(as.character(input$data1),format = "%Y-%m-%d")) * 100 + day(strptime(as.character(input$data1),format = "%Y-%m-%d"))
    date_end <- month(strptime(as.character(input$data2),format = "%Y-%m-%d")) * 100 + day(strptime(as.character(input$data2),format = "%Y-%m-%d"))
    if (date_begin <= date_end){
      weather_date <- weather_state[which(weather_state$month_day>=date_begin & weather_state$month_day<=date_end),]
    }else{
      # Window wraps the new year: keep begin..Dec 31 plus Jan 1..end.
      weather_date <- weather_state[which((weather_state$month_day>=date_begin & weather_state$month_day<=1231)|(weather_state$month_day>=101 & weather_state$month_day<=date_end)),]
    }
    # NOTE(review): this tab reads input$text while the analysis tab reads
    # input$text1 -- confirm which text input is intended here.
    this.user <- user_data %>%
      filter(userID==as.character(input$text))
    # Attach the user's per-weather-type rating, then average per station.
    weather_date <- merge(weather_date, this.user[,c("weather_type","rate")], by = "weather_type")
    my_count <- aggregate(weather_date$rate, list(weather_date$station), mean)
    names(my_count) <- c("station", "score")
    my_count <- my_count[order(my_count$score, decreasing = TRUE),]
    # Look up station metadata for every scored station.
    chosen_place <- station_list[which(station_list$airportCode %in% my_count$station),]
    action <- DT::dataTableAjax(session, chosen_place)
    DT::datatable(chosen_place, options = list(ajax = list(url = action)),escape = FALSE)
  })
  # Leaflet map of the top input$num stations for the user, scored the same
  # way as output$table1 (mean user rating of the weather observed in the
  # selected states over the chosen date window).
  output$map <- renderLeaflet({
    weather_state <- weather %>%
      filter(state %in% input$states)
    # Integer MMDD encoding of the window endpoints (year-agnostic).
    date_begin <- month(strptime(as.character(input$data1),format = "%Y-%m-%d")) * 100 + day(strptime(as.character(input$data1),format = "%Y-%m-%d"))
    date_end <- month(strptime(as.character(input$data2),format = "%Y-%m-%d")) * 100 + day(strptime(as.character(input$data2),format = "%Y-%m-%d"))
    if (date_begin <= date_end){
      weather_date <- weather_state[which(weather_state$month_day>=date_begin & weather_state$month_day<=date_end),]
    }else{
      # Window wraps the new year: begin..Dec 31 plus Jan 1..end.
      weather_date <- weather_state[which((weather_state$month_day>=date_begin & weather_state$month_day<=1231)|(weather_state$month_day>=101 & weather_state$month_day<=date_end)),]
    }
    # NOTE(review): uses input$text (analysis tab uses input$text1) -- confirm.
    this.user <- user_data %>%
      filter(userID==as.character(input$text))
    # Score each station by the mean of the user's ratings for the weather
    # types observed there, then keep only the top input$num stations.
    weather_date <- merge(weather_date, this.user[,c("weather_type","rate")], by = "weather_type")
    my_count <- aggregate(weather_date$rate, list(weather_date$station), mean)
    names(my_count) <- c("station", "score")
    my_count <- my_count[order(my_count$score, decreasing = TRUE),]
    chosen_number <- my_count[1:input$num,]
    chosen_place <- station_url[which(station_url$airportCode %in% chosen_number$station),]
    #icon.fa <- makeAwesomeIcon(icon = 'flag', markerColor = 'red', prefix='fa', iconColor = 'black')
    leaflet(data = chosen_place) %>%
      addTiles(
        urlTemplate = "//{s}.tiles.mapbox.com/v3/jcheng.map-5ebohr46/{z}/{x}/{y}.png",
        attribution = 'Maps by <a href="http://www.mapbox.com/">Mapbox</a>'
      ) %>%
      addMarkers(~Lon, ~Lat, popup=paste("Station:",chosen_place$Station, ",State:",chosen_place$State,
                                         ",Elevation:",chosen_place$Elevation,",Url:",chosen_place[,8]))
  })
}
|
# Compare emissions from motor vehicle sources in Baltimore City
# (fips == "24510") with emissions from motor vehicle sources in
# Los Angeles County, California (fips == "06037").
# Which city has seen greater changes over time in motor vehicle emissions?
library("data.table")
# Fix: ggplot2 was used below but never loaded; only data.table was attached.
library("ggplot2")

# Load the EPA NEI emissions data and the source classification table.
SCC <- data.table::as.data.table(x = readRDS(file = "Source_Classification_Code.rds"))
NEI <- data.table::as.data.table(x = readRDS(file = "summarySCC_PM25.rds"))

# Gather the subset of the NEI data which corresponds to vehicle sources.
condition <- grepl("vehicle", SCC[, SCC.Level.Two], ignore.case=TRUE)
vehiclesSCC <- SCC[condition, SCC]
vehiclesNEI <- NEI[NEI[, SCC] %in% vehiclesSCC,]

# Subset the vehicles NEI data by each city's fips and add a city label.
vehiclesBaltimoreNEI <- vehiclesNEI[fips == "24510",]
vehiclesBaltimoreNEI[, city := c("Baltimore City")]
vehiclesLANEI <- vehiclesNEI[fips == "06037",]
vehiclesLANEI[, city := c("Los Angeles")]

# Combine data.tables into one data.table for faceted plotting.
bothNEI <- rbind(vehiclesBaltimoreNEI,vehiclesLANEI)

png("plot6.png")
# Fix: wrap in print() -- a top-level ggplot object is not auto-printed when
# this script is run via source(), so the PNG would otherwise be empty.
print(
  ggplot(bothNEI, aes(x=factor(year), y=Emissions, fill=city)) +
    geom_bar(aes(fill=year),stat="identity") +
    facet_grid(scales="free", space="free", .~city) +
    labs(x="year", y=expression("Total PM"[2.5]*" Emission (Kilo-Tons)")) +
    labs(title=expression("PM"[2.5]*" Motor Vehicle Source Emissions in Baltimore & LA, 1999-2008"))
)
dev.off()
| /plot6.R | no_license | yurica24/Coursera4_exploratoryData | R | false | false | 1,407 | r | #Compare emissions from motor vehicle sources in Baltimore City with emissions
# (continuation of header) Compare motor-vehicle emissions in Baltimore City
# (fips == "24510") with Los Angeles County, California (fips == "06037").
# Which city has seen greater changes over time in motor vehicle emissions?
library("data.table")
# Fix: ggplot2 was used below but never loaded; only data.table was attached.
library("ggplot2")

# Load the EPA NEI emissions data and the source classification table.
SCC <- data.table::as.data.table(x = readRDS(file = "Source_Classification_Code.rds"))
NEI <- data.table::as.data.table(x = readRDS(file = "summarySCC_PM25.rds"))

# Gather the subset of the NEI data which corresponds to vehicle sources.
condition <- grepl("vehicle", SCC[, SCC.Level.Two], ignore.case=TRUE)
vehiclesSCC <- SCC[condition, SCC]
vehiclesNEI <- NEI[NEI[, SCC] %in% vehiclesSCC,]

# Subset the vehicles NEI data by each city's fips and add a city label.
vehiclesBaltimoreNEI <- vehiclesNEI[fips == "24510",]
vehiclesBaltimoreNEI[, city := c("Baltimore City")]
vehiclesLANEI <- vehiclesNEI[fips == "06037",]
vehiclesLANEI[, city := c("Los Angeles")]

# Combine data.tables into one data.table for faceted plotting.
bothNEI <- rbind(vehiclesBaltimoreNEI,vehiclesLANEI)

png("plot6.png")
# Fix: wrap in print() -- a top-level ggplot object is not auto-printed when
# this script is run via source(), so the PNG would otherwise be empty.
print(
  ggplot(bothNEI, aes(x=factor(year), y=Emissions, fill=city)) +
    geom_bar(aes(fill=year),stat="identity") +
    facet_grid(scales="free", space="free", .~city) +
    labs(x="year", y=expression("Total PM"[2.5]*" Emission (Kilo-Tons)")) +
    labs(title=expression("PM"[2.5]*" Motor Vehicle Source Emissions in Baltimore & LA, 1999-2008"))
)
dev.off()
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## Create a special "matrix" object that can cache its inverse.
## Returns a list of four accessor closures sharing one environment:
##   set(y)        -- replace the stored matrix and invalidate the cache
##   get()         -- return the stored matrix
##   setinverse(i) -- store a computed inverse in the cache
##   getinverse()  -- return the cached inverse, or NULL if not yet set
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # matrix changed, so the old inverse is stale
    },
    get = function() x,
    setinverse = function(i) cached_inverse <<- i,
    getinverse = function() cached_inverse
  )
}
## Write a short comment describing this function
## Return the inverse of the special "matrix" created by makeCacheMatrix().
## If the inverse has already been computed (and the matrix has not changed),
## the cached value is returned instead of being recomputed.
##
## x:   object returned by makeCacheMatrix()
## ...: extra arguments forwarded to solve()
cacheSolve <- function(x, ...) {
  inv <- x$getinverse()
  if(!is.null(inv)) {
    message("getting cached data")
    return(inv)
  }
  ## Fix: forward ... to solve(); it was previously accepted but ignored.
  inv <- solve(x$get(), ...)
  x$setinverse(inv)
  inv
}
| /cachematrix.R | no_license | lgandras/ProgrammingAssignment2 | R | false | false | 706 | r | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## Create a special "matrix" object that can cache its inverse.
## Returns a list of four accessor closures sharing one environment:
##   set(y)        -- replace the stored matrix and invalidate the cache
##   get()         -- return the stored matrix
##   setinverse(i) -- store a computed inverse in the cache
##   getinverse()  -- return the cached inverse, or NULL if not yet set
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # matrix changed, so the old inverse is stale
    },
    get = function() x,
    setinverse = function(i) cached_inverse <<- i,
    getinverse = function() cached_inverse
  )
}
## Write a short comment describing this function
## Return the inverse of the special "matrix" created by makeCacheMatrix().
## If the inverse has already been computed (and the matrix has not changed),
## the cached value is returned instead of being recomputed.
##
## x:   object returned by makeCacheMatrix()
## ...: extra arguments forwarded to solve()
cacheSolve <- function(x, ...) {
  inv <- x$getinverse()
  if(!is.null(inv)) {
    message("getting cached data")
    return(inv)
  }
  ## Fix: forward ... to solve(); it was previously accepted but ignored.
  inv <- solve(x$get(), ...)
  x$setinverse(inv)
  inv
}
|
#################################################
# Chapter 1
#################################################
library(bayesm)
library(dummies)
library(pROC)
load("stc-cbc-respondents-v3(1).RData")
str(resp.data.v3)
taskV3 <- read.csv("stc-dc-task-cbc -v3(1).csv", sep="\t")
str(taskV3)
load("efCode.RData")
str(efcode.att.f)
str(efcode.attmat.f)
apply(resp.data.v3[4:39], 2, function(x){tabulate(na.omit(x))})
task.mat <- as.matrix(taskV3[, c("screen", "RAM", "processor", "price", "brand")])
dim(task.mat)
head(task.mat)
X.mat=efcode.attmat.f(task.mat) # Here is where we do effects coding
dim(X.mat)
head(X.mat)
pricevec=taskV3$price-mean(taskV3$price)
head(pricevec)
str(pricevec)
X.brands=X.mat[,9:11]
dim(X.brands)
str(X.brands)
X.BrandByPrice = X.brands*pricevec
dim(X.BrandByPrice)
str(X.BrandByPrice)
X.matrix=cbind(X.mat,X.BrandByPrice)
dim(X.matrix)
str(X.matrix)
X2.matrix=X.matrix[,1:2]
dim(X2.matrix)
det(t(X.matrix) %*% X.matrix)
ydata=resp.data.v3[,4:39]
names(ydata)
str(ydata)
ydata=na.omit(ydata)
str(ydata)
ydata=as.matrix(ydata)
dim(ydata)
zowner <- 1*(!is.na(resp.data.v3$vList3))
lgtdata = NULL
for (i in 1:424) { lgtdata[[i]]=list( y=ydata[i,],X=X.matrix )}
length(lgtdata)
str(lgtdata)
#################################################
# Chapter 2
#################################################
# Fit the hierarchical multinomial logit model (Dirichlet-process prior)
# with bayesm::rhierMnlDP, then inspect the posterior beta draws.
# R = 5000 MCMC iterations, keeping every 5th draw (1000 kept draws).
mcmctest=list(R=5000, keep=5)
Data1=list(p=3,lgtdata=lgtdata)
testrun1=rhierMnlDP(Data=Data1,Mcmc=mcmctest)
names(testrun1)
# betadraw1: array of per-respondent beta draws (respondent x coef x draw)
betadraw1=testrun1$betadraw
dim(betadraw1)
# Trace plot and density for respondent 1, coefficient 1.
plot(1:length(betadraw1[1,1,]),betadraw1[1,1,])
# Draws 701:1000 are treated as post-burn-in throughout this script.
plot(density(betadraw1[1,1,701:1000],width=2))
summary(betadraw1[1,1,701:1000])
# Posterior means of each coefficient, averaged over respondents and draws.
betameansoverall <- apply(betadraw1[,,701:1000],c(2),mean)
betameansoverall
# Posterior quantiles per coefficient.
perc <- apply(betadraw1[,,701:1000],2,quantile,probs=c(0.05,0.10,0.25,0.5 ,0.75,0.90,0.95))
perc
#################################################
# Chapter 3
#################################################
# Refit the model with a respondent-level covariate Z: mean-centered
# ownership indicator (zowner), to estimate how ownership shifts the betas.
zownertest=matrix(scale(zowner,scale=FALSE),ncol=1)
Data2=list(p=3,lgtdata=lgtdata,Z=zownertest)
testrun2=rhierMnlDP(Data=Data2,Mcmc=mcmctest)
# NOTE(review): testrun2$deltadraw (lowercase) vs testrun2$Deltadraw below --
# confirm which element name rhierMnlDP actually returns; the lowercase
# access likely yields NULL.
dim(testrun2$deltadraw)
# Posterior means and quantiles of the covariate effects (post burn-in).
apply(testrun2$Deltadraw[701:1000,],2,mean)
apply(testrun2$Deltadraw[701:1000,],2,quantile,probs=c(0.05,0.10,0.25,0.5 ,0.75,0.90,0.95))
betadraw2=testrun2$betadraw
dim(betadraw2)
#################################################
# Chapter 4
#################################################
betameans <- apply(betadraw1[,,701:1000],c(1,2),mean)
str(betameans)
dim(betameans)
xbeta=X.matrix%*%t(betameans)
dim(xbeta)
xbetamatrix=matrix(xbeta,ncol=3,byrow=TRUE)
dim(xbetamatrix)
expxbeta=exp(xbetamatrix)
rsumvec=rowSums(expxbeta)
pchoicemat=expxbeta/rsumvec
head(pchoicemat)
dim(pchoicemat)
custchoice <- max.col(pchoicemat)
str(custchoice)
head(custchoice)
ydatavec <- as.vector(t(ydata))
str(ydatavec)
table(custchoice,ydatavec)
roctest <- roc(ydatavec, custchoice, plot=TRUE)
auc(roctest)
logliketest <- testrun2$loglike
mean(logliketest)
m <- matrix(custchoice, nrow =36, byrow=F)
m2 <- t(m)
apply(m2, 2, function(x){tabulate(na.omit(x))})
##repeat this process for betadraw2##
betameans2 <- apply(betadraw2[,,701:1000],c(1,2),mean)
str(betameans2)
dim(betameans2)
xbeta2=X.matrix%*%t(betameans2)
dim(xbeta2)
xbetamatrix2=matrix(xbeta2,ncol=3,byrow=TRUE)
dim(xbetamatrix2)
expxbeta2=exp(xbetamatrix2)
rsumvec2=rowSums(expxbeta2)
pchoicemat2=expxbeta2/rsumvec2
head(pchoicemat2)
dim(pchoicemat2)
custchoice2 <- max.col(pchoicemat2)
str(custchoice2)
head(custchoice2)
ydatavec2 <- as.vector(t(ydata))
str(ydatavec2)
table(custchoice2,ydatavec2)
roctest2 <- roc(ydatavec2, custchoice2, plot=TRUE)
auc(roctest2)
logliketest2 <- testrun2$loglike
mean(logliketest2)
m_beta2 <- matrix(custchoice2, nrow =36, byrow=F)
m2_beta2 <- t(m_beta2)
apply(m2_beta2, 2, function(x){tabulate(na.omit(x))})
#################################################
# Chapter 5
#################################################
# Predict choice probabilities for extra (holdout) scenarios using the
# overall posterior mean betas from Chapter 2.
ex_scen <- read.csv("extra-scenarios(1).csv")
Xextra.matrix <- as.matrix(ex_scen[,c("V1","V2","V3","V4","V5","V6","V7","V8","V9", "V10","V11","V12","V13","V14")])
betavec=matrix(betameansoverall,ncol=1,byrow=TRUE)
# Linear utilities for every alternative, then reshape to one row per
# choice set (3 alternatives per set).
xextrabeta=Xextra.matrix%*%(betavec)
xbetaextra2=matrix(xextrabeta,ncol=3,byrow=TRUE)
dim(xbetaextra2)
# Multinomial-logit choice probabilities: softmax across alternatives.
expxbetaextra2=exp(xbetaextra2)
rsumvec=rowSums(expxbetaextra2)
pchoicemat=expxbetaextra2/rsumvec
pchoicemat | /assign5_code.R | no_license | andrewburner/msds450_assign5 | R | false | false | 4,349 | r | #################################################
# Chapter 1
#################################################
library(bayesm)
library(dummies)
library(pROC)
load("stc-cbc-respondents-v3(1).RData")
str(resp.data.v3)
taskV3 <- read.csv("stc-dc-task-cbc -v3(1).csv", sep="\t")
str(taskV3)
load("efCode.RData")
str(efcode.att.f)
str(efcode.attmat.f)
apply(resp.data.v3[4:39], 2, function(x){tabulate(na.omit(x))})
task.mat <- as.matrix(taskV3[, c("screen", "RAM", "processor", "price", "brand")])
dim(task.mat)
head(task.mat)
X.mat=efcode.attmat.f(task.mat) # Here is where we do effects coding
dim(X.mat)
head(X.mat)
pricevec=taskV3$price-mean(taskV3$price)
head(pricevec)
str(pricevec)
X.brands=X.mat[,9:11]
dim(X.brands)
str(X.brands)
X.BrandByPrice = X.brands*pricevec
dim(X.BrandByPrice)
str(X.BrandByPrice)
X.matrix=cbind(X.mat,X.BrandByPrice)
dim(X.matrix)
str(X.matrix)
X2.matrix=X.matrix[,1:2]
dim(X2.matrix)
det(t(X.matrix) %*% X.matrix)
ydata=resp.data.v3[,4:39]
names(ydata)
str(ydata)
ydata=na.omit(ydata)
str(ydata)
ydata=as.matrix(ydata)
dim(ydata)
zowner <- 1*(!is.na(resp.data.v3$vList3))
lgtdata = NULL
for (i in 1:424) { lgtdata[[i]]=list( y=ydata[i,],X=X.matrix )}
length(lgtdata)
str(lgtdata)
#################################################
# Chapter 2
#################################################
mcmctest=list(R=5000, keep=5)
Data1=list(p=3,lgtdata=lgtdata)
testrun1=rhierMnlDP(Data=Data1,Mcmc=mcmctest)
names(testrun1)
betadraw1=testrun1$betadraw
dim(betadraw1)
plot(1:length(betadraw1[1,1,]),betadraw1[1,1,])
plot(density(betadraw1[1,1,701:1000],width=2))
summary(betadraw1[1,1,701:1000])
betameansoverall <- apply(betadraw1[,,701:1000],c(2),mean)
betameansoverall
perc <- apply(betadraw1[,,701:1000],2,quantile,probs=c(0.05,0.10,0.25,0.5 ,0.75,0.90,0.95))
perc
#################################################
# Chapter 3
#################################################
zownertest=matrix(scale(zowner,scale=FALSE),ncol=1)
Data2=list(p=3,lgtdata=lgtdata,Z=zownertest)
testrun2=rhierMnlDP(Data=Data2,Mcmc=mcmctest)
dim(testrun2$deltadraw)
apply(testrun2$Deltadraw[701:1000,],2,mean)
apply(testrun2$Deltadraw[701:1000,],2,quantile,probs=c(0.05,0.10,0.25,0.5 ,0.75,0.90,0.95))
betadraw2=testrun2$betadraw
dim(betadraw2)
#################################################
# Chapter 4
#################################################
betameans <- apply(betadraw1[,,701:1000],c(1,2),mean)
str(betameans)
dim(betameans)
xbeta=X.matrix%*%t(betameans)
dim(xbeta)
xbetamatrix=matrix(xbeta,ncol=3,byrow=TRUE)
dim(xbetamatrix)
expxbeta=exp(xbetamatrix)
rsumvec=rowSums(expxbeta)
pchoicemat=expxbeta/rsumvec
head(pchoicemat)
dim(pchoicemat)
custchoice <- max.col(pchoicemat)
str(custchoice)
head(custchoice)
ydatavec <- as.vector(t(ydata))
str(ydatavec)
table(custchoice,ydatavec)
roctest <- roc(ydatavec, custchoice, plot=TRUE)
auc(roctest)
logliketest <- testrun2$loglike
mean(logliketest)
m <- matrix(custchoice, nrow =36, byrow=F)
m2 <- t(m)
apply(m2, 2, function(x){tabulate(na.omit(x))})
##repeat this process for betadraw2##
betameans2 <- apply(betadraw2[,,701:1000],c(1,2),mean)
str(betameans2)
dim(betameans2)
xbeta2=X.matrix%*%t(betameans2)
dim(xbeta2)
xbetamatrix2=matrix(xbeta2,ncol=3,byrow=TRUE)
dim(xbetamatrix2)
expxbeta2=exp(xbetamatrix2)
rsumvec2=rowSums(expxbeta2)
pchoicemat2=expxbeta2/rsumvec2
head(pchoicemat2)
dim(pchoicemat2)
custchoice2 <- max.col(pchoicemat2)
str(custchoice2)
head(custchoice2)
ydatavec2 <- as.vector(t(ydata))
str(ydatavec2)
table(custchoice2,ydatavec2)
roctest2 <- roc(ydatavec2, custchoice2, plot=TRUE)
auc(roctest2)
logliketest2 <- testrun2$loglike
mean(logliketest2)
m_beta2 <- matrix(custchoice2, nrow =36, byrow=F)
m2_beta2 <- t(m_beta2)
apply(m2_beta2, 2, function(x){tabulate(na.omit(x))})
#################################################
# Chapter 5
#################################################
ex_scen <- read.csv("extra-scenarios(1).csv")
Xextra.matrix <- as.matrix(ex_scen[,c("V1","V2","V3","V4","V5","V6","V7","V8","V9", "V10","V11","V12","V13","V14")])
betavec=matrix(betameansoverall,ncol=1,byrow=TRUE)
xextrabeta=Xextra.matrix%*%(betavec)
xbetaextra2=matrix(xextrabeta,ncol=3,byrow=TRUE)
dim(xbetaextra2)
expxbetaextra2=exp(xbetaextra2)
rsumvec=rowSums(expxbetaextra2)
pchoicemat=expxbetaextra2/rsumvec
pchoicemat |
#' Add GitHub links to select visualizations
#'
#' This function alters the internal representation of a plot to include links back to the actual
#' GitHub issue. This is currently implemented for \code{viz_taskboard()} and \code{viz_gantt()}
#'
#' Credit goes to this Stack Overflow answer for figuring out how to do this:
#' https://stackoverflow.com/questions/42259826/hyperlinking-text-in-a-ggplot2-visualization/42262407
#'
#' @param g ggplot2 object returned by \code{viz_gantt()} or \code{viz_taskboard()}
#' @param filepath Location to save resulting SVG file of ggplot2, if desired. Leave blank for
#' function to output message precisely as needed to render in HTML RMarkdown with chunk
#' option \code{results = 'asis'}
#'
#' @return SVG version of ggplot2 object with links to relevant GitHub issues. Either writes output
#' to file or to console (to be captured in RMarkdown) depending on existence of \code{filepath} argument
#' @export
#'
#' @examples
#' \dontrun{
#' # In R, to save to file:
#' taskboard <- viz_taskboard(issues)
#' viz_linked(taskboard, "my_folder/my_file.svg")
#'
#' # In RMarkdown chunk, to print as output:
#' ```{r results = 'asis', echo = FALSE}
#' gantt <- viz_gantt(issues)
#' viz_linked(gantt)
#' ````
#' }
viz_linked <- function(g, filepath){
  # Fail fast with a clean, call-free error if a suggested package is
  # missing. Fix: the original used message(..., call. = FALSE), but
  # message() has no `call.` argument -- the FALSE was pasted into the
  # displayed text and execution continued, only to crash later on the
  # first pkg::fun() call.
  if (!requireNamespace("xml2", quietly = TRUE)) {
    stop(
      paste0("Package \"xml2\" is needed to edit SVG.",
             "Please install \"xml2\" or use the non-linked version."),
      call. = FALSE)
  }
  if (!requireNamespace("ggplot2", quietly = TRUE)) {
    stop(
      paste0("Package \"ggplot2\" is needed to save the image.",
             "Please install \"ggplot2\" or use the non-linked version."),
      call. = FALSE)
  }
  if (!requireNamespace("purrr", quietly = TRUE)) {
    stop(
      paste0("Package \"purrr\" is needed for image conversion.",
             "Please install \"purrr\" or use the non-linked version."),
      call. = FALSE)
  }
  # Map the plot's rendered label text to the corresponding GitHub URLs.
  links <- get_text_link_map(g)
  # Save the current ggplot as a temporary SVG file. Clean it up even if
  # an error occurs below (previously unlink() was skipped on error).
  tf <- tempfile(fileext = ".svg")
  on.exit(unlink(tf), add = TRUE)
  suppressMessages( ggplot2::ggsave(tf , g ) )
  # Wrap each SVG <text> node whose text matches a known label in an
  # <a xlink:href="..."> element pointing at the GitHub issue.
  xml <- xml2::read_xml(tf)
  xml %>%
    xml2::xml_find_all(xpath="//d1:text") %>%
    purrr::keep(xml2::xml_text(.) %in% names(links)) %>%
    xml2::xml_add_parent("a", "xlink:href" = links[xml2::xml_text(.)], target = "_blank")
  if(missing(filepath)){
    # No destination given: emit the SVG to the console so it renders in an
    # RMarkdown chunk with results = 'asis'.
    xml2::write_xml(xml, tf)
    cat( readLines(tf), sep = "\n" )
  }
  else{
    xml2::write_xml(xml, filepath )
  }
}
# internal functions/methods for deriving links ----
#' Map plot label text to GitHub issue URLs
#'
#' Validates that the plot's underlying data kept the `url` column and that
#' the plot type is supported, then dispatches to the class-specific method.
#'
#' @keywords internal
get_text_link_map <- function(g){
  # ensure graph data has preserved links
  if(!("url" %in% names(g$data))){
    stop( paste(
      "url column was not included in dataset passed to viz function.",
      "Please remake the plot with this field included before passing to viz_linked.",
      sep = "\n"
    ))
  }
  # throw a more readable error message if the plot type is unsupported.
  # Fix: the original tested `intersect(class(g), supported_plots) == 0`,
  # which is logical(0) when there is no overlap, so if() crashed with
  # "argument is of length zero" instead of reaching this message.
  supported_plots <- c("gantt", "taskboard")
  if(length(intersect(class(g), supported_plots)) == 0) {
    stop( paste(
      "Object provided does not have an implementation for adding links.",
      "Supported plots types are:",
      paste(supported_plots, collapse = ", "),
      sep = "\n"
    ))
  }
  # dispatch to S3 method
  UseMethod('get_text_link_map', g)
}
#' Gantt method: map issue titles (as wrapped on the plot) to their URLs
#' @keywords internal
get_text_link_map.gantt <- function(g){
  wrap_width <- g[["str_wrap_width"]]
  # Re-wrap each title exactly as the gantt plot renders it, so the SVG
  # text nodes can be matched back to their issues.
  wrapped <- lapply(g$data$title, function(txt) strwrap(txt, width = wrap_width))
  # One URL entry per wrapped line of each title.
  n_lines <- vapply(wrapped, length, integer(1))
  stats::setNames(rep(g$data$url, n_lines), wrapped)
}
#' Taskboard method: map "#<number>: <title>" card labels to issue URLs
#' @keywords internal
get_text_link_map.taskboard <- function(g){
  wrap_width <- g[["str_wrap_width"]]
  # Taskboard cards are labelled "#<issue number>: <title>".
  labels <- paste0("#", g$data$number, ": ", g$data$title)
  wrapped <- lapply(labels, function(txt) strwrap(txt, width = wrap_width))
  # One URL entry per wrapped line of each label.
  n_lines <- vapply(wrapped, length, integer(1))
  stats::setNames(rep(g$data$url, n_lines), wrapped)
}
| /R/viz-linked.R | permissive | jwinget/projmgr | R | false | false | 4,220 | r | #' Add GitHub links to select visualizations
#'
#' This function alters the internal representation of a plot to include links back to the actual
#' GitHub issue. This is currently implemented for \code{viz_taskboard()} and \code{viz_gantt()}
#'
#' Credit goes to this Stack Overflow answer for figuring out how to do this:
#' https://stackoverflow.com/questions/42259826/hyperlinking-text-in-a-ggplot2-visualization/42262407
#'
#' @param g ggplot2 object returned by \code{viz_gantt()} or \code{viz_taskboard()}
#' @param filepath Location to save resulting SVG file of ggplot2, if desired. Leave blank for
#' function to output message precisely as needed to render in HTML RMarkdown with chunk
#' option \code{results = 'asis'}
#'
#' @return SVG version of ggplot2 object with links to relevant GitHub issues. Either writes output
#' to file or to console (to be captured in RMarkdown) depending on existence of \code{filepath} argument
#' @export
#'
#' @examples
#' \dontrun{
#' # In R, to save to file:
#' taskboard <- viz_taskboard(issues)
#' viz_linked(taskboard, "my_folder/my_file.svg")
#'
#' # In RMarkdown chunk, to print as output:
#' ```{r results = 'asis', echo = FALSE}
#' gantt <- viz_gantt(issues)
#' viz_linked(gantt)
#' ````
#' }
viz_linked <- function(g, filepath){
  # Fail fast with a clean, call-free error if a suggested package is
  # missing. Fix: the original used message(..., call. = FALSE), but
  # message() has no `call.` argument -- the FALSE was pasted into the
  # displayed text and execution continued, only to crash later on the
  # first pkg::fun() call.
  if (!requireNamespace("xml2", quietly = TRUE)) {
    stop(
      paste0("Package \"xml2\" is needed to edit SVG.",
             "Please install \"xml2\" or use the non-linked version."),
      call. = FALSE)
  }
  if (!requireNamespace("ggplot2", quietly = TRUE)) {
    stop(
      paste0("Package \"ggplot2\" is needed to save the image.",
             "Please install \"ggplot2\" or use the non-linked version."),
      call. = FALSE)
  }
  if (!requireNamespace("purrr", quietly = TRUE)) {
    stop(
      paste0("Package \"purrr\" is needed for image conversion.",
             "Please install \"purrr\" or use the non-linked version."),
      call. = FALSE)
  }
  # Map the plot's rendered label text to the corresponding GitHub URLs.
  links <- get_text_link_map(g)
  # Save the current ggplot as a temporary SVG file. Clean it up even if
  # an error occurs below (previously unlink() was skipped on error).
  tf <- tempfile(fileext = ".svg")
  on.exit(unlink(tf), add = TRUE)
  suppressMessages( ggplot2::ggsave(tf , g ) )
  # Wrap each SVG <text> node whose text matches a known label in an
  # <a xlink:href="..."> element pointing at the GitHub issue.
  xml <- xml2::read_xml(tf)
  xml %>%
    xml2::xml_find_all(xpath="//d1:text") %>%
    purrr::keep(xml2::xml_text(.) %in% names(links)) %>%
    xml2::xml_add_parent("a", "xlink:href" = links[xml2::xml_text(.)], target = "_blank")
  if(missing(filepath)){
    # No destination given: emit the SVG to the console so it renders in an
    # RMarkdown chunk with results = 'asis'.
    xml2::write_xml(xml, tf)
    cat( readLines(tf), sep = "\n" )
  }
  else{
    xml2::write_xml(xml, filepath )
  }
}
# internal functions/methods for deriving links ----
#' Map plot label text to GitHub issue URLs
#'
#' Validates that the plot's underlying data kept the `url` column and that
#' the plot type is supported, then dispatches to the class-specific method.
#'
#' @keywords internal
get_text_link_map <- function(g){
  # ensure graph data has preserved links
  if(!("url" %in% names(g$data))){
    stop( paste(
      "url column was not included in dataset passed to viz function.",
      "Please remake the plot with this field included before passing to viz_linked.",
      sep = "\n"
    ))
  }
  # throw a more readable error message if the plot type is unsupported.
  # Fix: the original tested `intersect(class(g), supported_plots) == 0`,
  # which is logical(0) when there is no overlap, so if() crashed with
  # "argument is of length zero" instead of reaching this message.
  supported_plots <- c("gantt", "taskboard")
  if(length(intersect(class(g), supported_plots)) == 0) {
    stop( paste(
      "Object provided does not have an implementation for adding links.",
      "Supported plots types are:",
      paste(supported_plots, collapse = ", "),
      sep = "\n"
    ))
  }
  # dispatch to S3 method
  UseMethod('get_text_link_map', g)
}
#' Gantt method: map issue titles (as wrapped on the plot) to their URLs
#' @keywords internal
get_text_link_map.gantt <- function(g){
  wrap_width <- g[["str_wrap_width"]]
  # Re-wrap each title exactly as the gantt plot renders it, so the SVG
  # text nodes can be matched back to their issues.
  wrapped <- lapply(g$data$title, function(txt) strwrap(txt, width = wrap_width))
  # One URL entry per wrapped line of each title.
  n_lines <- vapply(wrapped, length, integer(1))
  stats::setNames(rep(g$data$url, n_lines), wrapped)
}
#' Taskboard method: map "#<number>: <title>" card labels to issue URLs
#' @keywords internal
get_text_link_map.taskboard <- function(g){
  wrap_width <- g[["str_wrap_width"]]
  # Taskboard cards are labelled "#<issue number>: <title>".
  labels <- paste0("#", g$data$number, ": ", g$data$title)
  wrapped <- lapply(labels, function(txt) strwrap(txt, width = wrap_width))
  # One URL entry per wrapped line of each label.
  n_lines <- vapply(wrapped, length, integer(1))
  stats::setNames(rep(g$data$url, n_lines), wrapped)
}
|
# Integration test: run MBO end-to-end with each infill criterion on a
# deterministic (f1) and a noisy (f2) 2d sphere function, for both
# minimization and maximization, with a Kriging and a random forest learner.
context("infill crits")

test_that("infill crits", {
  ninit = 20L
  niters = 3L
  f1 = smoof::makeSphereFunction(2L)
  # noisy variant of the sphere, sharing f1's parameter set
  f2 = smoof::makeSingleObjectiveFunction(
    fn = function(x) sum(x^2) + rnorm(1, 0, 0.03),
    par.set = getParamSet(f1)
  )
  des = generateTestDesign(ninit, getParamSet(f1))
  # Build an MBO control object for the given infill criterion.
  # Fix: this helper was declared as function(minimize, crit) but called as
  # mycontrol(crit), so `crit` was positionally bound to the unused
  # `minimize` formal and the real `crit` argument was missing, erroring
  # when setMBOControlInfill() forced it.
  mycontrol = function(crit) {
    ctrl = makeMBOControl(final.evals = 10L)
    ctrl = setMBOControlTermination(ctrl, iters = niters)
    ctrl = setMBOControlInfill(ctrl, crit = crit, opt = "focussearch", opt.restarts = 1L,
      opt.focussearch.points = 300L)
    return(ctrl)
  }
  # Basic sanity checks on an mbo() result object.
  mycheck = function(or, minimize) {
    expect_equal(getOptPathLength(or$opt.path), ninit + niters + 10L)
    expect_true(!is.na(or$y))
    if (minimize)
      expect_true(or$y < 25)
    else
      expect_true(or$y > 30)
  }
  learners = list(
    makeLearner("regr.km", predict.type = "se"),
    makeLearner("regr.randomForest", ntree = 10L, predict.type = "se")
  )
  # FIXME: we see a problem with crit = "mean" here.
  # at some point we will always eval the same point.
  # kriging will then produce numerical errors, but the real problem is that
  # we have converged and just waste time. we need to detect this somehow, or cope with it
  for (noisy in c(TRUE, FALSE)) {
    for (minimize in c(TRUE, FALSE)) {
      # deterministic criteria for the noiseless case, noise-aware otherwise
      crits = if (!noisy) c("mean", "ei") else c("aei", "eqi")
      for (lrn in learners) {
        if (inherits(lrn, "regr.km"))
          lrn = setHyperPars(lrn, nugget.estim = noisy)
        for (crit in crits) {
          ctrl = mycontrol(crit)
          f = if (!noisy) f1 else f2
          f = if (!minimize) setAttribute(f, "minimize", FALSE) else f
          or = mbo(f, des, learner = lrn, control = ctrl)
          mycheck(or, minimize)
        }
      }
    }
  }
  # check lambda and pi for cb: lambda alone is valid; supplying both
  # lambda and pi must error
  ctrl = makeMBOControl(final.evals = 10L)
  ctrl = setMBOControlTermination(ctrl, iters = niters)
  ctrl = setMBOControlInfill(ctrl, crit = "cb", opt = "focussearch", opt.restarts = 1L,
    opt.focussearch.points = 300L, crit.cb.lambda = 2)
  mbo(f1, des, learner = makeLearner("regr.km", predict.type = "se"), control = ctrl)
  expect_error(setMBOControlInfill(ctrl, crit = "cb", opt = "focussearch", opt.restarts = 1L,
    opt.focussearch.points = 300L, crit.cb.lambda = 2, crit.cb.pi = 0.5))
  ctrl = setMBOControlInfill(ctrl, crit = "cb", opt = "focussearch", opt.restarts = 1L,
    opt.focussearch.points = 300L, crit.cb.lambda = NULL, crit.cb.pi = 0.5)
  or = mbo(f1, des, learner = makeLearner("regr.km", predict.type = "se"), control = ctrl)
  expect_true(or$y < 50)
  # check beta for eqi (0.6 accepted, 2 rejected -- presumably must be in
  # (0.5, 1); confirm against setMBOControlInfill docs)
  expect_error(setMBOControlInfill(ctrl, crit = "eqi", opt = "focussearch", opt.restarts = 1L,
    opt.focussearch.points = 300L, crit.eqi.beta = 2))
  ctrl = setMBOControlInfill(ctrl, crit = "eqi", opt = "focussearch", opt.restarts = 1L,
    opt.focussearch.points = 300L, crit.eqi.beta = 0.6)
  or = mbo(f1, des, learner = makeLearner("regr.km", predict.type = "se", nugget.estim = TRUE), control = ctrl)
  expect_true(or$y < 50)
})
| /tests/testthat/test_infillcrits.R | no_license | DanielKuehn87/mlrMBO | R | false | false | 3,111 | r | context("infill crits")
# Integration test: run MBO end-to-end with each infill criterion on a
# deterministic (f1) and a noisy (f2) 2d sphere function, for both
# minimization and maximization, with a Kriging and a random forest learner.
test_that("infill crits", {
  ninit = 20L
  niters = 3L
  f1 = smoof::makeSphereFunction(2L)
  # noisy variant of the sphere, sharing f1's parameter set
  f2 = smoof::makeSingleObjectiveFunction(
    fn = function(x) sum(x^2) + rnorm(1, 0, 0.03),
    par.set = getParamSet(f1)
  )
  des = generateTestDesign(ninit, getParamSet(f1))
  # Build an MBO control object for the given infill criterion.
  # Fix: this helper was declared as function(minimize, crit) but called as
  # mycontrol(crit), so `crit` was positionally bound to the unused
  # `minimize` formal and the real `crit` argument was missing, erroring
  # when setMBOControlInfill() forced it.
  mycontrol = function(crit) {
    ctrl = makeMBOControl(final.evals = 10L)
    ctrl = setMBOControlTermination(ctrl, iters = niters)
    ctrl = setMBOControlInfill(ctrl, crit = crit, opt = "focussearch", opt.restarts = 1L,
      opt.focussearch.points = 300L)
    return(ctrl)
  }
  # Basic sanity checks on an mbo() result object.
  mycheck = function(or, minimize) {
    expect_equal(getOptPathLength(or$opt.path), ninit + niters + 10L)
    expect_true(!is.na(or$y))
    if (minimize)
      expect_true(or$y < 25)
    else
      expect_true(or$y > 30)
  }
  learners = list(
    makeLearner("regr.km", predict.type = "se"),
    makeLearner("regr.randomForest", ntree = 10L, predict.type = "se")
  )
  # FIXME: we see a problem with crit = "mean" here.
  # at some point we will always eval the same point.
  # kriging will then produce numerical errors, but the real problem is that
  # we have converged and just waste time. we need to detect this somehow, or cope with it
  for (noisy in c(TRUE, FALSE)) {
    for (minimize in c(TRUE, FALSE)) {
      # deterministic criteria for the noiseless case, noise-aware otherwise
      crits = if (!noisy) c("mean", "ei") else c("aei", "eqi")
      for (lrn in learners) {
        if (inherits(lrn, "regr.km"))
          lrn = setHyperPars(lrn, nugget.estim = noisy)
        for (crit in crits) {
          ctrl = mycontrol(crit)
          f = if (!noisy) f1 else f2
          f = if (!minimize) setAttribute(f, "minimize", FALSE) else f
          or = mbo(f, des, learner = lrn, control = ctrl)
          mycheck(or, minimize)
        }
      }
    }
  }
  # check lambda and pi for cb: lambda alone is valid; supplying both
  # lambda and pi must error
  ctrl = makeMBOControl(final.evals = 10L)
  ctrl = setMBOControlTermination(ctrl, iters = niters)
  ctrl = setMBOControlInfill(ctrl, crit = "cb", opt = "focussearch", opt.restarts = 1L,
    opt.focussearch.points = 300L, crit.cb.lambda = 2)
  mbo(f1, des, learner = makeLearner("regr.km", predict.type = "se"), control = ctrl)
  expect_error(setMBOControlInfill(ctrl, crit = "cb", opt = "focussearch", opt.restarts = 1L,
    opt.focussearch.points = 300L, crit.cb.lambda = 2, crit.cb.pi = 0.5))
  ctrl = setMBOControlInfill(ctrl, crit = "cb", opt = "focussearch", opt.restarts = 1L,
    opt.focussearch.points = 300L, crit.cb.lambda = NULL, crit.cb.pi = 0.5)
  or = mbo(f1, des, learner = makeLearner("regr.km", predict.type = "se"), control = ctrl)
  expect_true(or$y < 50)
  # check beta for eqi (0.6 accepted, 2 rejected -- presumably must be in
  # (0.5, 1); confirm against setMBOControlInfill docs)
  expect_error(setMBOControlInfill(ctrl, crit = "eqi", opt = "focussearch", opt.restarts = 1L,
    opt.focussearch.points = 300L, crit.eqi.beta = 2))
  ctrl = setMBOControlInfill(ctrl, crit = "eqi", opt = "focussearch", opt.restarts = 1L,
    opt.focussearch.points = 300L, crit.eqi.beta = 0.6)
  or = mbo(f1, des, learner = makeLearner("regr.km", predict.type = "se", nugget.estim = TRUE), control = ctrl)
  expect_true(or$y < 50)
})
|
### --- Script to adjust raw data with ComBat algorithm
### -------SETUP-------
require(dplyr)
require(tidyr)
## ---- COMBAT functions
## Drop cells that are the only observation in their image.
## Per-image effects cannot be estimated from a single cell, so images
## contributing exactly one row are removed entirely.
##
## chan:      data.frame of cells (one row per cell)
## image_var: name of the column identifying each cell's image
##
## Fix: the original filtered on the hard-coded `Pos` column
## (chan$Pos %in% nof1s$Pos) regardless of image_var, so the function only
## worked when image_var == "Pos". It now honors image_var for any column.
## (Also rewritten in base R; the dplyr pipeline was only doing a count.)
remove_single_images = function(chan, image_var){
  ## count cells by image
  counts = table(chan[[image_var]])
  ## images represented by a single cell (n <= 1)
  singletons = names(counts)[counts <= 1]
  ## return dataset with no N-of-1 images
  return(chan[!(chan[[image_var]] %in% singletons), ])
}
## internal function for delta functions
sqerr = function(x){sum((x - mean(x))^2)}
## update each iteration of the algo
## One ComBat-style conditional update of the per-slide location effect.
## Combines the per-slide mean of the channel values with the prior
## (gamma_c, tau_c); the precision weights use delta_ijc_inv, assumed to be
## a precomputed column of batch_chan (inverse of delta_ijc -- confirm
## upstream). Returns one row per slide with the update in column `avg`;
## with a single slide the update is undefined (NA) and 0 is returned.
update_gamma = function(batch_chan, gamma_c, tau_c, channel, slide_var){
  ## numerator: per-slide mean plus prior mean scaled by prior variance
  num_df = batch_chan %>%
    group_by_at(slide_var) %>%
    summarise(avg = mean(get(channel)), .groups = 'drop')
  num_df$avg = num_df$avg + gamma_c / tau_c
  ## denominator: per-slide mean precision plus 1 / prior variance
  denom_df = batch_chan %>%
    group_by_at(slide_var) %>%
    summarise(avg = mean(delta_ijc_inv), .groups = 'drop')
  denom_df$avg = denom_df$avg + (1 / tau_c)
  gamma_star = num_df
  gamma_star$avg = gamma_star$avg / denom_df$avg
  ## a single slide yields NA; treat as "no slide effect"
  if (is.na(gamma_star$avg[1])) { gamma_star$avg <- 0 }
  return(gamma_star)
}
## One ComBat-style conditional update of the per-image location effect.
## Same structure as update_gamma but grouped by image, with prior
## (lambda_c, eta_c). Uses the delta_ijc_inv column of batch_chan (assumed
## precomputed upstream). Returns one row per image (column `avg`).
update_lambda = function(batch_chan, lambda_c, eta_c, channel, image_var){
  ## numerator: per-image mean plus prior mean scaled by prior variance
  num_df = batch_chan %>%
    group_by_at(image_var) %>%
    summarise(avg = mean(get(channel)), .groups = 'drop')
  num_df$avg = num_df$avg + lambda_c / eta_c
  ## denominator: per-image mean precision plus 1 / prior variance
  denom_df = batch_chan %>%
    group_by_at(image_var) %>%
    summarise(avg = mean(delta_ijc_inv), .groups = 'drop')
  denom_df$avg = denom_df$avg + (1 / eta_c)
  lambda_star = num_df
  lambda_star$avg = lambda_star$avg / denom_df$avg
  return(lambda_star)
}
## One conditional update of the per-image variance parameter delta_ijc,
## based on explicit residuals after removing the overall (alpha_c),
## per-slide (gamma_ic), and per-image (lambda_ijc) effects (all assumed to
## be columns of batch_chan -- confirm upstream). Prior parameters:
## beta_c (scale) and omega_c (shape). Returns one row per image (`avg`).
update_delta = function(batch_chan, beta_c, omega_c, channel, image_var){
  ## squared residual per cell, shifted by the prior scale
  resid = batch_chan[, channel] - batch_chan$alpha_c -
    batch_chan$gamma_ic - batch_chan$lambda_ijc
  batch_chan$delta_num = beta_c + resid^2
  ## per-image mean of the shifted squared residuals
  num_df = batch_chan %>%
    group_by_at(image_var) %>%
    summarise(avg = mean(delta_num), .groups = 'drop')
  ## per-image cell counts for the posterior shape term
  denom_df = batch_chan %>%
    group_by_at(image_var) %>%
    count()
  denom_df$n = denom_df$n / 2 + omega_c - 1
  delta_star = num_df
  delta_star$avg = delta_star$avg / denom_df$n
  return(delta_star)
}
## Alternative per-image variance update: uses the sum of squared
## deviations from the image mean (sqerr) instead of explicit residuals.
## Prior parameters beta_c / omega_c as in update_delta. Any image whose
## update is NA (e.g. after an empty or degenerate group) is floored at a
## small positive value so later divisions stay finite.
update_delta2 = function(batch_chan, beta_c, omega_c, channel, image_var){
  ## per-image sum of squared deviations of the channel values
  num_df = batch_chan %>%
    group_by_at(image_var) %>%
    summarise(avg = sqerr(get(channel)), .groups = 'drop')
  ## per-image cell counts for the posterior shape term
  denom_df = batch_chan %>%
    group_by_at(image_var) %>%
    count()
  denom_df$n = denom_df$n / 2 + omega_c - 1
  num_df$avg = 0.5 * num_df$avg + beta_c
  delta_star = num_df
  delta_star$avg = delta_star$avg / denom_df$n
  ## floor NA updates at a small positive value
  delta_star[is.na(delta_star$avg), ]$avg = 0.00001
  return(delta_star)
}
## checking convergence
## Convergence metric for the slide effects: mean absolute difference
## between the gamma_ic values currently stored in batch_chan and the
## updated per-slide estimates in gamma_stars$avg.
gamma_conv = function(batch_chan, gamma_stars,slide_var){
  current = unique(batch_chan[,c(slide_var,'gamma_ic')])
  ordering = match(unlist(gamma_stars[,slide_var]), current[,slide_var])
  mean(abs(current[ordering,]$gamma_ic - gamma_stars$avg))  ## MAE
}
## Convergence metric for the image effects: mean absolute difference
## between the stored lambda_ijc values and the updated per-image
## estimates in lambda_stars$avg.
lambda_conv = function(batch_chan, lambda_stars,image_var){
  current = unique(batch_chan[,c(image_var,'lambda_ijc')])
  ordering = match(unlist(lambda_stars[,image_var]), current[,image_var])
  mean(abs(current[ordering,]$lambda_ijc - lambda_stars$avg))  ## MAE
}
## Convergence metric for the image variances: mean absolute difference
## between the stored delta_ijc values and the updated per-image
## estimates in delta_stars$avg.
delta_conv = function(batch_chan, delta_stars,image_var){
  current = unique(batch_chan[,c(image_var,'delta_ijc')])
  ordering = match(unlist(delta_stars[,image_var]), current[,image_var])
  mean(abs(current[ordering,]$delta_ijc - delta_stars$avg))  ## MAE
}
## function to combat-adjust for one channel
## ComBat-style batch adjustment for a single channel.
##
## channel       : name of the intensity column to adjust
## slide_var     : column identifying the slide (first batch level)
## image_var     : column identifying the image (second batch level)
## uid_var       : unique cell identifier column
## h             : per-cell data frame containing all of the above columns
## remove_zeroes : if TRUE, rows set aside as zeroes are appended back
##                 after adjustment (the set-aside step is currently
##                 commented out, so normally nothing is re-appended)
## tol           : convergence tolerance on the summed mean absolute error
##
## Returns the per-cell data frame with the adjusted intensities in the
## Y_ijc_star column.
adjust_vals = function(channel,slide_var,image_var,uid_var,h,remove_zeroes=TRUE,
                       tol = 0.0001){
  print(channel)
  ### ---- Subset the data for the ComBat analysis
  chan = as.data.frame(h[,c(uid_var,slide_var,image_var,channel)])
  chan$raw = chan[,channel]
  ## log10(x + 1) transform, then standardize to zero mean / unit sd
  chan[,channel] = log10(chan[,channel]+1)
  sigma_c = sd(chan[,channel])
  alpha_c = mean(chan[,channel])
  chan[,channel] = (chan[,channel] - alpha_c)/sigma_c
  ### -------COMBAT EMPIRICAL VALUES-------
  ## grand mean (zero after standardization, kept for bookkeeping)
  chan$alpha_c = mean(chan[,channel])
  ## per-slide means
  gamma_ic = chan %>%
    group_by_at(slide_var) %>%
    summarise(avg=mean(get(channel)), .groups = 'drop')
  chan$gamma_ic = gamma_ic[match(chan[,slide_var],unlist(gamma_ic[,slide_var])),]$avg
  ## per-image means
  lambda_ijc = chan %>%
    group_by_at(image_var) %>%
    summarise(avg=mean(get(channel)), .groups = 'drop')
  chan$lambda_ijc = lambda_ijc[match(chan[,image_var],unlist(lambda_ijc[,image_var])),]$avg
  ## per-image variances (NA for single-cell images; handled downstream)
  delta_ijc = chan %>%
    group_by_at(image_var) %>%
    summarise(v=var(get(channel)), .groups='drop')
  chan$delta_ijc = (delta_ijc[match(chan[,image_var],unlist(delta_ijc[,image_var])),]$v)
  ### -------COMBAT HYPERPARAMETERS-------
  ## slide level mean / variance
  gamma_c = mean(chan$gamma_ic)
  tau_c = var(chan$gamma_ic)
  ## image level mean / variance
  lambda_c = mean(chan$lambda_ijc)
  eta_c = var(chan$lambda_ijc)
  ## image level variances: prior parameters from the empirical moments.
  ## NOTE(review): standard ComBat moment matching uses a shape of
  ## (M^2 + 2*S)/S; this uses (M + 2*S)/S -- confirm this is intended
  ## (the original author also flagged it with "is this correct?").
  M_c = mean(chan$delta_ijc)
  S_c = var(chan$delta_ijc)
  omega_c = (M_c + 2*S_c)/S_c
  beta_c = (M_c^3 + M_c*S_c)/S_c
  ### -------CALLING COMBAT BATCH EFFECTS FUNCTIONS-------
  batch_chan = chan ## duplicate the dataframe to iterate
  batch_chan$delta_ijc_inv = 1/batch_chan$delta_ijc
  ### -------COMBAT BATCH EFFECT ADJUSTMENT-------
  ## one iteration: delta first, then gamma, then lambda
  delta_stars = update_delta2(batch_chan, beta_c, omega_c,channel,image_var=image_var)
  check_delta_conv = delta_conv(batch_chan, delta_stars,image_var=image_var)
  batch_chan$delta_ijc = (delta_stars[match(batch_chan[,image_var],unlist(delta_stars[,image_var])),]$avg)
  batch_chan$delta_ijc_inv = 1/batch_chan$delta_ijc
  ## now update gamma
  gamma_stars = update_gamma(batch_chan, gamma_c, tau_c,channel,slide_var=slide_var)
  check_gamma_conv = gamma_conv(batch_chan, gamma_stars,slide_var=slide_var)
  batch_chan$gamma_ic = gamma_stars[match(batch_chan[,slide_var],unlist(gamma_stars[,slide_var])),]$avg
  ## now update lambda
  lambda_stars = update_lambda(batch_chan, lambda_c, eta_c,channel,image_var=image_var)
  check_lambda_conv = lambda_conv(batch_chan, lambda_stars,image_var=image_var)
  batch_chan$lambda_ijc = lambda_stars[match(batch_chan[,image_var],unlist(lambda_stars[,image_var])),]$avg
  total_mae = sum(check_gamma_conv,check_lambda_conv,check_delta_conv)
  iterations = 1
  ## first check of MAE
  print(paste0('Total MAE after ', iterations,' iterations: ', round(total_mae,8)))
  ## iterate until the summed MAE over all three parameter sets drops
  ## below tol
  while(total_mae > tol){
    delta_stars = update_delta2(batch_chan, beta_c, omega_c,channel,image_var=image_var)
    check_delta_conv = delta_conv(batch_chan, delta_stars,image_var=image_var)
    batch_chan$delta_ijc = (delta_stars[match(batch_chan[,image_var],unlist(delta_stars[,image_var])),]$avg)
    batch_chan$delta_ijc_inv = 1/batch_chan$delta_ijc
    ## now update gamma
    gamma_stars = update_gamma(batch_chan, gamma_c, tau_c,channel,slide_var=slide_var)
    check_gamma_conv = gamma_conv(batch_chan, gamma_stars,slide_var=slide_var)
    batch_chan$gamma_ic = gamma_stars[match(batch_chan[,slide_var],unlist(gamma_stars[,slide_var])),]$avg
    ## now update lambda
    lambda_stars = update_lambda(batch_chan, lambda_c, eta_c,channel,image_var=image_var)
    check_lambda_conv = lambda_conv(batch_chan, lambda_stars,image_var=image_var)
    batch_chan$lambda_ijc = lambda_stars[match(batch_chan[,image_var],unlist(lambda_stars[,image_var])),]$avg
    total_mae = sum(check_gamma_conv,check_lambda_conv,check_delta_conv)
    iterations = iterations + 1
    print(paste0('Total MAE after ', iterations,' iterations: ', round(total_mae,4)))
  }
  ### -------COMBAT BATCH EFFECT RESULTS-------
  ## remove the slide/image effects, rescale by the estimated image
  ## variance, and restore the original location/scale of the log10 data
  batch_chan$Y_ijc_star = (sigma_c/batch_chan$delta_ijc) * (batch_chan[,channel]-batch_chan$gamma_ic-batch_chan$lambda_ijc) + alpha_c
  ## add zeroes back in if needed.  `leftover` is only created by the
  ## (currently commented-out) zero-removal step, so guard against
  ## referencing an undefined object -- the previous version crashed here
  ## whenever remove_zeroes was TRUE (the default).
  if(remove_zeroes && exists("leftover", inherits = FALSE)){
    ## add back in zeroes
    leftover$Y_ijc_star = 0
    leftover[,colnames(batch_chan)[!(colnames(batch_chan) %in% colnames(leftover))]] = NA
    batch_chan = rbind(batch_chan,leftover)
  }
  return(batch_chan)
}
### NOTES ###
## dataset | SARDANA | mouse
## ------- | ------- | -----
## slide_var | SlideID | slideID
## image_var | image | view
## fov_var | Pos | Pos
## Run the per-channel ComBat adjustment over every channel listed in
## vars_to_adjust and return `data` with one extra "<channel>_Adjusted"
## column per channel.  Each per-channel result is also saved as an .rds
## file under <save_path>ComBat_adjustment_files/.
run_full_combat = function(data,
                           save_path,
                           vars_to_adjust,
                           slide_var,
                           image_var,
                           uid_var,
                           remove_zeroes,
                           tol=0.001){
  ## create combat adjustment dir if necessary
  if(!dir.exists(paste0(save_path,'ComBat_adjustment_files/'))){
    dir.create(paste0(save_path,'ComBat_adjustment_files/'))
  }
  ## remove n-of-1s in the full dataset
  h_cb = data
  #h_cb = remove_single_images(h_cb,image_var)
  ## accumulators below are only fed by commented-out code
  alphas = c(); gammas = c(); deltas = c()
  ## adjust within each channel
  for (i in 1:length(vars_to_adjust)){
    ## adjust for the channel
    chan_i = adjust_vals(channel=vars_to_adjust[i],
                         slide_var = slide_var,
                         image_var = image_var,
                         uid_var = uid_var,
                         tol=tol,
                         h = data,
                         remove_zeroes = remove_zeroes)
    ## save the dataframe
    saveRDS(chan_i, paste0(save_path,'ComBat_adjustment_files/',vars_to_adjust[i],'.rds'))
    ## replace the combined data with the combat-adjusted values
    h_cb[,paste0(vars_to_adjust[i],'_Adjusted')] = chan_i$Y_ijc_star
    ## save channel vals
    #als = unique(chan_i$alpha_c)
    #alphas = c(alphas, als[!is.na(als)])
    #gams = unique(chan_i$gamma_ic)
    #gammas = c(gammas, gams[!is.na(gams)])
    #dels = unique(chan_i$delta_ijc)
    #deltas = c(deltas, dels[!is.na(dels)])
  }
  ## scale to give data natural scale
  # grand_mean = exp(mean(alphas))
  # grand_var = exp(mean(sqrt(deltas)))
  # for(v in paste0(vars_to_adjust,'_Adjusted')){
  #   h_cb[h_cb[,v] != 0,v] = (h_cb[h_cb[,v] != 0,v] + grand_mean)/grand_var
  # }
  ## save the adjusted dataset
  return(h_cb)
}
## ---- END FUNCTIONS
| /combat_functions/all_combat_functions_new.R | no_license | ColemanRHarris/mxif_normalization | R | false | false | 12,758 | r | ### --- Script to adjust raw data with ComBat algorithm
### -------SETUP-------
require(dplyr)
require(tidyr)
## ---- COMBAT functions
## data adjustment function
## Drop every row belonging to an image with at most one cell (n-of-1
## images have no usable within-image variance).
## chan      : per-cell data frame
## image_var : name of the column identifying the image
## Returns chan restricted to images with at least two rows.
remove_single_images = function(chan, image_var){
  ## count cells per image with base table() and filter on the requested
  ## column -- the previous version counted by image_var but then filtered
  ## on a hard-coded `Pos` column, so it only worked when image_var=="Pos"
  counts = table(chan[[image_var]])
  singletons = names(counts)[counts <= 1]
  return(chan[!(chan[[image_var]] %in% singletons), ])
}
## internal function for delta functions
sqerr = function(x){sum((x - mean(x))^2)}
## update each iteration of the algo
## Update step for the slide-level means gamma_ic: per-slide channel means
## plus gamma_c/tau_c, divided by per-slide mean inverse variances plus
## 1/tau_c.  Returns one row per slide with the new estimate in `avg`.
update_gamma = function(batch_chan, gamma_c, tau_c,channel, slide_var){
  ## create numerator value
  # batch_chan$gamma_num = (batch_chan[,channel] -
  #                           batch_chan$alpha_c -
  #                           batch_chan$lambda_ijc)/batch_chan$delta_ijc
  gamma_num = batch_chan %>%
    group_by_at(slide_var) %>%
    summarise(avg = mean(get(channel)),.groups='drop')
  gamma_num$avg = gamma_num$avg + gamma_c/tau_c
  ## create denominator value
  gamma_denom = batch_chan %>%
    group_by_at(slide_var) %>%
    summarise(avg = mean(delta_ijc_inv),.groups='drop')
  gamma_denom$avg = gamma_denom$avg + (1/tau_c)
  gamma_ic_star = gamma_num
  gamma_ic_star$avg = gamma_ic_star$avg/gamma_denom$avg
  ## returns zero if only one slide
  ## (var() over a single slide effect makes tau_c NA)
  if(is.na(gamma_ic_star$avg[1])){gamma_ic_star$avg<-0}
  return(gamma_ic_star)
}
## Update step for the image-level means lambda_ijc: same shape as
## update_gamma() but grouped by image and using the (lambda_c, eta_c)
## hyperparameters.  Returns one row per image with the estimate in `avg`.
update_lambda = function(batch_chan, lambda_c, eta_c,channel,image_var){
  ##create numerator value
  # batch_chan$lambda_num = (batch_chan[,channel] -
  #                            batch_chan$alpha_c -
  #                            batch_chan$gamma_ic)/batch_chan$delta_ijc
  lambda_num = batch_chan %>%
    group_by_at(image_var) %>%
    summarise(avg = mean(get(channel)),.groups='drop')
  lambda_num$avg = lambda_num$avg + lambda_c/eta_c
  ## create denominator value
  lambda_denom = batch_chan %>%
    group_by_at(image_var) %>%
    summarise(avg = mean(delta_ijc_inv),.groups='drop')
  lambda_denom$avg = lambda_denom$avg + (1/eta_c)
  lambda_ijc_star = lambda_num
  lambda_ijc_star$avg = lambda_ijc_star$avg/lambda_denom$avg
  return(lambda_ijc_star)
}
## Update step for the image-level variances from the per-cell squared
## residuals.  NOTE: the pipeline's adjust_vals() calls update_delta2()
## instead; the call to this variant there is commented out.
update_delta = function(batch_chan, beta_c,omega_c,channel,image_var){
  ## prior scale beta_c plus the squared residual of each cell
  batch_chan$delta_num = beta_c + (batch_chan[,channel] -
                                     batch_chan$alpha_c -
                                     batch_chan$gamma_ic -
                                     batch_chan$lambda_ijc)^2
  delta_num = batch_chan %>%
    group_by_at(image_var) %>%
    summarise(avg = mean(delta_num),.groups='drop')
  ## per-image counts feed the denominator
  delta_denom = batch_chan %>%
    group_by_at(image_var) %>%
    count()
  delta_denom$n = delta_denom$n/2 + omega_c - 1
  delta_ijc_star = delta_num
  ## assumes delta_num and delta_denom share the same group ordering
  delta_ijc_star$avg = delta_ijc_star$avg/delta_denom$n
  return(delta_ijc_star)
}
## Update step for the image-level variances delta_ijc using the per-image
## sum of squared deviations (sqerr) and the prior (beta_c, omega_c).
## Returns one row per image with the updated value in `avg`.
update_delta2 = function(batch_chan, beta_c,omega_c,channel,image_var){
  ## per-image sum of squared deviations from the image mean
  delta_num = batch_chan %>%
    group_by_at(image_var) %>%
    summarise(avg = sqerr(get(channel)),.groups='drop')
  ## per-image counts for the denominator
  delta_denom = batch_chan %>%
    group_by_at(image_var) %>%
    count()
  delta_denom$n = delta_denom$n/2 + omega_c - 1
  delta_num$avg = 0.5*delta_num$avg + beta_c
  delta_ijc_star = delta_num
  ## assumes both grouped summaries share the same row order
  delta_ijc_star$avg = delta_ijc_star$avg/delta_denom$n
  ## Replace any NA results (e.g. from missing channel values) with a tiny
  ## positive variance.  The column vector is indexed directly because the
  ## earlier `delta_ijc_star[is.na(...),]$avg <- x` form raises
  ## "replacement has 1 row, data has 0 rows" on plain data frames when
  ## no rows are NA.
  delta_ijc_star$avg[is.na(delta_ijc_star$avg)] = 0.00001
  return(delta_ijc_star)
}
## checking convergence
## Convergence check: mean absolute difference between the gamma_ic values
## stored in batch_chan and the freshly estimated per-slide values.
gamma_conv = function(batch_chan, gamma_stars,slide_var){
  gams = batch_chan[,c(slide_var,'gamma_ic')] %>% distinct()
  ## align the stored values to the order of gamma_stars before differencing
  return(mean(abs(gams[match(unlist(gamma_stars[,slide_var]),
                             gams[,slide_var]),]$gamma_ic - gamma_stars$avg))) ## MAE
}
## Convergence check: mean absolute difference between the stored
## lambda_ijc values and the freshly estimated per-image values.
lambda_conv = function(batch_chan, lambda_stars,image_var){
  lambs = batch_chan[,c(image_var,'lambda_ijc')] %>% distinct()
  ## align the stored values to the order of lambda_stars before differencing
  return(mean(abs(lambs[match(unlist(lambda_stars[,image_var]),
                              lambs[,image_var]),]$lambda_ijc - lambda_stars$avg))) ## MAE
}
## Convergence check: mean absolute difference between the stored
## delta_ijc values and the freshly estimated per-image variances.
delta_conv = function(batch_chan, delta_stars,image_var){
  dels = batch_chan[,c(image_var,'delta_ijc')] %>% distinct()
  ## align the stored values to the order of delta_stars before differencing
  return(mean(abs(dels[match(unlist(delta_stars[,image_var]),
                             dels[,image_var]),]$delta_ijc - delta_stars$avg))) ## MAE
}
## function to combat-adjust for one channel
## ComBat-adjust one channel of the per-cell data frame `h`.
##
## channel       : intensity column to adjust
## slide_var     : slide identifier column (batch level 1)
## image_var     : image identifier column (batch level 2)
## uid_var       : unique cell identifier column
## h             : input per-cell data frame
## remove_zeroes : if TRUE, rows set aside as zeroes get re-appended after
##                 adjustment; the step that sets them aside is currently
##                 commented out, so usually nothing is re-appended
## tol           : convergence tolerance for the iterative updates
##
## Returns the per-cell data frame with adjusted values in Y_ijc_star.
adjust_vals = function(channel,slide_var,image_var,uid_var,h,remove_zeroes=TRUE,
                       tol = 0.0001){
  print(channel)
  ### ---- Subset the data for the ComBat analysis
  chan = as.data.frame(h[,c(uid_var,slide_var,image_var,channel)])
  chan$raw = chan[,channel]
  ## log10(x + 1), then standardize to mean 0 / sd 1
  chan[,channel] = log10(chan[,channel]+1)
  sigma_c = sd(chan[,channel])
  alpha_c = mean(chan[,channel])
  chan[,channel] = (chan[,channel] - alpha_c)/sigma_c
  ### -------COMBAT EMPIRICAL VALUES-------
  ## grand mean (zero after standardization)
  chan$alpha_c = mean(chan[,channel])
  ## slide means
  gamma_ic = chan %>%
    group_by_at(slide_var) %>%
    summarise(avg=mean(get(channel)), .groups = 'drop')
  chan$gamma_ic = gamma_ic[match(chan[,slide_var],unlist(gamma_ic[,slide_var])),]$avg
  ## image means
  lambda_ijc = chan %>%
    group_by_at(image_var) %>%
    summarise(avg=mean(get(channel)), .groups = 'drop')
  chan$lambda_ijc = lambda_ijc[match(chan[,image_var],unlist(lambda_ijc[,image_var])),]$avg
  ## image variances (NA for single-cell images)
  delta_ijc = chan %>%
    group_by_at(image_var) %>%
    summarise(v=var(get(channel)), .groups='drop')
  chan$delta_ijc = (delta_ijc[match(chan[,image_var],unlist(delta_ijc[,image_var])),]$v)
  ### -------COMBAT HYPERPARAMETERS-------
  ## slide-level mean and variance
  gamma_c = mean(chan$gamma_ic)
  tau_c = var(chan$gamma_ic)
  ## image-level mean and variance
  lambda_c = mean(chan$lambda_ijc)
  eta_c = var(chan$lambda_ijc)
  ## prior parameters for the image variances.  NOTE(review): standard
  ## ComBat moment matching has shape (M^2 + 2*S)/S, not (M + 2*S)/S --
  ## confirm intended (the original carried an "is this correct?" comment).
  M_c = mean(chan$delta_ijc)
  S_c = var(chan$delta_ijc)
  omega_c = (M_c + 2*S_c)/S_c
  beta_c = (M_c^3 + M_c*S_c)/S_c
  ### -------CALLING COMBAT BATCH EFFECTS FUNCTIONS-------
  batch_chan = chan ## duplicate the dataframe to iterate
  batch_chan$delta_ijc_inv = 1/batch_chan$delta_ijc
  ### -------COMBAT BATCH EFFECT ADJUSTMENT-------
  ## single iteration: delta, then gamma, then lambda
  delta_stars = update_delta2(batch_chan, beta_c, omega_c,channel,image_var=image_var)
  check_delta_conv = delta_conv(batch_chan, delta_stars,image_var=image_var)
  batch_chan$delta_ijc = (delta_stars[match(batch_chan[,image_var],unlist(delta_stars[,image_var])),]$avg)
  batch_chan$delta_ijc_inv = 1/batch_chan$delta_ijc
  ## now update gamma
  gamma_stars = update_gamma(batch_chan, gamma_c, tau_c,channel,slide_var=slide_var)
  check_gamma_conv = gamma_conv(batch_chan, gamma_stars,slide_var=slide_var)
  batch_chan$gamma_ic = gamma_stars[match(batch_chan[,slide_var],unlist(gamma_stars[,slide_var])),]$avg
  ## now update lambda
  lambda_stars = update_lambda(batch_chan, lambda_c, eta_c,channel,image_var=image_var)
  check_lambda_conv = lambda_conv(batch_chan, lambda_stars,image_var=image_var)
  batch_chan$lambda_ijc = lambda_stars[match(batch_chan[,image_var],unlist(lambda_stars[,image_var])),]$avg
  total_mae = sum(check_gamma_conv,check_lambda_conv,check_delta_conv)
  iterations = 1
  ## first check of MAE
  print(paste0('Total MAE after ', iterations,' iterations: ', round(total_mae,8)))
  ## repeat until the summed MAE falls below tol
  while(total_mae > tol){
    delta_stars = update_delta2(batch_chan, beta_c, omega_c,channel,image_var=image_var)
    check_delta_conv = delta_conv(batch_chan, delta_stars,image_var=image_var)
    batch_chan$delta_ijc = (delta_stars[match(batch_chan[,image_var],unlist(delta_stars[,image_var])),]$avg)
    batch_chan$delta_ijc_inv = 1/batch_chan$delta_ijc
    ## now update gamma
    gamma_stars = update_gamma(batch_chan, gamma_c, tau_c,channel,slide_var=slide_var)
    check_gamma_conv = gamma_conv(batch_chan, gamma_stars,slide_var=slide_var)
    batch_chan$gamma_ic = gamma_stars[match(batch_chan[,slide_var],unlist(gamma_stars[,slide_var])),]$avg
    ## now update lambda
    lambda_stars = update_lambda(batch_chan, lambda_c, eta_c,channel,image_var=image_var)
    check_lambda_conv = lambda_conv(batch_chan, lambda_stars,image_var=image_var)
    batch_chan$lambda_ijc = lambda_stars[match(batch_chan[,image_var],unlist(lambda_stars[,image_var])),]$avg
    total_mae = sum(check_gamma_conv,check_lambda_conv,check_delta_conv)
    iterations = iterations + 1
    print(paste0('Total MAE after ', iterations,' iterations: ', round(total_mae,4)))
  }
  ### -------COMBAT BATCH EFFECT RESULTS-------
  ## strip the slide and image effects, rescale by the image variance, and
  ## restore the original location/scale of the log10-transformed data
  batch_chan$Y_ijc_star = (sigma_c/batch_chan$delta_ijc) * (batch_chan[,channel]-batch_chan$gamma_ic-batch_chan$lambda_ijc) + alpha_c
  ## re-append zero rows only when they were actually set aside.
  ## `leftover` is produced solely by the commented-out removal step, so
  ## the unguarded original crashed here for remove_zeroes=TRUE (default).
  if(remove_zeroes && exists("leftover", inherits = FALSE)){
    leftover$Y_ijc_star = 0
    leftover[,colnames(batch_chan)[!(colnames(batch_chan) %in% colnames(leftover))]] = NA
    batch_chan = rbind(batch_chan,leftover)
  }
  return(batch_chan)
}
### NOTES ###
## dataset | SARDANA | mouse
## ------- | ------- | -----
## slide_var | SlideID | slideID
## image_var | image | view
## fov_var | Pos | Pos
## Apply the per-channel ComBat adjustment to every channel in
## vars_to_adjust.  Each channel's full result is saved as an .rds file
## under <save_path>ComBat_adjustment_files/, and the returned copy of
## `data` gains one "<channel>_Adjusted" column per channel.
run_full_combat = function(data,
                           save_path,
                           vars_to_adjust,
                           slide_var,
                           image_var,
                           uid_var,
                           remove_zeroes,
                           tol=0.001){
  ## make sure the per-channel output directory exists
  out_dir = paste0(save_path, 'ComBat_adjustment_files/')
  if (!dir.exists(out_dir)) {
    dir.create(out_dir)
  }
  adjusted = data
  for (v in vars_to_adjust) {
    ## adjust a single channel
    channel_result = adjust_vals(channel = v,
                                 slide_var = slide_var,
                                 image_var = image_var,
                                 uid_var = uid_var,
                                 tol = tol,
                                 h = data,
                                 remove_zeroes = remove_zeroes)
    ## persist the per-channel data frame
    saveRDS(channel_result, paste0(out_dir, v, '.rds'))
    ## append the combat-adjusted values to the combined data
    adjusted[, paste0(v, '_Adjusted')] = channel_result$Y_ijc_star
  }
  return(adjusted)
}
## ---- END FUNCTIONS
|
# Plain R ----------
# setwd(getSrcDirectory()[1])
# RStudio ----------
# NOTE(review): setwd() to the active-document directory ties this script
# to an interactive RStudio session; project-relative paths would make it
# runnable from the command line as well.
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
# Libraries ----------
library(dplyr)
library(openxlsx)
library(ggplot2)
library(stringr)
library(zoo)
library(urca)
library(vars)
library(tsDyn)
## Infix wrapper around sprintf(): `fmt %format% c(a, b, ...)` splices the
## vector of arguments into the format string.
`%format%` <- function(x, y) {
  arg_list <- c(list(x), y)
  do.call(sprintf, arg_list)
}
## Load one country's workbook ("<country>_data.xlsx") and build a yearly
## data frame of series.
## country : file-name prefix of the workbook
## years   : year labels for the output rows
## lag_max : determines how many columns are read per sheet
##           (columns 2 through lag_max + 2 of each sheet's first row)
## Returns a data frame with columns year, ict, gdp, l, k.
main <- function(country, years, lag_max) {
  # Get data ----------
  data_file_country <- "%s_data.xlsx" %format% c(country)
  sheet_names <- openxlsx::getSheetNames(data_file_country)
  analysis_data <- list()
  for (name in sheet_names) {
    N <- lag_max + 2
    # first data row of the sheet, transposed into a column
    analysis_data[[name]] <- t(openxlsx::read.xlsx(data_file_country, sheet = name)[1, 2:N])
  }
  new_data_country <- data.frame(year = years)
  # NOTE(review): the nominal series are *multiplied* by gdp_defl_lcu;
  # deflating nominal values usually divides by the deflator -- confirm
  # the orientation of the stored deflator series.
  new_data_country$ict <- analysis_data$ict * analysis_data$gdp_defl_lcu
  new_data_country$gdp <- analysis_data$gdp_lcu * analysis_data$gdp_defl_lcu
  new_data_country$l <- analysis_data$labor_total * analysis_data$wages_lcu * analysis_data$gdp_defl_lcu
  new_data_country$k <- analysis_data$gross_cap_form_lcu * analysis_data$gdp_defl_lcu
  return(new_data_country)
}
## Identical to main() except the ict series is taken as-is (no deflator
## factor applied) -- the name suggests it is meant for an ict measure
## that is not expressed in local currency; confirm against the workbook.
main_non_currency_ict <- function(country, years, lag_max) {
  # Get data ----------
  data_file_country <- "%s_data.xlsx" %format% c(country)
  sheet_names <- openxlsx::getSheetNames(data_file_country)
  analysis_data <- list()
  for (name in sheet_names) {
    N <- lag_max + 2
    # first data row of the sheet, transposed into a column
    analysis_data[[name]] <- t(openxlsx::read.xlsx(data_file_country, sheet = name)[1, 2:N])
  }
  new_data_country <- data.frame(year = years)
  new_data_country$ict <- analysis_data$ict
  new_data_country$gdp <- analysis_data$gdp_lcu * analysis_data$gdp_defl_lcu
  new_data_country$l <- analysis_data$labor_total * analysis_data$wages_lcu * analysis_data$gdp_defl_lcu
  new_data_country$k <- analysis_data$gross_cap_form_lcu * analysis_data$gdp_defl_lcu
  return(new_data_country)
}
# Run ----------
## Load the four US ict variants, interpolate/clean the ict series, and
## run a Johansen cointegration test on the log-level system.
countries <- c("UScuip", "USrd", "USse", "USfix")
years <- 1990:2019
lag_max <- years[length(years)] - years[1]
acf_graph_title <- "Auto-Correlation Function"
num_of_possible_lags <- 1:3
df1 <- main(country = countries[1], years = years, lag_max = lag_max)
df2 <- main(country = countries[2], years = years, lag_max = lag_max)
df3 <- main(country = countries[3], years = years, lag_max = lag_max)
df4 <- main_non_currency_ict(country = countries[4], years = years, lag_max = lag_max)
# View(df1)
# View(df2)
# View(df3)
# View(df4)
## spline-interpolate missing ict values; negative fitted values become NA
df1$ict <- ifelse(na.spline.default(df1$ict) < 0, NA, na.spline.default(df1$ict))
df2$ict <- ifelse(na.spline.default(df2$ict) < 0, NA, na.spline.default(df2$ict))
df3$ict <- ifelse(na.spline.default(df3$ict) < 0, NA, na.spline.default(df3$ict))
df4$ict <- ifelse(na.spline.default(df4$ict) < 0, NA, na.spline.default(df4$ict))
icts <- data.frame(df1$ict,
                   df2$ict,
                   df3$ict,
                   df4$ict)
## replace any remaining NA with the column's first observed value
for (col in names(icts)) {
  for (i in 1:NROW(icts[[col]])) {
    if (is.na(icts[[col]][i])) {
      icts[[col]][i] <- icts[[col]][!is.na(icts[[col]])][1]
    }
  }
}
## log levels of the ict series
icts0 <- as.data.frame(log(as.matrix(icts)))
names(icts0) <- countries
## year-over-year log differences (growth rates).  diff() has no
## data.frame method (it would try to subtract list elements and fail with
## "non-numeric argument"), so convert to a matrix first.
icts <- diff(as.matrix(icts0), lag = 1)
icts <- as.data.frame(icts)
names(icts) <- countries
# test_variance_df <- data.frame(value = unlist(icts),
#                                group = as.factor(rep(1:4, each = NROW(icts))))
# View(test_variance_df)
#
# anova_model <- aov(data = test_variance_df, value ~ group)
# summary(anova_model)
# oneway.test(data = test_variance_df, value ~ group, var.equal = TRUE)
# cor(icts)
# cor.test(icts$UScuip, icts$USrd, method = "pearson")
# cor.test(icts$UScuip, icts$USse, method = "pearson")
# cor.test(icts$UScuip, icts$USfix, method = "pearson")
# cor.test(icts$USrd, icts$USse, method = "pearson")
# cor.test(icts$USrd, icts$USfix, method = "pearson")
## log levels of GDP, labor compensation and capital formation
other <- as.data.frame(log(as.matrix(data.frame(df1$gdp,
                                                df1$l,
                                                df1$k))))
names(other) <- c("USgdp", "USlabor", "UScapital")
## cointegration system uses the log *levels* (icts0), not the differences
df <- cbind(other, icts0)
optimal_VAR <- VARselect(df,
                         lag.max = num_of_possible_lags[length(num_of_possible_lags)],
                         type = "const")
optimal_VAR_lag <- optimal_VAR$selection[1] - 1
## NOTE(review): ca.jo() requires K >= 2; if the AIC-selected VAR lag is
## 1 or 2, optimal_VAR_lag becomes 0 or 1 and this call errors.
coint_relations <- ca.jo(df,
                         type = "eigen",
                         ecdet = "const",
                         K = optimal_VAR_lag,
                         spec = "longrun")
print(summary(coint_relations))
# vecm_fit <- cajorls(coint_relations,
#                     r = 3)
# print(vecm_fit)
# print(summary(vecm_fit$rlm))
#
# to_var <- vec2var(coint_relations)
# print(serial.test(to_var, type = c("PT.asymptotic")))
# print(arch.test(to_var))
# print(normality.test(to_var))
# # plotres(coint_relations)
# plot(fevd(to_var))
| /extra_dynamics.R | no_license | FunnyRabbitIsAHabbit/Paper_Bachelor | R | false | false | 4,972 | r | # Plain R ----------
# setwd(getSrcDirectory()[1])
# RStudio ----------
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
# Libraries ----------
library(dplyr)
library(openxlsx)
library(ggplot2)
library(stringr)
library(zoo)
library(urca)
library(vars)
library(tsDyn)
## sprintf() as an infix operator: the left operand is the format string,
## the right operand is a vector of values spliced into it.
`%format%` <- function(x, y) {
  do.call(sprintf, c(list(x), as.list(y)))
}
## Read "<country>_data.xlsx" (first data row of every sheet, columns
## 2..lag_max+2) and assemble a yearly data frame (year, ict, gdp, l, k).
main <- function(country, years, lag_max) {
  # Get data ----------
  data_file_country <- "%s_data.xlsx" %format% c(country)
  sheet_names <- openxlsx::getSheetNames(data_file_country)
  analysis_data <- list()
  for (name in sheet_names) {
    N <- lag_max + 2
    analysis_data[[name]] <- t(openxlsx::read.xlsx(data_file_country, sheet = name)[1, 2:N])
  }
  new_data_country <- data.frame(year = years)
  # NOTE(review): series are multiplied by gdp_defl_lcu here; deflating
  # usually divides -- confirm the deflator's orientation.
  new_data_country$ict <- analysis_data$ict * analysis_data$gdp_defl_lcu
  new_data_country$gdp <- analysis_data$gdp_lcu * analysis_data$gdp_defl_lcu
  new_data_country$l <- analysis_data$labor_total * analysis_data$wages_lcu * analysis_data$gdp_defl_lcu
  new_data_country$k <- analysis_data$gross_cap_form_lcu * analysis_data$gdp_defl_lcu
  return(new_data_country)
}
## Variant of main() that leaves the ict series untouched by the deflator
## (per its name, for an ict measure not expressed in currency units).
main_non_currency_ict <- function(country, years, lag_max) {
  # Get data ----------
  data_file_country <- "%s_data.xlsx" %format% c(country)
  sheet_names <- openxlsx::getSheetNames(data_file_country)
  analysis_data <- list()
  for (name in sheet_names) {
    N <- lag_max + 2
    analysis_data[[name]] <- t(openxlsx::read.xlsx(data_file_country, sheet = name)[1, 2:N])
  }
  new_data_country <- data.frame(year = years)
  new_data_country$ict <- analysis_data$ict
  new_data_country$gdp <- analysis_data$gdp_lcu * analysis_data$gdp_defl_lcu
  new_data_country$l <- analysis_data$labor_total * analysis_data$wages_lcu * analysis_data$gdp_defl_lcu
  new_data_country$k <- analysis_data$gross_cap_form_lcu * analysis_data$gdp_defl_lcu
  return(new_data_country)
}
# Run ----------
## Load the four US ict variants, clean the ict series, and run a Johansen
## cointegration test on the log-level system.
countries <- c("UScuip", "USrd", "USse", "USfix")
years <- 1990:2019
lag_max <- years[length(years)] - years[1]
acf_graph_title <- "Auto-Correlation Function"
num_of_possible_lags <- 1:3
df1 <- main(country = countries[1], years = years, lag_max = lag_max)
df2 <- main(country = countries[2], years = years, lag_max = lag_max)
df3 <- main(country = countries[3], years = years, lag_max = lag_max)
df4 <- main_non_currency_ict(country = countries[4], years = years, lag_max = lag_max)
# View(df1)
# View(df2)
# View(df3)
# View(df4)
## spline-interpolate missing ict values; negative fits become NA
df1$ict <- ifelse(na.spline.default(df1$ict) < 0, NA, na.spline.default(df1$ict))
df2$ict <- ifelse(na.spline.default(df2$ict) < 0, NA, na.spline.default(df2$ict))
df3$ict <- ifelse(na.spline.default(df3$ict) < 0, NA, na.spline.default(df3$ict))
df4$ict <- ifelse(na.spline.default(df4$ict) < 0, NA, na.spline.default(df4$ict))
icts <- data.frame(df1$ict,
                   df2$ict,
                   df3$ict,
                   df4$ict)
## replace any remaining NA with the column's first observed value
for (col in names(icts)) {
  for (i in 1:NROW(icts[[col]])) {
    if (is.na(icts[[col]][i])) {
      icts[[col]][i] <- icts[[col]][!is.na(icts[[col]])][1]
    }
  }
}
icts0 <- as.data.frame(log(as.matrix(icts)))
names(icts0) <- countries
## NOTE(review): diff() has no data.frame method -- on a data.frame it
## tries to subtract list elements and fails with "non-numeric argument";
## wrap icts0 in as.matrix() for the intended row-wise differences.
icts <- diff(icts0, lag = 1)
icts <- as.data.frame(icts)
names(icts) <- countries
# test_variance_df <- data.frame(value = unlist(icts),
#                                group = as.factor(rep(1:4, each = NROW(icts))))
# View(test_variance_df)
#
# anova_model <- aov(data = test_variance_df, value ~ group)
# summary(anova_model)
# oneway.test(data = test_variance_df, value ~ group, var.equal = TRUE)
# cor(icts)
# cor.test(icts$UScuip, icts$USrd, method = "pearson")
# cor.test(icts$UScuip, icts$USse, method = "pearson")
# cor.test(icts$UScuip, icts$USfix, method = "pearson")
# cor.test(icts$USrd, icts$USse, method = "pearson")
# cor.test(icts$USrd, icts$USfix, method = "pearson")
## log levels of GDP, labor compensation and capital formation
other <- as.data.frame(log(as.matrix(data.frame(df1$gdp,
                                                df1$l,
                                                df1$k))))
names(other) <- c("USgdp", "USlabor", "UScapital")
## the cointegration system uses the log *levels* (icts0)
df <- cbind(other, icts0)
optimal_VAR <- VARselect(df,
                         lag.max = num_of_possible_lags[length(num_of_possible_lags)],
                         type = "const")
optimal_VAR_lag <- optimal_VAR$selection[1] - 1
## NOTE(review): ca.jo() requires K >= 2; a selected lag of 1 or 2 makes
## optimal_VAR_lag 0 or 1 and this call errors.
coint_relations <- ca.jo(df,
                         type = "eigen",
                         ecdet = "const",
                         K = optimal_VAR_lag,
                         spec = "longrun")
print(summary(coint_relations))
# vecm_fit <- cajorls(coint_relations,
#                     r = 3)
# print(vecm_fit)
# print(summary(vecm_fit$rlm))
#
# to_var <- vec2var(coint_relations)
# print(serial.test(to_var, type = c("PT.asymptotic")))
# print(arch.test(to_var))
# print(normality.test(to_var))
# # plotres(coint_relations)
# plot(fevd(to_var))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{gap_filter}
\alias{gap_filter}
\title{Filter on gap statistics for a given date range}
\usage{
gap_filter(x, date_range, c_d_min = 80, c_m_min = 80, lg_max = 6)
}
\arguments{
\item{x}{tibble containing columns with gaps statistics (c_d, c_m and l_g)}
\item{date_range}{date range for the gap statistics, character formatted as "start-end" in years}
\item{c_d_min}{minimum daily completeness}
\item{c_m_min}{minimum monthly completeness}
\item{lg_max}{maximum gap length}
}
\value{
x filtered according to c_d_min, c_m_min and lg_max for the given date range
}
\description{
Extract rows whose gap statistics (as computed by \code{\link[=gap_statistics]{gap_statistics()}}) match the given criteria.
}
| /man/gap_filter.Rd | no_license | jthurner/baseflowchile | R | false | true | 790 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{gap_filter}
\alias{gap_filter}
\title{Filter on gap statistics for a given date range}
\usage{
gap_filter(x, date_range, c_d_min = 80, c_m_min = 80, lg_max = 6)
}
\arguments{
\item{x}{tibble containing columns with gaps statistics (c_d, c_m and l_g)}
\item{date_range}{date range for the gap statistics, character formatted as "start-end" in years}
\item{c_d_min}{minimum daily completeness}
\item{c_m_min}{minimum monthly completeness}
\item{lg_max}{maximum gap length}
}
\value{
x filtered according to c_d_min, c_m_min and lg_max for the given date range
}
\description{
Extract rows whose gap statistics (as computed by \code{\link[=gap_statistics]{gap_statistics()}}) match the given criteria.
}
|
library(readxl)
# Load the spreadsheet into a data frame
df <- read_excel("dados/umses_alunos_2018.xlsx")
# The data being used has rows/columns with no values at all, which makes
# the table come out with fewer rows when read
# NOTE(review): `nrow` shadows base::nrow() for the rest of this script.
nrow <- as.table(c(0, 0, 0, 0))
names(nrow) <- c(1, 2, 3, 4)
# cross-tabulate academic-use answers against age groups
graf <- table(data.frame(df$usoacademico, df$idade))
# Concatenate the received data with a blank column, to make the data
# more complete
ngraf <- cbind(graf[,1:4], nrow, graf[,5])
colnames(ngraf) <- c(1, 2, 3, 4, 5, 6)
# age-bracket labels for the x axis
lbls = c("16-20", "21-25", "26-30", "31-35", "36-40", "40+")
png("graficos/relacao-uso-midias-sociais-educacao-por-idade.png", width=600, height=600)
barplot(ngraf,
        main="Relação entre respostas sobre uso de mídias\nsociais na educação e idade",
        beside=TRUE,
        names.arg=lbls,
        ylim=c(0, max(ngraf) + 5),
        col=rainbow(4, s=.3),
        xlab="Idade",
        ylab="Quantidade de respostas")
legend("topright", c("Não", "Sim", "Sim, com restrições", "Não sei/não tenho opinião"), fill=rainbow(4, s=.3))
dev.off()
| /codigos/relacao-uso-midias-sociais-educacao-por-idade.R | no_license | LInDa-ProPesq/Grupo-2 | R | false | false | 1,100 | r | library(readxl)
# Read the survey workbook into a data frame
df <- read_excel("dados/umses_alunos_2018.xlsx")
# Some rows/columns in the source data are completely empty, so the table
# arrives with fewer rows than the original sheet
# NOTE(review): `nrow` shadows base::nrow() from here on.
nrow <- as.table(c(0, 0, 0, 0))
names(nrow) <- c(1, 2, 3, 4)
# contingency table of academic-use answers vs. age
graf <- table(data.frame(df$usoacademico, df$idade))
# Splice a blank column between the observed columns so all six age
# brackets are represented
ngraf <- cbind(graf[,1:4], nrow, graf[,5])
colnames(ngraf) <- c(1, 2, 3, 4, 5, 6)
# x-axis labels (age brackets)
lbls = c("16-20", "21-25", "26-30", "31-35", "36-40", "40+")
png("graficos/relacao-uso-midias-sociais-educacao-por-idade.png", width=600, height=600)
barplot(ngraf,
        main="Relação entre respostas sobre uso de mídias\nsociais na educação e idade",
        beside=TRUE,
        names.arg=lbls,
        ylim=c(0, max(ngraf) + 5),
        col=rainbow(4, s=.3),
        xlab="Idade",
        ylab="Quantidade de respostas")
legend("topright", c("Não", "Sim", "Sim, com restrições", "Não sei/não tenho opinião"), fill=rainbow(4, s=.3))
dev.off()
|
## Per-day opening statistics for the unique-business table: count of
## businesses marked not open, plus the mean opening/closing hour
## (NA entries excluded) for each weekday.
uniquebus <- readRDS("C:/Users/bdaro_000/Sociology/Dissertation/Data and Code/RData/BusinessData.rds")
HoursStats <- data.frame(Day = c("Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"),
                         stringsAsFactors = FALSE)
## sum of the <Day>NotOpen indicator columns
HoursStats$NotOpen <- vapply(paste0(HoursStats$Day, "NotOpen"),
                             function(column) sum(uniquebus[[column]]),
                             numeric(1), USE.NAMES = FALSE)
## mean of the <Day>Open / <Day>Close columns, NAs removed
HoursStats$OpenHour <- vapply(paste0(HoursStats$Day, "Open"),
                              function(column) mean(uniquebus[[column]], na.rm = TRUE),
                              numeric(1), USE.NAMES = FALSE)
HoursStats$CloseHour <- vapply(paste0(HoursStats$Day, "Close"),
                               function(column) mean(uniquebus[[column]], na.rm = TRUE),
                               numeric(1), USE.NAMES = FALSE)
| /Archived/Hour Stats.R | no_license | BrianAronson/Competitive-Networks-Yelp | R | false | false | 1,110 | r | uniquebus<-readRDS("C:/Users/bdaro_000/Sociology/Dissertation/Data and Code/RData/BusinessData.rds")
## Build the per-day summary table: start from an empty 7x4 frame and fill
## each column with hand-listed per-day aggregates.
## NOTE(review): the 21 near-identical sum()/mean() calls below could be
## collapsed into a vapply() over paste0(Day, suffix) column names.
HoursStats<-as.data.frame(matrix(nrow=7,ncol=4))
names(HoursStats)<-c("Day","NotOpen","OpenHour","CloseHour")
HoursStats$Day<-c("Monday","Tuesday","Wednesday","Thursday","Friday","Saturday","Sunday")
## count of businesses flagged not open on each day
HoursStats$NotOpen<-c(sum(uniquebus$MondayNotOpen),sum(uniquebus$TuesdayNotOpen),sum(uniquebus$WednesdayNotOpen),sum(uniquebus$ThursdayNotOpen),sum(uniquebus$FridayNotOpen),sum(uniquebus$SaturdayNotOpen),sum(uniquebus$SundayNotOpen))
## mean opening / closing hour per day, NAs removed
HoursStats$OpenHour<-c(mean(uniquebus$MondayOpen,na.rm=T),mean(uniquebus$TuesdayOpen,na.rm=T),mean(uniquebus$WednesdayOpen,na.rm=T),mean(uniquebus$ThursdayOpen,na.rm=T),mean(uniquebus$FridayOpen,na.rm=T),mean(uniquebus$SaturdayOpen,na.rm=T),mean(uniquebus$SundayOpen,na.rm=T))
HoursStats$CloseHour<-c(mean(uniquebus$MondayClose,na.rm=T),mean(uniquebus$TuesdayClose,na.rm=T),mean(uniquebus$WednesdayClose,na.rm=T),mean(uniquebus$ThursdayClose,na.rm=T),mean(uniquebus$FridayClose,na.rm=T),mean(uniquebus$SaturdayClose,na.rm=T),mean(uniquebus$SundayClose,na.rm=T))
|
## Plot 1: histogram of Global Active Power on 2007-02-01 and 2007-02-02,
## written to plot1.png.  Uses base R only -- the previous version called
## lubridate::dmy() and chron::chron() without loading either package, so
## it failed when run standalone.
data <- read.table("household_power_consumption.txt", sep = ";", header = TRUE,
                   na.strings = "?")
## parse day/month/year dates with base R; the Time column is not used by
## this plot and is left as read
data$Date <- as.Date(as.character(data$Date), format = "%d/%m/%Y")
## keep only the two target days
data1 <- data[data$Date == "2007-02-01" | data$Date == "2007-02-02", ]
data1$Global_active_power <- as.numeric(as.character(data1$Global_active_power))
png(file = "plot1.png")
hist(data1$Global_active_power, col = "red", main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)")
dev.off()
| /plot1.R | no_license | KarthikGampala/ExData_Plotting1 | R | false | false | 463 | r | data <- read.table("household_power_consumption.txt",sep=";",header=TRUE)
## NOTE(review): dmy() comes from lubridate and chron() from the chron
## package, but neither library is loaded in this script -- it only works
## if they were attached earlier in the session.
data$Date <- dmy(as.character(data$Date))
## time-of-day column; not used again below
data$Time <- chron(times=as.character(data$Time))
## keep the two days of interest
data1 <- data[data$Date=="2007-02-01" | data$Date=="2007-02-02" ,]
## "?" markers were not declared as NA on read, so the column arrives as
## text; coercion turns the non-numeric entries into NA (with a warning)
data1$Global_active_power <- as.numeric(as.character(data1$Global_active_power))
png(file="plot1.png")
hist(data1$Global_active_power,col="red",main="Global Active Power",xlab="Global Active Power (kilowatts)")
dev.off()
|
## Pull aggregated change-history metrics for the "kernel" module from a
## local MySQL database and rebalance the classes with SMOTE (ubSMOTE).
library(DBI)
library(lattice)
library(Hmisc)
library(dplyr)
library(RMySQL)
library(plotrix)
library(reshape2)
library(unbalanced)
## NOTE(review): database credentials are hard-coded; move them to a
## config file or environment variables before sharing this script.
con <- dbConnect(MySQL(), user = 'root', password = 'admin', host = 'localhost', dbname='changehistory')
#vetorPaths <- c("dom","javascript","javascript_extras","javascript_xpconnect","layout_rendering","libraries","kernel","network","webpage_structure","widget")
#for(i in vetorPaths){
querryTable="kernelClassify"
querryBefore="SELECT func,cveID, module, vulnerability,vulnerabilitytype, SUM(NCEC),SUM(NCMC),SUM(NFCEC),SUM(NFCMC),SUM(NMEC),SUM(NMMC),SUM(NVEC),SUM(NVMC) FROM "
querryAfter=" GROUP BY func,file_path,vulnerabilitytype,module;"
kernelclassify <- dbGetQuery(con,paste(querryBefore,querryTable,querryAfter,sep=""))
## split by class for inspection
kernel_vulnerabilities <- subset(kernelclassify,vulnerability == 1)
kernel_without_vulnerabilities <- subset(kernelclassify,vulnerability == 0)
n<-ncol(kernelclassify)
#output<-kernelclassify$vulnerability
## NOTE(review): ifelse(cond, 0, no = FALSE) maps *every* row to 0
## (TRUE -> 0, FALSE -> FALSE, coerced to 0), so the factor below has a
## single level; this was probably meant to be something like
## ifelse(vulnerability == 1, 1, 0).  Confirm before trusting the output.
kernelclassify$vulnerability<-factor(ifelse(kernelclassify$vulnerability==1,0,no=FALSE))
output<-kernelclassify$vulnerability
## NOTE(review): only the *last* column is dropped here, so the predictor
## matrix still contains the vulnerability label itself -- verify the
## intended column selection.
input<-kernelclassify[,-n]
input
## SMOTE oversampling of the minority class / undersampling of the majority
kernelclassify<-ubSMOTE(X= input, Y=output, perc.over = 200, k = 5, perc.under = 200, verbose = TRUE)
newData<-cbind(kernelclassify$X, kernelclassify$Y)
newData
dbDisconnect(con)
| /Scripts/R Scripts/3.R | no_license | gustavo95/vulnerability-detection-tool | R | false | false | 1,317 | r | library(DBI)
library(lattice)
library(Hmisc)
library(dplyr)
library(RMySQL)
library(plotrix)
library(reshape2)
library(unbalanced)
con <- dbConnect(MySQL(), user = 'root', password = 'admin', host = 'localhost', dbname='changehistory')
#vetorPaths <- c("dom","javascript","javascript_extras","javascript_xpconnect","layout_rendering","libraries","kernel","network","webpage_structure","widget")
#for(i in vetorPaths){
querryTable="kernelClassify"
querryBefore="SELECT func,cveID, module, vulnerability,vulnerabilitytype, SUM(NCEC),SUM(NCMC),SUM(NFCEC),SUM(NFCMC),SUM(NMEC),SUM(NMMC),SUM(NVEC),SUM(NVMC) FROM "
querryAfter=" GROUP BY func,file_path,vulnerabilitytype,module;"
kernelclassify <- dbGetQuery(con,paste(querryBefore,querryTable,querryAfter,sep=""))
kernel_vulnerabilities <- subset(kernelclassify,vulnerability == 1)
kernel_without_vulnerabilities <- subset(kernelclassify,vulnerability == 0)
n<-ncol(kernelclassify)
#output<-kernelclassify$vulnerability
kernelclassify$vulnerability<-factor(ifelse(kernelclassify$vulnerability==1,0,no=FALSE))
output<-kernelclassify$vulnerability
input<-kernelclassify[,-n]
input
kernelclassify<-ubSMOTE(X= input, Y=output, perc.over = 200, k = 5, perc.under = 200, verbose = TRUE)
newData<-cbind(kernelclassify$X, kernelclassify$Y)
newData
dbDisconnect(con)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ui.binormal.R
\name{ui.binormal}
\alias{ui.binormal}
\title{Function for the determination of the thresholds of an uncertain interval for
bi-normal distributed test scores that are considered as inconclusive.}
\usage{
ui.binormal(
ref,
test,
UI.Se = 0.55,
UI.Sp = 0.55,
intersection = NULL,
start = NULL,
print.level = 0
)
}
\arguments{
\item{ref}{The reference standard. A column in a data frame or a vector
indicating the classification by the reference test. The reference standard
must be coded either as 0 (absence of the condition) or 1 (presence of the
condition).}
\item{test}{The index test or test under evaluation. A column in a dataset or
vector indicating the test results in a continuous scale.}
\item{UI.Se}{(default = .55). Desired sensitivity of the test scores within the
uncertain interval. A value <= .5 is not allowed.}
\item{UI.Sp}{(default = .55). Desired specificity of the test scores within the
uncertain interval. A value <= .5 is not allowed.}
\item{intersection}{Default NULL. If not null, the supplied value is used as
the estimate of the intersection of the two bi-normal distributions.
Otherwise, it is calculated using the function
\code{\link{get.intersection}}.}
\item{start}{Default NULL. If not null, the first two values of the supplied
vector are used as the starting values for the \code{nloptr} optimization
function.}
\item{print.level}{Default is 0. The option print_level controls how much
output is shown during the optimization process. Possible values: 0)
(default) no output; 1) show iteration number and value of objective
function; 2) 1 + show value of (in)equalities; 3) 2 + show value of controls.}
}
\value{
List of values: \describe{ \item{$status: }{Integer value with the
status of the optimization (0 is success).} \item{$message: }{More
informative message with the status of the optimization} \item{$results:
}{Vector with the following values:} \itemize{ \item{exp.UI.Sp: }{The
population value of the specificity in the Uncertain Interval, given mu0,
sd0, mu1 and sd1. This value should be very near the supplied value of Sp.}
\item{exp.UI.Se: }{The population value of the sensitivity in the Uncertain
Interval, given mu0, sd0, mu1 and sd1. This value should be very near the
supplied value of UI.Se.} \item{mu0: }{The value that has been supplied for
mu0.} \item{sd0: }{The value that has been supplied for sd0.} \item{mu1:
}{The value that has been supplied for mu1.} \item{sd1: }{The value that
has been supplied for sd1.} } \item{$solution: }{Vector with the following
values:} \itemize{ \item{L: }{The population value of the lower threshold
of the Uncertain Interval.} \item{U: }{The population value of the upper
threshold of the Uncertain Interval.} } }
}
\description{
Function for the determination of the thresholds of an uncertain interval for
bi-normal distributed test scores that are considered as inconclusive.
}
\details{
{ This function can be used for a test with bi-normal distributed
scores. The Uncertain Interval is generally defined as an interval below and
above the intersection, where the densities of the two distributions of
patients with and without the targeted condition are about equal. These test
scores are considered as inconclusive for the decision for or against the
targeted condition. This function uses for the definition of the uncertain
interval a sensitivity and specificity of the uncertain test scores below a
desired value (default .55).
Only a single intersection is assumed (or a second intersection where the
overlap is negligible). If another intersection exists and the overlap around
this intersection is considerable, the test with such a non-negligible
overlap is problematic and difficult to apply and interpret.
In general, when estimating decision thresholds, a sample of sufficient size
should be used. It is recommended to use at least a sample of 100 patients
with the targeted condition, and a 'healthy' sample (without the targeted
condition) of the same size or larger.
The function uses an optimization algorithm from the nlopt library
(https://nlopt.readthedocs.io/en/latest/NLopt_Algorithms/): the sequential
quadratic programming (SQP) algorithm for nonlinearly constrained
gradient-based optimization (supporting both inequality and equality
constraints), based on the implementation by Dieter Kraft (1988; 1944).
}
}
\examples{
test=c(rnorm(500,0,1), rnorm(500,1.6,1))
ref=c(rep(0,500), rep(1,500))
plotMD(ref, test, model='binormal')
ui.binormal(ref, test)
# test scores controls > patients works correctly from version 0.7 or higher
ui.binormal(ref, -test)
ref=c(rep(1,500), rep(0,500))
plotMD(ref, test, model='binormal')
ui.binormal(ref, test)
}
\references{
Dieter Kraft, "A software package for sequential quadratic
programming", Technical Report DFVLR-FB 88-28, Institut für Dynamik der
Flugsysteme, Oberpfaffenhofen, July 1988.
Dieter Kraft, "Algorithm 733: TOMP–Fortran modules for optimal control
calculations," ACM Transactions on Mathematical Software, vol. 20, no. 3,
pp. 262-281 (1994).
Landsheer, J. A. (2018). The Clinical Relevance of Methods for Handling
Inconclusive Medical Test Results: Quantification of Uncertainty in Medical
Decision-Making and Screening. Diagnostics, 8(2), 32.
https://doi.org/10.3390/diagnostics8020032
}
| /man/ui.binormal.Rd | no_license | cran/UncertainInterval | R | false | true | 5,521 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ui.binormal.R
\name{ui.binormal}
\alias{ui.binormal}
\title{Function for the determination of the thresholds of an uncertain interval for
bi-normal distributed test scores that are considered as inconclusive.}
\usage{
ui.binormal(
ref,
test,
UI.Se = 0.55,
UI.Sp = 0.55,
intersection = NULL,
start = NULL,
print.level = 0
)
}
\arguments{
\item{ref}{The reference standard. A column in a data frame or a vector
indicating the classification by the reference test. The reference standard
must be coded either as 0 (absence of the condition) or 1 (presence of the
condition).}
\item{test}{The index test or test under evaluation. A column in a dataset or
vector indicating the test results in a continuous scale.}
\item{UI.Se}{(default = .55). Desired sensitivity of the test scores within the
uncertain interval. A value <= .5 is not allowed.}
\item{UI.Sp}{(default = .55). Desired specificity of the test scores within the
uncertain interval. A value <= .5 is not allowed.}
\item{intersection}{Default NULL. If not null, the supplied value is used as
the estimate of the intersection of the two bi-normal distributions.
Otherwise, it is calculated using the function
\code{\link{get.intersection}}.}
\item{start}{Default NULL. If not null, the first two values of the supplied
vector are used as the starting values for the \code{nloptr} optimization
function.}
\item{print.level}{Default is 0. The option print_level controls how much
output is shown during the optimization process. Possible values: 0)
(default) no output; 1) show iteration number and value of objective
function; 2) 1 + show value of (in)equalities; 3) 2 + show value of controls.}
}
\value{
List of values: \describe{ \item{$status: }{Integer value with the
status of the optimization (0 is success).} \item{$message: }{More
informative message with the status of the optimization} \item{$results:
}{Vector with the following values:} \itemize{ \item{exp.UI.Sp: }{The
population value of the specificity in the Uncertain Interval, given mu0,
sd0, mu1 and sd1. This value should be very near the supplied value of Sp.}
\item{exp.UI.Se: }{The population value of the sensitivity in the Uncertain
Interval, given mu0, sd0, mu1 and sd1. This value should be very near the
supplied value of UI.Se.} \item{mu0: }{The value that has been supplied for
mu0.} \item{sd0: }{The value that has been supplied for sd0.} \item{mu1:
}{The value that has been supplied for mu1.} \item{sd1: }{The value that
has been supplied for sd1.} } \item{$solution: }{Vector with the following
values:} \itemize{ \item{L: }{The population value of the lower threshold
of the Uncertain Interval.} \item{U: }{The population value of the upper
threshold of the Uncertain Interval.} } }
}
\description{
Function for the determination of the thresholds of an uncertain interval for
bi-normal distributed test scores that are considered as inconclusive.
}
\details{
{ This function can be used for a test with bi-normal distributed
scores. The Uncertain Interval is generally defined as an interval below and
above the intersection, where the densities of the two distributions of
patients with and without the targeted condition are about equal. These test
scores are considered as inconclusive for the decision for or against the
targeted condition. This function uses for the definition of the uncertain
interval a sensitivity and specificity of the uncertain test scores below a
desired value (default .55).
Only a single intersection is assumed (or a second intersection where the
overlap is negligible). If another intersection exists and the overlap around
this intersection is considerable, the test with such a non-negligible
overlap is problematic and difficult to apply and interpret.
In general, when estimating decision thresholds, a sample of sufficient size
should be used. It is recommended to use at least a sample of 100 patients
with the targeted condition, and a 'healthy' sample (without the targeted
condition) of the same size or larger.
The function uses an optimization algorithm from the nlopt library
(https://nlopt.readthedocs.io/en/latest/NLopt_Algorithms/): the sequential
quadratic programming (SQP) algorithm for nonlinearly constrained
gradient-based optimization (supporting both inequality and equality
constraints), based on the implementation by Dieter Kraft (1988; 1944).
}
}
\examples{
test=c(rnorm(500,0,1), rnorm(500,1.6,1))
ref=c(rep(0,500), rep(1,500))
plotMD(ref, test, model='binormal')
ui.binormal(ref, test)
# test scores controls > patients works correctly from version 0.7 or higher
ui.binormal(ref, -test)
ref=c(rep(1,500), rep(0,500))
plotMD(ref, test, model='binormal')
ui.binormal(ref, test)
}
\references{
Dieter Kraft, "A software package for sequential quadratic
programming", Technical Report DFVLR-FB 88-28, Institut für Dynamik der
Flugsysteme, Oberpfaffenhofen, July 1988.
Dieter Kraft, "Algorithm 733: TOMP–Fortran modules for optimal control
calculations," ACM Transactions on Mathematical Software, vol. 20, no. 3,
pp. 262-281 (1994).
Landsheer, J. A. (2018). The Clinical Relevance of Methods for Handling
Inconclusive Medical Test Results: Quantification of Uncertainty in Medical
Decision-Making and Screening. Diagnostics, 8(2), 32.
https://doi.org/10.3390/diagnostics8020032
}
|
#part (b)
set.seed(123)
gibbs <- function(n.sims, y, burnin, thin)
{
n<-length(y)
x.draws <- matrix(NA, nrow=(n.sims-burnin)/thin,ncol=10)
mu.draws <- c() # initialize vector that will store draws from the full conditional
sig2.draws<- c() # initialize vector that will store draws from theh full conditional
sig2.cur = 2
mu.cur = 5
xi.update <- function(yi,mu,sig2) { # updates xi using the full conditional distribution
xi = rtruncnorm(1, a=yi-.5, b=yi+.5, mean = mu, sd = sqrt(sig2))
return(xi)
}
mu.update <- function(y,x,mu,sig2) { # updates using the MH-RW
mu.mh.rw(y,x,mu,sig2)
}
sig2.update <- function(y,x,mu,sig2){
sig2.mh.rw(y,x,mu,sig2)
}
for (i in 1:n.sims) { # simulates and calls update functions to simulate parameters
x1.cur <- xi.update(y[1],mu.cur,sig2.cur)
x2.cur <- xi.update(y[2],mu.cur,sig2.cur)
x3.cur <- xi.update(y[3],mu.cur,sig2.cur)
x4.cur <- xi.update(y[4],mu.cur,sig2.cur)
x5.cur <- xi.update(y[5],mu.cur,sig2.cur)
x6.cur <- xi.update(y[6],mu.cur,sig2.cur)
x7.cur <- xi.update(y[7],mu.cur,sig2.cur)
x8.cur <- xi.update(y[8],mu.cur,sig2.cur)
x9.cur <- xi.update(y[9],mu.cur,sig2.cur)
x10.cur <- xi.update(y[10],mu.cur,sig2.cur)
x.cur = c(x1.cur, x2.cur, x3.cur, x4.cur, x5.cur, x6.cur, x7.cur, x8.cur, x9.cur, x10.cur)
mu.cur <- mu.update(y,x.cur,mu.cur,sig2.cur)
sig2.cur <- sig2.update(y,x.cur,mu.cur,sig2.cur)
if (i > burnin & (i - burnin)%%thin == 0) { # applys burn-in and thining to the simulated data
x.draws[(i - burnin)/thin,] <- x.cur
mu.draws[(i - burnin)/thin] <- mu.cur
sig2.draws[(i - burnin)/thin] <- sig2.cur
}
}
sims <- cbind(mu.draws, sig2.draws, x.draws)
return(sims)
}
mu.mh.rw<-function(y,x,mu,sig2){
mu.accpt.cnt <- 0
n<-length(y)
mu.full = function(m){
ldens = 0
for(i in 1:n){
ldens = ldens + log( pnorm(y[i]+.5, m, sqrt(sig2) ) - pnorm(y[i]-.5, m, sqrt(sig2) ) )
}
ldens = ldens - (5/sig2)*(m-mean(x))^2
return(ldens)
}
p.cur = mu.full(mu)
mu.pro <- exp(log(mu) + rnorm(1, 0, 1)) ##generate a proposed value
p.pro = mu.full(mu.pro)
accpt.prob <- exp(p.pro - p.cur)
if(runif(1) < accpt.prob)
{
mu <- mu.pro
mu.accpt.cnt <- mu.accpt.cnt + 1
}
return(mu)
}
sig2.mh.rw = function(y,x,mu,sig2){
sig2.accpt.cnt <- 0
n<-length(y)
sig2.full = function(s2){
ldens2 = 0
for(i in 1:n){
ldens2 = ldens2 + log(pnorm(y[i]+.5,mu,sqrt(s2)) - pnorm(y[i]-.5,mu,sqrt(s2)))
}
for(i in 1:n){
f = 0
f = f + (x[i]-mu)^2
}
ldens2 = ldens2 + (-6)*log(s2) - (1/(2*s2))*f
return(ldens2)
}
p.cur = sig2.full(sig2)
sig2.pro <- exp(log(sig2) + rnorm(1, 0, 10) ) ##generate a proposed value
p.pro = sig2.full(sig2.pro)
accpt.prob <- exp(p.pro - p.cur)
if(runif(1) < accpt.prob)
{
sig2 <- sig2.pro
sig2.accpt.cnt <- sig2.accpt.cnt + 1
}
return(sig2)
}
n.sims <- 30000
y<-c(7,6,7,5,5,3,6,5,4,3)
sample= gibbs(n.sims, y, 1000, 5)
#data samples
mu.mcmc = sample[,1]
sig2.mcmc = sample[,2]
x.mcmc = cbind(sample[,3],sample[,4],sample[,5],sample[,6],sample[,7],sample[,8],sample[,9],sample[,10])
x.mcmc = cbind(x.mcmc,sample[,11],sample[,12])
hist(sig2.mcmc)
hist(mu.mcmc)
| /r_code/rounded_data_ptB.R | no_license | rae89/rounded_data_case_study | R | false | false | 3,267 | r | #part (b)
set.seed(123)
gibbs <- function(n.sims, y, burnin, thin)
{
n<-length(y)
x.draws <- matrix(NA, nrow=(n.sims-burnin)/thin,ncol=10)
mu.draws <- c() # initialize vector that will store draws from the full conditional
sig2.draws<- c() # initialize vector that will store draws from theh full conditional
sig2.cur = 2
mu.cur = 5
xi.update <- function(yi,mu,sig2) { # updates xi using the full conditional distribution
xi = rtruncnorm(1, a=yi-.5, b=yi+.5, mean = mu, sd = sqrt(sig2))
return(xi)
}
mu.update <- function(y,x,mu,sig2) { # updates using the MH-RW
mu.mh.rw(y,x,mu,sig2)
}
sig2.update <- function(y,x,mu,sig2){
sig2.mh.rw(y,x,mu,sig2)
}
for (i in 1:n.sims) { # simulates and calls update functions to simulate parameters
x1.cur <- xi.update(y[1],mu.cur,sig2.cur)
x2.cur <- xi.update(y[2],mu.cur,sig2.cur)
x3.cur <- xi.update(y[3],mu.cur,sig2.cur)
x4.cur <- xi.update(y[4],mu.cur,sig2.cur)
x5.cur <- xi.update(y[5],mu.cur,sig2.cur)
x6.cur <- xi.update(y[6],mu.cur,sig2.cur)
x7.cur <- xi.update(y[7],mu.cur,sig2.cur)
x8.cur <- xi.update(y[8],mu.cur,sig2.cur)
x9.cur <- xi.update(y[9],mu.cur,sig2.cur)
x10.cur <- xi.update(y[10],mu.cur,sig2.cur)
x.cur = c(x1.cur, x2.cur, x3.cur, x4.cur, x5.cur, x6.cur, x7.cur, x8.cur, x9.cur, x10.cur)
mu.cur <- mu.update(y,x.cur,mu.cur,sig2.cur)
sig2.cur <- sig2.update(y,x.cur,mu.cur,sig2.cur)
if (i > burnin & (i - burnin)%%thin == 0) { # applys burn-in and thining to the simulated data
x.draws[(i - burnin)/thin,] <- x.cur
mu.draws[(i - burnin)/thin] <- mu.cur
sig2.draws[(i - burnin)/thin] <- sig2.cur
}
}
sims <- cbind(mu.draws, sig2.draws, x.draws)
return(sims)
}
mu.mh.rw<-function(y,x,mu,sig2){
mu.accpt.cnt <- 0
n<-length(y)
mu.full = function(m){
ldens = 0
for(i in 1:n){
ldens = ldens + log( pnorm(y[i]+.5, m, sqrt(sig2) ) - pnorm(y[i]-.5, m, sqrt(sig2) ) )
}
ldens = ldens - (5/sig2)*(m-mean(x))^2
return(ldens)
}
p.cur = mu.full(mu)
mu.pro <- exp(log(mu) + rnorm(1, 0, 1)) ##generate a proposed value
p.pro = mu.full(mu.pro)
accpt.prob <- exp(p.pro - p.cur)
if(runif(1) < accpt.prob)
{
mu <- mu.pro
mu.accpt.cnt <- mu.accpt.cnt + 1
}
return(mu)
}
sig2.mh.rw = function(y,x,mu,sig2){
sig2.accpt.cnt <- 0
n<-length(y)
sig2.full = function(s2){
ldens2 = 0
for(i in 1:n){
ldens2 = ldens2 + log(pnorm(y[i]+.5,mu,sqrt(s2)) - pnorm(y[i]-.5,mu,sqrt(s2)))
}
for(i in 1:n){
f = 0
f = f + (x[i]-mu)^2
}
ldens2 = ldens2 + (-6)*log(s2) - (1/(2*s2))*f
return(ldens2)
}
p.cur = sig2.full(sig2)
sig2.pro <- exp(log(sig2) + rnorm(1, 0, 10) ) ##generate a proposed value
p.pro = sig2.full(sig2.pro)
accpt.prob <- exp(p.pro - p.cur)
if(runif(1) < accpt.prob)
{
sig2 <- sig2.pro
sig2.accpt.cnt <- sig2.accpt.cnt + 1
}
return(sig2)
}
n.sims <- 30000
y<-c(7,6,7,5,5,3,6,5,4,3)
sample= gibbs(n.sims, y, 1000, 5)
#data samples
mu.mcmc = sample[,1]
sig2.mcmc = sample[,2]
x.mcmc = cbind(sample[,3],sample[,4],sample[,5],sample[,6],sample[,7],sample[,8],sample[,9],sample[,10])
x.mcmc = cbind(x.mcmc,sample[,11],sample[,12])
hist(sig2.mcmc)
hist(mu.mcmc)
|
##makeCacheMatrix:
##This function creates a special "matrix" object that can cache its inverse.
##When the function called inverse matrix "inver" set to null
##setinverse adds the inverse matrix to the cache
##getinverse gets the inverse matrix from the cache
makeCacheMatrix <- function(x = matrix()) {
inver<-NULL
set<-function(y){
x<<-y
inver<<-NULL
}
get<-function() x
setinverse<-function(solve) inver<<- solve
getinverse<-function() inver
list(set=set, get=get,
setinverse=setinverse,
getinverse=getinverse)
}
##cacheSolve:
##This function does: if matrix new then inverse matrix. If the matrix a old one already
## calculated yhen bring the inverse matrix from the cahe
cacheSolve <- function(x=matrix(), ...) {
inver<-x$getinverse()
if(!is.null(inver)){
message("getting cached data")
return(inver)
}
datos<-x$get()
inver<-solve(datos, ...)
x$setinverse(inver)
inver
}
| /cachematrix.R | no_license | habsal/ProgrammingAssignment2 | R | false | false | 982 | r | ##makeCacheMatrix:
##This function creates a special "matrix" object that can cache its inverse.
##When the function called inverse matrix "inver" set to null
##setinverse adds the inverse matrix to the cache
##getinverse gets the inverse matrix from the cache
makeCacheMatrix <- function(x = matrix()) {
inver<-NULL
set<-function(y){
x<<-y
inver<<-NULL
}
get<-function() x
setinverse<-function(solve) inver<<- solve
getinverse<-function() inver
list(set=set, get=get,
setinverse=setinverse,
getinverse=getinverse)
}
##cacheSolve:
##This function does: if matrix new then inverse matrix. If the matrix a old one already
## calculated yhen bring the inverse matrix from the cahe
cacheSolve <- function(x=matrix(), ...) {
inver<-x$getinverse()
if(!is.null(inver)){
message("getting cached data")
return(inver)
}
datos<-x$get()
inver<-solve(datos, ...)
x$setinverse(inver)
inver
}
|
library(animation)
for(i in 1:ani.options("nmax")){
polydebug <- iso.polydebug(s_poly, mix, c_poly, its = 1)
windows()
V <- polydebug$V
hull_a <- polydebug$hull_a
m <- polydebug$m
plot(0, 0, type = "n", xlim = c(-8, 30), ylim = c(-2, 24))
points(mix[,1], mix[,2], col = "black", pch = 16)
points(V[hull_a, 1], V[hull_a, 2], col = "blue", pch = 4, cex = 1.5)
lines(V[hull_a, 1], V[hull_a, 2], col = "darkgreen", lwd = 1.5)
m_sample <- sample(62500, 1000, rep = F)
points(m$x[m_sample], m$y_f[m_sample], col = "orangered", cex = .5)
ani.pause()
savePlot(filename = i, type = "png")
}
| /polydebug.R | no_license | rogerclarkgc/isopolygon | R | false | false | 583 | r | library(animation)
for(i in 1:ani.options("nmax")){
polydebug <- iso.polydebug(s_poly, mix, c_poly, its = 1)
windows()
V <- polydebug$V
hull_a <- polydebug$hull_a
m <- polydebug$m
plot(0, 0, type = "n", xlim = c(-8, 30), ylim = c(-2, 24))
points(mix[,1], mix[,2], col = "black", pch = 16)
points(V[hull_a, 1], V[hull_a, 2], col = "blue", pch = 4, cex = 1.5)
lines(V[hull_a, 1], V[hull_a, 2], col = "darkgreen", lwd = 1.5)
m_sample <- sample(62500, 1000, rep = F)
points(m$x[m_sample], m$y_f[m_sample], col = "orangered", cex = .5)
ani.pause()
savePlot(filename = i, type = "png")
}
|
#################################################################################################
qdg.sem <- function(qdgObject, cross)
{
#################################################################################
score.sem.models <- function(cross,pheno.names,all.solutions,steptol,addcov=NULL) {
n.sol <- length(all.solutions[[1]])
mypheno <- cross$pheno[,pheno.names]
np <- length(mypheno[1,])
n.paths <- nrow(all.solutions[[1]][[1]])
semBIC <- rep(NA,n.sol)
path.coeffs <- matrix(NA,n.paths,n.sol)
if(!is.null(addcov)){
addcov <- paste("cross$pheno$",addcov,sep="")
myresid <- matrix(0, qtl::nind(cross),np)
for(i in 1:np){
fm <- stats::lm(stats::as.formula(paste("mypheno[,i] ~ ", paste(addcov, collapse = "+"))))
myresid[,i] <- fm$resid
}
mycov <- stats::cov(myresid)
for(i in 1:n.sol){
ramMatrix <- create.sem.model(DG=all.solutions[[1]][[i]],pheno.names=pheno.names)
mysem <- try(sem::sem(ramMatrix, S = mycov, N = qtl::nind(cross), var.names = pheno.names,
steptol = steptol, analytic.gradient = FALSE,
param.names = paste("Param", seq(nrow(ramMatrix)), sep = "")), silent = TRUE)
if(class(mysem)[1] != "try-error"){
aux.summary <- try(summary(mysem),silent=TRUE)
if(class(aux.summary)[1] != "try-error"){
semBIC[i] <- aux.summary$BIC
path.coeffs[,i] <- include.path.coefficients(sem.summary=aux.summary,output=all.solutions[[1]][[i]])
}
}
}
}
else {
mycov <- stats::cov(mypheno)
for(i in 1:n.sol){
ramMatrix <- create.sem.model(DG=all.solutions[[1]][[i]],pheno.names=pheno.names)
mysem <- try(sem::sem(ramMatrix, S = mycov, N = qtl::nind(cross), var.names = pheno.names,
steptol = steptol, analytic.gradient = FALSE,
param.names = paste("Param", seq(nrow(ramMatrix)), sep = "")), silent = TRUE)
if(class(mysem)[1] != "try-error"){
aux.summary <- try(summary(mysem),silent=TRUE)
if(class(aux.summary)[1] != "try-error"){
semBIC[i] <- aux.summary$BIC
path.coeffs[,i] <- include.path.coefficients(sem.summary=aux.summary,output=all.solutions[[1]][[i]])
}
}
}
}
## Drop solutions that did not work with sem().
tmp <- !is.na(semBIC)
if(!any(tmp)) {
stop("No qdg solutions could be fit with sem().")
}
if(any(!tmp)) {
warning(paste(sum(!tmp), "qdg solutions could not be fit with sem() and were dropped."))
semBIC <- semBIC[tmp]
path.coeffs <- path.coeffs[, tmp, drop = FALSE]
n.sol <- sum(tmp)
dropped <- which(!tmp)
}
else
dropped <- NULL
output <- data.frame(cbind(semBIC,approx.posterior(semBIC)),
stringsAsFactors = TRUE)
names(output) <- c("sem.BIC","posterior prob")
row.names(output) <- paste("model.",1:n.sol,sep="")
## if there are ties, returns the first.
best <- which(output[,2] == max(output[,2]))[1]
list(output,path.coeffs[,best], dropped)
}
#########################################################
include.path.coefficients <- function(sem.summary,output) {
ne <- length(output[,1])
mypathcoef <- rep(NA,ne)
aux <- sem.summary$coeff
aux <- aux[1:ne,]
for(i in 1:ne){
if(output[i,2] == "---->") aux1 <- paste(output[i,3], output[i,1], sep=" <--- ")
if(output[i,2] == "<----") aux1 <- paste(output[i,1], output[i,3], sep=" <--- ")
aux2 <- match(aux1,aux[,5])
mypathcoef[i] <- aux[aux2,1]
}
mypathcoef
}
############################################
create.sem.model <- function(DG,pheno.names) {
n <- length(DG[,1])
myvector <- c()
for(i in 1:n){
aux1 <- which(DG[i,1]==pheno.names)
aux2 <- which(DG[i,3]==pheno.names)
if(DG[i,2] == "---->"){
aux.vector <- c(1,aux2,aux1,i,NA)
}
else{aux.vector <- c(1,aux1,aux2,i,NA)}
myvector <- c(myvector,aux.vector)
}
for(i in 1:length(pheno.names)){
aux.vector <- c(2,i,i,n+i,NA)
myvector <- c(myvector,aux.vector)
}
matrix(myvector,ncol=5,byrow=TRUE)
}
##################################
approx.posterior <- function(bics) {
aux <- min(bics)
round(exp(-0.5*(bics-aux))/sum(exp(-0.5*(bics-aux))),6)
}
#################################################
ss <- score.sem.models(cross = cross,
pheno.names = qdgObject$phenotype.names,
all.solutions = qdgObject$Solutions,
steptol = 1 / 100000,
addcov = qdgObject$addcov)
best <- which(ss[[1]][,1] == min(ss[[1]][,1]))
mylist <- list(best, ss[[1]], ss[[2]])
names(mylist) <- c("best.SEM","BIC.SEM","path.coeffs")
mylist$Solutions <- qdgObject$Solutions
mylist$marker.names <- qdgObject$marker.names
mylist$phenotype.names <- qdgObject$phenotype.names
mylist$dropped <- ss[[3]]
class(mylist) <- c("qdg.sem", "qdg", "list")
mylist
}
summary.qdg.sem <- function(object, ...)
{
cat("\nBest SEM solution:\n")
print(object$Solution$solution[[object$best.SEM]])
bic.sem <- object$BIC.SEM[object$best.SEM, "sem.BIC"]
cat("\nBIC:\n")
print(c(sem = bic.sem))
cat("\nBest SEM solution is solution number:\n")
print(object$best.SEM)
if(!is.null(object$dropped)) {
cat(length(object$dropped), "qdg.sem solution were dropped; sem() failed for graphs",
paste(object$dropped, collapse = ","))
}
invisible()
}
print.qdg.sem <- function(x, ...) summary(x, ...)
| /R/sem.R | no_license | byandell/qtlnet | R | false | false | 5,691 | r | #################################################################################################
qdg.sem <- function(qdgObject, cross)
{
#################################################################################
score.sem.models <- function(cross,pheno.names,all.solutions,steptol,addcov=NULL) {
n.sol <- length(all.solutions[[1]])
mypheno <- cross$pheno[,pheno.names]
np <- length(mypheno[1,])
n.paths <- nrow(all.solutions[[1]][[1]])
semBIC <- rep(NA,n.sol)
path.coeffs <- matrix(NA,n.paths,n.sol)
if(!is.null(addcov)){
addcov <- paste("cross$pheno$",addcov,sep="")
myresid <- matrix(0, qtl::nind(cross),np)
for(i in 1:np){
fm <- stats::lm(stats::as.formula(paste("mypheno[,i] ~ ", paste(addcov, collapse = "+"))))
myresid[,i] <- fm$resid
}
mycov <- stats::cov(myresid)
for(i in 1:n.sol){
ramMatrix <- create.sem.model(DG=all.solutions[[1]][[i]],pheno.names=pheno.names)
mysem <- try(sem::sem(ramMatrix, S = mycov, N = qtl::nind(cross), var.names = pheno.names,
steptol = steptol, analytic.gradient = FALSE,
param.names = paste("Param", seq(nrow(ramMatrix)), sep = "")), silent = TRUE)
if(class(mysem)[1] != "try-error"){
aux.summary <- try(summary(mysem),silent=TRUE)
if(class(aux.summary)[1] != "try-error"){
semBIC[i] <- aux.summary$BIC
path.coeffs[,i] <- include.path.coefficients(sem.summary=aux.summary,output=all.solutions[[1]][[i]])
}
}
}
}
else {
mycov <- stats::cov(mypheno)
for(i in 1:n.sol){
ramMatrix <- create.sem.model(DG=all.solutions[[1]][[i]],pheno.names=pheno.names)
mysem <- try(sem::sem(ramMatrix, S = mycov, N = qtl::nind(cross), var.names = pheno.names,
steptol = steptol, analytic.gradient = FALSE,
param.names = paste("Param", seq(nrow(ramMatrix)), sep = "")), silent = TRUE)
if(class(mysem)[1] != "try-error"){
aux.summary <- try(summary(mysem),silent=TRUE)
if(class(aux.summary)[1] != "try-error"){
semBIC[i] <- aux.summary$BIC
path.coeffs[,i] <- include.path.coefficients(sem.summary=aux.summary,output=all.solutions[[1]][[i]])
}
}
}
}
## Drop solutions that did not work with sem().
tmp <- !is.na(semBIC)
if(!any(tmp)) {
stop("No qdg solutions could be fit with sem().")
}
if(any(!tmp)) {
warning(paste(sum(!tmp), "qdg solutions could not be fit with sem() and were dropped."))
semBIC <- semBIC[tmp]
path.coeffs <- path.coeffs[, tmp, drop = FALSE]
n.sol <- sum(tmp)
dropped <- which(!tmp)
}
else
dropped <- NULL
output <- data.frame(cbind(semBIC,approx.posterior(semBIC)),
stringsAsFactors = TRUE)
names(output) <- c("sem.BIC","posterior prob")
row.names(output) <- paste("model.",1:n.sol,sep="")
## if there are ties, returns the first.
best <- which(output[,2] == max(output[,2]))[1]
list(output,path.coeffs[,best], dropped)
}
#########################################################
include.path.coefficients <- function(sem.summary,output) {
ne <- length(output[,1])
mypathcoef <- rep(NA,ne)
aux <- sem.summary$coeff
aux <- aux[1:ne,]
for(i in 1:ne){
if(output[i,2] == "---->") aux1 <- paste(output[i,3], output[i,1], sep=" <--- ")
if(output[i,2] == "<----") aux1 <- paste(output[i,1], output[i,3], sep=" <--- ")
aux2 <- match(aux1,aux[,5])
mypathcoef[i] <- aux[aux2,1]
}
mypathcoef
}
############################################
create.sem.model <- function(DG,pheno.names) {
n <- length(DG[,1])
myvector <- c()
for(i in 1:n){
aux1 <- which(DG[i,1]==pheno.names)
aux2 <- which(DG[i,3]==pheno.names)
if(DG[i,2] == "---->"){
aux.vector <- c(1,aux2,aux1,i,NA)
}
else{aux.vector <- c(1,aux1,aux2,i,NA)}
myvector <- c(myvector,aux.vector)
}
for(i in 1:length(pheno.names)){
aux.vector <- c(2,i,i,n+i,NA)
myvector <- c(myvector,aux.vector)
}
matrix(myvector,ncol=5,byrow=TRUE)
}
##################################
approx.posterior <- function(bics) {
aux <- min(bics)
round(exp(-0.5*(bics-aux))/sum(exp(-0.5*(bics-aux))),6)
}
#################################################
ss <- score.sem.models(cross = cross,
pheno.names = qdgObject$phenotype.names,
all.solutions = qdgObject$Solutions,
steptol = 1 / 100000,
addcov = qdgObject$addcov)
best <- which(ss[[1]][,1] == min(ss[[1]][,1]))
mylist <- list(best, ss[[1]], ss[[2]])
names(mylist) <- c("best.SEM","BIC.SEM","path.coeffs")
mylist$Solutions <- qdgObject$Solutions
mylist$marker.names <- qdgObject$marker.names
mylist$phenotype.names <- qdgObject$phenotype.names
mylist$dropped <- ss[[3]]
class(mylist) <- c("qdg.sem", "qdg", "list")
mylist
}
summary.qdg.sem <- function(object, ...)
{
cat("\nBest SEM solution:\n")
print(object$Solution$solution[[object$best.SEM]])
bic.sem <- object$BIC.SEM[object$best.SEM, "sem.BIC"]
cat("\nBIC:\n")
print(c(sem = bic.sem))
cat("\nBest SEM solution is solution number:\n")
print(object$best.SEM)
if(!is.null(object$dropped)) {
cat(length(object$dropped), "qdg.sem solution were dropped; sem() failed for graphs",
paste(object$dropped, collapse = ","))
}
invisible()
}
print.qdg.sem <- function(x, ...) summary(x, ...)
|
#' Get google drive file update time
#'
#' @param googId Google drive file key
#' @param tzone timezone for the returned timestamp (default "UTC")
#' @import googledrive lubridate
#' @return POSIXct timestamp of the file's last modification, in `tzone`
#' @export
#'
#' @examples
googleDriveUpdateTime <- function(googId,tzone = "UTC"){
  #money sheet update
  info <- googledrive::drive_get(googledrive::as_id(googId))
  # NOTE(review): positional `info[3]` assumes column 3 of the drive_get()
  # tibble is `drive_resource` -- confirm against the googledrive version
  # in use; `info$drive_resource` would be the robust form.
  mtime <- info[3]$drive_resource[[1]]$modifiedTime
  # Parse the RFC3339 string and convert to the requested timezone.
  return(lubridate::with_tz(lubridate::ymd_hms(mtime),tzone = tzone))
}
#' Check to see if a project needs to be updated
#'
#' Compares the MD5 of the local lipd directory and the QC sheet's
#' modification time against the most recently recorded version of the
#' project in the version metadata sheet.
#'
#' @param project project name (matched against the version metadata sheet)
#' @param webDirectory directory for the webserver (currently unused here)
#' @param lipdDir local directory of .lpd files
#' @param qcId google sheets ID of the QC sheet
#' @param versionMetaId google sheets ID of the version metadata sheet
#' @param googEmail google account email used for authentication
#' @import googlesheets4
#' @import magrittr
#' @import dplyr
#' @import googledrive
#' @import lubridate
#'
#' @return TRUE or FALSE
#' @export
updateNeeded <- function(project,webDirectory,lipdDir,qcId,versionMetaId = "1OHD7PXEQ_5Lq6GxtzYvPA76bpQvN1_eYoFR0X80FIrY",googEmail = NULL){
  googlesheets4::gs4_auth(email = googEmail)
  # Most recent recorded version for this project (newest row first).
  versionSheet <- read_sheet_retry(googledrive::as_id(versionMetaId)) %>%
    dplyr::filter(project == (!!project)) %>%
    dplyr::arrange(desc(versionCreated))
  lastUpdate <- lubridate::ymd_hms(versionSheet$versionCreated[1])
  lastMD5 <- versionSheet$`zip MD5`[1]
  # Files need updating unless the recorded MD5 matches the current one.
  # isTRUE() guards against a missing/NA recorded MD5, which would
  # otherwise make `if (NA)` error out.
  filesNeedUpdating <- TRUE
  if (length(lastMD5) > 0) {
    currentMD5 <- directoryMD5(lipdDir)
    if (isTRUE(lastMD5 == currentMD5)) {
      filesNeedUpdating <- FALSE
    }
  }
  # (A file-modification-time check used to live here; it was dead code --
  # its result was only consumed by commented-out logic -- so it was removed.)
  # QC sheet needs updating unless the recorded version postdates the
  # sheet's last modification; isTRUE() keeps the TRUE default when the
  # comparison is NA.
  qcUpdate <- googleDriveUpdateTime(qcId)
  qcNeedsUpdating <- !isTRUE(lastUpdate > qcUpdate)
  return(qcNeedsUpdating || filesNeedUpdating)
}
#' Tick the project version
#'
#' @param project project name
#' @param qcIc dataSetNames in this compilation from the QC sheet
#' @param tsIc dataSetNames in the last compilation from the files
#' @param versionMetaId ID of the versioning qc sheet
#' @param googEmail google user ID
#'
#' @description Ticks the version of a database for you. Assumes that a change is necessary.
#' @import googlesheets4
#' @import magrittr
#' @import dplyr
#' @import googledrive
#' @import stringr
#' @return the new version string ("publication_dataset_metadata")
#' @export
#'
#' @examples
tickVersion <- function(project,qcIc,tsIc,versionMetaId = "1OHD7PXEQ_5Lq6GxtzYvPA76bpQvN1_eYoFR0X80FIrY",googEmail = NULL){
  googlesheets4::gs4_auth(email = googEmail)
  # Most recent recorded version for this project (newest row first).
  versionSheet <- read_sheet_retry(googledrive::as_id(versionMetaId)) %>%
    dplyr::filter(project == (!!project)) %>%
    dplyr::arrange(desc(versionCreated))
  # Sorted dataset-name sets for the previous and current compilation.
  lastUdsn <- sort(tsIc)
  thisUdsn <- sort(qcIc)
  # identical() is safe when the two compilations differ in length;
  # the previous `all(lastUdsn == thisUdsn)` recycled the shorter vector,
  # producing warnings (and wrong answers when lengths divide evenly).
  if (identical(lastUdsn, thisUdsn)) {
    # Same datasets: only metadata changed -> tick the metadata digit.
    p <- versionSheet$publication[1]
    d <- versionSheet$dataset[1]
    m <- versionSheet$metadata[1] + 1
  } else {
    # Dataset membership changed: tick the dataset digit, reset metadata.
    p <- versionSheet$publication[1]
    d <- versionSheet$dataset[1] + 1
    m <- 0
  }
  newVers <- stringr::str_c(p, d, m, sep = "_")
  return(newVers)
}
#' Get the most recent version of the compilation (before updating)
#'
#' @param project project name
#' @param versionMetaId ID of the versioning qc sheet
#' @param googEmail google user ID
#' @description Gets the last version of the database (before updating)
#' @import googlesheets4
#' @import magrittr
#' @import dplyr
#' @import googledrive
#' @import stringr
#' @return the last version string ("publication_dataset_metadata")
#' @export
#'
#' @examples
lastVersion <- function(project,versionMetaId = "1OHD7PXEQ_5Lq6GxtzYvPA76bpQvN1_eYoFR0X80FIrY",googEmail = NULL){
  googlesheets4::gs4_auth(email = googEmail)
  # Newest recorded version row for this project sorts to the top.
  mostRecent <- read_sheet_retry(googledrive::as_id(versionMetaId)) %>%
    dplyr::filter(project == (!!project)) %>%
    dplyr::arrange(desc(versionCreated))
  # Version string is built from the three version digits of that row.
  lastVers <- stringr::str_c(mostRecent$publication[1],
                             mostRecent$dataset[1],
                             mostRecent$metadata[1],
                             sep = "_")
  return(lastVers)
}
#' Assign each element of a named list as a variable in an environment
#'
#' @param params named list; each element is assigned under its name.
#' @param env target environment. NOTE(review): the default is the
#'   *enclosing* environment of this function, not the caller's frame --
#'   callers in this file always pass `env` explicitly or rely on that.
#' @return invisibly NULL; called for its side effect on `env`.
assignVariablesFromList <- function(params, env = parent.env(environment())){
  # seq_along() is safe for an empty list (1:length(params) would loop
  # over c(1, 0) and fail on names(params)[1]).
  for (i in seq_along(params)) {
    assign(names(params)[i], params[[i]], envir = env)
  }
  invisible(NULL)
}
#' Build parameters
#'
#' Collects all of its arguments into a named list that drives the
#' compilation-update pipeline.
#'
#' @param project project name
#' @param lipdDir authority directory for a lipd file
#' @param webDirectory directory for webserver
#' @param qcId google sheets ID for the qc sheet
#' @param lastUpdateId google sheets ID for the last version
#' @param versionMetaId google sheets ID of the version metadata sheet
#' @param googEmail google user ID
#' @param updateWebpages update lipdverse webpages (default = TRUE). Usually TRUE unless troubleshooting.
#' @param standardizeTerms standardize vocabulary while loading (default = TRUE)
#' @param ageOrYear whether the compilation uses an "age" or "year" axis
#' @param recreateDataPages force recreation of all data pages (default = FALSE)
#' @param restrictWebpagesToCompilation restrict webpages to this compilation (default = TRUE)
#' @param qcStandardizationCheck validate QC-sheet terms (default = TRUE)
#' @param serialize write serialized outputs (default = TRUE)
#' @param projVersion explicit project version string; NA means auto-tick
#' @param updateLipdverse update the lipdverse master pages (default = TRUE)
#' @import purrr
#' @import googlesheets4
#' @import readr
#' @import lipdR
#' @import geoChronR
#' @return named list of all arguments, in alphabetical order
#' @export
buildParams <- function(project,
                        lipdDir,
                        webDirectory,
                        qcId,
                        lastUpdateId,
                        versionMetaId = "1OHD7PXEQ_5Lq6GxtzYvPA76bpQvN1_eYoFR0X80FIrY",
                        googEmail = NULL,
                        updateWebpages = TRUE,
                        standardizeTerms = TRUE,
                        ageOrYear = "age",
                        recreateDataPages = FALSE,
                        restrictWebpagesToCompilation = TRUE,
                        qcStandardizationCheck = TRUE,
                        serialize = TRUE,
                        projVersion = NA,
                        updateLipdverse = TRUE){
  # ls() here sees only the formals (nothing else is bound yet).
  an <- ls()
  # mget() forces the argument promises and returns a named list --
  # replaces the previous eval(parse(text = ...)) anti-pattern.
  mget(an)
}
#' Check if an update is needed
#'
#' @param params named parameter list from buildParams()
#'
#' @return "No update needed" or "Update needed" when `projVersion` is NA;
#'   NULL (invisibly) when an explicit `projVersion` was supplied, since
#'   the check is skipped in that case.
#' @export
#'
#' @examples
checkIfUpdateNeeded <- function(params){
  # Promote every parameter into this function's environment; list2env()
  # is robust to an empty list (the previous 1:length() loop was not).
  list2env(params, envir = environment())
  if(is.na(projVersion)){#skip check if new version is specified
    #check if update is necessary
    toUpdate <- updateNeeded(project,webDirectory,lipdDir,qcId,googEmail = googEmail)
    if(!toUpdate){
      return("No update needed")
    }else{
      return("Update needed")
    }
  }
}
#' Load in new data
#'
#' Reads the (potentially updated) .lpd files for a project, assigns
#' missing datasetIds, repairs legacy chronMeasurementTable naming,
#' initializes changelogs, and (optionally) standardizes terms before
#' extracting a timeseries representation.
#'
#' @param params named parameter list from buildParams()
#'
#' @return list with Dloaded (as read), D (cleaned), TS, TSid,
#'   filesToUltimatelyDelete, dsidsOriginal, and udsn (unique dataSetNames)
#' @export
loadInUpdatedData <- function(params){
  #assignVariablesFromList(params)
  for(i in seq_along(params)){
    assign(names(params)[i],params[[i]])
  }
  #if looking at full database:
  if(lipdDir == "/Volumes/data/Dropbox/lipdverse/database"){
    #getDatasetInCompilationFromQC()
    #0. Figure out which datasets to load based on QC sheet.
    dscomp <- read_sheet_retry(ss = qcId,sheet = "datasetsInCompilation")
    #make sure that all names there are in the lipdDir, and that there are no duplicates
    if(any(duplicated(dscomp$dsn))){
      stop(glue::glue("There are duplicated dataSetNames in 'datasetsInCompilation': {dscomp$dsn[duplicated(dscomp$dsn)]}"))
    }
    #get all files in lipdverse
    af <- list.files(lipdDir,pattern = ".lpd",full.names = FALSE) %>% stringr::str_remove_all(".lpd")
    #see if any in dscomp don't exist
    missing <- which(!dscomp$dsn %in% af)
    # Drop missing rows only when there are any: `dscomp[-integer(0), ]`
    # returns ZERO rows and would silently wipe the whole compilation.
    if(length(missing) > 0){
      dscomp <- dscomp[-missing,]
    }
    #see if any in dscomp don't exist
    missing <- which(!dscomp$dsn %in% af)
    if(length(missing) > 0){
      stop(glue("{length(missing)} datasets in 'datasetsInCompilation' don't exist in the database: {paste(dscomp$dsn[missing],collapse = '; ')}"))
    }
    #look for new files not in the dscomp page
    #which local files not in dscomp
    new <- which(!af %in% dscomp$dsn)
    dscompgood <- filter(dscomp,inComp != "FALSE")
    filesToConsider <- file.path(lipdDir, paste0(c(dscompgood$dsn,af[new]),".lpd"))
  }else{
    filesToConsider <- list.files(lipdDir,pattern = ".lpd",full.names = TRUE)
  }
  filesToUltimatelyDelete <- filesToConsider
  #1. load in (potentially updated) files
  flagUpdate(project)
  D <- lipdR::readLipd(filesToConsider)
  #create datasetIds for records that don't have them
  for(d in seq_along(D)){
    if(is.null(D[[d]]$datasetId)){
      D[[d]]$datasetId <- createDatasetId()
    }
    # Legacy files store chron tables under chronMeasurementTable;
    # migrate them to the modern measurementTable slot.
    if(!is.null(D[[d]]$chronData[[1]]$chronMeasurementTable)){
      for(ccic in seq_along(D[[d]]$chronData)){
        D[[d]]$chronData[[ccic]]$measurementTable <- D[[d]]$chronData[[ccic]]$chronMeasurementTable
        D[[d]]$chronData[[ccic]]$chronMeasurementTable <- NULL
      }
    }
    #check for changelog and fix
    if(is.null(D[[d]]$changelog)){
      D[[d]] <- initializeChangelog(D[[d]])
    }
  }
  Dloaded <- D #store for changelogging
  dsidsOriginal <- tibble::tibble(datasetId = purrr::map_chr(D,"datasetId"),
                                  dataSetNameOrig = purrr::map_chr(D,"dataSetName"),
                                  dataSetVersion = purrr::map_chr(D,getVersion))
  #make sure that primary chronologies are named appropriately
  D <- purrr::map(D,renamePrimaryChron)
  if(standardizeTerms){
    D <- purrr::map(D,cleanOriginalDataUrl)
    D <- purrr::map(D,hasDepth)
    D <- purrr::map(D,nUniqueAges)
    D <- purrr::map(D,nGoodAges)
    D <- purrr::map(D,nOtherAges)
    # D <- purrr::map(D,fixExcelIssues)
    D <- purrr::map(D,standardizeChronVariableNames)
  }
  #1a. Screen by some criterion...
  #check for TSid
  TS <- lipdR::extractTs(D)
  #create grouping terms for later standardization
  #TO DO!# remove entries that don't fall into the groups/lumps!
  if(standardizeTerms){
    #Do some cleaning
    TS <- standardizeTsValues(TS)
    TS <- fix_pubYear(TS)
    TS <- fixKiloyearsTs(TS)
    TS <- purrr::map(TS,removeEmptyInterpretationsFromTs)
  }
  #get some relevant information
  TSid <- lipdR::pullTsVariable(TS,"paleoData_TSid")
  udsn <- unique(lipdR::pullTsVariable(TS,"dataSetName"))
  data <- list(Dloaded = Dloaded,
               D = D,
               TS = TS,
               TSid = TSid,
               filesToUltimatelyDelete = filesToUltimatelyDelete,
               dsidsOriginal = dsidsOriginal,
               udsn = udsn)
  return(data)
}
#' Get QC
#'
#' Fetches the project's google QC sheet, resolves conflicts, validates
#' and (optionally) standardizes its terms -- round-tripping unresolved
#' terms through the "-invalid" helper sheets -- extracts changelog
#' notes, determines the new project version, and guarantees every
#' timeseries has a unique TSid.
#'
#' @param params named parameter list (see buildParams())
#' @param data data list produced by loadInUpdatedData()
#'
#' @return `data`, extended with qcB (cleaned QC sheet), clNotes,
#'   projVersion, lastProjVersion and sTS (interpretation-split TS)
#' @export
getQcInfo <- function(params,data){
  #assignVariablesFromList(params)
  for(i in 1:length(params)){
    assign(names(params)[i],params[[i]])
  }
  #assignVariablesFromList(data)
  for(i in 1:length(data)){
    assign(names(data)[i],data[[i]])
  }
  #get the google qc sheet
  qcB <- getGoogleQCSheet(qcId)
  #resolve conflicts
  qcB <- resolveQcConflict(qcB)
  # Required identifier columns must be complete before merging.
  if(any(is.na(qcB$TSid))){
    stop("TSids missing from google QC sheet")
  }
  if(any(is.na(qcB$dataSetName))){
    stop("dataSetName missing from google QC sheet")
  }
  if(any(is.na(qcB$variableName))){
    stop("variableName missing from google QC sheet")
  }
  if(qcStandardizationCheck){
    #check QCsheet terms are valid
    #replace them with other terms if they're not
    allSheetNames <- googlesheets4::sheet_names(ss = qcId)
    #check for year, age, depth fixes
    allInvalid <- allSheetNames[grepl(allSheetNames,pattern = "-invalid")]
    atsid <- pullTsVariable(TS,"paleoData_TSid")
    # Apply corrections recorded on each "*-invalid" helper sheet directly
    # to the special columns of the in-memory TS.
    for(av in allInvalid){
      thisOne <- read_sheet_retry(ss = qcId,sheet = av)
      #check to find TSids not in QC sheet AND in TS
      if("number" %in% names(thisOne)){
        #if there's a number, then do all but number one
        tochange <- which(thisOne$number > 1 & thisOne$TSid %in% atsid)
      }else{
        #if there's not a number, only do those without a TSid in the QCSheet
        tochange <- which(!thisOne$TSid %in% qcB$TSid & thisOne$TSid %in% atsid)
      }
      for(tci in tochange){
        tsidi <- which(thisOne$TSid[tci] == atsid)
        vnts <- str_remove(av,"-invalid")
        # NOTE(review): this indexes thisOne$number by tsidi (a TS index)
        # while the next line uses tci (a sheet row index) -- confirm the
        # mixed indexing is intentional.
        if(!is.null(thisOne$number[tsidi])){#then we need to append the number into the name
          vnts <- str_replace(vnts,"_",paste0(thisOne$number[tci],"_"))
        }
        if(!is.na(names(TS[[tsidi]][vnts]))){
          print(glue::glue("Changed special column {vnts} ({thisOne$TSid[tci]}) from {TS[[tsidi]][[vnts]]} to {thisOne[[4]][tci]}"))
          TS[[tsidi]][[vnts]] <- thisOne[[4]][tci]
          if(av == "paleoData_proxy-invalid"){
            if(is.na(TS[[tsidi]][[vnts]])){#replace these with NULLs
              TS[[tsidi]][[vnts]] <- NULL
            }
          }
        }
      }
    }
    # First standardization pass over the QC sheet itself.
    stando <- lipdR::standardizeQCsheetValues(qcB)
    qcB <- stando$newSheet
    if(length(stando$remainingInvalid) > 0){#standardization issues. Do a few things:
      #check to see if the existing invalid sheets contain corrected information....
      convo <- read_sheet_retry(ss="1T5RrAtrk3RiWIUSyO0XTAa756k6ljiYjYpvP67Ngl_w")
      for(rv in names(stando$remainingInvalid)){
        tivs <- allSheetNames[startsWith(x = allSheetNames,prefix = rv)]
        if(length(tivs) == 1){
          thisOne <- read_sheet_retry(ss = qcId,sheet = tivs)
          # Map the TS variable name to its QC-sheet column name; two
          # known cases are hard-coded as a fallback.
          convoi <- which(convo$tsName == rv)
          if(length(convoi) != 1){
            if(rv == "interpretation_variable"){
              qcName <- "climateVariable"
            }else if(rv == "interpretation_seasonality"){
              qcName <- "seasonality"
            }else{
              stop("I can't figure out the qc name")
            }
          }else{
            qcName <- convo$qcSheetName[convoi]
          }
          #loop through terms and see if in standardTables, and replace if so.
          if(nrow(thisOne) > 0){
            for(rvr in 1:nrow(thisOne)){
              if(thisOne[[ncol(thisOne)]][rvr] %in% standardTables[[rv]]$lipdName){#it's a standard term!
                #replace it!
                tsidm <- which(qcB$TSid == thisOne$TSid[rvr])
                if(length(tsidm) > 1){stop("this shouldn't be possible")}
                print(glue::glue("{thisOne$TSid[rvr]} - {rv}: replaced {qcB[[qcName]][tsidm]} with {thisOne[[ncol(thisOne)]][rvr]}"))
                qcB[[qcName]][tsidm] <- thisOne[[ncol(thisOne)]][rvr]
              }
            }
          }
        }else if(length(tivs) == 0){
          print(glue::glue("No sheet for {tivs} in the qc sheet"))
        }else{
          print(glue::glue("Multiple {tivs} sheets found: {allSheetNames}"))
        }
      }
      #rerun the standardization report
      stando <- lipdR::standardizeQCsheetValues(qcB)
      qcB <- stando$newSheet
      if(length(stando$remainingInvalid) > 0){#standardization issues remain
        # Unresolved terms: push the partially-standardized sheet back to
        # google, flag the sheet title, write a validation report, and abort.
        #write the standardized value back into the qc sheet
        qcB[is.null(qcB) | qcB == ""] <- NA
        #find differences for log
        #diff <- daff::diff_data(qcA,qc2w,ids = "TSid",ignore_whitespace = TRUE,columns_to_ignore = "link to lipdverse",never_show_order = TRUE)
        qcB[is.na(qcB)] <- ""
        readr::write_csv(qcB,file = file.path(webDirectory,project,"qcInvalid.csv"))
        #upload it to google drive into temporary qcInvalid
        googledrive::drive_update(media = file.path(webDirectory,project,"qcInvalid.csv"),
                                  file = googledrive::as_id("1valJY2eqpUT1fsfRggLmPpwh32-HMb9ZO5J5LvZERLQ"))
        #copy the qc check to the qcsheet:
        googlesheets4::sheet_delete(ss = qcId,sheet = 1)
        googlesheets4::sheet_copy(from_ss = "1valJY2eqpUT1fsfRggLmPpwh32-HMb9ZO5J5LvZERLQ", from_sheet = 1,to_ss = qcId, to_sheet = "QC",.before = "datasetsInCompilation")
        #write_sheet_retry(qc2w,ss = qcId, sheet = 1)
        googledrive::drive_rename(googledrive::as_id(qcId),name = stringr::str_c(project," v. QC sheet - INVALID TERMS!"))
        #now write a validation report
        writeValidationReportToQCSheet(stando$remainingInvalid,qcId)
        #delete sheets without missing terms
        tokeep <- paste0(names(stando$remainingInvalid),"-invalid")
        allSheetNames <- googlesheets4::sheet_names(ss = qcId)
        ivnames <- allSheetNames[str_detect(allSheetNames,pattern = "-invalid")]
        todelete <- setdiff(ivnames,tokeep)
        try(googlesheets4::sheet_delete(ss = qcId,sheet = todelete),silent = TRUE)
        #throw an error
        stop("There are invalid terms in the QC sheet. Check the validation report")
      }
    }
  }
  # Ensure the changelogNotes column exists before extracting it.
  if(!any(names(qcB)=="changelogNotes")){
    qcB$changelogNotes <- NA
  }
  #pull out changelog notes, collapsed to one row per dataset
  clNotes <- qcB %>%
    dplyr::select(dataSetName,TSid,changelogNotes) %>%
    dplyr::filter(!is.na(changelogNotes)) %>%
    dplyr::group_by(dataSetName) %>%
    dplyr::summarize(changes = paste(paste(TSid,changelogNotes,sep = ": "),collapse = "; ")) %>%
    dplyr::rename(dataSetNameOrig = dataSetName)
  #then remove that column
  qcB <- dplyr::select(qcB,-changelogNotes)
  data$dsidsOriginal <- data$dsidsOriginal %>%
    dplyr::left_join(clNotes,by = "dataSetNameOrig")
  #1b. New version name
  lastProjVersion <- lastVersion(project,googEmail = googEmail)
  if(is.na(projVersion)){
    # Auto-tick: compare dataset membership in the QC sheet vs the files.
    #qc in compilation
    qcIc <- qcB %>%
      filter(inThisCompilation == TRUE) %>%
      select(dataSetName) %>%
      unique()
    qcIc <- qcIc$dataSetName
    inLast <- inThisCompilation(TS,project,lastProjVersion)
    tsIci <- which(purrr::map_lgl(inLast,isTRUE))
    tsIc <- unique(lipdR::pullTsVariable(TS,"dataSetName")[tsIci])
    projVersion <- tickVersion(project,qcIc,tsIc,googEmail = googEmail)
  }
  #setup new version output directories
  if(!dir.exists(file.path(webDirectory,project))){
    dir.create(file.path(webDirectory,project))
  }
  if(!dir.exists(file.path(webDirectory,project,projVersion))){
    dir.create(file.path(webDirectory,project,projVersion))
  }
  #create TSids if needed
  et <- which(is.na(TSid))
  if(length(et) > 0){
    ntsid <- unlist(purrr::rerun(length(et),lipdR::createTSid()))
    TSid[et] <- ntsid
    TS <- lipdR::pushTsVariable(TS,variable = "paleoData_TSid",vec = TSid)
  }
  # De-duplicate TSids by appending "-dup" until unique (loop handles
  # the case where an appended id collides again).
  while(any(duplicated(TSid))){
    wd <- which(duplicated(TSid))
    dtsid <- paste0(TSid[wd],"-dup")
    TSid[wd] <- dtsid
    TS <- lipdR::pushTsVariable(TS,variable = "paleoData_TSid",vec = TSid)
  }
  sTS <- lipdR::splitInterpretationByScope(TS)
  data$TS <- TS
  newData <- list(qcB = qcB,
                  clNotes = clNotes,
                  projVersion = projVersion,
                  lastProjVersion = lastProjVersion,
                  sTS = sTS)
  data <- append(data,newData)
  return(data)
}
#' Create QC sheet from data
#'
#' Writes two QC snapshots to disk for the later three-way merge:
#' qcTs.csv (regenerated from the files) and qcGoog.csv (the cleaned
#' google sheet), plus lastUpdate.csv (the sheet state at last update).
#'
#' @param params named parameter list (see buildParams())
#' @param data data list from getQcInfo()
#'
#' @return `data` with the file-derived QC sheet added as `data$qcC`
#' @export
createQcFromFile <- function(params,data){
  #assignVariablesFromList(params)
  for(i in seq_along(params)){
    assign(names(params)[i],params[[i]])
  }
  #assignVariablesFromList(data)
  for(i in seq_along(data)){
    assign(names(data)[i],data[[i]])
  }
  #2. Create a new qc sheet from files
  qcC <- createQCdataFrame(sTS,templateId = qcId,ageOrYear = ageOrYear,compilationName = project,compVersion = lastProjVersion)
  # `file =` replaces the deprecated `path =` argument of write_csv(),
  # matching the usage elsewhere in this file.
  readr::write_csv(qcC,file = file.path(webDirectory,project,projVersion,"qcTs.csv"))
  #3. Get the updated QC sheet from google
  #first, lock editing
  #googledrive::drive_share(as_id(qcId),role = "reader", type = "anyone")
  # De-duplicate TSids by appending "-dup" until unique.
  while(any(duplicated(qcB$TSid))){
    wd <- which(duplicated(qcB$TSid))
    dtsid <- paste0(qcB$TSid[wd],"-dup")
    qcB$TSid[wd] <- dtsid
  }
  readr::write_csv(qcB,file = file.path(webDirectory,project,projVersion,"qcGoog.csv"))
  lu <- getGoogleQCSheet(lastUpdateId)
  readr::write_csv(lu,file.path(webDirectory,project,"lastUpdate.csv"))
  data$qcC <- qcC
  return(data)
}
#' Merge sources
#'
#' Three-way merge of the QC sheet: qcA (state at last update, the merge
#' parent), qcB (current google QC sheet) and qcC (regenerated from the
#' files). Edits made on google and edits made in the files are both
#' applied; google (qcB) wins for `inThisCompilation`.
#'
#' @param params named parameter list (see buildParams())
#' @param data data list from createQcFromFile()
#'
#' @return `data` with the merged sheet added as `data$qc`
#' @export
mergeQcSheets <- function(params,data){
  #assignVariablesFromList(params)
  for(i in 1:length(params)){
    assign(names(params)[i],params[[i]])
  }
  #assignVariablesFromList(data)
  for(i in 1:length(data)){
    assign(names(data)[i],data[[i]])
  }
  #4. Load in the old QC sheet (from last update), and merge with new ones
  # Special characters are normalized through the rosetta map so that
  # cosmetic encoding differences don't register as diffs.
  rosetta <- lipdverseR::rosettaStone()
  qcA <- readr::read_csv(file.path(webDirectory,project,"lastUpdate.csv"),guess_max = Inf) %>%
    purrr::map_df(lipdverseR::replaceSpecialCharacters,rosetta)
  qcB <- readr::read_csv(file.path(webDirectory,project,projVersion,"qcGoog.csv"),guess_max = Inf) %>%
    purrr::map_df(lipdverseR::replaceSpecialCharacters,rosetta)
  qcC <- readr::read_csv(file.path(webDirectory,project,projVersion,"qcTs.csv"),guess_max = Inf) %>%
    purrr::map_df(lipdverseR::replaceSpecialCharacters,rosetta)
  #qc <- daff::merge_data(parent = qcA,a = qcB,b = qcC) Old way
  #NPM: 2.20.20 added to help merge_data work as desired
  #new way. What if we only consider QC entries that are present in the TS QC (qcC)
  qcAs <- dplyr::filter(qcA,TSid %in% qcC$TSid)
  qcBs <- dplyr::filter(qcB,TSid %in% qcC$TSid)
  #shuffle in
  # dBC <- dplyr::anti_join(qcB,qcC,by = "TSid")
  # dCB <- dplyr::anti_join(qcC,qcB,by = "TSid")
  # dCA <- dplyr::anti_join(qcC,qcA,by = "TSid")
  #dBC <- dplyr::anti_join(qcC,qcA,by = "TSid")
  # Rows that exist in the file-derived sheet but not in the other two
  # are appended so all three frames cover the same TSids.
  dCB <- dplyr::anti_join(qcC,qcBs,by = "TSid")
  dCA <- dplyr::anti_join(qcC,qcAs,by = "TSid")
  qcA2 <- dplyr::bind_rows(qcAs,dCA)
  qcB2 <- dplyr::bind_rows(qcBs,dCB)
  #qcC2 <- dplyr::bind_rows(qcC,dBC)
  #check once more
  #dBA <- dplyr::anti_join(qcB2,qcA2,by = "TSid")
  #qcA2 <- dplyr::bind_rows(qcA2,dBA)
  #arrange by qcB TSid so all three frames are row-aligned for daff
  miA <- match(qcB2$TSid,qcA2$TSid)
  miC <- match(qcB2$TSid,qcC$TSid)
  qcA <- qcA2[miA,]
  qcC <- qcC[miC,]
  qcB <- qcB2
  #turn all NULLs and blanks to NAs
  qcA[is.null(qcA) | qcA == ""] <- NA
  qcB[is.null(qcB) | qcB == ""] <- NA
  qcC[is.null(qcC) | qcC == ""] <- NA
  #prep inThisCompilation: missing values are treated as FALSE
  qcA$inThisCompilation[is.na(qcA$inThisCompilation)] <- FALSE
  qcB$inThisCompilation[is.na(qcB$inThisCompilation)] <- FALSE
  qcC$inThisCompilation[is.na(qcC$inThisCompilation)] <- FALSE
  #find all TRUE in B and apply to C (since they should only be changed in B)
  bf <- qcB %>%
    filter(inThisCompilation == "TRUE")
  cfi <- which(qcC$TSid %in% bf$TSid)
  qcC$inThisCompilation[cfi] <- "TRUE"
  # Three-way merge with qcA as the common ancestor.
  qc <- daff::merge_data(parent = qcA,a = qcB,b = qcC)
  #remove fake conflicts
  qc <- purrr::map_dfc(qc,removeFakeConflictsCol)
  #remove duplicate rows
  qc <- dplyr::distinct(qc)
  # Render a human-readable diff against the last-update state for the log.
  dd <- daff::diff_data(qcA,qc)
  daff::render_diff(dd,file = file.path(webDirectory,project,projVersion,"qcChanges.html"),view = FALSE)
  if(any(names(qc) == "inThisCompilation")){
    #check for conflicts in "inThisCompilation"
    #this is especially important when first starting this variable
    #default to google qc sheet (qcB)
    shouldBeTrue <- which(qc$inThisCompilation == "((( null ))) TRUE /// FALSE")
    shouldBeFalse <- which(qc$inThisCompilation == "((( null ))) FALSE /// TRUE")
    qc$inThisCompilation[shouldBeTrue] <- "TRUE"
    qc$inThisCompilation[shouldBeFalse] <- "FALSE"
  }
  #this should fix conflicts that shouldnt exist
  #qc <- resolveDumbConflicts(qc)
  data$qc <- qc
  #data$qcA <- qcA
  return(data)
}
#' updateTsFromMergedQc
#'
#' Applies the merged QC sheet back onto the timeseries, optionally
#' standardizes terms/groups, rebuilds the dataset list, recovers or
#' validates datasetIds, and updates per-file changelogs.
#'
#' @param params named parameter list (see buildParams())
#' @param data data list from mergeQcSheets()
#'
#' @return a fresh data list with nD, ndsn, nicdi, dsidKey, dsnInComp,
#'   projVersion, and filesToUltimatelyDelete
#' @export
updateTsFromMergedQc <- function(params,data){
  #assignVariablesFromList(params)
  for(i in seq_along(params)){
    assign(names(params)[i],params[[i]])
  }
  #drop unneeded variables to free memory before the heavy update step.
  neededData <- which(names(data) %in% c("sTS",
                                         "qc",
                                         "projVersion",
                                         "dsidsOriginal",
                                         "Dloaded",
                                         "lastProjVersion",
                                         "projVersion",
                                         "filesToUltimatelyDelete","clNotes"))
  #assignVariablesFromList(data)
  for(i in neededData){
    assign(names(data)[i],data[[i]])
  }
  rm("data")
  #5. Update sTS from merged qc
  #p <- profvis({nsTS <- updateFromQC(sTS,qc,project,projVersion)})
  nsTS <- updateFromQC(sTS,qc,project,projVersion)
  nTS <- combineInterpretationByScope(nsTS)
  #check for standardized terms
  validationReport <- lipdR:::isValidAll(nTS,report = TRUE)
  #write validation report to QC sheet
  writeValidationReportToQCSheet(validationReport,qcId)
  if(standardizeTerms){#To do: #make this its own function
    #proxy lumps: derive grouped variants of these fields
    groupFrom <- c("paleoData_proxy","paleoData_inferredMaterial","interpretation1_variable","interpretation2_variable","interpretation3_variable","interpretation4_variable","interpretation5_variable","interpretation6_variable","interpretation7_variable","interpretation8_variable")
    groupInto <- c("paleoData_proxyLumps","paleoData_inferredMaterialGroup","interpretation1_variableGroup","interpretation2_variableGroup","interpretation3_variableGroup","interpretation4_variableGroup","interpretation5_variableGroup","interpretation6_variableGroup","interpretation7_variableGroup","interpretation8_variableGroup")
    #create new vectors for grouping variables.
    nTS <- createVectorsForGroups(nTS,groupFrom,groupInto)
    #Do some cleaning
    nTS <- standardizeTsValues(nTS)
    #add directions to isotope groups
    igf <- c("interpretation1_variableGroup","interpretation2_variableGroup","interpretation3_variableGroup","interpretation4_variableGroup","interpretation5_variableGroup","interpretation6_variableGroup","interpretation7_variableGroup","interpretation8_variableGroup")
    igt <- c("interpretation1_variableGroupDirection","interpretation2_variableGroupDirection","interpretation3_variableGroupDirection","interpretation4_variableGroupDirection","interpretation5_variableGroupDirection","interpretation6_variableGroupDirection","interpretation7_variableGroupDirection","interpretation8_variableGroupDirection")
    nTS <- createInterpretationGroupDirections(nTS,igf,igt)
    nTS <- fix_pubYear(nTS)
    nTS <- fixKiloyearsTs(nTS)
    nTS <- purrr::map(nTS,removeEmptyInterpretationsFromTs)
  }
  #5c rebuild database
  nD <- collapseTs(nTS)
  #5d clean D (keep the cleaned copy only if the map succeeded)
  if(standardizeTerms){
    nDt <- purrr::map(nD,removeEmptyPubs)
    if(is.list(nDt)){
      nD <- nDt
    }
  }
  #check to see which datasets are in this compilation
  itc <- inThisCompilation(nTS,project,projVersion)
  ndsn <- pullTsVariable(nTS, "dataSetName")
  dsnInComp <- unique(ndsn[map_lgl(itc,isTRUE)])
  nicdi <- which(!names(nD) %in% dsnInComp)
  # update file and project changelogs
  #first file changelogs
  dsidsNew <- tibble(datasetId = map_chr(nD,"datasetId"),
                     dataSetNameNew = map_chr(nD,"dataSetName"),
                     dataSetVersion = purrr::map_chr(nD,getVersion))
  #deal with missing datasetIds by recovering the original id by name
  if(any(is.na(dsidsNew$datasetId))){
    bbb <- which(is.na(dsidsNew$datasetId))
    for(bb in bbb){
      bbdsn <- dsidsNew$dataSetNameNew[bb]
      olddsid <- dsidsOriginal$datasetId[dsidsOriginal$dataSetNameOrig == bbdsn]
      #see if that works
      if(length(olddsid) == 1){
        if(!any(olddsid == dsidsNew$datasetId[-bbb])){
          #then this seems ok
          dsidsNew$datasetId[bb] <- olddsid
          nD[[bbdsn]]$datasetId <- olddsid
        }
      }
    }
  }
  #if there still are bad ones stop.
  # (glue string repaired: braces/quotes were previously mangled, so the
  # message would never have interpolated.)
  if(any(is.na(dsidsNew$datasetId))){
    stop(glue("{paste(dsidsNew$dataSetNameNew[is.na(dsidsNew$datasetId)], collapse = ', ')} are missing dsids in the new data which is bad."))
  }
  #figure out change notes
  dsidKey <- dplyr::left_join(dsidsNew,dsidsOriginal,by = "datasetId")
  print("Updating changelogs....")
  #loop through DSid and create changelog (this is for files, not for the project)
  for(dfi in seq_len(nrow(dsidKey))){
    newName <- dsidKey$dataSetNameNew[dfi]
    oldName <- dsidKey$dataSetNameOrig[dfi]
    cl <- try(createChangelog(Dloaded[[oldName]],nD[[newName]]))
    if(is(cl,"try-error")){
      stop("Error in dataset changelogging")
    }
    nD[[newName]] <- updateChangelog(nD[[newName]],
                                     changelog = cl,
                                     notes = dsidKey$changes[dfi])
  }
  newData <- list(nD = nD,
                  ndsn = ndsn,
                  nicdi = nicdi,
                  dsidKey = dsidKey,
                  dsnInComp = dsnInComp,
                  projVersion = projVersion,
                  filesToUltimatelyDelete = filesToUltimatelyDelete)
  data <- newData
  return(data)
}
# Create or refresh the per-dataset web pages.
# Compares the new dataset inventory against the stored one: datasets
# whose version changed (or recreateDataPages = TRUE) get freshly created
# pages; unchanged datasets in the compilation get their existing pages
# updated. Returns `data` with newInv, oldInv and toCreate appended.
createDataPages <- function(params,data){
  #assignVariablesFromList(params)
  for(i in 1:length(params)){
    assign(names(params)[i],params[[i]])
  }
  #assignVariablesFromList(data)
  for(i in 1:length(data)){
    assign(names(data)[i],data[[i]])
  }
  #re extract nTS
  nTS <- extractTs(nD)
  #temporary
  #create changelog for any dataset still missing one
  for(d in 1:length(nD)){
    if(is.null(nD[[d]]$changelog)){
      nD[[d]] <- initializeChangelog(nD[[d]])
    }
  }
  googlesheets4::gs4_auth(email = googEmail,cache = ".secret")
  newInv <- createInventory(nD)
  oldInv <- getInventory(lipdDir,googEmail)
  #find any updates to versions, or new datasets that we need to create for this
  if(recreateDataPages){
    # Force full recreation: everything goes into toCreate.
    toCreate <- dplyr::full_join(oldInv,newInv,by = "datasetId")
    toUpdate <- data.frame()
  }else{#only create what's changed
    toCreate <- dplyr::full_join(oldInv,newInv,by = "datasetId") %>%
      dplyr::filter(dataSetVersion.x != dataSetVersion.y | is.na(dataSetVersion.x))
    #update pages for data in compilation, but that didn't change
    toUpdate <- dplyr::full_join(oldInv,newInv,by = "datasetId") %>%
      dplyr::filter(dataSetVersion.x == dataSetVersion.y & !is.na(dataSetVersion.x))
  }
  if(nrow(toUpdate) > 0 & nrow(toCreate) > 0){#check to make sure were good, if need be
    #make sure distinct from create
    if(any(toCreate$datasetId %in% toUpdate$datasetId)){
      stop("Data pages to create and update are not distinct (and they should be)")
    }
  }
  if(nrow(toCreate) > 0){
    #create new datapages for the appropriate files
    # Rows with no new dataSetName correspond to deleted datasets; drop them.
    w <- which(is.na(toCreate$dataSetNameNew.y))
    tc <- nD[toCreate$dataSetNameNew.y]
    if(length(w) > 0){
      if(length(w) < nrow(toCreate)){
        ndsn <- toCreate$dataSetNameNew.y[-w]
        tc <- tc[-w]
      }else{
        stop("no datasets left to create")
      }
    }
    print("Creating new data webpages...")
    purrr::walk(tc,quietly(createDataWebPage),webdir = webDirectory,.progress = TRUE)
  }
  #if changes
  if(nrow(toUpdate) > 0){
    #create new datapages for the appropriate files
    w <- which(is.na(toUpdate$dataSetNameNew.y))
    tu <- nD[toUpdate$dataSetNameNew.y]
    if(length(w) > 0){
      if(length(w) < nrow(toUpdate)){
        ndsn <- toUpdate$dataSetNameNew.y[-w]
        tu <- tu[-w]
      }else{
        stop("no datasets left to update")
      }
    }
    print("Updating data webpages...")
    purrr::walk(tu,quietly(updateDataWebPageForCompilation),webdir = webDirectory,.progress = TRUE)
  }
  #pass on to the next step in the pipeline
  newData <- list(newInv = newInv,
                  oldInv = oldInv,
                  toCreate = toCreate)
  data <- append(data,newData)
  return(data)
}
#' Create lipdverse pages for this version of the project
#'
#' Builds the project overview/sidebar/map pages and per-dataset shell
#' sites for this compilation version; optionally refreshes the global
#' lipdverse pages and serializations as well.
#'
#' @param params named parameter list (see buildParams())
#' @param data data list from createDataPages()
#'
#' @return `data` (with nD/nTS dropped) plus qcF (final QC data frame)
#'   and DF (all datasets, in-compilation first)
#' @export
createProjectWebpages <- function(params,data){
  #assignVariablesFromList(params)
  for(i in seq_along(params)){
    assign(names(params)[i],params[[i]])
  }
  #assignVariablesFromList(data)
  for(i in seq_along(data)){
    assign(names(data)[i],data[[i]])
  }
  #create this version overview page
  createProjectSidebarHtml(project, projVersion,webDirectory)
  createProjectOverviewPage(project,projVersion,webDirectory)
  #update lipdverse overview page
  createProjectSidebarHtml("lipdverse", "current_version",webDirectory)
  createProjectOverviewPage("lipdverse", "current_version",webDirectory)
  #get only those in the compilation
  nDic <- nD[unique(dsnInComp)] #the unique shouldn't be necessary here, but also shouldn't hurt since it was uniqued earlier
  tcdf <- data.frame(dsid = map_chr(nDic,"datasetId"),
                     dsn = map_chr(nDic,"dataSetName"),
                     vers = map_chr(nDic,getVersion))
  #create all the project shell sites
  print(glue::glue("Creating {nrow(tcdf)} project shell sites"))
  purrr::pwalk(tcdf,
               quietly(createProjectDataWebPage),
               webdir = webDirectory,
               .progress = TRUE,
               project,
               projVersion)
  #create a project map
  nnTS <- extractTs(nDic)
  createProjectMapHtml(nnTS,project = project,projVersion = projVersion,webdir = webDirectory)
  if(updateLipdverse){
    updateQueryCsv(nD)
    #get lipdverse inventory
    allDataDir <- list.dirs("~/Dropbox/lipdverse/html/data/",recursive = FALSE)
    # Summarize the newest on-disk version of one dataset directory.
    getDataDetails <- function(datadir){
      maxVers <- list.dirs(datadir)[-1] %>%
        basename() %>%
        stringr::str_replace_all(pattern = "_",replacement = ".") %>%
        as.numeric_version() %>%
        max() %>%
        as.character() %>%
        stringr::str_replace_all(pattern = "[.]",replacement = "_")
      dsid <- datadir %>% basename()
      fnames <- list.files(file.path(datadir,maxVers))
      fnamesFull <- list.files(file.path(datadir,maxVers),full.names = TRUE)
      dsni <- fnames %>%
        stringr::str_detect(pattern = ".lpd") %>%
        which()
      # The longest .lpd filename is taken as the dataset name.
      longest <- dsni[which.max(purrr::map_dbl(fnames[dsni],stringr::str_length))]
      dsn <- fnames[longest] %>% stringr::str_remove(pattern = ".lpd")
      path <- fnamesFull[longest]
      mod.time <- file.info(path)$mtime
      return(data.frame(
        dsid = dsid,
        dsn = dsn,
        vers = stringr::str_replace_all(string = maxVers,pattern = "_",replacement = "."),
        path = path,
        versionCreated = mod.time))
    }
    #make sure that data files exist for all of the data in the database
    # (map_dfr is exported from purrr; `:::` was unnecessary and fragile)
    lipdverseDirectory <- purrr::map_dfr(allDataDir,getDataDetails)
    LV <- readLipd(lipdverseDirectory$path)
    allDataDetails <- data.frame(dsid = map_chr(LV,"datasetId"),
                                 dsn = map_chr(LV,"dataSetName"),
                                 vers = map_chr(LV,getVersion))
    add <- dplyr::left_join(allDataDetails,lipdverseDirectory,by = "dsid")
    # TRUE where the in-memory version is newer than the on-disk one;
    # missing versions are treated as 0.0.0.
    lvtc <- function(versO,versN){
      versO[is.na(versO)] <- "0.0.0"
      versN[is.na(versN)] <- "0.0.0"
      return(as.numeric_version(versO) > as.numeric_version(versN))
    }
    whichUpdated <- which(lvtc(add$vers.x,add$vers.y))
    if(length(whichUpdated) > 0){
      dsnu <- nD[add$dsn.x[whichUpdated]]
      walk(dsnu,createDataWebPage,webdir = webDirectory)
      #create lipdverse project pages
    }
    #find missing lipdverse htmls
    lpht <- list.files("~/Dropbox/lipdverse/html/lipdverse/current_version/",pattern = ".html")
    lphtdsn <- stringr::str_remove_all(lpht,pattern = ".html")
    addh <- which(!allDataDetails$dsn %in% lphtdsn)
    if(length(addh) > 0){
      lphtdf <- allDataDetails[addh,]
      #create all the project shell sites
      print(glue::glue("Creating {length(addh)} new lipdverse shell sites"))
      purrr::pwalk(lphtdf,
                   createProjectDataWebPage,
                   webdir = webDirectory,
                   project = "lipdverse",
                   projVersion = "current_version")
    }
    #look for updated lipdverse htmls
    lphtfull <- list.files("~/Dropbox/lipdverse/html/lipdverse/current_version/",pattern = ".html",full.names = TRUE)
    lpht <- list.files("~/Dropbox/lipdverse/html/lipdverse/current_version/",pattern = ".html",full.names = FALSE)
    # Extract the x_y_z version embedded in a shell page's sidebar link.
    getLipdverseHtmlVersions <- function(lfile){
      lss <- readLines(lfile)
      sbl <- max(which(stringr::str_detect(lss,"sidebar.html")))
      vers <- as.character(stringr::str_match_all(lss[sbl],"\\d{1,}_\\d{1,}_\\d{1,}")[[1]])
      vers <- str_replace_all(vers,"_",".")
      return(vers)
    }
    lphtdsn <- stringr::str_remove_all(lpht,pattern = ".html")
    htmlVers <- map_chr(lphtfull,getLipdverseHtmlVersions)
    addv <- dplyr::left_join(allDataDetails,data.frame(dsn = lphtdsn,vers = htmlVers),by = "dsn")
    whichUpdatedHtml <- which(lvtc(addv$vers.x,addv$vers.y))
    if(length(whichUpdatedHtml) > 0){
      lphtdf <- allDataDetails[whichUpdatedHtml,]
      #create all the project shell sites
      print(glue::glue("Updating {length(whichUpdatedHtml)} lipdverse shell sites"))
      purrr::pwalk(lphtdf,
                   createProjectDataWebPage,
                   webdir = webDirectory,
                   project = "lipdverse",
                   projVersion = "current_version")
    }
    #lipdverse htmls to remove
    # #don't do this for now, because it doesn't work with multiple data directories
    # todeht <- which(!lphtdsn %in% allDataDetails$dsn)
    # lphtdsn[todeht]
    #update lipdverse map
    LVTS <- extractTs(LV)
    createProjectMapHtml(LVTS,project = "lipdverse",projVersion = "current_version",webdir = webDirectory)
  }
  #create lipdverse querying csv
  #reassign
  DF <- nDic
  if(serialize){
    try(createSerializations(D = DF,webDirectory,project,projVersion),silent = FALSE)
    if(updateLipdverse){
      try(createSerializations(D = LV,webDirectory,"lipdverse","current_version"),silent = FALSE)
    }
  }
  #add datasets not in compilation into DF
  if(length(nicdi)>0){
    DF <- append(DF,nD[nicdi])
  }
  if(length(DF) != length(nD)){
    stop("Uh oh, you lost or gained datasets while creating the webpages")
  }
  TSF <- extractTs(DF)
  #get most recent in compilations
  mics <- getMostRecentInCompilationsTs(TSF)
  TSF <- pushTsVariable(TSF,variable = "paleoData_mostRecentCompilations",vec = mics,createNew = TRUE)
  sTSF <- splitInterpretationByScope(TSF)
  qcF <- createQCdataFrame(sTSF,templateId = qcId,ageOrYear = ageOrYear,compilationName = project,compVersion = projVersion)
  newData <- list(qcF = qcF,
                  DF = DF)
  data$nD <- NULL
  data$nTS <- NULL
  return(append(data,newData))
}
#' Create lipdversePages old framework
#'
#' Legacy webpage-building step of the compilation pipeline. Optionally builds
#' project dashboards for datasets in the compilation, reloads the LiPD files
#' that were written out, and produces the QC data frame for the project.
#'
#' @param params named list of pipeline parameters (see \code{buildParams});
#'   each element is assigned into this function's environment by name.
#' @param data named list of pipeline data from earlier steps; each element is
#'   assigned into this function's environment by name.
#'
#' @return `data` with `TSF`, `sTSF`, `qcF` and `DF` appended.
#' @export
createWebpages <- function(params,data){
  # Unpack params and data into local variables by name
  # (inlined version of assignVariablesFromList).
  #assignVariablesFromList(params)
  for(i in 1:length(params)){
    assign(names(params)[i],params[[i]])
  }
  #assignVariablesFromList(data)
  for(i in 1:length(data)){
    assign(names(data)[i],data[[i]])
  }
  #6 Update lipdverse
  if(updateWebpages){
    #restrict as necessary
    if(restrictWebpagesToCompilation){
      # indices of timeseries (ictsi) and datasets (icdi) whose
      # dataSetName is in the compilation
      ictsi <- which(ndsn %in% dsnInComp)
      icdi <- which(names(nD) %in% dsnInComp)
      if(length(ictsi) == 0 || length(icdi) == 0){
        stop("didn't find any datasets in the compilation for the webpage")
      }
      # NOTE(review): nicdi is not assigned on this branch; the
      # length(nicdi) check below presumably relies on nicdi arriving
      # via `data` — confirm.
    }else{
      ictsi <- seq_along(nTS)
      icdi <- seq_along(nD)
      nicdi <- NULL
    }
    createProjectDashboards(nD[icdi],nTS[ictsi],webDirectory,project,projVersion)
    #load back in files written by the dashboard step
    DF <- readLipd(file.path(webDirectory,project,projVersion))
    if(serialize){
      # best effort: serialization failure must not halt the pipeline
      try(createSerializations(D = DF,webDirectory,project,projVersion),silent = TRUE)
    }
    #add datasets not in compilation into DF
    if(length(nicdi)>0){
      DF <- append(DF,nD[nicdi])
    }
    # sanity check: webpage creation must not drop or duplicate datasets
    if(length(DF) != length(nD)){
      stop("Uh oh, you lost or gained datasets while creating the webpages")
    }
  }else{
    # webpages skipped: pass the loaded datasets through untouched
    DF <- nD
  }
  TSF <- extractTs(DF)
  #get most recent in compilations and record it on every timeseries
  mics <- getMostRecentInCompilationsTs(TSF)
  TSF <- pushTsVariable(TSF,variable = "paleoData_mostRecentCompilations",vec = mics,createNew = TRUE)
  sTSF <- splitInterpretationByScope(TSF)
  # build the QC table that will populate the google QC sheet
  qcF <- createQCdataFrame(sTSF,templateId = qcId,ageOrYear = ageOrYear,compilationName = project,compVersion = projVersion)
  newData <- list(TSF = TSF,
                  sTSF = sTSF,
                  qcF = qcF,
                  DF = DF)
  return(append(data,newData))
}
#' Update google
#'
#' Push the freshly built QC data frame back up to Google: write it to
#' newLastUpdate.csv, upload it as the "last update" sheet, and replace the
#' first sheet of the QC workbook with it. Finally prunes `data` down to the
#' variables still needed by later pipeline steps.
#'
#' @param params named list of pipeline parameters; assigned into this
#'   function's environment by name.
#' @param data named list of pipeline data; assigned into this function's
#'   environment by name. Must include `qcF` and `DF`.
#'
#' @return `data`, trimmed to the variables needed by downstream steps.
#' @export
updateGoogleQc <- function(params,data){
  #assignVariablesFromList(params)
  for(i in 1:length(params)){
    assign(names(params)[i],params[[i]])
  }
  #assignVariablesFromList(data)
  for(i in 1:length(data)){
    assign(names(data)[i],data[[i]])
  }
  #7 Update QC sheet on google (and make a lastUpdate.csv file)
  qc2w <- qcF
  # normalize blanks to NA, then (below) back to "" so the CSV has no "NA" text
  # NOTE(review): is.null() on a data frame is always FALSE, so only the
  # `== ""` part of this mask can match — presumably intentional belt-and-braces.
  qc2w[is.null(qc2w) | qc2w == ""] <- NA
  #find differences for log
  #diff <- daff::diff_data(qcA,qc2w,ids = "TSid",ignore_whitespace = TRUE,columns_to_ignore = "link to lipdverse",never_show_order = TRUE)
  qc2w[is.na(qc2w)] <- ""
  # goodDatasets <- unique(qc2w$dataSetName[which(qc2w$inThisCompilation == "TRUE")])
  #
  # gi <- which(qc2w$dataSetName %in% goodDatasets)
  # qc2w <- qc2w[gi,]
  #update the data compilation page
  updateDatasetCompilationQc(DF,project,projVersion,qcId)
  googlesheets4::gs4_auth(email = googEmail,cache = ".secret")
  #write the new qcsheet to file
  readr::write_csv(qc2w,path = file.path(webDirectory,project,"newLastUpdate.csv"))
  #upload it to google drive for last update
  googledrive::drive_update(media = file.path(webDirectory,project,"newLastUpdate.csv"),
                            file = googledrive::as_id(lastUpdateId))
  #copy the last update to the qcsheet:
  # delete the old first sheet, then copy the freshly uploaded sheet in as "QC"
  googlesheets4::sheet_delete(ss = qcId,sheet = 1)
  googlesheets4::sheet_copy(from_ss = lastUpdateId, from_sheet = 1,to_ss = qcId, to_sheet = "QC",.before = "datasetsInCompilation")
  #write_sheet_retry(qc2w,ss = qcId, sheet = 1)
  googledrive::drive_rename(googledrive::as_id(qcId),name = stringr::str_c(project," v.",projVersion," QC sheet"))
  #daff::render_diff(diff,file = file.path(webDirectory,project,projVersion,"metadataChangelog.html"),title = paste("Metadata changelog:",project,projVersion),view = FALSE)
  #googledrive::drive_update(file = googledrive::as_id(lastUpdateId),media = file.path(webDirectory,project,"newLastUpdate.csv"))
  #newName <- stringr::str_c(project," v.",projVersion," QC sheet")
  #googledrive::drive_update(file = googledrive::as_id(qcId),media = file.path(webDirectory,project,"newLastUpdate.csv"),name = newName)
  #remove unneeded data: keep only what later pipeline stages consume
  neededVariablesMovingForward <- c("dsidKey",
                                    "webDirectory",
                                    "dsnInComp",
                                    "project",
                                    "lastVersionNumber",
                                    "DF",
                                    "projVersion",
                                    "webDirectory",
                                    "googEmail",
                                    "versionMetaId",
                                    "filesToUltimatelyDelete",
                                    "lipdDir")
  vToRemove <- names(data)[!names(data) %in% neededVariablesMovingForward]
  for(v2r in vToRemove){
    data[v2r] <- NULL
  }
  return(data)
}
#' Finalize
#'
#' Build the new row for the lipdverse version-tracking sheet: record the new
#' version triplet, the dataset names in the compilation, a directory MD5, and
#' the datasets added/removed relative to the previous version. The updated
#' table is written to a temp CSV for the next pipeline step to upload.
#'
#' @param params named list of pipeline parameters; assigned into this
#'   function's environment by name.
#' @param data named list of pipeline data; assigned into this function's
#'   environment by name.
#'
#' @return `data`, with `lastVersionNumber` added and trimmed to the variables
#'   needed by downstream steps.
#' @export
finalize <- function(params,data){
  #assignVariablesFromList(params)
  for(i in 1:length(params)){
    assign(names(params)[i],params[[i]])
  }
  #assignVariablesFromList(data)
  for(i in 1:length(data)){
    assign(names(data)[i],data[[i]])
  }
  #8 finalize and write lipd files
  #DF <- purrr::map(DF,removeEmptyPubs)
  #9 update the google version file
  versionDf <- read_sheet_retry(googledrive::as_id(versionMetaId),col_types = "cdddccccc")
  #versionDf <- read_sheet_retry(googledrive::as_id(versionMetaId))
  versionDf$versionCreated <- lubridate::ymd_hms(versionDf$versionCreated)
  # start the new row as a copy of an existing one so all columns exist
  newRow <- versionDf[1,]
  newRow$project <- project
  # projVersion is "publication_dataset_metadata"
  pdm <- as.numeric(unlist(str_split(projVersion,"_")))
  newRow$publication <- pdm[1]
  newRow$dataset <- pdm[2]
  newRow$metadata <- pdm[3]
  # pipe-delimited list of dataset names in this compilation
  newRow$dsns <- paste(unique(dsnInComp),collapse = "|")
  newRow$versionCreated <- lubridate::ymd_hms(lubridate::now(tzone = "UTC"))
  newRow$`zip MD5` <- directoryMD5(lipdDir)
  #check for differences in dsns against the most recent prior version
  dsndiff <- filter(versionDf,project == (!!project)) %>%
    filter(versionCreated == max(versionCreated,na.rm = TRUE))
  # columns 2:4 are publication/dataset/metadata — join into "p_d_m"
  lastVersionNumber <- paste(dsndiff[1,2:4],collapse = "_")
  oldDsns <- stringr::str_split(dsndiff$dsns,pattern = "[|]",simplify = T)
  newDsns <- stringr::str_split(newRow$dsns,pattern = "[|]",simplify = T)
  newRow$`dataSets removed` <- paste(setdiff(oldDsns,newDsns),collapse = "|")
  newRow$`dataSets added` <- paste(setdiff(newDsns,oldDsns),collapse = "|")
  nvdf <- dplyr::bind_rows(versionDf,newRow)
  nvdf$versionCreated <- as.character(nvdf$versionCreated)
  # stash for changeloggingAndUpdating(), which uploads it to google
  readr::write_csv(nvdf,file = file.path(tempdir(),"versTemp.csv"))
  data$lastVersionNumber <- lastVersionNumber
  #remove unneeded data: keep only what later pipeline stages consume
  neededVariablesMovingForward <- c("dsidKey",
                                    "webDirectory",
                                    "project",
                                    "lastVersionNumber",
                                    "DF",
                                    "projVersion",
                                    "webDirectory",
                                    "googEmail",
                                    "versionMetaId",
                                    "filesToUltimatelyDelete",
                                    "lipdDir")
  vToRemove <- names(data)[!names(data) %in% neededVariablesMovingForward]
  for(v2r in vToRemove){
    data[v2r] <- NULL
  }
  return(data)
}
#' Log changes and update
#'
#' Final pipeline step: render the project changelog against the previous
#' version, upload the versioning table built by \code{finalize}, refresh the
#' datasetId dereferencer and vocabulary sites, promote the new version to
#' "current_version" on disk, and write the finalized LiPD files back to the
#' authority directory.
#'
#' @param params named list of pipeline parameters; assigned into this
#'   function's environment by name.
#' @param data named list of pipeline data; assigned into this function's
#'   environment by name.
#'
#' @return called for its side effects; returns the value of
#'   \code{unFlagUpdate()}.
#' @export
changeloggingAndUpdating <- function(params,data){
  #assignVariablesFromList(params)
  for(i in 1:length(params)){
    assign(names(params)[i],params[[i]])
  }
  #assignVariablesFromList(data)
  for(i in 1:length(data)){
    assign(names(data)[i],data[[i]])
  }
  #write project changelog
  #get last project's data. Try the .RData serialization first:
  lastSerial <- try(load(file.path(webDirectory,project,lastVersionNumber,paste0(project,lastVersionNumber,".RData"))),silent = TRUE)
  if(!is(lastSerial,"try-error")){
    # load() put the serialized objects (including D) in this environment
    Dpo <- D
  }else{#try to load from lipd
    Dpo <- readLipd(file.path(webDirectory,project,lastVersionNumber))
  }
  if(length(Dpo)>0){
    createProjectChangelog(Dold = Dpo,
                           Dnew = DF,
                           proj = project,
                           projVersOld = lastVersionNumber,
                           projVersNew = projVersion,
                           webDirectory = webDirectory,
                           notesTib = dsidKey)
  }else{#write empty changelog so the site links still resolve
    cle <- glue::glue("## Changelog is empty - probably because there were no files in the web directory for {project} version {lastVersionNumber}")
    readr::write_file(cle,file.path(webDirectory,project,projVersion,"changelogEmpty.Rmd"))
    rmarkdown::render(file.path(webDirectory,project,projVersion,"changelogEmpty.Rmd"),
                      output_file = file.path(webDirectory,project,projVersion,"changelogSummary.html"))
    rmarkdown::render(file.path(webDirectory,project,projVersion,"changelogEmpty.Rmd"),
                      output_file = file.path(webDirectory,project,projVersion,"changelogDetail.html"))
  }
  # upload the versioning table written by finalize()
  vt <- readr::read_csv(file.path(tempdir(),"versTemp.csv"),col_types = "cdddccccc")
  googlesheets4::gs4_auth(email = googEmail,cache = ".secret")
  wrote <- try(write_sheet_retry(vt,ss = versionMetaId,sheet = 1))
  if(is(wrote,"try-error")){
    print("failed to write lipdverse versioning - do this manually")
  }
  #update datasetId information
  updateDatasetIdDereferencer(DF,
                              compilation = project,
                              version = projVersion,
                              dateUpdated = lubridate::today())
  #update vocab (best effort)
  try(updateVocabWebsites())
  #give permissions back
  #drive_share(as_id(qcId),role = "writer", type = "user",emailAddress = "")
  #update the files: promote the new version directory to current_version
  unlink(file.path(webDirectory,project,"current_version"),force = TRUE,recursive = TRUE)
  dir.create(file.path(webDirectory,project,"current_version"))
  file.copy(file.path(webDirectory,project,projVersion,.Platform$file.sep), file.path(webDirectory,project,"current_version",.Platform$file.sep), recursive=TRUE,overwrite = TRUE)
  file.copy(file.path(webDirectory,project,projVersion,str_c(project,projVersion,".zip")),
            file.path(webDirectory,project,"current_version","current_version.zip"),overwrite = TRUE)
  # clean up the input files, write the finalized LiPDs, and clear the update flag
  unlink(x = filesToUltimatelyDelete,force = TRUE, recursive = TRUE)
  writeLipd(DF,path = lipdDir,removeNamesFromLists = TRUE)
  unFlagUpdate()
}
#' create serializations of a database in R, matlab and python
#'
#' @param D a multi-lipd object (list of LiPD datasets)
#' @param webDirectory web directory root to write serializations into
#' @param project project name (used for output file names)
#' @param projVersion project version string (used for output file names)
#' @param remove.ensembles strip ensemble tables before serializing? If TRUE
#'   and ensembles were present, an additional "-ensembles" serialization is
#'   also written.
#' @param matlabUtilitiesPath path to the LiPD-utilities Matlab code
#' @param matlabPath path to the matlab executable
#' @param python3Path path to a python3 with the lipd module installed
#'
#' @import stringr
#' @import lipdR
#' @import readr
#' @description creates serialization; requires that Matlab and Python be installed, along with lipd utilities for those languages.
#' @return called for its side effects (files written under
#'   webDirectory/project/projVersion); returns the result of the final
#'   system() call invisibly via try().
#' @export
createSerializations <- function(D,
                                 webDirectory,
                                 project,
                                 projVersion,
                                 remove.ensembles = TRUE,
                                 matlabUtilitiesPath = "/Volumes/data/GitHub/LiPD-utilities/Matlab",
                                 matlabPath = "/Applications/MATLAB_R2021b.app/bin/matlab",
                                 python3Path="/Users/nicholas/opt/anaconda3/envs/pyleo/bin/python3"){
  #create serializations for web
  #R
  if(remove.ensembles){
    Do <- D
    D <- purrr::map(D,removeEnsembles)
    # if stripping ensembles shrank the object, ensembles were present
    has.ensembles <- object.size(Do) > object.size(D)
  }else{
    # BUGFIX: Do was previously only assigned inside the remove.ensembles
    # branch, so object.size(Do) errored whenever remove.ensembles = FALSE.
    Do <- D
    has.ensembles <- FALSE
  }
  TS <- extractTs(D)
  #sTS <- splitInterpretationByScope(TS)
  save(list = c("D","TS"),file = file.path(webDirectory,project,projVersion,stringr::str_c(project,projVersion,".RData")))
  #write files to a temporary directory
  lpdtmp <- file.path(tempdir(),"lpdTempSerialization")
  unlink(lpdtmp,recursive = TRUE)
  dir.create(lpdtmp)
  writeLipd(D,path = lpdtmp)
  #zip it (-j: junk the directory paths inside the archive)
  zip(zipfile = file.path(webDirectory,project,projVersion,str_c(project,projVersion,".zip")),files = list.files(lpdtmp,pattern= "*.lpd",full.names = TRUE),extras = '-j')
  if(has.ensembles){
    print("writing again with ensembles")
    TS <- extractTs(Do)
    #sTS <- splitInterpretationByScope(TS)
    # NOTE(review): this saves the ensemble-stripped D alongside the ensemble
    # TS; possibly Do was intended here — confirm before changing.
    save(list = c("D","TS"),file = file.path(webDirectory,project,projVersion,stringr::str_c(project,projVersion,"-ensembles.RData")))
    #write files to a temporary directory
    lpdtmpens <- file.path(tempdir(),"lpdTempSerializationEnsembles")
    unlink(lpdtmpens,recursive = TRUE)
    dir.create(lpdtmpens)
    writeLipd(Do,path = lpdtmpens)
    #zip it
    zip(zipfile = file.path(webDirectory,project,projVersion,str_c(project,projVersion,"-ensembles.zip")),files = list.files(lpdtmpens,pattern= "*.lpd",full.names = TRUE))
  }
  #matlab: build a one-shot script that reads the LiPDs and saves a .mat
  mfile <- stringr::str_c("addpath(genpath('",matlabUtilitiesPath,"'));\n") %>%
    stringr::str_c("D = readLiPD('",lpdtmp,"');\n") %>%
    stringr::str_c("TS = extractTs(D);\n") %>%
    stringr::str_c("sTS = splitInterpretationByScope(TS);\n") %>%
    stringr::str_c("save ",file.path(webDirectory,project,projVersion,stringr::str_c(project,projVersion,".mat")),' D TS sTS\n') %>%
    stringr::str_c("exit")
  #write the file (`file =` — the `path` argument is deprecated in readr)
  readr::write_file(mfile,file = file.path(webDirectory,project,projVersion,"createSerialization.m"))
  #run the file; try() so a missing matlab install doesn't halt the pipeline
  try(system(stringr::str_c(matlabPath," -nodesktop -nosplash -nodisplay -r \"run('",file.path(webDirectory,project,projVersion,"createSerialization.m"),"')\"")))
  #Python: build a script that reads the LiPDs and pickles D and TS
  pyfile <- "import lipd\n" %>%
    stringr::str_c("import pickle\n") %>%
    stringr::str_c("D = lipd.readLipd('",lpdtmp,"/')\n") %>%
    stringr::str_c("TS = lipd.extractTs(D)\n") %>%
    stringr::str_c("filetosave = open('",file.path(webDirectory,project,projVersion,stringr::str_c(project,projVersion,".pkl'")),",'wb')\n") %>%
    stringr::str_c("all_data = {}\n") %>%
    stringr::str_c("all_data['D'] = D\n") %>%
    stringr::str_c("all_data['TS'] = TS\n") %>%
    stringr::str_c("pickle.dump(all_data, filetosave,protocol = 2)\n") %>%
    stringr::str_c("filetosave.close()")
  #write the file
  readr::write_file(pyfile,file = file.path(webDirectory,project,projVersion,"createSerialization.py"))
  #run the file
  try(system(stringr::str_c(python3Path, " ",file.path(webDirectory,project,projVersion,"createSerialization.py"))))
}
| /R/nightlyUpdateDrake.R | no_license | nickmckay/lipdverseR | R | false | false | 52,788 | r | #' Get google drive file update time
#'
#' Return the last-modified time of a Google Drive file.
#'
#' @param googId Google drive file key
#' @param tzone timezone for the returned timestamp (default "UTC")
#' @import googledrive lubridate
#' @return a POSIXct modification time in `tzone`
#' @export
#'
#' @examples
googleDriveUpdateTime <- function(googId,tzone = "UTC"){
  #money sheet update
  info <- googledrive::drive_get(googledrive::as_id(googId))
  # access the drive_resource column by name rather than by position:
  # the former info[3] silently broke if googledrive reordered columns
  mtime <- info$drive_resource[[1]]$modifiedTime
  return(lubridate::with_tz(lubridate::ymd_hms(mtime),tzone = tzone))
}
#' Check to see if a project needs to be updated
#'
#' A project needs updating when either (a) the MD5 of the local LiPD
#' directory differs from the one recorded for the most recent version, or
#' (b) the google QC sheet has been modified since the last recorded version.
#'
#' @param project project name
#' @param webDirectory web directory root
#' @param lipdDir authority directory holding the project's .lpd files
#' @param qcId google sheets ID for the project's QC sheet
#' @param versionMetaId google sheets ID of the lipdverse versioning sheet
#' @param googEmail google account email used for authentication
#' @import googlesheets4
#' @import magrittr
#' @import dplyr
#' @import googledrive
#' @import lubridate
#'
#' @return TRUE or FALSE
#' @export
updateNeeded <- function(project,webDirectory,lipdDir,qcId,versionMetaId = "1OHD7PXEQ_5Lq6GxtzYvPA76bpQvN1_eYoFR0X80FIrY",googEmail = NULL){
  #compare files with MD5s
  # currentMD5 <- directoryMD5(lipdDir)
  # dir(lipdDir)
  #
  # lastMD5 <- directoryMD5(file.path(webDirectory,project,"current_version"))
  #
  googlesheets4::gs4_auth(email = googEmail)
  #compare QC update times: newest recorded version first
  versionSheet <- read_sheet_retry(googledrive::as_id(versionMetaId)) %>%
    dplyr::filter(project == (!!project)) %>%
    dplyr::arrange(desc(versionCreated))
  lastUpdate <- lubridate::ymd_hms(versionSheet$versionCreated[1])
  lastMD5 <- versionSheet$`zip MD5`[1]
  # default to "needs updating" unless the MD5s prove otherwise
  filesNeedUpdating <- TRUE
  if(length(lastMD5) > 0){
    currentMD5 <- directoryMD5(lipdDir)
    if(lastMD5 == currentMD5){
      filesNeedUpdating <- FALSE
    }
  }
  #most recent file edit time (computed but currently unused — see
  # commented-out check below)
  lastMod <- purrr::map(list.files(lipdDir,pattern = "*.lpd",full.names = TRUE),file.mtime )
  lastMod <- lubridate::with_tz(lubridate::ymd_hms(lastMod[[which.max(unlist(lastMod))]],tz = "America/Phoenix"),tzone = "UTC")
  # check based on folder modification time
  # filesNeedUpdating <- TRUE
  # if(lastUpdate > lastMod){
  # filesNeedUpdating <- FALSE
  # }
  #most recent QC update on google drive
  qcUpdate <- googleDriveUpdateTime(qcId)
  qcNeedsUpdating <- TRUE
  if(lastUpdate > qcUpdate){
    qcNeedsUpdating <- FALSE
  }
  # update if either signal fired
  if(qcNeedsUpdating | filesNeedUpdating){
    needsUpdating <- TRUE
  }else{
    needsUpdating <- FALSE
  }
  return(needsUpdating)
}
#' Tick the project version
#'
#' @param project project name
#' @param versionMetaId ID of the versioning qc sheet
#' @param qcIc dataSetNames in this compilation from the QC sheet
#' @param tsIc dataSetNames in the last compilation from the files
#' @param googEmail google user ID
#'
#' @description Ticks the version of a database for you. Assumes that a change is necessary. If the dataset membership is unchanged, the metadata version is incremented; otherwise the dataset version is incremented and metadata resets to 0.
#' @import googlesheets4
#' @import magrittr
#' @import dplyr
#' @import googledrive
#' @import stringr
#' @return the new version string ("publication_dataset_metadata")
#' @export
#'
#' @examples
tickVersion <- function(project,qcIc,tsIc,versionMetaId = "1OHD7PXEQ_5Lq6GxtzYvPA76bpQvN1_eYoFR0X80FIrY",googEmail = NULL){
  googlesheets4::gs4_auth(email = googEmail)
  #get last versions udsn: newest recorded version first
  versionSheet <- read_sheet_retry(googledrive::as_id(versionMetaId)) %>%
    dplyr::filter(project == (!!project)) %>%
    dplyr::arrange(desc(versionCreated))
  lastUdsn <- sort(tsIc)
  #and the new udsn
  thisUdsn <- sort(qcIc)
  # identical() safely handles vectors of differing length; the previous
  # all(lastUdsn == thisUdsn) recycled with a warning when lengths differed
  # and could return NA in the presence of NAs.
  if(identical(lastUdsn,thisUdsn)){
    #same dataset membership: tick metadata only
    p <- versionSheet$publication[1]
    d <- versionSheet$dataset[1]
    m <- versionSheet$metadata[1]+1
  }else{
    #membership changed: tick dataset, reset metadata
    p <- versionSheet$publication[1]
    d <- versionSheet$dataset[1]+1
    m <- 0
  }
  newVers <- stringr::str_c(p,d,m,sep = "_")
  return(newVers)
}
#' Get the most recent version of the compilation (before updating)
#'
#' Look up the newest recorded version of a project in the versioning
#' metadata sheet and return it as a "publication_dataset_metadata" string.
#'
#' @param project project name
#' @param versionMetaId ID of the versioning qc sheet
#' @param googEmail google user ID
#' @description Gets the last version of the database (before updating)
#' @import googlesheets4
#' @import magrittr
#' @import dplyr
#' @import googledrive
#' @import stringr
#' @return the most recent version string, e.g. "1_2_3"
#' @export
#'
#' @examples
lastVersion <- function(project,versionMetaId = "1OHD7PXEQ_5Lq6GxtzYvPA76bpQvN1_eYoFR0X80FIrY",googEmail = NULL){
  googlesheets4::gs4_auth(email = googEmail)
  # pull the version history for this project, newest first
  versionHistory <- read_sheet_retry(googledrive::as_id(versionMetaId)) %>%
    dplyr::filter(project == (!!project)) %>%
    dplyr::arrange(desc(versionCreated))
  newest <- versionHistory[1,]
  # assemble "publication_dataset_metadata"
  stringr::str_c(newest$publication,newest$dataset,newest$metadata,sep = "_")
}
#' Assign each element of a named list into an environment
#'
#' @param params a named list; each element is assigned under its name.
#' @param env environment to assign into (defaults to the parent of this
#'   function's evaluation environment).
#' @return invisibly NULL; called for its side effect on `env`.
assignVariablesFromList <- function(params,env = parent.env(environment())){
  # seq_along() is safe for an empty list, unlike 1:length(params)
  # (which iterated over c(1, 0) and errored on assign(NULL, ...))
  for(i in seq_along(params)){
    assign(names(params)[i],params[[i]],envir = env)
  }
  invisible(NULL)
}
#' Build parameters
#'
#' Collect all pipeline settings into a single named list. Argument defaults
#' define the pipeline's default behavior; the returned list is consumed by
#' the step functions via name-wise assignment.
#'
#' @param project project name
#' @param lipdDir authority directory for a lipd file
#' @param webDirectory directory for webserver
#' @param qcId google sheets ID for the qc sheet
#' @param lastUpdateId google sheets ID for the last version
#' @param versionMetaId google sheets ID of the lipdverse versioning sheet
#' @param googEmail google user ID
#' @param updateWebpages update lipdverse webpages (default = TRUE). Usually TRUE unless troubleshooting.
#' @param standardizeTerms standardize vocabulary terms while loading?
#' @param ageOrYear build QC sheet against "age" or "year"
#' @param recreateDataPages force re-creation of dataset pages?
#' @param restrictWebpagesToCompilation only build pages for datasets in the compilation?
#' @param qcStandardizationCheck validate QC sheet terms against standard tables?
#' @param serialize write R/Matlab/Python serializations?
#' @param projVersion explicit version string, or NA to auto-tick
#' @param updateLipdverse also update the whole-lipdverse pages?
#' @import purrr
#' @import googlesheets4
#' @import readr
#' @import lipdR
#' @import geoChronR
#' @return a named list of all arguments, names sorted alphabetically
#' @export
buildParams <- function(project,
                        lipdDir,
                        webDirectory,
                        qcId,
                        lastUpdateId,
                        versionMetaId = "1OHD7PXEQ_5Lq6GxtzYvPA76bpQvN1_eYoFR0X80FIrY",
                        googEmail = NULL,
                        updateWebpages = TRUE,
                        standardizeTerms = TRUE,
                        ageOrYear = "age",
                        recreateDataPages = FALSE,
                        restrictWebpagesToCompilation = TRUE,
                        qcStandardizationCheck = TRUE,
                        serialize = TRUE,
                        projVersion = NA,
                        updateLipdverse = TRUE){
  # ls() lists every argument binding in this environment (sorted);
  # mget() retrieves them as a named list. This replaces the previous
  # eval(parse(text = ...)) construction, which is an R anti-pattern.
  an <- ls()
  mget(an, envir = environment())
}
#' Check if an update is needed
#'
#' Wrapper around \code{updateNeeded} that is skipped entirely when an
#' explicit `projVersion` was supplied in the parameters.
#'
#' @param params named list of pipeline parameters; elements are assigned
#'   into this function's environment by name.
#'
#' @return "No update needed" or "Update needed" when `projVersion` is NA;
#'   otherwise NULL (check skipped because a version was specified).
#' @export
#'
#' @examples
checkIfUpdateNeeded <- function(params){
  #assignVariablesFromList(params)
  # seq_along() is safe for an empty list, unlike 1:length(params)
  for(i in seq_along(params)){
    assign(names(params)[i],params[[i]])
  }
  if(is.na(projVersion)){#skip check if new version is specified
    #check if update is necessary
    toUpdate <- updateNeeded(project,webDirectory,lipdDir,qcId,googEmail = googEmail)
    if(!toUpdate){
      return("No update needed")
    }else{
      return("Update needed")
    }
  }
}
#' Load in new data
#'
#' First data-loading step of the pipeline: decide which .lpd files to read
#' (all of them, or — for the full lipdverse database — those flagged in the
#' QC sheet's "datasetsInCompilation" page plus any new local files), read
#' them, repair common structural issues (missing datasetId, legacy
#' chronMeasurementTable, missing changelog), optionally standardize terms,
#' and extract the timeseries.
#'
#' @param params named list of pipeline parameters; elements are assigned
#'   into this function's environment by name.
#'
#' @return a named list with Dloaded (as read, for changelogging), D, TS,
#'   TSid, filesToUltimatelyDelete, dsidsOriginal and udsn.
#' @export
loadInUpdatedData <- function(params){
  #assignVariablesFromList(params)
  # seq_along() is safe for an empty list, unlike 1:length(params)
  for(i in seq_along(params)){
    assign(names(params)[i],params[[i]])
  }
  #if looking at full database:
  if(lipdDir == "/Volumes/data/Dropbox/lipdverse/database"){
    #getDatasetInCompilationFromQC()
    #0. Figure out which datasets to load based on QC sheet.
    dscomp <- read_sheet_retry(ss = qcId,sheet = "datasetsInCompilation")
    #make sure that all names there are in the lipdDir, and that there are no duplicates
    if(any(duplicated(dscomp$dsn))){
      stop(glue::glue("There are duplicated dataSetNames in 'datasetsInCompilation': {dscomp$dsn[duplicated(dscomp$dsn)]}"))
    }
    #get all files in lipdverse
    af <- list.files(lipdDir,pattern = ".lpd",full.names = FALSE) %>% stringr::str_remove_all(".lpd")
    #see if any in dscomp don't exist
    missing <- which(!dscomp$dsn %in% af)
    #remove this next time
    # BUGFIX: only drop rows when something is actually missing —
    # dscomp[-integer(0), ] selects ZERO rows and would empty the table.
    if(length(missing) > 0){
      dscomp <- dscomp[-missing,]
    }
    #see if any in dscomp don't exist
    missing <- which(!dscomp$dsn %in% af)
    if(length(missing) > 0){
      stop(glue("{length(missing)} datasets in 'datasetsInCompilation' don't exist in the database: {paste(dscomp$dsn[missing],collapse = '; ')}"))
    }
    #look for new files not in the dscomp page
    #which local files not in dscomp
    new <- which(!af %in% dscomp$dsn)
    dscompgood <- filter(dscomp,inComp != "FALSE")
    filesToConsider <- file.path(lipdDir, paste0(c(dscompgood$dsn,af[new]),".lpd"))
  }else{
    # not the full database: load everything in the directory
    filesToConsider <- list.files(lipdDir,pattern = ".lpd",full.names = TRUE)
  }
  filesToUltimatelyDelete <- filesToConsider
  #1. load in (potentially updated) files
  flagUpdate(project)
  D <- lipdR::readLipd(filesToConsider)
  #create datasetIds for records that don't have them
  for(d in seq_along(D)){
    if(is.null(D[[d]]$datasetId)){
      D[[d]]$datasetId <- createDatasetId()
    }
    #check for legacy chronMeasurementTable naming and fix
    if(!is.null(D[[d]]$chronData[[1]]$chronMeasurementTable)){
      for(ccic in seq_along(D[[d]]$chronData)){
        D[[d]]$chronData[[ccic]]$measurementTable <- D[[d]]$chronData[[ccic]]$chronMeasurementTable
        D[[d]]$chronData[[ccic]]$chronMeasurementTable <- NULL
      }
    }
    #check for changelog and fix
    if(is.null(D[[d]]$changelog)){
      D[[d]] <- initializeChangelog(D[[d]])
    }
  }
  Dloaded <- D#store for changelogging
  # record each dataset's identity as loaded, before any renaming happens
  dsidsOriginal <- tibble::tibble(datasetId = purrr::map_chr(D,"datasetId"),
                                  dataSetNameOrig = purrr::map_chr(D,"dataSetName"),
                                  dataSetVersion = purrr::map_chr(D,getVersion))
  #make sure that primary chronologies are named appropriately
  D <- purrr::map(D,renamePrimaryChron)
  if(standardizeTerms){
    D <- purrr::map(D,cleanOriginalDataUrl)
    D <- purrr::map(D,hasDepth)
    D <- purrr::map(D,nUniqueAges)
    D <- purrr::map(D,nGoodAges)
    D <- purrr::map(D,nOtherAges)
    # D <- purrr::map(D,fixExcelIssues)
    D <- purrr::map(D,standardizeChronVariableNames)
  }
  #1a. Screen by some criterion...
  #check for TSid
  TS <- lipdR::extractTs(D)
  #create grouping terms for later standardization
  #TO DO!# remove entries that don't fall into the groups/lumps!
  if(standardizeTerms){
    #Do some cleaning
    TS <- standardizeTsValues(TS)
    TS <- fix_pubYear(TS)
    TS <- fixKiloyearsTs(TS)
    TS <- purrr::map(TS,removeEmptyInterpretationsFromTs)
  }
  #get some relevant information
  TSid <- lipdR::pullTsVariable(TS,"paleoData_TSid")
  udsn <- unique(lipdR::pullTsVariable(TS,"dataSetName"))
  data <- list(Dloaded = Dloaded ,
               D = D,
               TS = TS,
               TSid = TSid,
               filesToUltimatelyDelete = filesToUltimatelyDelete,
               dsidsOriginal = dsidsOriginal,
               udsn = udsn)
  return(data)
}
#' Get QC
#'
#' Fetch and validate the google QC sheet: resolve conflicts, apply fixes
#' from any "-invalid" helper sheets, standardize vocabulary terms (aborting
#' with a validation report if invalid terms remain), pull changelog notes,
#' determine the new project version, and ensure every timeseries has a
#' unique TSid.
#'
#' @param params named list of pipeline parameters; elements are assigned
#'   into this function's environment by name.
#' @param data named list of pipeline data; elements are assigned into this
#'   function's environment by name. Must include `TS`, `TSid` and
#'   `dsidsOriginal`.
#'
#' @return `data` with updated TS plus qcB, clNotes, projVersion,
#'   lastProjVersion and sTS appended.
#' @export
getQcInfo <- function(params,data){
  #assignVariablesFromList(params)
  for(i in 1:length(params)){
    assign(names(params)[i],params[[i]])
  }
  #assignVariablesFromList(data)
  for(i in 1:length(data)){
    assign(names(data)[i],data[[i]])
  }
  #get the google qc sheet
  qcB <- getGoogleQCSheet(qcId)
  #resolve conflicts
  qcB <- resolveQcConflict(qcB)
  #make sure no required identifiers are missing
  if(any(is.na(qcB$TSid))){
    stop("TSids missing from google QC sheet")
  }
  if(any(is.na(qcB$dataSetName))){
    stop("dataSetName missing from google QC sheet")
  }
  if(any(is.na(qcB$variableName))){
    stop("variableName missing from google QC sheet")
  }
  if(qcStandardizationCheck){
    #check QCsheet terms are valid
    #replace them with other terms if they're not
    allSheetNames <- googlesheets4::sheet_names(ss = qcId)
    #check for year, age, depth fixes in the "-invalid" helper sheets
    allInvalid <- allSheetNames[grepl(allSheetNames,pattern = "-invalid")]
    atsid <- pullTsVariable(TS,"paleoData_TSid")
    for(av in allInvalid){
      thisOne <- read_sheet_retry(ss = qcId,sheet = av)
      #check to find TSids not in QC sheet AND in TS
      if("number" %in% names(thisOne)){
        #if there's a number, then do all but number one
        tochange <- which(thisOne$number > 1 & thisOne$TSid %in% atsid)
      }else{
        #if there's not a number, only do those without a TSid in the QCSheet
        tochange <- which(!thisOne$TSid %in% qcB$TSid & thisOne$TSid %in% atsid)
      }
      for(tci in tochange){
        tsidi <- which(thisOne$TSid[tci] == atsid)
        # variable name on the TS is the sheet name without "-invalid"
        vnts <- str_remove(av,"-invalid")
        if(!is.null(thisOne$number[tsidi])){#then we need to append the number into the name
          vnts <- str_replace(vnts,"_",paste0(thisOne$number[tci],"_"))
        }
        if(!is.na(names(TS[[tsidi]][vnts]))){
          # apply the corrected value (column 4 of the helper sheet)
          print(glue::glue("Changed special column {vnts} ({thisOne$TSid[tci]}) from {TS[[tsidi]][[vnts]]} to {thisOne[[4]][tci]}"))
          TS[[tsidi]][[vnts]] <- thisOne[[4]][tci]
          if(av == "paleoData_proxy-invalid"){
            if(is.na(TS[[tsidi]][[vnts]])){#replace these with NULLs
              TS[[tsidi]][[vnts]] <- NULL
            }
          }
        }
      }
    }
    # first standardization pass
    stando <- lipdR::standardizeQCsheetValues(qcB)
    qcB <- stando$newSheet
    if(length(stando$remainingInvalid) > 0){#standardization issues. Do a few things:
      #check to see if the existing invalid sheets contain corrected information....
      # convo maps TS variable names to QC sheet column names
      convo <- read_sheet_retry(ss="1T5RrAtrk3RiWIUSyO0XTAa756k6ljiYjYpvP67Ngl_w")
      for(rv in names(stando$remainingInvalid)){
        tivs <- allSheetNames[startsWith(x = allSheetNames,prefix = rv)]
        if(length(tivs) == 1){
          thisOne <- read_sheet_retry(ss = qcId,sheet = tivs)
          convoi <- which(convo$tsName == rv)
          if(length(convoi) != 1){
            # fall back to hard-coded mappings for known variables
            if(rv == "interpretation_variable"){
              qcName <- "climateVariable"
            }else if(rv == "interpretation_seasonality"){
              qcName <- "seasonality"
            }else{
              stop("I can't figure out the qc name")
            }
          }else{
            qcName <- convo$qcSheetName[convoi]
          }
          #loop through terms and see if in standardTables, and replace if so.
          if(nrow(thisOne) > 0){
            for(rvr in 1:nrow(thisOne)){
              if(thisOne[[ncol(thisOne)]][rvr] %in% standardTables[[rv]]$lipdName){#it's a standard term!
                #replace it!
                tsidm <- which(qcB$TSid == thisOne$TSid[rvr])
                if(length(tsidm) > 1){stop("this shouldn't be possible")}
                print(glue::glue("{thisOne$TSid[rvr]} - {rv}: replaced {qcB[[qcName]][tsidm]} with {thisOne[[ncol(thisOne)]][rvr]}"))
                qcB[[qcName]][tsidm] <- thisOne[[ncol(thisOne)]][rvr]
              }
            }
          }
        }else if(length(tivs) == 0){
          print(glue::glue("No sheet for {tivs} in the qc sheet"))
        }else{
          print(glue::glue("Multiple {tivs} sheets found: {allSheetNames}"))
        }
      }
      #rerun the standardization report
      stando <- lipdR::standardizeQCsheetValues(qcB)
      qcB <- stando$newSheet
      if(length(stando$remainingInvalid) > 0){#standardization issues remain
        #write the standardized value back into the qc sheet
        # NOTE(review): is.null() on a data frame is always FALSE, so only
        # the `== ""` part of this mask can match.
        qcB[is.null(qcB) | qcB == ""] <- NA
        #find differences for log
        #diff <- daff::diff_data(qcA,qc2w,ids = "TSid",ignore_whitespace = TRUE,columns_to_ignore = "link to lipdverse",never_show_order = TRUE)
        qcB[is.na(qcB)] <- ""
        readr::write_csv(qcB,file = file.path(webDirectory,project,"qcInvalid.csv"))
        #upload it to google drive into temporary qcInvalid
        googledrive::drive_update(media = file.path(webDirectory,project,"qcInvalid.csv"),
                                  file = googledrive::as_id("1valJY2eqpUT1fsfRggLmPpwh32-HMb9ZO5J5LvZERLQ"))
        #copy the qc check to the qcsheet:
        googlesheets4::sheet_delete(ss = qcId,sheet = 1)
        googlesheets4::sheet_copy(from_ss = "1valJY2eqpUT1fsfRggLmPpwh32-HMb9ZO5J5LvZERLQ", from_sheet = 1,to_ss = qcId, to_sheet = "QC",.before = "datasetsInCompilation")
        #write_sheet_retry(qc2w,ss = qcId, sheet = 1)
        googledrive::drive_rename(googledrive::as_id(qcId),name = stringr::str_c(project," v. QC sheet - INVALID TERMS!"))
        #now write a validation report
        writeValidationReportToQCSheet(stando$remainingInvalid,qcId)
        #delete "-invalid" helper sheets that no longer have missing terms
        tokeep <- paste0(names(stando$remainingInvalid),"-invalid")
        allSheetNames <- googlesheets4::sheet_names(ss = qcId)
        ivnames <- allSheetNames[str_detect(allSheetNames,pattern = "-invalid")]
        todelete <- setdiff(ivnames,tokeep)
        try(googlesheets4::sheet_delete(ss = qcId,sheet = todelete),silent = TRUE)
        #throw an error: the pipeline cannot proceed with invalid terms
        stop("There are invalid terms in the QC sheet. Check the validation report")
      }
    }
  }
  # guarantee the changelogNotes column exists before selecting on it
  if(!any(names(qcB)=="changelogNotes")){
    qcB$changelogNotes <- NA
  }
  #pull out changelog notes, one combined string per dataset
  clNotes <- qcB %>%
    dplyr::select(dataSetName,TSid,changelogNotes) %>%
    dplyr::filter(!is.na(changelogNotes)) %>%
    dplyr::group_by(dataSetName) %>%
    dplyr::summarize(changes = paste(paste(TSid,changelogNotes,sep = ": "),collapse = "; ")) %>%
    dplyr::rename(dataSetNameOrig = dataSetName)
  #then remove that column
  qcB <- dplyr::select(qcB,-changelogNotes)
  data$dsidsOriginal <- data$dsidsOriginal %>%
    dplyr::left_join(clNotes,by = "dataSetNameOrig")
  #1b. New version name
  lastProjVersion <- lastVersion(project,googEmail = googEmail)
  if(is.na(projVersion)){
    #qc in compilation: datasets flagged in the QC sheet
    qcIc <- qcB %>%
      filter(inThisCompilation == TRUE) %>%
      select(dataSetName) %>%
      unique()
    qcIc <- qcIc$dataSetName
    # datasets in the previous version, from the files themselves
    inLast <- inThisCompilation(TS,project,lastProjVersion)
    tsIci <- which(purrr::map_lgl(inLast,isTRUE))
    tsIc <- unique(lipdR::pullTsVariable(TS,"dataSetName")[tsIci])
    projVersion <- tickVersion(project,qcIc,tsIc,googEmail = googEmail)
  }
  #setup new version output directories
  if(!dir.exists(file.path(webDirectory,project))){
    dir.create(file.path(webDirectory,project))
  }
  if(!dir.exists(file.path(webDirectory,project,projVersion))){
    dir.create(file.path(webDirectory,project,projVersion))
  }
  #create TSids if needed
  et <- which(is.na(TSid))
  if(length(et) > 0){
    ntsid <- unlist(purrr::rerun(length(et),lipdR::createTSid()))
    TSid[et] <- ntsid
    TS <- lipdR::pushTsVariable(TS,variable = "paleoData_TSid",vec = TSid)
  }
  #check for duplicate TSids; loop because appending "-dup" can itself collide
  while(any(duplicated(TSid))){
    wd <- which(duplicated(TSid))
    dtsid <- paste0(TSid[wd],"-dup")
    TSid[wd] <- dtsid
    TS <- lipdR::pushTsVariable(TS,variable = "paleoData_TSid",vec = TSid)
  }
  sTS <- lipdR::splitInterpretationByScope(TS)
  data$TS <- TS
  newData <- list(qcB = qcB,
                  clNotes = clNotes,
                  projVersion = projVersion,
                  lastProjVersion = lastProjVersion,
                  sTS = sTS)
  data <- append(data,newData)
  return(data)
}
#' Create QC sheet from data
#'
#' Build the file-derived QC data frame (qcTs.csv), snapshot the google QC
#' sheet (qcGoog.csv) with any duplicate TSids disambiguated, and download
#' the "last update" sheet (lastUpdate.csv) for the three-way merge step.
#'
#' @param params named list of pipeline parameters; elements are assigned
#'   into this function's environment by name.
#' @param data named list of pipeline data; elements are assigned into this
#'   function's environment by name. Must include `sTS` and `qcB`.
#'
#' @return `data` with `qcC` (the file-derived QC data frame) added.
#' @export
createQcFromFile <- function(params,data){
  #assignVariablesFromList(params)
  # seq_along() is safe for an empty list, unlike 1:length(params)
  for(i in seq_along(params)){
    assign(names(params)[i],params[[i]])
  }
  #assignVariablesFromList(data)
  for(i in seq_along(data)){
    assign(names(data)[i],data[[i]])
  }
  #2. Create a new qc sheet from files
  qcC <- createQCdataFrame(sTS,templateId = qcId,ageOrYear = ageOrYear,compilationName = project,compVersion = lastProjVersion)
  # `file =` — readr deprecated the `path` argument; matches usage elsewhere
  readr::write_csv(qcC,file = file.path(webDirectory,project,projVersion,"qcTs.csv"))
  #3. Get the updated QC sheet from google
  #first, lock editing
  #googledrive::drive_share(as_id(qcId),role = "reader", type = "anyone")
  #check for duplicate TSids; loop because appending "-dup" can itself collide
  while(any(duplicated(qcB$TSid))){
    wd <- which(duplicated(qcB$TSid))
    dtsid <- paste0(qcB$TSid[wd],"-dup")
    qcB$TSid[wd] <- dtsid
  }
  readr::write_csv(qcB,file = file.path(webDirectory,project,projVersion,"qcGoog.csv"))
  # snapshot the "last update" sheet for the three-way merge
  lu <- getGoogleQCSheet(lastUpdateId)
  readr::write_csv(lu,file.path(webDirectory,project,"lastUpdate.csv"))
  data$qcC <- qcC
  return(data)
}
#' Merge sources
#'
#' Three-way merge of the project QC sheets:
#'   * qcA -- the "last update" snapshot (the common parent of the merge),
#'   * qcB -- the google QC sheet (may contain hand edits),
#'   * qcC -- the QC sheet freshly generated from the timeseries data.
#' Rows are aligned on TSid, the merge is performed with daff, and a diff of
#' the merged result against qcA is rendered to qcChanges.html in the
#' project's version directory.
#'
#' @param params a named list of pipeline parameters; every entry is assigned
#'   into the function environment (expects e.g. webDirectory, project,
#'   projVersion)
#' @param data a named list of pipeline data; every entry is assigned into the
#'   function environment
#'
#' @return `data` with the merged QC data frame added as `data$qc`
#' @export
mergeQcSheets <- function(params,data){
  #assignVariablesFromList(params)
  for(i in 1:length(params)){
    assign(names(params)[i],params[[i]])
  }
  #assignVariablesFromList(data)
  for(i in 1:length(data)){
    assign(names(data)[i],data[[i]])
  }
  #4. Load in the old QC sheet (from last update), and merge with new ones
  # normalize special characters in all three sheets so comparisons line up
  rosetta <- lipdverseR::rosettaStone()
  qcA <- readr::read_csv(file.path(webDirectory,project,"lastUpdate.csv"),guess_max = Inf) %>%
    purrr::map_df(lipdverseR::replaceSpecialCharacters,rosetta)
  qcB <- readr::read_csv(file.path(webDirectory,project,projVersion,"qcGoog.csv"),guess_max = Inf) %>%
    purrr::map_df(lipdverseR::replaceSpecialCharacters,rosetta)
  qcC <- readr::read_csv(file.path(webDirectory,project,projVersion,"qcTs.csv"),guess_max = Inf) %>%
    purrr::map_df(lipdverseR::replaceSpecialCharacters,rosetta)
  #qc <- daff::merge_data(parent = qcA,a = qcB,b = qcC) Old way
  #NPM: 2.20.20 added to help merge_data work as desired
  #new way. What if we only consider QC entries that are present in the TS QC (qcC)
  qcAs <- dplyr::filter(qcA,TSid %in% qcC$TSid)
  qcBs <- dplyr::filter(qcB,TSid %in% qcC$TSid)
  #shuffle in
  # dBC <- dplyr::anti_join(qcB,qcC,by = "TSid")
  # dCB <- dplyr::anti_join(qcC,qcB,by = "TSid")
  # dCA <- dplyr::anti_join(qcC,qcA,by = "TSid")
  #dBC <- dplyr::anti_join(qcC,qcA,by = "TSid")
  # rows present only in qcC get appended to the filtered A/B sheets so all
  # three sheets cover the same set of TSids
  dCB <- dplyr::anti_join(qcC,qcBs,by = "TSid")
  dCA <- dplyr::anti_join(qcC,qcAs,by = "TSid")
  qcA2 <- dplyr::bind_rows(qcAs,dCA)
  qcB2 <- dplyr::bind_rows(qcBs,dCB)
  #qcC2 <- dplyr::bind_rows(qcC,dBC)
  #check once more
  #dBA <- dplyr::anti_join(qcB2,qcA2,by = "TSid")
  #qcA2 <- dplyr::bind_rows(qcA2,dBA)
  #arrange by qcB TSid
  # align all three sheets to the row order of the google sheet (qcB2)
  miA <- match(qcB2$TSid,qcA2$TSid)
  miC <- match(qcB2$TSid,qcC$TSid)
  qcA <- qcA2[miA,]
  qcC <- qcC[miC,]
  qcB <- qcB2
  #turn all NULLs and blanks to NAs
  # NOTE(review): is.null() on a data frame is a scalar FALSE, so only the
  # `== ""` comparison takes effect here -- confirm intent
  qcA[is.null(qcA) | qcA == ""] <- NA
  qcB[is.null(qcB) | qcB == ""] <- NA
  qcC[is.null(qcC) | qcC == ""] <- NA
  #prep inThisCompilation
  qcA$inThisCompilation[is.na(qcA$inThisCompilation)] <- FALSE
  qcB$inThisCompilation[is.na(qcB$inThisCompilation)] <- FALSE
  qcC$inThisCompilation[is.na(qcC$inThisCompilation)] <- FALSE
  #find all TRUE in B and apply to C (since they should only be changed in B)
  bf <- qcB %>%
    filter(inThisCompilation == "TRUE")
  cfi <- which(qcC$TSid %in% bf$TSid)
  qcC$inThisCompilation[cfi] <- "TRUE"
  # three-way merge with qcA as the common parent
  qc <- daff::merge_data(parent = qcA,a = qcB,b = qcC)
  #remove fake conflicts
  qc <- purrr::map_dfc(qc,removeFakeConflictsCol)
  #remove duplicate rows
  qc <- dplyr::distinct(qc)
  # render the overall change set for human review
  dd <- daff::diff_data(qcA,qc)
  daff::render_diff(dd,file = file.path(webDirectory,project,projVersion,"qcChanges.html"),view = FALSE)
  if(any(names(qc) == "inThisCompilation")){
    #check for conflicts in "inThisCompilation"
    #this is especially important when first starting this variable
    #default to google qc sheet (qcB)
    # the "((( null ))) X /// Y" strings are daff's conflict markers
    shouldBeTrue <- which(qc$inThisCompilation == "((( null ))) TRUE /// FALSE")
    shouldBeFalse <- which(qc$inThisCompilation == "((( null ))) FALSE /// TRUE")
    qc$inThisCompilation[shouldBeTrue] <- "TRUE"
    qc$inThisCompilation[shouldBeFalse] <- "FALSE"
  }
  #this should fix conflicts that shouldnt exist
  #qc <- resolveDumbConflicts(qc)
  data$qc <- qc
  #data$qcA <- qcA
  return(data)
}
#' updateTsFromMergedQc
#'
#' Applies the merged QC sheet back onto the split timeseries, optionally
#' standardizes vocabulary/terms, rebuilds the LiPD database, resolves
#' datasetIds, and updates the per-dataset (file-level) changelogs.
#'
#' @param params a named list of pipeline parameters; every entry is assigned
#'   into the function environment (expects e.g. project, qcId,
#'   standardizeTerms)
#' @param data a named list of pipeline data; only sTS, qc, projVersion,
#'   dsidsOriginal, Dloaded, lastProjVersion, filesToUltimatelyDelete and
#'   clNotes are retained, the rest is dropped to save memory
#'
#' @return a fresh `data` list containing nD, ndsn, nicdi, dsidKey, dsnInComp,
#'   projVersion and filesToUltimatelyDelete
#' @export
updateTsFromMergedQc <- function(params,data){
  #assignVariablesFromList(params)
  for(i in 1:length(params)){
    assign(names(params)[i],params[[i]])
  }
  #drop unneeded variables.
  neededData <- which(names(data) %in% c("sTS",
                                         "qc",
                                         "projVersion",
                                         "dsidsOriginal",
                                         "Dloaded",
                                         "lastProjVersion",
                                         "projVersion",
                                         "filesToUltimatelyDelete","clNotes"))
  #assignVariablesFromList(data)
  for(i in neededData){
    assign(names(data)[i],data[[i]])
  }
  rm("data")
  #5. Update sTS from merged qc
  #p <- profvis({nsTS <- updateFromQC(sTS,qc,project,projVersion)})
  nsTS <- updateFromQC(sTS,qc,project,projVersion)
  nTS <- combineInterpretationByScope(nsTS)
  #check for standardized terms
  validationReport <- lipdR:::isValidAll(nTS,report = TRUE)
  #write validation report to QC sheet
  writeValidationReportToQCSheet(validationReport,qcId)
  if(standardizeTerms){#To do: #make this its own function
    #proxy lumps
    groupFrom <- c("paleoData_proxy","paleoData_inferredMaterial","interpretation1_variable","interpretation2_variable","interpretation3_variable","interpretation4_variable","interpretation5_variable","interpretation6_variable","interpretation7_variable","interpretation8_variable")
    groupInto <- c("paleoData_proxyLumps","paleoData_inferredMaterialGroup","interpretation1_variableGroup","interpretation2_variableGroup","interpretation3_variableGroup","interpretation4_variableGroup","interpretation5_variableGroup","interpretation6_variableGroup","interpretation7_variableGroup","interpretation8_variableGroup")
    #create new vectors for grouping variables.
    nTS <- createVectorsForGroups(nTS,groupFrom,groupInto)
    #Do some cleaning
    nTS <- standardizeTsValues(nTS)
    #add directions to isotope groups
    igf <- c("interpretation1_variableGroup","interpretation2_variableGroup","interpretation3_variableGroup","interpretation4_variableGroup","interpretation5_variableGroup","interpretation6_variableGroup","interpretation7_variableGroup","interpretation8_variableGroup")
    igt <- c("interpretation1_variableGroupDirection","interpretation2_variableGroupDirection","interpretation3_variableGroupDirection","interpretation4_variableGroupDirection","interpretation5_variableGroupDirection","interpretation6_variableGroupDirection","interpretation7_variableGroupDirection","interpretation8_variableGroupDirection")
    nTS <- createInterpretationGroupDirections(nTS,igf,igt)
    nTS <- fix_pubYear(nTS)
    nTS <- fixKiloyearsTs(nTS)
    nTS <- purrr::map(nTS,removeEmptyInterpretationsFromTs)
  }
  #5c rebuild database
  nD <- collapseTs(nTS)
  #5d clean D
  if(standardizeTerms){
    nDt <- purrr::map(nD,removeEmptyPubs)
    # is.list() rather than class(x) == "list" (length-safe class check)
    if(is.list(nDt)){
      nD <- nDt
    }
  }
  #check to see which datasets are this compilation
  itc <- inThisCompilation(nTS,project,projVersion)
  ndsn <- pullTsVariable(nTS, "dataSetName")
  dsnInComp <- unique(ndsn[map_lgl(itc,isTRUE)])
  nicdi <- which(!names(nD) %in% dsnInComp)
  # update file and project changelogs
  #first file changelogs
  dsidsNew <- tibble(datasetId = map_chr(nD,"datasetId"),
                     dataSetNameNew = map_chr(nD,"dataSetName"),
                     dataSetVersion = purrr::map_chr(nD,getVersion))
  #deal with missing datasetIds...
  if(any(is.na(dsidsNew$datasetId))){
    bbb <- which(is.na(dsidsNew$datasetId))
    for(bb in bbb){
      bbdsn <- dsidsNew$dataSetNameNew[bb]
      olddsid <- dsidsOriginal$datasetId[dsidsOriginal$dataSetNameOrig == bbdsn]
      #see if that works
      if(length(olddsid) == 1){
        if(!any(olddsid == dsidsNew$datasetId[-bbb])){
          #then this seems ok
          dsidsNew$datasetId[bb] <- olddsid
          nD[[bbdsn]]$datasetId <- olddsid
        }
      }
    }
  }
  #if there still are bad ones stop.
  # fixed: the glue interpolation was malformed (braces/quotes misplaced);
  # report the dataset *names*, since their ids are the NAs in question
  if(any(is.na(dsidsNew$datasetId))){
    stop(glue("{paste(dsidsNew$dataSetNameNew[is.na(dsidsNew$datasetId)], collapse = ', ')} are missing dsids in the new data which is bad."))
  }
  #figure out change notes
  dsidKey <- dplyr::left_join(dsidsNew,dsidsOriginal,by = "datasetId")
  print("Updating changelogs....")
  #loop through DSid and create changelog (this is for files, not for the project)
  for(dfi in 1:nrow(dsidKey)){
    newName <- dsidKey$dataSetNameNew[dfi]
    oldName <- dsidKey$dataSetNameOrig[dfi]
    cl <- try(createChangelog(Dloaded[[oldName]],nD[[newName]]))
    if(is(cl,"try-error")){
      stop("Error in dataset changelogging")
    }
    nD[[newName]] <- updateChangelog(nD[[newName]],
                                     changelog = cl,
                                     notes = dsidKey$changes[dfi])
  }
  newData <- list(nD = nD,
                  ndsn = ndsn,
                  nicdi = nicdi,
                  dsidKey = dsidKey,
                  dsnInComp = dsnInComp,
                  projVersion = projVersion,
                  filesToUltimatelyDelete = filesToUltimatelyDelete)
  data <- newData
  return(data)
}
#' Create or update per-dataset web pages
#'
#' Re-extracts the timeseries from the rebuilt database, initializes missing
#' changelogs, compares a fresh inventory against the existing lipdverse
#' inventory, and (re)creates data web pages for datasets whose version
#' changed -- or for all datasets when `recreateDataPages` is set.
#'
#' @param params a named list of pipeline parameters; every entry is assigned
#'   into the function environment (expects e.g. googEmail, lipdDir,
#'   recreateDataPages, webDirectory)
#' @param data a named list of pipeline data; every entry is assigned into the
#'   function environment (expects e.g. nD)
#'
#' @return `data` with newInv, oldInv and toCreate appended
createDataPages <- function(params,data){
  #assignVariablesFromList(params)
  for(i in 1:length(params)){
    assign(names(params)[i],params[[i]])
  }
  #assignVariablesFromList(data)
  for(i in 1:length(data)){
    assign(names(data)[i],data[[i]])
  }
  #re extract nTS
  nTS <- extractTs(nD)
  #temporary
  #create changelog
  # make sure every dataset carries a changelog before inventorying
  for(d in 1:length(nD)){
    if(is.null(nD[[d]]$changelog)){
      nD[[d]] <- initializeChangelog(nD[[d]])
    }
  }
  googlesheets4::gs4_auth(email = googEmail,cache = ".secret")
  newInv <- createInventory(nD)
  oldInv <- getInventory(lipdDir,googEmail)
  #find any updates to versions, or new datasets that we need to create for this
  if(recreateDataPages){
    toCreate <- dplyr::full_join(oldInv,newInv,by = "datasetId")
    toUpdate <- data.frame()
  }else{#only create what's changed
    # .x = old inventory, .y = new inventory (full_join suffixes)
    toCreate <- dplyr::full_join(oldInv,newInv,by = "datasetId") %>%
      dplyr::filter(dataSetVersion.x != dataSetVersion.y | is.na(dataSetVersion.x))
    #update pages for data in compilation, but that didn't change
    toUpdate <- dplyr::full_join(oldInv,newInv,by = "datasetId") %>%
      dplyr::filter(dataSetVersion.x == dataSetVersion.y & !is.na(dataSetVersion.x))
  }
  if(nrow(toUpdate) > 0 & nrow(toCreate) > 0){#check to make sure were good, if need be
    #make sure distinct from create
    if(any(toCreate$datasetId %in% toUpdate$datasetId)){
      stop("Data pages to create and update are not distinct (and they should be)")
    }
  }
  if(nrow(toCreate) > 0){
    #create new datapages for the appropriate files
    # rows with no new-inventory name (NA) cannot be created; drop them
    w <- which(is.na(toCreate$dataSetNameNew.y))
    tc <- nD[toCreate$dataSetNameNew.y]
    if(length(w) > 0){
      if(length(w) < nrow(toCreate)){
        ndsn <- toCreate$dataSetNameNew.y[-w]
        tc <- tc[-w]
      }else{
        stop("no datasets left to create")
      }
    }
    print("Creating new data webpages...")
    purrr::walk(tc,quietly(createDataWebPage),webdir = webDirectory,.progress = TRUE)
  }
  #if changes
  if(nrow(toUpdate) > 0){
    #create new datapages for the appropriate files
    w <- which(is.na(toUpdate$dataSetNameNew.y))
    tu <- nD[toUpdate$dataSetNameNew.y]
    if(length(w) > 0){
      if(length(w) < nrow(toUpdate)){
        ndsn <- toUpdate$dataSetNameNew.y[-w]
        tu <- tu[-w]
      }else{
        stop("no datasets left to update")
      }
    }
    print("Updating data webpages...")
    purrr::walk(tu,quietly(updateDataWebPageForCompilation),webdir = webDirectory,.progress = TRUE)
  }
  #pass on to the next
  newData <- list(newInv = newInv,
                  oldInv = oldInv,
                  toCreate = toCreate)
  data <- append(data,newData)
  return(data)
}
#' Create lipdverse pages for this version of the project
#'
#' Builds the project's overview/sidebar pages and per-dataset shell sites for
#' this version, optionally refreshes the global lipdverse pages and map, and
#' finally writes the version's serializations and a fresh QC data frame.
#'
#' @param params a named list of pipeline parameters; every entry is assigned
#'   into the function environment (expects e.g. project, webDirectory,
#'   updateLipdverse, serialize, qcId, ageOrYear)
#' @param data a named list of pipeline data; every entry is assigned into the
#'   function environment (expects e.g. nD, dsnInComp, nicdi, projVersion)
#'
#' @return `data` (with nD/nTS dropped) plus the final QC data frame `qcF`
#'   and the final database `DF`
#' @export
createProjectWebpages <- function(params,data){
  #assignVariablesFromList(params)
  for(i in 1:length(params)){
    assign(names(params)[i],params[[i]])
  }
  #assignVariablesFromList(data)
  for(i in 1:length(data)){
    assign(names(data)[i],data[[i]])
  }
  #create this version overview page
  createProjectSidebarHtml(project, projVersion,webDirectory)
  createProjectOverviewPage(project,projVersion,webDirectory)
  #update lipdverse overview page
  createProjectSidebarHtml("lipdverse", "current_version",webDirectory)
  createProjectOverviewPage("lipdverse", "current_version",webDirectory)
  #get only those in the compilation
  nDic <- nD[unique(dsnInComp)] #the unique shouldn't be necessary here, but also shouldn't hurt since it was uniqued earlier
  tcdf <- data.frame(dsid = map_chr(nDic,"datasetId"),
                     dsn = map_chr(nDic,"dataSetName"),
                     vers = map_chr(nDic,getVersion))
  #create all the project shell sites
  print(glue::glue("Creating {nrow(tcdf)} project shell sites"))
  purrr::pwalk(tcdf,
               quietly(createProjectDataWebPage),
               webdir = webDirectory,
               .progress = TRUE,
               project,
               projVersion)
  #create a project map
  nnTS <- extractTs(nDic)
  createProjectMapHtml(nnTS,project = project,projVersion = projVersion,webdir = webDirectory)
  if(updateLipdverse){
    updateQueryCsv(nD)
    #get lipdverse inventory
    allDataDir <- list.dirs("~/Dropbox/lipdverse/html/data/",recursive = FALSE)
    # helper: for one dataset directory, locate its highest version
    # subdirectory and report id/name/version/path/mtime of the .lpd therein
    getDataDetails <- function(datadir){
      maxVers <- list.dirs(datadir)[-1] %>%
        basename() %>%
        stringr::str_replace_all(pattern = "_",replacement = ".") %>%
        as.numeric_version() %>%
        max() %>%
        as.character() %>%
        stringr::str_replace_all(pattern = "[.]",replacement = "_")
      dsid <- datadir %>% basename()
      fnames <- list.files(file.path(datadir,maxVers))
      fnamesFull <- list.files(file.path(datadir,maxVers),full.names = TRUE)
      dsni <- fnames %>%
        stringr::str_detect(pattern = ".lpd") %>%
        which()
      # when several .lpd files match, take the longest filename
      longest <- dsni[which.max(purrr::map_dbl(fnames[dsni],stringr::str_length))]
      dsn <- fnames[longest] %>% stringr::str_remove(pattern = ".lpd")
      path <- fnamesFull[longest]
      mod.time <- file.info(path)$mtime
      return(data.frame(
        dsid = dsid,
        dsn = dsn,
        vers = stringr::str_replace_all(string = maxVers,pattern = "_",replacement = "."),
        path = path,
        versionCreated = mod.time))
    }
    #make sure that data files exist for all of the data in the database
    # fixed: use `::` -- map_dfr is exported from purrr; `:::` reaches into internals
    lipdverseDirectory <- purrr::map_dfr(allDataDir,getDataDetails)
    LV <- readLipd(lipdverseDirectory$path)
    allDataDetails <- data.frame(dsid = map_chr(LV,"datasetId"),
                                 dsn = map_chr(LV,"dataSetName"),
                                 vers = map_chr(LV,getVersion))
    add <- dplyr::left_join(allDataDetails,lipdverseDirectory,by = "dsid")
    # helper: elementwise "old version is newer than new version" comparison,
    # treating NA as version 0.0.0
    lvtc <- function(versO,versN){
      versO[is.na(versO)] <- "0.0.0"
      versN[is.na(versN)] <- "0.0.0"
      return(as.numeric_version(versO) > as.numeric_version(versN))
    }
    whichUpdated <- which(lvtc(add$vers.x,add$vers.y))
    if(length(whichUpdated) > 0){
      dsnu <- nD[add$dsn.x[whichUpdated]]
      walk(dsnu,createDataWebPage,webdir = webDirectory)
      #create lipdverse project pages
    }
    #find missing lipdverse htmls
    lpht <- list.files("~/Dropbox/lipdverse/html/lipdverse/current_version/",pattern = ".html")
    lphtdsn <- stringr::str_remove_all(lpht,pattern = ".html")
    addh <- which(!allDataDetails$dsn %in% lphtdsn)
    if(length(addh) > 0){
      lphtdf <- allDataDetails[addh,]
      #create all the project shell sites
      print(glue::glue("Creating {length(addh)} new lipdverse shell sites"))
      purrr::pwalk(lphtdf,
                   createProjectDataWebPage,
                   webdir = webDirectory,
                   project = "lipdverse",
                   projVersion = "current_version")
    }
    #look for updated lipdverse htmls
    lphtfull <- list.files("~/Dropbox/lipdverse/html/lipdverse/current_version/",pattern = ".html",full.names = TRUE)
    lpht <- list.files("~/Dropbox/lipdverse/html/lipdverse/current_version/",pattern = ".html",full.names = FALSE)
    # helper: scrape the version string out of a shell site's sidebar include
    getLipdverseHtmlVersions <- function(lfile){
      lss <- readLines(lfile)
      sbl <- max(which(stringr::str_detect(lss,"sidebar.html")))
      vers <- as.character(stringr::str_match_all(lss[sbl],"\\d{1,}_\\d{1,}_\\d{1,}")[[1]])
      vers <- str_replace_all(vers,"_",".")
      return(vers)
    }
    lphtdsn <- stringr::str_remove_all(lpht,pattern = ".html")
    htmlVers <- map_chr(lphtfull,getLipdverseHtmlVersions)
    addv <- dplyr::left_join(allDataDetails,data.frame(dsn = lphtdsn,vers = htmlVers),by = "dsn")
    whichUpdatedHtml <- which(lvtc(addv$vers.x,addv$vers.y))
    if(length(whichUpdatedHtml) > 0){
      lphtdf <- allDataDetails[whichUpdatedHtml,]
      #create all the project shell sites
      print(glue::glue("Updating {length(whichUpdatedHtml)} lipdverse shell sites"))
      purrr::pwalk(lphtdf,
                   createProjectDataWebPage,
                   webdir = webDirectory,
                   project = "lipdverse",
                   projVersion = "current_version")
    }
    #lipdverse htmls to remove
    # #don't do this for now, because it doesn't work with multiple data directories
    # todeht <- which(!lphtdsn %in% allDataDetails$dsn)
    # lphtdsn[todeht]
    #update lipdverse map
    LVTS <- extractTs(LV)
    createProjectMapHtml(LVTS,project = "lipdverse",projVersion = "current_version",webdir = webDirectory)
  }
  #create lipdverse querying csv
  #reassign
  DF <- nDic
  if(serialize){
    try(createSerializations(D = DF,webDirectory,project,projVersion),silent = FALSE)
    if(updateLipdverse){
      try(createSerializations(D = LV,webDirectory,"lipdverse","current_version"),silent = FALSE)
    }
  }
  #add datasets not in compilation into DF
  if(length(nicdi)>0){
    DF <- append(DF,nD[nicdi])
  }
  if(length(DF) != length(nD)){
    stop("Uh oh, you lost or gained datasets while creating the webpages")
  }
  TSF <- extractTs(DF)
  #get most recent in compilations
  mics <- getMostRecentInCompilationsTs(TSF)
  TSF <- pushTsVariable(TSF,variable = "paleoData_mostRecentCompilations",vec = mics,createNew = TRUE)
  sTSF <- splitInterpretationByScope(TSF)
  qcF <- createQCdataFrame(sTSF,templateId = qcId,ageOrYear = ageOrYear,compilationName = project,compVersion = projVersion)
  newData <- list(qcF = qcF,
                  DF = DF)
  # drop the large intermediates before handing off
  data$nD <- NULL
  data$nTS <- NULL
  return(append(data,newData))
}
#' Create lipdversePages old framework
#'
#' Legacy (dashboard-based) webpage builder. Optionally restricts page
#' creation to datasets in the compilation, builds the project dashboards,
#' reloads the written LiPD files, serializes, and produces the final
#' timeseries objects and QC data frame.
#'
#' @param params a named list of pipeline parameters; every entry is assigned
#'   into the function environment (expects e.g. updateWebpages,
#'   restrictWebpagesToCompilation, serialize, webDirectory, project, qcId,
#'   ageOrYear)
#' @param data a named list of pipeline data; every entry is assigned into the
#'   function environment (expects e.g. nD, nTS, ndsn, dsnInComp, nicdi,
#'   projVersion)
#'
#' @return `data` with TSF, sTSF, qcF and DF appended
#' @export
createWebpages <- function(params,data){
  #assignVariablesFromList(params)
  for(i in 1:length(params)){
    assign(names(params)[i],params[[i]])
  }
  #assignVariablesFromList(data)
  for(i in 1:length(data)){
    assign(names(data)[i],data[[i]])
  }
  #6 Update lipdverse
  if(updateWebpages){
    #restrict as necessary
    if(restrictWebpagesToCompilation){
      # indices of the compilation's members in the TS and database
      ictsi <- which(ndsn %in% dsnInComp)
      icdi <- which(names(nD) %in% dsnInComp)
      if(length(ictsi) == 0 || length(icdi) == 0){
        stop("didn't find any datasets in the compilation for the webpage")
      }
    }else{
      # no restriction: use everything, and nothing is "not in compilation"
      ictsi <- seq_along(nTS)
      icdi <- seq_along(nD)
      nicdi <- NULL
    }
    createProjectDashboards(nD[icdi],nTS[ictsi],webDirectory,project,projVersion)
    #load back in files
    DF <- readLipd(file.path(webDirectory,project,projVersion))
    if(serialize){
      try(createSerializations(D = DF,webDirectory,project,projVersion),silent = TRUE)
    }
    #add datasets not in compilation into DF
    if(length(nicdi)>0){
      DF <- append(DF,nD[nicdi])
    }
    # sanity check: the reloaded + re-appended database must match in size
    if(length(DF) != length(nD)){
      stop("Uh oh, you lost or gained datasets while creating the webpages")
    }
  }else{
    DF <- nD
  }
  TSF <- extractTs(DF)
  #get most recent in compilations
  mics <- getMostRecentInCompilationsTs(TSF)
  TSF <- pushTsVariable(TSF,variable = "paleoData_mostRecentCompilations",vec = mics,createNew = TRUE)
  sTSF <- splitInterpretationByScope(TSF)
  qcF <- createQCdataFrame(sTSF,templateId = qcId,ageOrYear = ageOrYear,compilationName = project,compVersion = projVersion)
  newData <- list(TSF = TSF,
                  sTSF = sTSF,
                  qcF = qcF,
                  DF = DF)
  return(append(data,newData))
}
#' Update google
#'
#' Pushes the final QC sheet back to google: writes newLastUpdate.csv, uploads
#' it over the "last update" drive file, copies that sheet into the QC
#' workbook (replacing its first sheet), renames the workbook for the new
#' version, and updates the datasets-in-compilation tab. Finally strips all
#' pipeline data not needed by downstream steps.
#'
#' @param params a named list of pipeline parameters; every entry is assigned
#'   into the function environment (expects e.g. project, webDirectory,
#'   googEmail, qcId, lastUpdateId)
#' @param data a named list of pipeline data; every entry is assigned into the
#'   function environment (expects e.g. qcF, DF, projVersion)
#'
#' @return `data`, trimmed to only the variables needed moving forward
#' @export
updateGoogleQc <- function(params,data){
  #assignVariablesFromList(params)
  for(i in 1:length(params)){
    assign(names(params)[i],params[[i]])
  }
  #assignVariablesFromList(data)
  for(i in 1:length(data)){
    assign(names(data)[i],data[[i]])
  }
  #7 Update QC sheet on google (and make a lastUpdate.csv file)
  qc2w <- qcF
  # NOTE(review): is.null() on a data frame is scalar FALSE, so only the
  # `== ""` comparison takes effect here
  qc2w[is.null(qc2w) | qc2w == ""] <- NA
  #find differences for log
  #diff <- daff::diff_data(qcA,qc2w,ids = "TSid",ignore_whitespace = TRUE,columns_to_ignore = "link to lipdverse",never_show_order = TRUE)
  # blank out NAs for the sheet upload
  qc2w[is.na(qc2w)] <- ""
  # goodDatasets <- unique(qc2w$dataSetName[which(qc2w$inThisCompilation == "TRUE")])
  #
  # gi <- which(qc2w$dataSetName %in% goodDatasets)
  # qc2w <- qc2w[gi,]
  #update the data compilation page
  updateDatasetCompilationQc(DF,project,projVersion,qcId)
  googlesheets4::gs4_auth(email = googEmail,cache = ".secret")
  #write the new qcsheet to file
  # `file =` replaces the `path =` argument deprecated in readr 1.4.0
  readr::write_csv(qc2w,file = file.path(webDirectory,project,"newLastUpdate.csv"))
  #upload it to google drive for last update
  googledrive::drive_update(media = file.path(webDirectory,project,"newLastUpdate.csv"),
                            file = googledrive::as_id(lastUpdateId))
  #copy the last update to the qcsheet:
  googlesheets4::sheet_delete(ss = qcId,sheet = 1)
  googlesheets4::sheet_copy(from_ss = lastUpdateId, from_sheet = 1,to_ss = qcId, to_sheet = "QC",.before = "datasetsInCompilation")
  #write_sheet_retry(qc2w,ss = qcId, sheet = 1)
  googledrive::drive_rename(googledrive::as_id(qcId),name = stringr::str_c(project," v.",projVersion," QC sheet"))
  #daff::render_diff(diff,file = file.path(webDirectory,project,projVersion,"metadataChangelog.html"),title = paste("Metadata changelog:",project,projVersion),view = FALSE)
  #googledrive::drive_update(file = googledrive::as_id(lastUpdateId),media = file.path(webDirectory,project,"newLastUpdate.csv"))
  #newName <- stringr::str_c(project," v.",projVersion," QC sheet")
  #googledrive::drive_update(file = googledrive::as_id(qcId),media = file.path(webDirectory,project,"newLastUpdate.csv"),name = newName)
  #remove unneeded data
  neededVariablesMovingForward <- c("dsidKey",
                                    "webDirectory",
                                    "dsnInComp",
                                    "project",
                                    "lastVersionNumber",
                                    "DF",
                                    "projVersion",
                                    "webDirectory",
                                    "googEmail",
                                    "versionMetaId",
                                    "filesToUltimatelyDelete",
                                    "lipdDir")
  vToRemove <- names(data)[!names(data) %in% neededVariablesMovingForward]
  for(v2r in vToRemove){
    data[v2r] <- NULL
  }
  return(data)
}
#' Finalize
#'
#' Builds the new row for the google versioning sheet (version numbers, member
#' dataset names, zip MD5, datasets added/removed vs the previous version),
#' writes the combined version table to a temp csv for the next step, and
#' trims the pipeline data to the variables still needed.
#'
#' @param params a named list of pipeline parameters; every entry is assigned
#'   into the function environment (expects e.g. project, versionMetaId,
#'   lipdDir)
#' @param data a named list of pipeline data; every entry is assigned into the
#'   function environment (expects e.g. dsnInComp, projVersion)
#'
#' @return `data`, trimmed, with `lastVersionNumber` added
#' @export
finalize <- function(params,data){
  #assignVariablesFromList(params)
  for(i in 1:length(params)){
    assign(names(params)[i],params[[i]])
  }
  #assignVariablesFromList(data)
  for(i in 1:length(data)){
    assign(names(data)[i],data[[i]])
  }
  #8 finalize and write lipd files
  #DF <- purrr::map(DF,removeEmptyPubs)
  #9 update the google version file
  versionDf <- read_sheet_retry(googledrive::as_id(versionMetaId),col_types = "cdddccccc")
  #versionDf <- read_sheet_retry(googledrive::as_id(versionMetaId))
  versionDf$versionCreated <- lubridate::ymd_hms(versionDf$versionCreated)
  # seed the new row from an existing one so all columns are present
  newRow <- versionDf[1,]
  newRow$project <- project
  # projVersion is "pub_dataset_metadata", e.g. "1_2_3"
  pdm <- as.numeric(unlist(str_split(projVersion,"_")))
  newRow$publication <- pdm[1]
  newRow$dataset <- pdm[2]
  newRow$metadata <- pdm[3]
  newRow$dsns <- paste(unique(dsnInComp),collapse = "|")
  newRow$versionCreated <- lubridate::ymd_hms(lubridate::now(tzone = "UTC"))
  newRow$`zip MD5` <- directoryMD5(lipdDir)
  #check for differences in dsns
  # `!!project` forces the local variable, not the sheet's `project` column
  dsndiff <- filter(versionDf,project == (!!project)) %>%
    filter(versionCreated == max(versionCreated,na.rm = TRUE))
  lastVersionNumber <- paste(dsndiff[1,2:4],collapse = "_")
  oldDsns <- stringr::str_split(dsndiff$dsns,pattern = "[|]",simplify = T)
  newDsns <- stringr::str_split(newRow$dsns,pattern = "[|]",simplify = T)
  newRow$`dataSets removed` <- paste(setdiff(oldDsns,newDsns),collapse = "|")
  newRow$`dataSets added` <- paste(setdiff(newDsns,oldDsns),collapse = "|")
  nvdf <- dplyr::bind_rows(versionDf,newRow)
  nvdf$versionCreated <- as.character(nvdf$versionCreated)
  # stash the updated version table; changeloggingAndUpdating() uploads it
  readr::write_csv(nvdf,file = file.path(tempdir(),"versTemp.csv"))
  data$lastVersionNumber <- lastVersionNumber
  #remove unneeded data
  neededVariablesMovingForward <- c("dsidKey",
                                    "webDirectory",
                                    "project",
                                    "lastVersionNumber",
                                    "DF",
                                    "projVersion",
                                    "webDirectory",
                                    "googEmail",
                                    "versionMetaId",
                                    "filesToUltimatelyDelete",
                                    "lipdDir")
  vToRemove <- names(data)[!names(data) %in% neededVariablesMovingForward]
  for(v2r in vToRemove){
    data[v2r] <- NULL
  }
  return(data)
}
#' Log changes and update
#'
#' Final pipeline step: renders the project-level changelog against the
#' previous version, uploads the updated version table to google, updates the
#' datasetId dereferencer and vocab sites, refreshes the project's
#' current_version directory, deletes temporary files, and writes the final
#' LiPD files back to `lipdDir`.
#'
#' @param params a named list of pipeline parameters; every entry is assigned
#'   into the function environment (expects e.g. project, webDirectory,
#'   googEmail, versionMetaId, lipdDir)
#' @param data a named list of pipeline data; every entry is assigned into the
#'   function environment (expects e.g. DF, dsidKey, lastVersionNumber,
#'   projVersion, filesToUltimatelyDelete)
#'
#' @return called for its side effects; returns the result of unFlagUpdate()
#' @export
changeloggingAndUpdating <- function(params,data){
  #assignVariablesFromList(params)
  for(i in 1:length(params)){
    assign(names(params)[i],params[[i]])
  }
  #assignVariablesFromList(data)
  for(i in 1:length(data)){
    assign(names(data)[i],data[[i]])
  }
  #write project changelog
  #get last project's data. Try serialiation first:
  # load() populates `D` in this environment on success
  lastSerial <- try(load(file.path(webDirectory,project,lastVersionNumber,paste0(project,lastVersionNumber,".RData"))),silent = TRUE)
  if(!is(lastSerial,"try-error")){
    Dpo <- D
  }else{#try to load from lipd
    Dpo <- readLipd(file.path(webDirectory,project,lastVersionNumber))
  }
  if(length(Dpo)>0){
    createProjectChangelog(Dold = Dpo,
                           Dnew = DF,
                           proj = project,
                           projVersOld = lastVersionNumber,
                           projVersNew = projVersion,
                           webDirectory = webDirectory,
                           notesTib = dsidKey)
  }else{#write empty changelog
    cle <- glue::glue("## Changelog is empty - probably because there were no files in the web directory for {project} version {lastVersionNumber}")
    readr::write_file(cle,file.path(webDirectory,project,projVersion,"changelogEmpty.Rmd"))
    rmarkdown::render(file.path(webDirectory,project,projVersion,"changelogEmpty.Rmd"),
                      output_file = file.path(webDirectory,project,projVersion,"changelogSummary.html"))
    rmarkdown::render(file.path(webDirectory,project,projVersion,"changelogEmpty.Rmd"),
                      output_file = file.path(webDirectory,project,projVersion,"changelogDetail.html"))
  }
  # upload the version table written by finalize()
  vt <- readr::read_csv(file.path(tempdir(),"versTemp.csv"),col_types = "cdddccccc")
  googlesheets4::gs4_auth(email = googEmail,cache = ".secret")
  wrote <- try(write_sheet_retry(vt,ss = versionMetaId,sheet = 1))
  if(is(wrote,"try-error")){
    print("failed to write lipdverse versioning - do this manually")
  }
  #update datasetId information
  updateDatasetIdDereferencer(DF,
                              compilation = project,
                              version = projVersion,
                              dateUpdated = lubridate::today())
  #update vocab
  try(updateVocabWebsites())
  #give permissions back
  #drive_share(as_id(qcId),role = "writer", type = "user",emailAddress = "")
  #update the files
  # replace current_version with a fresh copy of this version's directory
  unlink(file.path(webDirectory,project,"current_version"),force = TRUE,recursive = TRUE)
  dir.create(file.path(webDirectory,project,"current_version"))
  file.copy(file.path(webDirectory,project,projVersion,.Platform$file.sep), file.path(webDirectory,project,"current_version",.Platform$file.sep), recursive=TRUE,overwrite = TRUE)
  file.copy(file.path(webDirectory,project,projVersion,str_c(project,projVersion,".zip")),
            file.path(webDirectory,project,"current_version","current_version.zip"),overwrite = TRUE)
  unlink(x = filesToUltimatelyDelete,force = TRUE, recursive = TRUE)
  writeLipd(DF,path = lipdDir,removeNamesFromLists = TRUE)
  unFlagUpdate()
}
#' create serializations of a database in R, matlab and python
#'
#' @param D a multi-lipd object (named list of LiPD datasets) to serialize
#' @param webDirectory path to the lipdverse web directory
#' @param project project (compilation) name
#' @param projVersion project version string (e.g. "1_0_0")
#' @param remove.ensembles strip ensemble tables before the primary
#'   serialization? If stripping shrinks the database, a second
#'   "-ensembles" serialization is also written. Default TRUE.
#' @param matlabUtilitiesPath path to the LiPD-utilities Matlab code
#' @param matlabPath path to the matlab binary
#' @param python3Path path to a python3 binary with the lipd module installed
#'
#' @import stringr
#' @import lipdR
#' @import readr
#' @description creates serialization; requires that Matlab and Python be installed, along with lipd utilities for those languages.
#' @return called for its side effects (writes .RData, .zip, .mat and .pkl
#'   files into the project's version directory)
#' @export
createSerializations <- function(D,
                                 webDirectory,
                                 project,
                                 projVersion,
                                 remove.ensembles = TRUE,
                                 matlabUtilitiesPath = "/Volumes/data/GitHub/LiPD-utilities/Matlab",
                                 matlabPath = "/Applications/MATLAB_R2021b.app/bin/matlab",
                                 python3Path="/Users/nicholas/opt/anaconda3/envs/pyleo/bin/python3"){
  #create serializations for web
  #R
  # fixed: `Do` was only assigned inside the remove.ensembles branch, so the
  # object.size() comparison below errored when remove.ensembles = FALSE.
  # Keep the original (possibly ensemble-bearing) database unconditionally.
  Do <- D
  if(remove.ensembles){
    D <- purrr::map(D,removeEnsembles)
  }
  # ensembles were present iff stripping them shrank the database
  if(object.size(Do) > object.size(D)){
    has.ensembles <- TRUE
  }else{
    has.ensembles <- FALSE
  }
  TS <- extractTs(D)
  #sTS <- splitInterpretationByScope(TS)
  save(list = c("D","TS"),file = file.path(webDirectory,project,projVersion,stringr::str_c(project,projVersion,".RData")))
  #write files to a temporary directory
  lpdtmp <- file.path(tempdir(),"lpdTempSerialization")
  unlink(lpdtmp,recursive = TRUE)
  dir.create(lpdtmp)
  writeLipd(D,path = lpdtmp)
  #zip it
  zip(zipfile = file.path(webDirectory,project,projVersion,str_c(project,projVersion,".zip")),files = list.files(lpdtmp,pattern= "*.lpd",full.names = TRUE),extras = '-j')
  if(has.ensembles){
    print("writing again with ensembles")
    TS <- extractTs(Do)
    #sTS <- splitInterpretationByScope(TS)
    # NOTE(review): this saves the ensemble-stripped `D` alongside the
    # ensemble-bearing `TS` -- confirm whether `Do` was intended here
    save(list = c("D","TS"),file = file.path(webDirectory,project,projVersion,stringr::str_c(project,projVersion,"-ensembles.RData")))
    #write files to a temporary directory
    lpdtmpens <- file.path(tempdir(),"lpdTempSerializationEnsembles")
    unlink(lpdtmpens,recursive = TRUE)
    dir.create(lpdtmpens)
    writeLipd(Do,path = lpdtmpens)
    #zip it
    zip(zipfile = file.path(webDirectory,project,projVersion,str_c(project,projVersion,"-ensembles.zip")),files = list.files(lpdtmpens,pattern= "*.lpd",full.names = TRUE))
  }
  #matlab
  # build a one-shot matlab script that reads the temp lipd dir and saves a .mat
  mfile <- stringr::str_c("addpath(genpath('",matlabUtilitiesPath,"'));\n") %>%
    stringr::str_c("D = readLiPD('",lpdtmp,"');\n") %>%
    stringr::str_c("TS = extractTs(D);\n") %>%
    stringr::str_c("sTS = splitInterpretationByScope(TS);\n") %>%
    stringr::str_c("save ",file.path(webDirectory,project,projVersion,stringr::str_c(project,projVersion,".mat")),' D TS sTS\n') %>%
    stringr::str_c("exit")
  #write the file
  # `file =` replaces the `path =` argument deprecated in readr 1.4.0
  readr::write_file(mfile,file = file.path(webDirectory,project,projVersion,"createSerialization.m"))
  #run the file
  try(system(stringr::str_c(matlabPath," -nodesktop -nosplash -nodisplay -r \"run('",file.path(webDirectory,project,projVersion,"createSerialization.m"),"')\"")))
  #Python
  # build a one-shot python script that pickles D and TS (protocol 2)
  pyfile <- "import lipd\n" %>%
    stringr::str_c("import pickle\n") %>%
    stringr::str_c("D = lipd.readLipd('",lpdtmp,"/')\n") %>%
    stringr::str_c("TS = lipd.extractTs(D)\n") %>%
    stringr::str_c("filetosave = open('",file.path(webDirectory,project,projVersion,stringr::str_c(project,projVersion,".pkl'")),",'wb')\n") %>%
    stringr::str_c("all_data = {}\n") %>%
    stringr::str_c("all_data['D'] = D\n") %>%
    stringr::str_c("all_data['TS'] = TS\n") %>%
    stringr::str_c("pickle.dump(all_data, filetosave,protocol = 2)\n") %>%
    stringr::str_c("filetosave.close()")
  #write the file
  readr::write_file(pyfile,file = file.path(webDirectory,project,projVersion,"createSerialization.py"))
  #run the file
  try(system(stringr::str_c(python3Path, " ",file.path(webDirectory,project,projVersion,"createSerialization.py"))))
}
|
## Functions for caching and accessing the inverse of a matrix.
## Creates a special matrix allowing the value of its inverse to be cached.
## Build a cache-aware matrix wrapper: a list of four closures sharing one
## environment that holds the matrix and (lazily) its inverse.
## Resetting the matrix via set() invalidates any cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL   # new matrix, old inverse no longer valid
  }
  get <- function() x
  setinv <- function(inverse) cached_inverse <<- inverse
  getinv <- function() cached_inverse
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## Takes a special matrix and returns its inverse.
## If the inverse has already been cached then the cached value is returned.
## If not then the inverse is calculated, returned and cached.
## Return the inverse of the special matrix 'x' (from makeCacheMatrix).
## A previously cached inverse is reused; otherwise it is computed with
## solve(), stored back into 'x', and returned. Extra arguments go to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (is.null(cached)) {
    # cache miss: compute, store, fall through to return
    cached <- solve(x$get(), ...)
    x$setinv(cached)
  } else {
    message("Getting cached data")
  }
  cached
}
| /cachematrix.R | no_license | elainebettaney/ProgrammingAssignment2 | R | false | false | 903 | r | ## Functions for caching and accessing the inverse of a matrix.
## Creates a special matrix allowing the value of its inverse to be cached.
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
setinv <- function(inverse) inv <<- inverse
getinv <- function() inv
list(set=set, get=get, setinv=setinv, getinv=getinv)
}
## Takes a special matrix and returns its inverse.
## If the inverse has already been cached then the cached value is returned.
## If not then the inverse is calculated, returned and cached.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getinv()
if(!is.null(inv)) {
message("Getting cached data")
return(inv)
}
data <- x$get()
inv <- solve(data, ...)
x$setinv(inv)
inv
}
|
#' @title dfo.rv.analysis
#' @description Stratified analysis of DFO lobster data with bootstrapped resampling and set-up the data for sensitivity analysis
#' @param \code{DS} :the selection of analysis, options include \code{stratified.estimates}
#' @param \code{out.dir} : specify the location of data saves, default is null and uses the project.datadirectory function as default
#' @param \code{p} : the parameter list which contains the specifics of the analysis at a minimum includes the season and area for analysis
#' @return saves or loads .rdata objects
#' @examples
#' require(devtools)
#' load_all('E:/git/LobsterScience/bio.lobster') #to load from directory rather than package if modifying
#' dfo.rv.analysis(DS = 'stratified.estimates')
#' @author Adam Cook, \email{Adam.Cook@@dfo-mpo.gc.ca}
#' @export
dfo.rv.analysis <- function(DS='stratified.estimates', out.dir = 'bio.lobster', p=p, ip=NULL,save=T) {
  # Directory where .rdata results are saved / loaded
  loc = file.path( project.datadirectory(out.dir), "analysis" )
  dir.create( path=loc, recursive=T, showWarnings=F )
  # strat: survey strata to include; props: proportion of each stratum
  # falling inside the area of interest (1 = whole stratum). The if chain
  # below selects them from p$series / p$area / p$lobster.subunits.
  props = 1
  if(p$series=='summer') {mns = c('June','July','August') ; strat = c(440:495)}
  if(p$series=='georges') {mns = c('February','March','April'); strat = c('5Z1','5Z2','5Z3','5Z4','5Z5','5Z6','5Z7','5Z8','5Z9')}
  if(p$area=='Georges.Canada' & p$series == 'georges') {strat = c('5Z1','5Z2') }
  if(p$area=='Georges.US' & p$series =='georges') {strat = c('5Z3','5Z4','5Z5','5Z6','5Z7','5Z8')}
  if(p$area== 'LFA41' & p$series =='summer') {strat = c(472,473,477,478,481,482,483,484,485,480); props = 1}
  if(p$area== 'LFA41' & p$series =='summer' & p$define.by.polygons) {strat = c(472,473,477,478,481,482,483,484,485); props = c(0.2196,0.4415,0.7593,0.7151,0.1379,0.6991,0.8869,0.50897,0.070409)}
  if(p$area== 'adjacentLFA41' & p$series =='summer') {strat = c(472,473,477,478,481,482,483,484,485,480); props = 1-c(0.2196,0.4415,0.7593,0.7151,0.1379,0.6991,0.8869,0.50897,0.070409,0)}
  if(p$lobster.subunits==T &p$area=='Georges.Basin' & p$series=='summer') {strat = c(482,483); props = c(0.1462, 0.2696)}
  if(p$lobster.subunits==T &p$area=='Crowell.Basin' & p$series=='summer') {strat = c(482,483,484,485); props = c(0.1963,0.1913,0.3935,0.0483)}
  if(p$lobster.subunits==T &p$area=='SE.Browns' & p$series=='summer') {strat = c(472,473,475,477,478,481,482); props = c(0.2196,0.4415,0.00202,0.7592,0.7151,0.0868,0.0871)}
  if(p$lobster.subunits==T &p$area=='SW.Browns' & p$series=='summer') {strat = c(481,482,483,484,485); props=c(0.0509,0.2684,0.4358,0.1143,0.02197)}
  if(p$lobster.subunits==T & p$area=='Georges.Bank' & p$series=='georges') {strat = c('5Z1','5Z2'); props = c(0.6813, 0.5474)}
  if(p$lobster.subunits==T &p$area=='Georges.Basin' & p$series=='georges') {strat = c('5Z1','5Z2'); props = c(0.3187, 0.4537)}
  if(p$area == 'custom') {strat = p$strat; props=rep(1,length(strat))}
  if (exists( "libs", p)) {
    p0 = p;
    # RLibrary( p$libs )
    p=p0
  }
  # if (exists( "libs", p)) RLibrary( p$libs )
  # Default: process every row of p$runs (one row per year)
  if (is.null(ip)) ip = 1:p$nruns
  if(DS %in% c('species.set.data')) {
    # Load previously saved strata.files objects for the requested species
    # and merge their set-level totals into a single data frame.
    outa = NULL
    a = dir(loc)
    a = a[grep('strata.files',a)]
    a = a[grep(paste(p$species,collapse="|"),a)]
    if(exists('strata.files.return',p)){
      it = grep(paste(p$size.class,collapse="-"),a)
      load(file.path(loc,a[it]))
      return(strata.files)
    }
    for(op in a) {
      load(file.path(loc,op))
      al = lapply(strata.files,"[[",2)
      al = do.call('rbind',al)
      # third dot-separated token of the file name encodes the species
      al$Sp= strsplit(op,"\\.")[[1]][3]
      b = strsplit(op,"\\.")
      # token following 'length' encodes the size class; used to suffix columns
      b = b[[1]][grep('length',b[[1]])+1]
      al = rename.df(al,c('totwgt','totno'),c(paste('totwgt',b,sep="."),paste('totno',b,sep=".")))
      if(is.null(outa)) {outa = rbind(al,outa)
      } else {
        outa = merge(outa,al[,c('mission','setno',paste('totwgt',b,sep="."),paste('totno',b,sep="."))],by=c('mission','setno'))
      }
    }
    return(outa)
  }
  if(DS %in% c('stratified.estimates','stratified.estimates.redo')) {
    if(DS=='stratified.estimates'){
      # Load and return the previously saved estimates that match the
      # series / area / size-class / sex selections held in p.
      outa = NULL
      a = dir(loc)
      a = a[grep('stratified',a)]
      a = a[grep(p$area,a)]
      a = a[grep(p$series,a)]
      if(p$length.based) {
        a = a[grep(p$size.class[1],a)]
        a = a[grep(p$size.class[2],a)]
      }
      if(p$by.sex) {
        k = ifelse(p$sex==1,'male',ifelse(p$sex==2,'female','berried'))
        a = a[grep(k,a)]
      }
      load(file.path(loc,a))
      return(out)
    }
    # ---- 'redo': rebuild the estimates from the groundfish survey DB ----
    # set = tow information, cas = catch totals per set, stra = stratum
    # areas, de = individual length/sex/weight details
    set = groundfish.db(DS='gsinf.odbc')
    cas = groundfish.db(DS='gscat.odbc')
    stra = groundfish.db(DS='gsstratum')
    de = groundfish.db(DS='gsdet.odbc')
    # positions from ddmm.mm to decimal degrees; longitudes are west (negative)
    set$X = convert.dd.dddd(set$slong) *-1
    set$Y = convert.dd.dddd(set$slat)
    # NOTE(review): 0.011801 appears to convert stratum area into the number
    # of trawlable units NH -- confirm units of stra$area
    stra$NH = as.numeric(stra$area)/0.011801
    ii = which(months(set$sdate) %in% mns & set$strat %in% strat & set$type %in% c(1,5))
    print('Both set types 1 and 5 are saved in data frame but only 1 is used for stratified')
    set = set[ii,]
    # Patch problem catch records: NA/zero weight with positive count gets a
    # token weight of 1; a missing count is imputed from weight using the
    # 1999-2015 mean individual weight; missing sample weight defaults to
    # the total weight.
    io = which(is.na(cas$totwgt) | cas$totwgt==0 & cas$totno>0)
    cas[io,'totwgt'] <- 1
    io = which(is.na(cas$totno) & !is.na(cas$totwgt))
    cas[io,'totno'] = cas[io,'totwgt']/0.806 #mean weight of individual per tow taken from 1999 to 2015
    io = which(is.na(cas$sampwgt) & !is.na(cas$totwgt))
    cas[io,'sampwgt'] <- cas[io,'totwgt']
    strata.files = list()
    # One output row per run: stratified mean/total weight (w.*) and number
    # (n.*) with bootstrap CIs, plus effort and distribution metrics.
    out = data.frame(yr=NA,w.yst=NA,w.yst.se=NA,w.ci.yst.l=NA,w.ci.yst.u=NA,w.Yst=NA,w.ci.Yst.l=NA,w.ci.Yst.u=NA,n.yst=NA,n.yst.se=NA,n.ci.yst.l=NA,n.ci.yst.u=NA,n.Yst=NA,n.ci.Yst.l=NA,n.ci.Yst.u=NA,dwao=NA,Nsets=NA,NsetswithLobster=NA,ObsLobs = NA,gini = NA,gini.lo =NA, gini.hi=NA)
    big.out = matrix(NA,nrow=p$nruns,ncol=length(seq(0.01,0.99,0.01))+1)
    mp=0
    np=1
    effic.out = data.frame(yr=NA,strat.effic.wt=NA,alloc.effic.wt=NA,strat.effic.n=NA,alloc.effic.n=NA)
    nopt.out = list()
    for(iip in ip) {
      mp = mp+1
      yr = p$runs[iip,"yrs"]
      print ( p$runs[iip,] )
      iy = which(year(set$sdate) %in% yr)
      # 2550 is the lobster species code
      iv = which(cas$spec==2550)
      pi='base'
      if(p$define.by.polygons) {
        # Restratify: keep only sets falling inside the LFA41 polygon(s)
        l = l41 = read.csv(file.path(project.datadirectory('bio.lobster'),'data','maps','LFA41Offareas.csv'))
        pi = 'restratified'
        if(p$lobster.subunits) {
          l = l41[which(l41$OFFAREA == p$area),]
        } else {
          print('All LFA41 subsetted by LFA Area')
          l41 = joinPolys(as.PolySet(l41),operation='UNION')
          attr(l41,'projection') <- 'LL'
          l41 = subset(l41, SID==1)
        }
        set$EID = 1:nrow(set)
        a = findPolys(set,l)
        iz = which(set$EID %in% a$EID)
        if(p$area=='adjacentLFA41') {
          # the complement: sets outside the polygon but within the strata
          iz = which(set$EID %ni% a$EID)
          ir = which(set$strat %in% c(strat))
          iz = intersect(iz,ir)
        }
      } else {
        iz = which(set$strat %in% c(strat))
      }
      se = set[intersect(iy,iz),]
      se$EID = 1:nrow(se)
      ca = cas[iv,]
      se$z = (se$dmin+se$dmax) / 2 * 1.8288 #from fm to m
      vars.2.keep = c('mission','X','Y','setno','sdate','dist','strat','z','bottom_temperature','bottom_salinity','type')
      se = se[,vars.2.keep]
      p$lb = p$length.based
      # sex-only selection is run through the length-based machinery with an
      # all-inclusive size window
      if(p$by.sex & !p$length.based) {p$size.class=c(0,1000); p$length.based=T}
      if(!p$lb) { vars.2.keep =c('mission','setno','totwgt','totno','size_class','spec')
        ca = ca[,vars.2.keep]
      }
      if(p$length.based){
        # Rescale set totals by the proportion (in number, pn, and weight,
        # pw) of measured individuals within the selected size class / sex.
        dp = de[which(de$spec %in% 2550),]
        ids = paste(se$mission,se$setno,sep="~")
        dp$ids = paste(dp$mission,dp$setno,sep="~")
        dp = dp[which(dp$ids %in% ids),]
        flf = p$size.class[1]:p$size.class[2]
        dp$clen2 = ifelse(dp$flen %in% flf,dp$clen,0)
        if(p$by.sex) dp$clen2 = ifelse(dp$fsex %in% p$sex, dp$clen2, 0)
        if(any(!is.finite(dp$fwt))) {
          # fill missing individual weights with a fitted length-weight curve
          io = which(!is.finite(dp$fwt))
          fit = nls(fwt~a*flen^b,de[which(de$spec==2550 & is.finite(de$fwt)),],start=list(a=0.001,b=3.3))
          ab = coef(fit)
          dp$fwt[io] = ab[1]*dp$flen[io]^ab[2]
        }
        dp$pb = dp$fwt * dp$clen
        dp$pb1 = dp$fwt * dp$clen2
        dpp = data.frame(mission=NA,setno=NA,size_class=NA,pn=NA,pw=NA)
        if(nrow(dp)>0) {
          dpp = aggregate(cbind(clen,clen2,pb,pb1)~mission+setno+size_class,data=dp,FUN=sum)
          dpp$pn = dpp$clen2/dpp$clen
          dpp$pw = dpp$pb1/dpp$pb
          dpp = dpp[,c('mission','setno','size_class','pn','pw')]
        }
        ca1 = merge(ca,dpp,by=c('mission','setno','size_class'))
        ca1$totwgt = ca1$totwgt * ca1$pw
        ca1$totno = ca1$totno * ca1$pn
        vars.2.keep =c('mission','setno','totwgt','totno','size_class','spec')
        ca = ca1[,vars.2.keep]
      }
      if(p$vessel.correction) {
        # Catchability correction between survey vessels: either the
        # set-specific Fanning factors or a fixed factor for 1970-1981.
        ca$id = ca$mission
        if(!exists('vessel.correction.fixed',p)) {
          ca = correct.vessel(ca)
          ca$totwgt = ca$totwgt * ca$cfvessel
          ca$totno = ca$totno * ca$cfvessel
          print('Totno and Totwgt are adjusted by Fannings Conversion Factors')
        }
        if(exists('vessel.correction.fixed',p) & yr %in% 1970:1981) {
          ca$totwgt = ca$totwgt * p$vessel.correction.fixed
          ca$totno = ca$totno * p$vessel.correction.fixed
          print(paste('Totno and Totwgt are adjusted by Conversion Factor of',p$vessel.correction.fixed))
        } else {
          print('Into Needler Years No Need for Vessel Correction')
        }
      }
      if(nrow(ca)>=1) {
        # Collapse to one record per set, attach to set info, and
        # standardize totals to a 1.75 nm tow
        ca = aggregate(cbind(totwgt,totno)~mission+setno,data=ca,FUN=sum)
        sc = merge(se,ca,by=c('mission','setno'),all.x=T)
        sc[,c('totwgt','totno')] = na.zero(sc[,c('totwgt','totno')])
        sc$totno = sc$totno * 1.75 / sc$dist
        sc$totwgt = sc$totwgt * 1.75 / sc$dist
        io = which(stra$strat %in% unique(sc$strat))
        st = stra[io,c('strat','NH')]
        st = st[order(st$strat),]
        st$Strata = st$strat
        spr = data.frame(Strata = strat, Pr = props)
        st = merge(st,spr)
        if(p$reweight.strata) st$NH = st$NH * st$Pr #weights the strata based on area in selected region
        # temperature mode: stratify bottom temperature instead of catch
        if(exists('temperature',p)) {sc = sc[!is.na(sc$bottom_temperature),] ; sc$totno = sc$bottom_temperature; sc$totwgt = sc$bottom_temperature }
        if(nrow(sc)>0){
          st = Prepare.strata.file(st)
          sc1= sc
          sc = sc[which(sc$type==1),]
          sc = Prepare.strata.data(sc)
          strata.files[[mp]] = list(st,sc1)
          # Stratified estimates for weight (sW) and number (sN)
          sW = Stratify(sc,st,sc$totwgt)
          sN = Stratify(sc,st,sc$totno)
          ssW = summary(sW)
          ssN = summary(sN)
          if(p$strata.efficiencies) {
            ssW = summary(sW,effic=T,nopt=T)
            ssN = summary(sN,effic=T,nopt=T)
            effic.out[mp,] = c(yr,ssW$effic.str,ssW$effic.alloc,ssN$effic.str,ssN$effic.alloc)
            nopt.out[[mp]] = list(yr,ssW$n.opt,ssN$n.opt)
          }
          if(!p$strata.efficiencies) {
            bsW = list(NA,NA,NA)
            bsN = list(NA,NA,NA)
            nt = NA
            if(p$bootstrapped.ci) {
              # bias-corrected bootstrap CIs, 1000 resamples
              bsW = summary(boot.strata(sW,method='BWR',nresamp=1000),ci.method='BC')
              bsN = summary(boot.strata(sN,method='BWR',nresamp=1000),ci.method='BC')
              nt = sum(sW$Nh)/1000
            }
            if(exists('big.ci',p)) {
              big.out[mp,] = c(yr,summary(boot.strata(sN,method='BWR',nresamp=1000),ci.method='BC',big.ci=T))
            }
            out[mp,] = c(yr,ssW[[1]],ssW[[2]],bsW[[1]][1],bsW[[1]][2],ssW[[3]]/1000,bsW[[1]][1]*nt,bsW[[1]][2]*nt,
              ssN[[1]],ssN[[2]],bsN[[1]][1],bsN[[1]][2],ssN[[3]]/1000,bsN[[1]][1]*nt,bsN[[1]][2]*nt,ssW$dwao,sum(sW[['nh']]),sum(sW[['nhws']]),round(sum(sc$totno)),ssN$gini,bsN[[2]][1],bsN[[2]][2])
            print(out[mp,'yr'])
          } else {
            # efficiencies-only run: pad the estimates row with zeros
            out[mp,] = c(yr,rep(0,22))
            print(out[mp,'yr'])
          }
        }
      }
    }
    if(p$strata.efficiencies) {
      return(list(effic.out,nopt.out))
    }
    if(exists('big.ci',p)) {
      return(big.out)
    }
    # Encode the selections into the output file names, then save
    lle = 'all'
    lbs = 'not'
    if(p$length.based) lle = paste(p$size.class[1],p$size.class[2],sep="-")
    if(p$by.sex) lbs = ifelse(p$sex==1,'male',ifelse(p$sex==2,'female','berried'))
    if(length(lbs)>1) lbs = paste(lbs[1],lbs[2],sep='&')
    fn = paste('stratified',p$series,p$area,pi,'length',lle,lbs,'sexed','rdata',sep=".")
    fn.st = paste('strata.files',p$series,p$area,pi,'length',lle,lbs,'sexed','rdata',sep=".")
    if(save) {
      print(fn)
      save(out,file=file.path(loc,fn))
      save(strata.files,file=file.path(loc,fn.st))
    }
    if(p$strata.files.return) return(strata.files)
    return(out)
  }
}
| /R/dfo.rv.analysis.r | no_license | gomezcatalina/bio.lobster | R | false | false | 15,032 | r | #' @title dfo.rv.analysis
#' @description Stratified analysis of DFO lobster data with bootstrapped resampling and set-up the data for sensitivity analysis
#' @param \code{DS} :the selection of analysis, options include \code{stratified.estimates}
#' @param \code{out.dir} : specify the location of data saves, default is null and uses the project.datadirectory function as default
#' @param \code{p} : the parameter list which contains the specifics of the analysis at a minimum includes the season and area for analysis
#' @return saves or loads .rdata objects
#' @examples
#' require(devtools)
#' load_all('E:/git/LobsterScience/bio.lobster') #to load from directory rather than package if modifying
#' dfo.rv.analysis(DS = 'stratified.estimates')
#' @author Adam Cook, \email{Adam.Cook@@dfo-mpo.gc.ca}
#' @export
dfo.rv.analysis <- function(DS='stratified.estimates', out.dir = 'bio.lobster', p=p, ip=NULL,save=T) {
  # Directory where .rdata results are saved / loaded
  loc = file.path( project.datadirectory(out.dir), "analysis" )
  dir.create( path=loc, recursive=T, showWarnings=F )
  # strat: survey strata to include; props: proportion of each stratum
  # falling inside the area of interest (1 = whole stratum). The if chain
  # below selects them from p$series / p$area / p$lobster.subunits.
  props = 1
  if(p$series=='summer') {mns = c('June','July','August') ; strat = c(440:495)}
  if(p$series=='georges') {mns = c('February','March','April'); strat = c('5Z1','5Z2','5Z3','5Z4','5Z5','5Z6','5Z7','5Z8','5Z9')}
  if(p$area=='Georges.Canada' & p$series == 'georges') {strat = c('5Z1','5Z2') }
  if(p$area=='Georges.US' & p$series =='georges') {strat = c('5Z3','5Z4','5Z5','5Z6','5Z7','5Z8')}
  if(p$area== 'LFA41' & p$series =='summer') {strat = c(472,473,477,478,481,482,483,484,485,480); props = 1}
  if(p$area== 'LFA41' & p$series =='summer' & p$define.by.polygons) {strat = c(472,473,477,478,481,482,483,484,485); props = c(0.2196,0.4415,0.7593,0.7151,0.1379,0.6991,0.8869,0.50897,0.070409)}
  if(p$area== 'adjacentLFA41' & p$series =='summer') {strat = c(472,473,477,478,481,482,483,484,485,480); props = 1-c(0.2196,0.4415,0.7593,0.7151,0.1379,0.6991,0.8869,0.50897,0.070409,0)}
  if(p$lobster.subunits==T &p$area=='Georges.Basin' & p$series=='summer') {strat = c(482,483); props = c(0.1462, 0.2696)}
  if(p$lobster.subunits==T &p$area=='Crowell.Basin' & p$series=='summer') {strat = c(482,483,484,485); props = c(0.1963,0.1913,0.3935,0.0483)}
  if(p$lobster.subunits==T &p$area=='SE.Browns' & p$series=='summer') {strat = c(472,473,475,477,478,481,482); props = c(0.2196,0.4415,0.00202,0.7592,0.7151,0.0868,0.0871)}
  if(p$lobster.subunits==T &p$area=='SW.Browns' & p$series=='summer') {strat = c(481,482,483,484,485); props=c(0.0509,0.2684,0.4358,0.1143,0.02197)}
  if(p$lobster.subunits==T & p$area=='Georges.Bank' & p$series=='georges') {strat = c('5Z1','5Z2'); props = c(0.6813, 0.5474)}
  if(p$lobster.subunits==T &p$area=='Georges.Basin' & p$series=='georges') {strat = c('5Z1','5Z2'); props = c(0.3187, 0.4537)}
  if(p$area == 'custom') {strat = p$strat; props=rep(1,length(strat))}
  if (exists( "libs", p)) {
    p0 = p;
    # RLibrary( p$libs )
    p=p0
  }
  # if (exists( "libs", p)) RLibrary( p$libs )
  # Default: process every row of p$runs (one row per year)
  if (is.null(ip)) ip = 1:p$nruns
  if(DS %in% c('species.set.data')) {
    # Load previously saved strata.files objects for the requested species
    # and merge their set-level totals into a single data frame.
    outa = NULL
    a = dir(loc)
    a = a[grep('strata.files',a)]
    a = a[grep(paste(p$species,collapse="|"),a)]
    if(exists('strata.files.return',p)){
      it = grep(paste(p$size.class,collapse="-"),a)
      load(file.path(loc,a[it]))
      return(strata.files)
    }
    for(op in a) {
      load(file.path(loc,op))
      al = lapply(strata.files,"[[",2)
      al = do.call('rbind',al)
      # third dot-separated token of the file name encodes the species
      al$Sp= strsplit(op,"\\.")[[1]][3]
      b = strsplit(op,"\\.")
      # token following 'length' encodes the size class; used to suffix columns
      b = b[[1]][grep('length',b[[1]])+1]
      al = rename.df(al,c('totwgt','totno'),c(paste('totwgt',b,sep="."),paste('totno',b,sep=".")))
      if(is.null(outa)) {outa = rbind(al,outa)
      } else {
        outa = merge(outa,al[,c('mission','setno',paste('totwgt',b,sep="."),paste('totno',b,sep="."))],by=c('mission','setno'))
      }
    }
    return(outa)
  }
  if(DS %in% c('stratified.estimates','stratified.estimates.redo')) {
    if(DS=='stratified.estimates'){
      # Load and return the previously saved estimates that match the
      # series / area / size-class / sex selections held in p.
      outa = NULL
      a = dir(loc)
      a = a[grep('stratified',a)]
      a = a[grep(p$area,a)]
      a = a[grep(p$series,a)]
      if(p$length.based) {
        a = a[grep(p$size.class[1],a)]
        a = a[grep(p$size.class[2],a)]
      }
      if(p$by.sex) {
        k = ifelse(p$sex==1,'male',ifelse(p$sex==2,'female','berried'))
        a = a[grep(k,a)]
      }
      load(file.path(loc,a))
      return(out)
    }
    # ---- 'redo': rebuild the estimates from the groundfish survey DB ----
    # set = tow information, cas = catch totals per set, stra = stratum
    # areas, de = individual length/sex/weight details
    set = groundfish.db(DS='gsinf.odbc')
    cas = groundfish.db(DS='gscat.odbc')
    stra = groundfish.db(DS='gsstratum')
    de = groundfish.db(DS='gsdet.odbc')
    # positions from ddmm.mm to decimal degrees; longitudes are west (negative)
    set$X = convert.dd.dddd(set$slong) *-1
    set$Y = convert.dd.dddd(set$slat)
    # NOTE(review): 0.011801 appears to convert stratum area into the number
    # of trawlable units NH -- confirm units of stra$area
    stra$NH = as.numeric(stra$area)/0.011801
    ii = which(months(set$sdate) %in% mns & set$strat %in% strat & set$type %in% c(1,5))
    print('Both set types 1 and 5 are saved in data frame but only 1 is used for stratified')
    set = set[ii,]
    # Patch problem catch records: NA/zero weight with positive count gets a
    # token weight of 1; a missing count is imputed from weight using the
    # 1999-2015 mean individual weight; missing sample weight defaults to
    # the total weight.
    io = which(is.na(cas$totwgt) | cas$totwgt==0 & cas$totno>0)
    cas[io,'totwgt'] <- 1
    io = which(is.na(cas$totno) & !is.na(cas$totwgt))
    cas[io,'totno'] = cas[io,'totwgt']/0.806 #mean weight of individual per tow taken from 1999 to 2015
    io = which(is.na(cas$sampwgt) & !is.na(cas$totwgt))
    cas[io,'sampwgt'] <- cas[io,'totwgt']
    strata.files = list()
    # One output row per run: stratified mean/total weight (w.*) and number
    # (n.*) with bootstrap CIs, plus effort and distribution metrics.
    out = data.frame(yr=NA,w.yst=NA,w.yst.se=NA,w.ci.yst.l=NA,w.ci.yst.u=NA,w.Yst=NA,w.ci.Yst.l=NA,w.ci.Yst.u=NA,n.yst=NA,n.yst.se=NA,n.ci.yst.l=NA,n.ci.yst.u=NA,n.Yst=NA,n.ci.Yst.l=NA,n.ci.Yst.u=NA,dwao=NA,Nsets=NA,NsetswithLobster=NA,ObsLobs = NA,gini = NA,gini.lo =NA, gini.hi=NA)
    big.out = matrix(NA,nrow=p$nruns,ncol=length(seq(0.01,0.99,0.01))+1)
    mp=0
    np=1
    effic.out = data.frame(yr=NA,strat.effic.wt=NA,alloc.effic.wt=NA,strat.effic.n=NA,alloc.effic.n=NA)
    nopt.out = list()
    for(iip in ip) {
      mp = mp+1
      yr = p$runs[iip,"yrs"]
      print ( p$runs[iip,] )
      iy = which(year(set$sdate) %in% yr)
      # 2550 is the lobster species code
      iv = which(cas$spec==2550)
      pi='base'
      if(p$define.by.polygons) {
        # Restratify: keep only sets falling inside the LFA41 polygon(s)
        l = l41 = read.csv(file.path(project.datadirectory('bio.lobster'),'data','maps','LFA41Offareas.csv'))
        pi = 'restratified'
        if(p$lobster.subunits) {
          l = l41[which(l41$OFFAREA == p$area),]
        } else {
          print('All LFA41 subsetted by LFA Area')
          l41 = joinPolys(as.PolySet(l41),operation='UNION')
          attr(l41,'projection') <- 'LL'
          l41 = subset(l41, SID==1)
        }
        set$EID = 1:nrow(set)
        a = findPolys(set,l)
        iz = which(set$EID %in% a$EID)
        if(p$area=='adjacentLFA41') {
          # the complement: sets outside the polygon but within the strata
          iz = which(set$EID %ni% a$EID)
          ir = which(set$strat %in% c(strat))
          iz = intersect(iz,ir)
        }
      } else {
        iz = which(set$strat %in% c(strat))
      }
      se = set[intersect(iy,iz),]
      se$EID = 1:nrow(se)
      ca = cas[iv,]
      se$z = (se$dmin+se$dmax) / 2 * 1.8288 #from fm to m
      vars.2.keep = c('mission','X','Y','setno','sdate','dist','strat','z','bottom_temperature','bottom_salinity','type')
      se = se[,vars.2.keep]
      p$lb = p$length.based
      # sex-only selection is run through the length-based machinery with an
      # all-inclusive size window
      if(p$by.sex & !p$length.based) {p$size.class=c(0,1000); p$length.based=T}
      if(!p$lb) { vars.2.keep =c('mission','setno','totwgt','totno','size_class','spec')
        ca = ca[,vars.2.keep]
      }
      if(p$length.based){
        # Rescale set totals by the proportion (in number, pn, and weight,
        # pw) of measured individuals within the selected size class / sex.
        dp = de[which(de$spec %in% 2550),]
        ids = paste(se$mission,se$setno,sep="~")
        dp$ids = paste(dp$mission,dp$setno,sep="~")
        dp = dp[which(dp$ids %in% ids),]
        flf = p$size.class[1]:p$size.class[2]
        dp$clen2 = ifelse(dp$flen %in% flf,dp$clen,0)
        if(p$by.sex) dp$clen2 = ifelse(dp$fsex %in% p$sex, dp$clen2, 0)
        if(any(!is.finite(dp$fwt))) {
          # fill missing individual weights with a fitted length-weight curve
          io = which(!is.finite(dp$fwt))
          fit = nls(fwt~a*flen^b,de[which(de$spec==2550 & is.finite(de$fwt)),],start=list(a=0.001,b=3.3))
          ab = coef(fit)
          dp$fwt[io] = ab[1]*dp$flen[io]^ab[2]
        }
        dp$pb = dp$fwt * dp$clen
        dp$pb1 = dp$fwt * dp$clen2
        dpp = data.frame(mission=NA,setno=NA,size_class=NA,pn=NA,pw=NA)
        if(nrow(dp)>0) {
          dpp = aggregate(cbind(clen,clen2,pb,pb1)~mission+setno+size_class,data=dp,FUN=sum)
          dpp$pn = dpp$clen2/dpp$clen
          dpp$pw = dpp$pb1/dpp$pb
          dpp = dpp[,c('mission','setno','size_class','pn','pw')]
        }
        ca1 = merge(ca,dpp,by=c('mission','setno','size_class'))
        ca1$totwgt = ca1$totwgt * ca1$pw
        ca1$totno = ca1$totno * ca1$pn
        vars.2.keep =c('mission','setno','totwgt','totno','size_class','spec')
        ca = ca1[,vars.2.keep]
      }
      if(p$vessel.correction) {
        # Catchability correction between survey vessels: either the
        # set-specific Fanning factors or a fixed factor for 1970-1981.
        ca$id = ca$mission
        if(!exists('vessel.correction.fixed',p)) {
          ca = correct.vessel(ca)
          ca$totwgt = ca$totwgt * ca$cfvessel
          ca$totno = ca$totno * ca$cfvessel
          print('Totno and Totwgt are adjusted by Fannings Conversion Factors')
        }
        if(exists('vessel.correction.fixed',p) & yr %in% 1970:1981) {
          ca$totwgt = ca$totwgt * p$vessel.correction.fixed
          ca$totno = ca$totno * p$vessel.correction.fixed
          print(paste('Totno and Totwgt are adjusted by Conversion Factor of',p$vessel.correction.fixed))
        } else {
          print('Into Needler Years No Need for Vessel Correction')
        }
      }
      if(nrow(ca)>=1) {
        # Collapse to one record per set, attach to set info, and
        # standardize totals to a 1.75 nm tow
        ca = aggregate(cbind(totwgt,totno)~mission+setno,data=ca,FUN=sum)
        sc = merge(se,ca,by=c('mission','setno'),all.x=T)
        sc[,c('totwgt','totno')] = na.zero(sc[,c('totwgt','totno')])
        sc$totno = sc$totno * 1.75 / sc$dist
        sc$totwgt = sc$totwgt * 1.75 / sc$dist
        io = which(stra$strat %in% unique(sc$strat))
        st = stra[io,c('strat','NH')]
        st = st[order(st$strat),]
        st$Strata = st$strat
        spr = data.frame(Strata = strat, Pr = props)
        st = merge(st,spr)
        if(p$reweight.strata) st$NH = st$NH * st$Pr #weights the strata based on area in selected region
        # temperature mode: stratify bottom temperature instead of catch
        if(exists('temperature',p)) {sc = sc[!is.na(sc$bottom_temperature),] ; sc$totno = sc$bottom_temperature; sc$totwgt = sc$bottom_temperature }
        if(nrow(sc)>0){
          st = Prepare.strata.file(st)
          sc1= sc
          sc = sc[which(sc$type==1),]
          sc = Prepare.strata.data(sc)
          strata.files[[mp]] = list(st,sc1)
          # Stratified estimates for weight (sW) and number (sN)
          sW = Stratify(sc,st,sc$totwgt)
          sN = Stratify(sc,st,sc$totno)
          ssW = summary(sW)
          ssN = summary(sN)
          if(p$strata.efficiencies) {
            ssW = summary(sW,effic=T,nopt=T)
            ssN = summary(sN,effic=T,nopt=T)
            effic.out[mp,] = c(yr,ssW$effic.str,ssW$effic.alloc,ssN$effic.str,ssN$effic.alloc)
            nopt.out[[mp]] = list(yr,ssW$n.opt,ssN$n.opt)
          }
          if(!p$strata.efficiencies) {
            bsW = list(NA,NA,NA)
            bsN = list(NA,NA,NA)
            nt = NA
            if(p$bootstrapped.ci) {
              # bias-corrected bootstrap CIs, 1000 resamples
              bsW = summary(boot.strata(sW,method='BWR',nresamp=1000),ci.method='BC')
              bsN = summary(boot.strata(sN,method='BWR',nresamp=1000),ci.method='BC')
              nt = sum(sW$Nh)/1000
            }
            if(exists('big.ci',p)) {
              big.out[mp,] = c(yr,summary(boot.strata(sN,method='BWR',nresamp=1000),ci.method='BC',big.ci=T))
            }
            out[mp,] = c(yr,ssW[[1]],ssW[[2]],bsW[[1]][1],bsW[[1]][2],ssW[[3]]/1000,bsW[[1]][1]*nt,bsW[[1]][2]*nt,
              ssN[[1]],ssN[[2]],bsN[[1]][1],bsN[[1]][2],ssN[[3]]/1000,bsN[[1]][1]*nt,bsN[[1]][2]*nt,ssW$dwao,sum(sW[['nh']]),sum(sW[['nhws']]),round(sum(sc$totno)),ssN$gini,bsN[[2]][1],bsN[[2]][2])
            print(out[mp,'yr'])
          } else {
            # efficiencies-only run: pad the estimates row with zeros
            out[mp,] = c(yr,rep(0,22))
            print(out[mp,'yr'])
          }
        }
      }
    }
    if(p$strata.efficiencies) {
      return(list(effic.out,nopt.out))
    }
    if(exists('big.ci',p)) {
      return(big.out)
    }
    # Encode the selections into the output file names, then save
    lle = 'all'
    lbs = 'not'
    if(p$length.based) lle = paste(p$size.class[1],p$size.class[2],sep="-")
    if(p$by.sex) lbs = ifelse(p$sex==1,'male',ifelse(p$sex==2,'female','berried'))
    if(length(lbs)>1) lbs = paste(lbs[1],lbs[2],sep='&')
    fn = paste('stratified',p$series,p$area,pi,'length',lle,lbs,'sexed','rdata',sep=".")
    fn.st = paste('strata.files',p$series,p$area,pi,'length',lle,lbs,'sexed','rdata',sep=".")
    if(save) {
      print(fn)
      save(out,file=file.path(loc,fn))
      save(strata.files,file=file.path(loc,fn.st))
    }
    if(p$strata.files.return) return(strata.files)
    return(out)
  }
}
|
#' trapz: trapezoidal rule to approximate the integral values
#'
#' Returns approximation of integral.
#'
#' @param x A vector with \code{n} elements, \code{x[i]} is a support, \code{i = 1, ..., n}.
#'   If \code{y} is \code{NULL}, support is taken as \code{seq(1, length(x), by = 1)}.
#' @param y \code{y[i, j]} is jth values on corresponding value of \code{x[i], i = 1, ..., n}.
#'   If \code{y} is a vector, the length of \code{y} must be equal to the length of \code{x}.
#'   If \code{y} is a matrix, the number of rows must be equal to the length of \code{x}.
#' @return A value, the approximation of integral.
#' @section Reference:
#' Kai Habel, trapz, Octave.
#' @examples
#' # case 1
#' x <- c(1, 4, 9, 16, 25)
#' trapz(x) # 42
#'
#' # case 2
#' x <- matrix(c(1,4,9,16,25,1,8,27,64,125), 5)
#' trapz(x) # 42 162
#' @export
trapz <- function(x, y = NULL){
  # One-argument form: x holds the values; build a unit-spaced support of the
  # right length (rows for a matrix, elements for a vector). seq_len/seq_along
  # are used instead of 1:n so a zero-length input yields an empty support
  # rather than c(1, 0).
  if (is.null(y)) {
    y <- x
    x <- if (is.matrix(x)) seq_len(nrow(x)) else seq_along(x)
  }
  # trapz_cpp (compiled backend) integrates each column of y over support x.
  return(as.vector(trapz_cpp(x, as.matrix(y))))
}
# function to perform data binning:
# averages each subject/variable series into `numBins` equal-width time bins;
# the binned value is the mean of the observations falling in the bin and is
# placed at the bin midpoint. `data` is expected to carry the columns
# subId, variable, timePnt and value.
# NOTE(review): the first `:=` adds an idx_agg column to the caller's
# data.table by reference, and it is only removed from the returned copy --
# confirm this side effect on `data` is intended.
#' @importFrom data.table setnames
#' @importFrom utils head
binData <- function(data, numBins){
  # check data: numBins must be a finite positive (near-)integer
  assert_that(!is.na(numBins), is.finite(numBins), numBins > 0, numBins - floor(numBins) < 1e-6)
  # find the boundaries to split data (numBins + 1 equally spaced cut points)
  boundaries <- seq(min(data$timePnt), max(data$timePnt), length.out = numBins + 1)
  # find the middle points to stand for the time points of binned data
  newTimePnts <- head(boundaries, numBins) + diff(boundaries) / 2
  # average the data in each interval: assign a bin index per observation,
  # then take the mean value per subject/variable/bin
  newDataDT <- data %>>% `[`(j = idx_agg := findInterval(timePnt, boundaries, TRUE), by = .(subId,variable)) %>>%
    `[`(j = .(value = mean(value), timePnt = newTimePnts[idx_agg]), by = .(subId,variable,idx_agg)) %>>%
    `[`(j = idx_agg := NULL)
  return(newDataDT)
}
# sub-function for bwCandChooser: the largest time span covered by lag_n
# consecutive order statistics of t; for lag_n == 1 this is half the largest
# adjacent gap. Returns NA when t has fewer than lag_n points.
#' @importFrom utils head tail
find_max_diff_f <- function(t, lag_n){
  assert_that(!is.na(lag_n), is.finite(lag_n), lag_n > 0, lag_n - floor(lag_n) < 1e-6)
  t_sorted <- sort(t)
  if (length(t) < lag_n) {
    return(NA)
  }
  if (lag_n > 1) {
    # gap between order statistics lag_n - 1 apart; identical to subtracting
    # the leading from the trailing subsequence of length n - lag_n + 1
    max(diff(t_sorted, lag = lag_n - 1))
  } else {
    max(diff(t_sorted)) / 2
  }
}
#' Find the candidates of bandwidths for locPoly1d and locLinear2d
#'
#' The difference between \code{bwCandChooser2} and \code{bwCandChooser3} is whether the
#' candidates of bandwidths are the same on the x-axis and y-axis.
#' In our application, \code{bwCandChooser2} is used in finding the candidates of bandwidth of covariance
#' surface and \code{bwCandChooser3} is used in finding the candidates of bandwidth of cross-covariance surface.
#'
#' @param data An data.frame or data.table containing the variables in model.
#' @param id.var A string. The variable name of subject ID.
#' @param timeVarName A string. The variable name of time points.
#' @param sparsity The sparsity of data which is tested by \code{\link{checkSparsity}}.
#' @param kernel A string. It could be 'gauss', 'gaussvar', 'epan' or 'quar'.
#' @param degree An integer, the degree of polynomial.
#' @return The candidates of bandwidths
#' @examples
#' ## examples for bwCandChooser
#' data("regularExData", package = 'rfda')
#' bwCandChooser(regularExData, "sampleID", "t", 2, "gauss", 1)
#' @rdname bwCandChooser
#' @export
bwCandChooser <- function(data, id.var, timeVarName, sparsity, kernel, degree = 1){
  # check data
  # NOTE(review): id.var is validated but never used in the body -- confirm
  # whether it was meant to feed into the bandwidth computation.
  assert_that(is.data.frame(data), is.character(id.var), is.character(timeVarName),
    !is.na(sparsity), is.finite(sparsity), sparsity %in% c(0, 1, 2),
    kernel %in% c('gauss','epan','gaussvar','quar'),
    !is.na(degree), is.finite(degree), degree > 0, degree - floor(degree) < 1e-6)
  # get the range of time points
  r <- diff(range(data[[timeVarName]]))
  # get the minimum bandwidth given sparsity of data: a local polynomial of
  # this degree needs enough support points inside any window
  # NOTE(review): sparsity == 2 uses degree + 1 here but degree + 2 in
  # bwCandChooser2/bwCandChooser3 -- confirm which is intended.
  if (sparsity == 0) {
    dstar <- find_max_diff_f(data[[timeVarName]], degree + 2)
    minBW <- ifelse(!is.na(dstar), ifelse(dstar > r/4, 0.75, 1) * 2.5 * dstar, NA)
  } else if (sparsity == 1) {
    minBW <- 2.0 * find_max_diff_f(data[[timeVarName]], degree + 1)
  } else if (sparsity == 2) {
    minBW <- 1.5 * find_max_diff_f(data[[timeVarName]], degree + 1)
  }
  # use range / 2 if kernel is gaussian and no valid minimum was found
  if ((is.na(minBW) || minBW < 0) && kernel == "gauss") {
    message("Data is too sparse, use the range / 2 as minimum bandwidth.")
    minBW <- 0.5 * r
  } else if ((is.na(minBW) || minBW < 0) && kernel != "gauss") {
    stop("Data is too sparse, no suitable bandwidth can be found! Try Gaussian kernel instead!\n");
  }
  # find the candidates: 10 geometrically spaced bandwidths from minBW up to
  # r / 4 (q is the common ratio of the geometric sequence)
  minBW <- min(minBW, r)
  q <- (r / minBW / 4)^(1/9)
  return(sort(q^(0:9) * minBW, decreasing = FALSE))
}
#' @param dataAllGrid An data.table containing the grid of timepoints with
#' naming \code{t1} and \code{t2} in model. (see examples.)
#' @examples
#'
#' ## examples for bwCandChooser2
#' require(data.table)
#' require(pipeR)
#'
#' data("sparseExData", package = 'rfda')
#' sparsity <- checkSparsity(sparseExData, "sampleID", "t")
#' sparseExData %>>% data.table %>>% `[`( , .(t1 = rep(t, length(t)),
#' t2 = rep(t, each = length(t))), by = .(sampleID)) %>>%
#' bwCandChooser2(sparsity, "gauss", 1)
#'
#' data("regularExData", package = 'rfda')
#' sparsity <- checkSparsity(regularExData, "sampleID", "t")
#' regularExData %>>% data.table %>>% `[`( , .(t1 = rep(t, length(t)),
#' t2 = rep(t, each = length(t))), by = .(sampleID)) %>>%
#' bwCandChooser2(sparsity, "gauss", 1)
#' @rdname bwCandChooser
#' @importFrom data.table data.table setorder is.data.table
#' @export
bwCandChooser2 <- function(dataAllGrid, sparsity, kernel, degree = 1){
  # check data
  assert_that(is.data.table(dataAllGrid), !is.na(sparsity), is.finite(sparsity), sparsity %in% c(0, 1, 2),
    kernel %in% c("gauss", "epan", "gaussvar", "quar"), !is.na(degree), is.finite(degree),
    degree > 0, degree - floor(degree) < 1e-6)
  # get output grid (unique time points along the first axis)
  xout <- unique(dataAllGrid$t1)
  # get range of time points
  r <- diff(range(dataAllGrid$t1))
  # get the minimum bandwidth given sparsity of data
  if (sparsity == 0) {
    # dense case: also account for the largest t1 gap among off-diagonal
    # grid points, augmented with the corners of the output grid
    outGrid <- data.table(expand.grid(t1 = range(xout), t2 = xout))
    b <- dataAllGrid[t1 != t2, .(t1, t2)] %>>% rbind(outGrid) %>>% unique %>>% setorder(t2, t1) %>>% `$`(t1)
    minBW <- max(find_max_diff_f(xout, degree + 2), max(diff(b)) / 2.0)
  } else if (sparsity == 1) {
    minBW <- 2.0 * find_max_diff_f(xout, degree + 1)
  } else if (sparsity == 2) {
    minBW <- 1.5 * find_max_diff_f(xout, degree + 2)
  }
  # shrink the minimum bandwidth if kernel is gaussian (gaussian kernels
  # have unbounded support, so smaller bandwidths remain usable)
  if (kernel == "gauss") {
    if (is.na(minBW) || minBW < 0){
      message("Data is too sparse, use the max(t) as minimum bandwidth.")
      minBW <- max(xout)
    }
    minBW <- 0.2 * minBW;
  } else if ((is.na(minBW) || minBW < 0) && kernel != "gauss") {
    stop("Data is too sparse, no suitable bandwidth can be found! Try Gaussian kernel instead!\n");
  }
  # find the candidates: 10 geometrically spaced bandwidths from minBW to
  # r / 4, identical on both axes -- returned as a sorted 10 x 2 matrix
  minBW <- min(minBW, r / 4)
  q <- (r / minBW / 4)^(1/9)
  bwMat <- matrix(rep(q^(0:9) * minBW, 2), 10)
  return(bwMat[order(bwMat[,1], bwMat[,2], decreasing = FALSE), ])
}
#' @examples
#'
#' ## examples for bwCandChooser3
#' # These examples are demo cases, we does not use this to find the candidates of
#' # bandwidths for smoothing covariance surface.
#' require(data.table)
#' require(pipeR)
#'
#' data("sparseExData", package = 'rfda')
#' sparsity <- checkSparsity(sparseExData, "sampleID", "t")
#' sparseExData %>>% data.table %>>% `[`( , .(t1 = rep(t, length(t)),
#' t2 = rep(t, each = length(t))), by = .(sampleID)) %>>%
#' bwCandChooser3(sparsity, "gauss", 1)
#'
#' data("regularExData", package = 'rfda')
#' sparsity <- checkSparsity(regularExData, "sampleID", "t")
#' regularExData %>>% data.table %>>% `[`( , .(t1 = rep(t, length(t)),
#' t2 = rep(t, each = length(t))), by = .(sampleID)) %>>%
#' bwCandChooser3(sparsity, "gauss", 1)
#' @rdname bwCandChooser
#' @importFrom data.table data.table setorder is.data.table
#' @export
bwCandChooser3 <- function(dataAllGrid, sparsity, kernel, degree = 1){
  # check data
  assert_that(is.data.table(dataAllGrid), !is.na(sparsity), is.finite(sparsity), sparsity %in% c(0, 1, 2),
    kernel %in% c("gauss", "epan", "gaussvar", "quar"), !is.na(degree), is.finite(degree),
    degree > 0, degree - floor(degree) < 1e-6)
  # get output grid (unique time points along the first axis)
  xout <- unique(dataAllGrid$t1)
  # get range of time points
  r <- diff(range(dataAllGrid$t1))
  # get the minimum bandwidth given sparsity of data
  if (sparsity == 0) {
    # dense case: also account for the largest t1 gap among off-diagonal
    # grid points, augmented with the corners of the output grid
    outGrid <- data.table(expand.grid(t1 = range(xout), t2 = xout))
    b <- dataAllGrid[t1 != t2, .(t1, t2)] %>>% rbind(outGrid) %>>% unique %>>% setorder(t2, t1) %>>% `$`(t1)
    minBW <- max(find_max_diff_f(xout, degree + 2), max(diff(b)) / 2.0)
  } else if (sparsity == 1) {
    minBW <- 2.0 * find_max_diff_f(xout, degree + 1)
  } else if (sparsity == 2) {
    minBW <- 1.5 * find_max_diff_f(xout, degree + 2)
  }
  # shrink the minimum bandwidth if kernel is gaussian (gaussian kernels
  # have unbounded support, so smaller bandwidths remain usable)
  if (kernel == "gauss") {
    if (is.na(minBW) || minBW < 0){
      message("Data is too sparse, use the max(t) as minimum bandwidth.")
      minBW <- max(xout)
    }
    minBW <- 0.2 * minBW;
  } else if ((is.na(minBW) || minBW < 0) && kernel != "gauss") {
    stop("Data is too sparse, no suitable bandwidth can be found! Try Gaussian kernel instead!\n");
  }
  # find the candidates: 5 geometrically spaced bandwidths from minBW to
  # r / 4, crossed on the two axes -- a 25 x 2 matrix of all pairs (unlike
  # bwCandChooser2, the x- and y-bandwidths may differ)
  minBW <- min(minBW, r / 4)
  q <- (r / minBW / 4)^(1/4)
  bwMat <- (q^(0:4) * minBW) %>>% expand.grid(.) %>>% as.matrix %>>% `colnames<-`(NULL)
  return(bwMat[order(bwMat[,1], bwMat[,2], decreasing = FALSE), ])
}
#' Adjustment of optimal bandwidth chosen by gcv
#'
#' The usage of this function can be found in the examples of \code{\link{gcvLocPoly1d}} and
#' \code{\link{gcvLocLinear2d}}.
#'
#' @param bwOpt A numeric. The optimal bandwidth chosen by gcv.
#' @param sparsity The sparsity of data which is tested by \code{\link{checkSparsity}}.
#' @param kernel A string. Only 'gauss' and 'epan' are supported.
#' @param drv An integer, the non-negative order of derivative.
#' @return An adjusted bandwidth.
#' @export
adjGcvBw <- function(bwOpt, sparsity, kernel, drv = 0){
  # inflation factors indexed by derivative order (1: drv = 0, 2: drv = 1,
  # 3: drv = 2); dense designs (sparsity == 2) use their own factor set
  if (kernel == "gauss") {
    bwAdjFac <- if (sparsity == 2) c(1.1, 0.8, 0.8) else c(1.1, 1.2, 2)
  } else if (kernel == "epan") {
    bwAdjFac <- if (sparsity == 2) c(1.1, 1.0, 1.1) else c(1.1, 1.2, 1.5)
  } else {
    # previously an unsupported kernel failed later with the obscure
    # "object 'bwAdjFac' not found" -- fail early with a clear message
    stop("adjGcvBw only supports 'gauss' and 'epan' kernels.")
  }
  if (drv < 0)
    stop("'drv' must be a non-negative integer.")
  # NOTE(review): drv > 2 reuses the factor of drv = 1 (index 2), exactly as
  # the original implementation did -- confirm this mapping is intended.
  facTake <- if (drv > 2) 2L else as.integer(drv) + 1L
  return(bwOpt * bwAdjFac[facTake])
}
#' Sparsify functional data by randomly removing observations
#'
#' Randomly removes a proportion of the observations of each subject, which is
#' useful to generate sparse functional data from dense data in simulations.
#' (The previous title was a copy-paste from the bandwidth chooser functions.)
#'
#' @param data A data.frame containing sample id, observed time points and corresponding observed values.
#' @param subid The column name of the id of subjects.
#' @param sparsityRate A numeric vector strictly between \code{0} and \code{1}. The proportion of
#'   data of each subject to remove; a proportion \code{1 - sparsityRate} is kept.
#'   The length of sparsityRate must be \code{1} or the number of curves (\code{n} in \code{\link{get_FPCA_opts}}).
#' @return A data.frame after sparsifying.
#' @examples
#' require(ggplot2)
#' tp <- seq(1, 10, len = 101)
#' DT <- funcDataGen(3, tp, function(x) sin(x), function(x) rep(1, length(x)), "BesselJ")
#' sparseDT <- sparsify(DT, "sampleID", 0.85)
#' ggplot(sparseDT, aes(x = t, y = y, color = factor(sampleID))) +
#'   geom_line() + geom_point() + labs(color = "Sample ID")
#'
#' message("The number of observation is ", no <- length(unique(DT$sampleID)))
#' sparseDT2 <- sparsify(DT, "sampleID", runif(no))
#' ggplot(sparseDT2, aes(x = t, y = y, color = factor(sampleID))) +
#'   geom_line() + geom_point() + labs(color = "Sample ID")
#' @export
sparsify <- function(data, subid, sparsityRate){
  # check inputs: the id column must exist and all rates must lie in (0, 1)
  assert_that(is.data.frame(data), subid %in% names(data), all(sparsityRate > 0 & sparsityRate < 1))
  # convert to a data.table copy so the caller's data is not modified
  data <- data.table(data)
  # unique subject ids, in order of first appearance
  uniSubIds <- unique(data[[subid]])
  # a single rate is recycled to all subjects; otherwise one rate per subject
  if (length(sparsityRate) != length(uniSubIds) && length(sparsityRate) != 1)
    stop("The length of sparsityRate must be 1 or the number of subjects.")
  if (length(sparsityRate) == 1)
    sparsityRate <- rep(sparsityRate, length(uniSubIds))
  # keep a random fraction (1 - sparsityRate) of each subject's rows
  # NOTE(review): split() orders groups by the grouping values (typically
  # sorted), while sparsityRate is paired positionally -- confirm per-subject
  # rates align with the intended subjects when ids are not already sorted.
  sparseDT <- mapply(function(dt, p) dt[sample(nrow(dt), round(nrow(dt)*p))],
                     split(data, data[[subid]]), 1 - sparsityRate, SIMPLIFY = FALSE) %>>% rbindlist
  return(sparseDT)
}
#' Unnest list-columns of a data.table
#'
#' Expands the list-columns of a data.table so that every element of the nested
#' vectors gets its own row, replicating the non-nested columns.
#' (The previous title was a copy-paste from the bandwidth chooser functions.)
#'
#' @param DT A data.table containing list or vector in the cell.
#'   The cells in each row must have the same number of elements.
#' @param unnestCols The column names to unnest. If \code{NULL} (default), all
#'   list-columns are detected and unnested automatically.
#' @return A unnested data.table.
#' @examples
#' require(data.table)
#' # all numerics
#' DT <- unnest(data.table(V1 = list(c(1,3,5), c(1,7)), V2 = list(c(2,5,3), c(4,6)), V3 = 1:2))
#' # mixed numerics and characters
#' DT2 <- unnest(data.table(V1 = list(c(1,3,5), c(1,7)), V2 = list(c("a","b","c"), c("d","e")),
#'   V3 = 1:2, V4 = c("z","y")))
#' \dontrun{
#' require(jsonlite)
#' jsonDataFile <- system.file("extdata", "funcdata.json", package = "rfda")
#' # Following line may have a parse error with message "premature EOF has occured".
#' DT <- unnest(data.table(fromJSON(jsonDataFile)))
#' }
#' @importFrom data.table .SD
#' @export
unnest <- function(DT, unnestCols = NULL){
  # detect the list-columns automatically when none are given
  if (is.null(unnestCols)) {
    unnestCols <- names(DT)[sapply(DT, function(x) any(class(x) %in% "list"))]
    message("Automatically recognize the nested columns: ", paste0(unnestCols, collapse = ", "))
  }
  # all requested columns must exist in DT
  if (any(!unnestCols %in% names(DT)))
    stop(sprintf("The columns, %s, do not exist in the DT.",
                 paste0(unnestCols[!unnestCols %in% names(DT)], collapse = ", ")))
  # the remaining columns identify each row and are replicated on unnesting
  groupbyVar <- setdiff(names(DT), unnestCols)
  # expression removing the group-by columns, e.g. `:=`(a=NULL,b=NULL)
  # (expressions are assembled as text because the column names are dynamic)
  chkExpr <- paste0(groupbyVar, "=NULL", collapse = ",") %>>% (paste0("`:=`(", ., ")"))
  # verify that, within each row, all nested cells hold the same number of elements
  chkLenAllEqual <- DT[ , lapply(.SD, function(x) sapply(x, length)), by = groupbyVar] %>>%
    `[`(j = eval(parse(text = chkExpr))) %>>% as.matrix %>>% apply(1, diff) %>>% `==`(0) %>>% all
  if (!chkLenAllEqual)
    stop("The length in each cell is not equal.")
  # build the unnest expression, e.g. .(V1=unlist(V1),V2=unlist(V2))
  expr <- unnestCols %>>% (paste0(., "=unlist(", ., ")")) %>>%
    paste0(collapse = ",") %>>% (paste0(".(", ., ")"))
  # return unnested data.table
  return(DT[ , eval(parse(text = expr)), by = groupbyVar])
}
| /R/commFunc.R | permissive | alexchang2017/rfda | R | false | false | 14,736 | r | #' trapz: trapezoidal rule to approximate the integral values
#'
#' Returns approximation of integral.
#'
#' @param x A vector with \code{n} elements, \code{x[i]} is a support, \code{i = 1, ..., n}.
#' If \code{y} is \code{NULL}, support is taken as \code{seq(1, length(x), by = 1)}.
#' @param y \code{y[i, j]} is jth values on corresponding value of \code{x[i], i = 1, ..., n}.
#' If \code{y} is vector, the length of \code{y} must be equal to the lenght of \code{x}.
#' If \code{y} is matrix, the number of rows must be equal to the lenght of \code{x}.
#' @return A value, the approximation of integral.
#' @section Reference:
#' Kai Habel, trapz, Octave.
#' @examples
#' # case 1
#' x <- c(1, 4, 9, 16, 25)
#' trapz(x) # 42
#'
#' # case 2
#' x <- matrix(c(1,4,9,16,25,1,8,27,64,125), 5)
#' trapz(x) # 42 162
#' @export
trapz <- function(x, y = NULL){
  # Single-argument form: `x` holds the values and the support defaults to
  # 1, 2, ..., n (n = number of rows for a matrix, length otherwise).
  if (is.null(y)) {
    support <- if (is.matrix(x)) 1:nrow(x) else 1:length(x)
    y <- x
    x <- support
  }
  # trapz_cpp integrates each column of the value matrix over the support
  as.vector(trapz_cpp(x, as.matrix(y)))
}
# perform data binning: average each subject's measurements within numBins
# equal-width intervals of timePnt so that all curves share a common grid
#' @importFrom data.table setnames
#' @importFrom utils head
binData <- function(data, numBins){
  # numBins must be a finite positive (near-)integer
  assert_that(!is.na(numBins), is.finite(numBins), numBins > 0, numBins - floor(numBins) < 1e-6)
  # bin boundaries: numBins equal-width intervals spanning the observed times
  boundaries <- seq(min(data$timePnt), max(data$timePnt), length.out = numBins + 1)
  # each bin is represented by its midpoint
  newTimePnts <- head(boundaries, numBins) + diff(boundaries) / 2
  # assign every observation to a bin and average the values per
  # (subId, variable, bin); rightmost.closed = TRUE keeps max(timePnt)
  # inside the last bin instead of creating a spurious extra interval
  # NOTE(review): the first `:=` adds the temporary idx_agg column to `data`
  # by reference and only the aggregated result drops it -- a data.table
  # passed by the caller keeps the extra column after this call; confirm.
  newDataDT <- data %>>% `[`(j = idx_agg := findInterval(timePnt, boundaries, TRUE), by = .(subId,variable)) %>>%
    `[`(j = .(value = mean(value), timePnt = newTimePnts[idx_agg]), by = .(subId,variable,idx_agg)) %>>%
    `[`(j = idx_agg := NULL)
  return(newDataDT)
}
# helper for the bwCandChooser* family: widest spacing spanned by `lag_n`
# consecutive ordered time points (half the largest gap when lag_n == 1)
#' @importFrom utils head tail
find_max_diff_f <- function(t, lag_n){
  assert_that(!is.na(lag_n), is.finite(lag_n), lag_n > 0, lag_n - floor(lag_n) < 1e-6)
  nPts <- length(t)
  # not enough points to form a single window
  if (nPts < lag_n)
    return(NA)
  ordered_t <- sort(t)
  if (lag_n > 1) {
    # width of every window covering lag_n consecutive sorted points
    winWidth <- ordered_t[lag_n:nPts] - ordered_t[1:(nPts - lag_n + 1)]
    return(max(winWidth))
  }
  # lag_n == 1: half of the largest gap between adjacent points
  max(diff(ordered_t)) / 2
}
#' Find the candidates of bandwidths for locPoly1d and locLinear2d
#'
#' The difference between \code{bwCandChooser2} and \code{bwCandChooser3} is whether the
#' candidates of bandwidths are the same on the x-axis and y-axis.
#' In our application, \code{bwCandChooser2} is used in finding the candidates of bandwidth of covariance
#' surface and \code{bwCandChooser3} is used in finding the candidates of bandwidth of cross-covariance surface.
#'
#' @param data An data.frame or data.table containing the variables in model.
#' @param id.var A string. The variable name of subject ID.
#' @param timeVarName A string. The variable name of time points.
#' @param sparsity The sparsity of data which is tested by \code{\link{checkSparsity}}.
#' @param kernel A string. It could be 'gauss', 'gaussvar', 'epan' or 'quar'.
#' @param degree An integer, the degree of polynomial.
#' @return The candidates of bandwidths
#' @examples
#' ## examples for bwCandChooser
#' data("regularExData", package = 'rfda')
#' bwCandChooser(regularExData, "sampleID", "t", 2, "gauss", 1)
#' @rdname bwCandChooser
#' @export
bwCandChooser <- function(data, id.var, timeVarName, sparsity, kernel, degree = 1){
  # validate all inputs up-front
  assert_that(is.data.frame(data), is.character(id.var), is.character(timeVarName),
              !is.na(sparsity), is.finite(sparsity), sparsity %in% c(0, 1, 2),
              kernel %in% c('gauss','epan','gaussvar','quar'),
              !is.na(degree), is.finite(degree), degree > 0, degree - floor(degree) < 1e-6)
  tPnts <- data[[timeVarName]]
  # total span of the observed time points
  timeRange <- diff(range(tPnts))
  # smallest admissible bandwidth, depending on how dense the design is
  if (sparsity == 0) {
    dstar <- find_max_diff_f(tPnts, degree + 2)
    if (is.na(dstar)) {
      minBW <- NA
    } else {
      # damp very wide windows a little before inflating by 2.5
      shrink <- if (dstar > timeRange / 4) 0.75 else 1
      minBW <- shrink * 2.5 * dstar
    }
  } else if (sparsity == 1) {
    minBW <- 2.0 * find_max_diff_f(tPnts, degree + 1)
  } else if (sparsity == 2) {
    minBW <- 1.5 * find_max_diff_f(tPnts, degree + 1)
  }
  if (is.na(minBW) || minBW < 0) {
    # no usable minimum: fall back for the Gaussian kernel, give up otherwise
    if (kernel == "gauss") {
      message("Data is too sparse, use the range / 2 as minimum bandwidth.")
      minBW <- 0.5 * timeRange
    } else {
      stop("Data is too sparse, no suitable bandwidth can be found! Try Gaussian kernel instead!\n")
    }
  }
  # geometric grid of 10 candidates whose largest value equals timeRange / 4
  minBW <- min(minBW, timeRange)
  ratio <- (timeRange / minBW / 4)^(1/9)
  sort(ratio^(0:9) * minBW, decreasing = FALSE)
}
#' @param dataAllGrid A data.table containing the grid of timepoints with
#'   naming \code{t1} and \code{t2} in model. (see examples.)
#' @examples
#'
#' ## examples for bwCandChooser2
#' require(data.table)
#' require(pipeR)
#'
#' data("sparseExData", package = 'rfda')
#' sparsity <- checkSparsity(sparseExData, "sampleID", "t")
#' sparseExData %>>% data.table %>>% `[`( , .(t1 = rep(t, length(t)),
#'   t2 = rep(t, each = length(t))), by = .(sampleID)) %>>%
#'   bwCandChooser2(sparsity, "gauss", 1)
#'
#' data("regularExData", package = 'rfda')
#' sparsity <- checkSparsity(regularExData, "sampleID", "t")
#' regularExData %>>% data.table %>>% `[`( , .(t1 = rep(t, length(t)),
#'   t2 = rep(t, each = length(t))), by = .(sampleID)) %>>%
#'   bwCandChooser2(sparsity, "gauss", 1)
#' @rdname bwCandChooser
#' @importFrom data.table data.table setorder is.data.table
#' @export
bwCandChooser2 <- function(dataAllGrid, sparsity, kernel, degree = 1){
  # check inputs: dataAllGrid must be a data.table of time-point pairs (t1, t2),
  # sparsity one of 0/1/2 (see checkSparsity), kernel a supported kernel name
  # and degree a positive (near-)integer polynomial degree
  assert_that(is.data.table(dataAllGrid), !is.na(sparsity), is.finite(sparsity), sparsity %in% c(0, 1, 2),
              kernel %in% c("gauss", "epan", "gaussvar", "quar"), !is.na(degree), is.finite(degree),
              degree > 0, degree - floor(degree) < 1e-6)
  # distinct time points on the first axis serve as the output grid
  xout <- unique(dataAllGrid$t1)
  # total span of the observed time points
  r <- diff(range(dataAllGrid$t1))
  # derive the smallest usable bandwidth from the design density
  if (sparsity == 0) {
    # sparse case: take the off-diagonal pairs plus the grid boundary points,
    # ordered by (t2, t1); the minimum bandwidth must cover both the widest
    # window of degree + 2 sorted points and half the largest gap in `b`
    outGrid <- data.table(expand.grid(t1 = range(xout), t2 = xout))
    b <- dataAllGrid[t1 != t2, .(t1, t2)] %>>% rbind(outGrid) %>>% unique %>>% setorder(t2, t1) %>>% `$`(t1)
    minBW <- max(find_max_diff_f(xout, degree + 2), max(diff(b)) / 2.0)
  } else if (sparsity == 1) {
    minBW <- 2.0 * find_max_diff_f(xout, degree + 1)
  } else if (sparsity == 2) {
    minBW <- 1.5 * find_max_diff_f(xout, degree + 2)
  }
  # Gaussian kernels tolerate smaller bandwidths, so shrink the minimum;
  # other kernels cannot recover from an undefined minimum bandwidth
  if (kernel == "gauss") {
    if (is.na(minBW) || minBW < 0){
      message("Data is too sparse, use the max(t) as minimum bandwidth.")
      minBW <- max(xout)
    }
    minBW <- 0.2 * minBW;
  } else if ((is.na(minBW) || minBW < 0) && kernel != "gauss") {
    stop("Data is too sparse, no suitable bandwidth can be found! Try Gaussian kernel instead!\n");
  }
  # 10 geometric candidates from minBW up to r / 4, identical on both axes
  minBW <- min(minBW, r / 4)
  q <- (r / minBW / 4)^(1/9)
  bwMat <- matrix(rep(q^(0:9) * minBW, 2), 10)
  return(bwMat[order(bwMat[,1], bwMat[,2], decreasing = FALSE), ])
}
#' @examples
#'
#' ## examples for bwCandChooser3
#' # These examples are demo cases, we do not use this to find the candidates of
#' # bandwidths for smoothing covariance surface.
#' require(data.table)
#' require(pipeR)
#'
#' data("sparseExData", package = 'rfda')
#' sparsity <- checkSparsity(sparseExData, "sampleID", "t")
#' sparseExData %>>% data.table %>>% `[`( , .(t1 = rep(t, length(t)),
#'   t2 = rep(t, each = length(t))), by = .(sampleID)) %>>%
#'   bwCandChooser3(sparsity, "gauss", 1)
#'
#' data("regularExData", package = 'rfda')
#' sparsity <- checkSparsity(regularExData, "sampleID", "t")
#' regularExData %>>% data.table %>>% `[`( , .(t1 = rep(t, length(t)),
#'   t2 = rep(t, each = length(t))), by = .(sampleID)) %>>%
#'   bwCandChooser3(sparsity, "gauss", 1)
#' @rdname bwCandChooser
#' @importFrom data.table data.table setorder is.data.table
#' @export
bwCandChooser3 <- function(dataAllGrid, sparsity, kernel, degree = 1){
  # check inputs: dataAllGrid must be a data.table of time-point pairs (t1, t2),
  # sparsity one of 0/1/2 (see checkSparsity), kernel a supported kernel name
  # and degree a positive (near-)integer polynomial degree
  assert_that(is.data.table(dataAllGrid), !is.na(sparsity), is.finite(sparsity), sparsity %in% c(0, 1, 2),
              kernel %in% c("gauss", "epan", "gaussvar", "quar"), !is.na(degree), is.finite(degree),
              degree > 0, degree - floor(degree) < 1e-6)
  # distinct time points on the first axis serve as the output grid
  xout <- unique(dataAllGrid$t1)
  # total span of the observed time points
  r <- diff(range(dataAllGrid$t1))
  # derive the smallest usable bandwidth from the design density
  if (sparsity == 0) {
    # sparse case: take the off-diagonal pairs plus the grid boundary points,
    # ordered by (t2, t1); the minimum bandwidth must cover both the widest
    # window of degree + 2 sorted points and half the largest gap in `b`
    outGrid <- data.table(expand.grid(t1 = range(xout), t2 = xout))
    b <- dataAllGrid[t1 != t2, .(t1, t2)] %>>% rbind(outGrid) %>>% unique %>>% setorder(t2, t1) %>>% `$`(t1)
    minBW <- max(find_max_diff_f(xout, degree + 2), max(diff(b)) / 2.0)
  } else if (sparsity == 1) {
    minBW <- 2.0 * find_max_diff_f(xout, degree + 1)
  } else if (sparsity == 2) {
    minBW <- 1.5 * find_max_diff_f(xout, degree + 2)
  }
  # Gaussian kernels tolerate smaller bandwidths, so shrink the minimum;
  # other kernels cannot recover from an undefined minimum bandwidth
  if (kernel == "gauss") {
    if (is.na(minBW) || minBW < 0){
      message("Data is too sparse, use the max(t) as minimum bandwidth.")
      minBW <- max(xout)
    }
    minBW <- 0.2 * minBW;
  } else if ((is.na(minBW) || minBW < 0) && kernel != "gauss") {
    stop("Data is too sparse, no suitable bandwidth can be found! Try Gaussian kernel instead!\n");
  }
  # build a geometric sequence of 5 bandwidths from minBW up to r / 4 and
  # cross it with itself so the two axes can take different candidates
  # NOTE(review): `expand.grid(.)` relies on pipeR piping the vector into the
  # first argument while also substituting the dot -- confirm this really
  # yields the intended 2-column grid of bandwidth pairs.
  minBW <- min(minBW, r / 4)
  q <- (r / minBW / 4)^(1/4)
  bwMat <- (q^(0:4) * minBW) %>>% expand.grid(.) %>>% as.matrix %>>% `colnames<-`(NULL)
  return(bwMat[order(bwMat[,1], bwMat[,2], decreasing = FALSE), ])
}
#' Adjustment of optimal bandwidth choosed by gcv
#'
#' The usage of this function can be found in the examples of \code{\link{gcvLocPoly1d}} and
#' \code{\link{gcvLocLinear2d}}.
#'
#' @param bwOpt A numeric. The optimal bandwidth choosed by gcv.
#' @param sparsity The sparsity of data which is tested by \code{\link{checkSparsity}}.
#' @param kernel A string. It could be 'gauss', 'gaussvar', 'epan' or 'quar'.
#' @param drv An integer, the order of derivative.
#' @return An adjusted bandwidth.
#' @export
adjGcvBw <- function(bwOpt, sparsity, kernel, drv = 0){
  # Pick the inflation factors: dense designs (sparsity == 2) use their own
  # set, all other sparsities share a common one; indexed by derivative order.
  if (kernel == "gauss") {
    bwAdjFac <- if (sparsity == 2) c(1.1, 0.8, 0.8) else c(1.1, 1.2, 2)
  } else if (kernel == "epan") {
    bwAdjFac <- if (sparsity == 2) c(1.1, 1.0, 1.1) else c(1.1, 1.2, 1.5)
  }
  # Map drv to the factor index: 0 -> 1, 1 -> 2, 2 -> 3, > 2 -> 2, < 0 -> 0.
  facTake <- if (drv > 2) 2L else if (drv >= 0) as.integer(drv) + 1L else 0L
  return(bwOpt * bwAdjFac[facTake])
}
#' Sparsify functional data by randomly removing observations
#'
#' Randomly drops a proportion of each subject's observations in order to turn
#' dense functional data into sparse data for simulation studies.
#' (The previous title was a copy-paste from the bandwidth chooser functions.)
#'
#' @param data A data.frame containing sample id, observed time points and corresponding observed values.
#' @param subid The column name of the id of subjects.
#' @param sparsityRate A numeric vector strictly between \code{0} and \code{1}. The proportion of
#'   data of each subject to remove; a proportion \code{1 - sparsityRate} is kept.
#'   The length of sparsityRate must be \code{1} or the number of curves (\code{n} in \code{\link{get_FPCA_opts}}).
#' @return A data.frame after sparsifying.
#' @examples
#' require(ggplot2)
#' tp <- seq(1, 10, len = 101)
#' DT <- funcDataGen(3, tp, function(x) sin(x), function(x) rep(1, length(x)), "BesselJ")
#' sparseDT <- sparsify(DT, "sampleID", 0.85)
#' ggplot(sparseDT, aes(x = t, y = y, color = factor(sampleID))) +
#'   geom_line() + geom_point() + labs(color = "Sample ID")
#'
#' message("The number of observation is ", no <- length(unique(DT$sampleID)))
#' sparseDT2 <- sparsify(DT, "sampleID", runif(no))
#' ggplot(sparseDT2, aes(x = t, y = y, color = factor(sampleID))) +
#'   geom_line() + geom_point() + labs(color = "Sample ID")
#' @export
sparsify <- function(data, subid, sparsityRate){
  # check inputs: the id column must exist and all rates must lie in (0, 1)
  assert_that(is.data.frame(data), subid %in% names(data), all(sparsityRate > 0 & sparsityRate < 1))
  # work on a data.table copy so the caller's data is untouched
  data <- data.table(data)
  # unique subject ids, in order of first appearance
  uniSubIds <- unique(data[[subid]])
  # a single rate is recycled to all subjects; otherwise one rate per subject
  if (length(sparsityRate) != length(uniSubIds) && length(sparsityRate) != 1)
    stop("The length of sparsityRate must be 1 or the number of subjects.")
  if (length(sparsityRate) == 1)
    sparsityRate <- rep(sparsityRate, length(uniSubIds))
  # sample a (1 - sparsityRate) fraction of each subject's rows to keep
  # NOTE(review): split() orders groups by the grouping values (typically
  # sorted), while sparsityRate is paired positionally -- confirm per-subject
  # rates align with the intended subjects when ids are not already sorted.
  sparseDT <- mapply(function(dt, p) dt[sample(nrow(dt), round(nrow(dt)*p))],
                     split(data, data[[subid]]), 1 - sparsityRate, SIMPLIFY = FALSE) %>>% rbindlist
  return(sparseDT)
}
#' Unnest list-columns of a data.table
#'
#' Flattens the list-columns of a data.table: every element of the nested
#' vectors becomes its own row and the plain columns are replicated.
#' (The previous title was a copy-paste from the bandwidth chooser functions.)
#'
#' @param DT A data.table containing list or vector in the cell.
#'   The cells in each row must have the same number of elements.
#' @param unnestCols The column names to unnest. If \code{NULL} (default), all
#'   list-columns are detected and unnested automatically.
#' @return A unnested data.table.
#' @examples
#' require(data.table)
#' # all numerics
#' DT <- unnest(data.table(V1 = list(c(1,3,5), c(1,7)), V2 = list(c(2,5,3), c(4,6)), V3 = 1:2))
#' # mixed numerics and characters
#' DT2 <- unnest(data.table(V1 = list(c(1,3,5), c(1,7)), V2 = list(c("a","b","c"), c("d","e")),
#'   V3 = 1:2, V4 = c("z","y")))
#' \dontrun{
#' require(jsonlite)
#' jsonDataFile <- system.file("extdata", "funcdata.json", package = "rfda")
#' # Following line may have a parse error with message "premature EOF has occured".
#' DT <- unnest(data.table(fromJSON(jsonDataFile)))
#' }
#' @importFrom data.table .SD
#' @export
unnest <- function(DT, unnestCols = NULL){
  # detect the list-columns automatically when none are given
  if (is.null(unnestCols)) {
    unnestCols <- names(DT)[sapply(DT, function(x) any(class(x) %in% "list"))]
    message("Automatically recognize the nested columns: ", paste0(unnestCols, collapse = ", "))
  }
  # every requested column must exist in DT
  if (any(!unnestCols %in% names(DT)))
    stop(sprintf("The columns, %s, do not exist in the DT.",
                 paste0(unnestCols[!unnestCols %in% names(DT)], collapse = ", ")))
  # the remaining columns identify each row and are replicated on unnesting
  groupbyVar <- setdiff(names(DT), unnestCols)
  # expression removing the group-by columns, e.g. `:=`(a=NULL,b=NULL)
  # (expressions are assembled as text because the column names are dynamic)
  chkExpr <- paste0(groupbyVar, "=NULL", collapse = ",") %>>% (paste0("`:=`(", ., ")"))
  # verify that, within each row, all nested cells hold the same number of elements
  chkLenAllEqual <- DT[ , lapply(.SD, function(x) sapply(x, length)), by = groupbyVar] %>>%
    `[`(j = eval(parse(text = chkExpr))) %>>% as.matrix %>>% apply(1, diff) %>>% `==`(0) %>>% all
  if (!chkLenAllEqual)
    stop("The length in each cell is not equal.")
  # build the unnest expression, e.g. .(V1=unlist(V1),V2=unlist(V2))
  expr <- unnestCols %>>% (paste0(., "=unlist(", ., ")")) %>>%
    paste0(collapse = ",") %>>% (paste0(".(", ., ")"))
  # return unnested data.table
  return(DT[ , eval(parse(text = expr)), by = groupbyVar])
}
|
HypsoIntCurve <- function(basins, dem, labelfield, nrow,
                          manexcl = NULL, labelsize = 5, resthreshold = 2){
  # Generate the Hypsometric Curve and Hypsometric Integral
  # of each basin of a network
  # Args:
  #   basins: One SpatialPolygons* object.
  #           If a string is provided, it will be used
  #           as the name for a vector map in the GRASS location
  #           containing the basins as areas.
  #   labelfield: One string with the column field to label the plots.
  #   dem: One Raster* object.
  #        If a string is provided, it will be used
  #        as the name for a raster map in the GRASS location
  #        containing the DEM.
  #   nrow: Number of rows of the facet_wrap of the hypsometric curves.
  #   manexcl: Vector of elements to manually exclude from the analysis.
  #   labelsize: Text size of the hypsometric-integral label in each facet.
  #   resthreshold: Basins with area lower than prod(res(dem) * resthreshold)
  #                 will be excluded.
  # Returns:
  #   A list with the dimensionless hypsometric data ($DataFrame), the
  #   hypsometric integral per basin ($HypsoInt) and the plot ($HypsoCurve).
  # Note:
  #   A GRASS session must be initiated using the rgrass7 package.
  # Error handling (message fixed: it referred to a non-existent arg "prefix")
  if (!is.character(labelfield)) {
    stop("Argument labelfield must be a character string.")
  }
  if (!length(nrow) == 1) {
    stop("Argument nrow must be a vector with one single positive number.")
  }
  # Packages
  # NOTE(review): require() only warns on a missing package; consider library()
  # so missing dependencies fail immediately with a clear error.
  require(rgrass7)
  require(sp)
  require(raster)
  require(ggplot2)
  require(DescTools)
  require(directlabels)
  require(scales)
  require(gtools)
  require(plyr)
  # Read the sources: accept in-memory objects or GRASS map names
  if (!inherits(basins, "SpatialPolygonsDataFrame") && is.character(basins)) {
    basins <- readVECT(basins)
  }
  if (!inherits(dem, "RasterLayer") && is.character(dem)) {
    dem <- raster(readRAST(dem))
  }
  # Exclude fake basins and artifacts smaller than the area threshold
  # NOTE(review): prod(res(dem) * resthreshold) scales the cell area by
  # resthreshold^2, not resthreshold -- confirm this is intended.
  excl <- which(area(basins) > prod(res(dem) * resthreshold))
  basins <- basins[excl, ]
  # Build data.frames of dimensionless A/Ao and H/Ho for every basin
  index <- gtools::mixedsort(as.character(basins@data[, labelfield]))
  if (!is.null(manexcl)) {
    index <- index[!index %in% manexcl]
  }
  hypsodfl <- sapply(
    index,
    function(x){
      # clip the DEM to the basin and sort its cells from top to bottom
      foobasin <- basins[basins@data[,labelfield] == x,]
      foodem <- mask(crop(dem, extent(foobasin)), foobasin)
      z <- sort(na.omit(foodem[]), decreasing = T)
      # NOTE(review): cumsum(1:n) grows quadratically; the cumulative area
      # of the top i cells is i * cellarea, i.e. rescale(1:length(z)) --
      # verify whether cumsum() here is intentional.
      df <- data.frame(
        cumarea = rescale(
          cumsum(as.numeric(1:length(z)))*prod(res(dem))
        ),
        height = rescale(z)
      )
      return(df = df)
    },
    simplify = F
  )
  hypsodf <- ldply(hypsodfl, data.frame, .id = labelfield)
  # Hypsometric Integral: area under each dimensionless curve
  HypsoInt <- ldply(
    sapply(
      index,
      function(x){
        data.frame(hypsoint = AUC(
          hypsodf[hypsodf[,labelfield] == x, 'cumarea'],
          hypsodf[hypsodf[,labelfield] == x, 'height'])
        )
      },
      simplify = F
    ),
    data.frame,
    .id = labelfield
  )
  # Generate the Hypsometric Curve, one facet per basin
  p <- ggplot(hypsodf, aes(x = cumarea, y = height)) +
    geom_line(col = 'red', lwd = 1) +
    coord_equal() +
    theme(
      legend.position = "none",
      text = element_text(size = 18),
      panel.background = element_rect(fill = 'white', colour = 'black'),
      panel.grid.major.y = element_line(colour = "grey", linetype = "dashed", size = 0.25),
      strip.background = element_rect(colour = "black", fill = "black"),
      panel.border = element_rect(color = "black", fill = NA, size = 1),
      strip.text.x = element_text(colour = "white", face = "bold")
    ) +
    scale_x_continuous(breaks = c(0,0.5,1), labels = c(0,0.5,1)) +
    scale_y_continuous(breaks = c(0,0.5,1), labels = c(0,0.5,1)) +
    geom_text(
      data = HypsoInt,
      mapping = aes(x = 0.1, y = 0.9, label=paste0('HI==', round(hypsoint,2))),
      # bug fix: honour the labelsize argument (was hard-coded to 5)
      size = labelsize,
      hjust = 0,
      parse = T
    ) +
    facet_wrap(paste0('~', labelfield), nrow = nrow) +
    labs(x = 'A/Ao', y = 'H/Ho')
  # Returns
  return(
    list(
      DataFrame = hypsodf,
      HypsoInt = HypsoInt,
      HypsoCurve = p
    )
  )
}
| /integral_hypsometric_curve.R | no_license | geomorfologia-master/unidad-4-asignacion-1-procesos-fluviales | R | false | false | 4,344 | r | HypsoIntCurve <- function(basins, dem, labelfield, nrow,
manexcl = NULL, labelsize = 5, resthreshold = 2){
# Generate Hypsometric Curve and Hypsometric Integral
# of each stream of a network
# Args:
# basins: One SpatialPolygons* object
# If a string is provided, it will be used
# as the name for a vector map in the GRASS location
# containing the basins as areas.
# labelfield: One string with the column field to label the plots.
# dem: One Raster* object.
# If a string is provided, it will be used
# as the name for a raster map in the GRASS location
# containing the DEM.
# nrow: Number of rows of the facet_wrap of stream profiles.
# manexcl: Vector of elements to manually exclude from the analysis.
# resthreshold: Basins with area lower than resolution*resthreshold will be excluded
# Returns:
# GRASS GIS maps of the longest flow path and its tributaries
# Note:
# A GRASS session must be initiated using rgrass7 package
# Error handling
if (!is.character(labelfield)) {
stop("Argument prefix must be a character string.")
}
if (!length(nrow) == 1) {
stop("Argument nrow must be a vector with one single positive.")
}
# Packages
require(rgrass7)
require(sp)
require(raster)
require(ggplot2)
require(DescTools)
require(directlabels)
require(scales)
require(gtools)
require(plyr)
# Read the sources
if(class(basins)=="SpatialPolygonsDataFrame") {
basins <- basins
} else {
if(is.character(basins)) {
basins <- readVECT(basins)
}
}
if(class(dem)=='RasterLayer') {
dem <- dem
} else {
if(is.character(dem)) {
dem <- raster(readRAST(dem))
}
}
# Exclude fake basins and artifacts
excl <- which(sapply(area(basins), function(x) x > prod(res(dem)*resthreshold)))
basins <- basins[excl, ]
# Generate DEMs and data.frames of dimensionless A/Ao and H/Ho, and Hypsometric Integral (AUC)
index <- gtools::mixedsort(as.character(basins@data[, labelfield]))
if (!is.null(manexcl)) {
index <- index[!index %in% manexcl]
}
hypsodfl <- sapply(
index,
function(x){
foobasin <- basins[basins@data[,labelfield] == x,]
foodem <- mask(crop(dem, extent(foobasin)), foobasin)
z <- sort(na.omit(foodem[]), decreasing = T)
df <- data.frame(
cumarea = rescale(
cumsum(as.numeric(1:length(z)))*prod(res(dem))
),
height = rescale(z)
)
return(df = df)
},
simplify = F
)
hypsodf <- ldply(hypsodfl, data.frame, .id = labelfield)
HypsoInt <- ldply(
sapply(
index,
function(x){
data.frame(hypsoint = AUC(
hypsodf[hypsodf[,labelfield] == x, 'cumarea'],
hypsodf[hypsodf[,labelfield] == x, 'height'])
)
},
simplify = F
),
data.frame,
.id = labelfield
)
# Generate the Hypsometric Curve
p <- ggplot(hypsodf, aes(x = cumarea, y = height)) +
geom_line(col = 'red', lwd = 1) +
coord_equal() +
theme(
legend.position = "none",
text = element_text(size = 18),
panel.background = element_rect(fill = 'white', colour = 'black'),
panel.grid.major.y = element_line(colour = "grey", linetype = "dashed", size = 0.25),
strip.background = element_rect(colour = "black", fill = "black"),
panel.border = element_rect(color = "black", fill = NA, size = 1),
strip.text.x = element_text(colour = "white", face = "bold")
) +
scale_x_continuous(breaks = c(0,0.5,1), labels = c(0,0.5,1)) +
scale_y_continuous(breaks = c(0,0.5,1), labels = c(0,0.5,1)) +
# annotate(
# "text",
# x = 0.9, y = 0.9,
# label = paste0('HI==', round(HypsoInt[,'hypsoint'],2)),
# size = labelsize,
# hjust = 1,
# parse = T
# ) +
geom_text(
data = HypsoInt,
mapping = aes(x = 0.1, y = 0.9, label=paste0('HI==', round(hypsoint,2))),
size = 5,
hjust = 0,
parse = T
) +
facet_wrap(paste0('~', labelfield), nrow = nrow) +
labs(x = 'A/Ao', y = 'H/Ho')
# Returns
return(
list(
DataFrame = hypsodf,
HypsoInt = HypsoInt,
HypsoCurve = p
)
)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/streamSetApi.r
\name{streamSet$getEnd}
\alias{streamSet$getEnd}
\title{Returns End of stream values of the attributes for an Element, Event Frame or Attribute}
\arguments{
\item{webId}{The ID of an Element, Event Frame or Attribute, which is the base element or parent of all the stream attributes.}
\item{categoryName}{Specify that included attributes must have this category. The default is no category filter.}
\item{nameFilter}{The name query string used for filtering attributes. The default is no filter.}
\item{searchFullHierarchy}{Specifies if the search should include attributes nested further than the immediate attributes of the searchRoot. The default is 'false'.}
\item{selectedFields}{List of fields to be returned in the response, separated by semicolons (;). If this parameter is not specified, all available fields will be returned.}
\item{showExcluded}{Specified if the search should include attributes with the Excluded property set. The default is 'false'.}
\item{showHidden}{Specified if the search should include attributes with the Hidden property set. The default is 'false'.}
\item{templateName}{Specify that included attributes must be members of this template. The default is no template filter.}
}
\value{
The End-of-stream values of the attributes for the specified Element, Event Frame or Attribute.
}
\description{
Returns End of stream values of the attributes for an Element, Event Frame or Attribute
}
| /man/streamSet-cash-getEnd.Rd | permissive | aj9253/PI-Web-API-Client-R | R | false | true | 1,492 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/streamSetApi.r
\name{streamSet$getEnd}
\alias{streamSet$getEnd}
\title{Returns End of stream values of the attributes for an Element, Event Frame or Attribute}
\arguments{
\item{webId}{The ID of an Element, Event Frame or Attribute, which is the base element or parent of all the stream attributes.}
\item{categoryName}{Specify that included attributes must have this category. The default is no category filter.}
\item{nameFilter}{The name query string used for filtering attributes. The default is no filter.}
\item{searchFullHierarchy}{Specifies if the search should include attributes nested further than the immediate attributes of the searchRoot. The default is 'false'.}
\item{selectedFields}{List of fields to be returned in the response, separated by semicolons (;). If this parameter is not specified, all available fields will be returned.}
\item{showExcluded}{Specified if the search should include attributes with the Excluded property set. The default is 'false'.}
\item{showHidden}{Specified if the search should include attributes with the Hidden property set. The default is 'false'.}
\item{templateName}{Specify that included attributes must be members of this template. The default is no template filter.}
}
\value{
The End-of-stream values of the attributes for the specified Element, Event Frame or Attribute.
}
\description{
Returns End of stream values of the attributes for an Element, Event Frame or Attribute
}
|
################################################################################
# This file is released under the GNU General Public License, Version 3, GPL-3 #
# Copyright (C) 2020 Yohann Demont #
# #
# It is part of IFC package, please cite: #
# -IFC: An R Package for Imaging Flow Cytometry #
# -YEAR: 2020 #
# -COPYRIGHT HOLDERS: Yohann Demont, Gautier Stoll, Guido Kroemer, #
# Jean-Pierre Marolleau, Loïc Garçon, #
# INSERM, UPD, CHU Amiens #
# #
# DISCLAIMER: #
# -You are using this package on your own risk! #
# -We do not guarantee privacy nor confidentiality. #
# -This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. In no event shall the copyright holders or #
# contributors be liable for any direct, indirect, incidental, special, #
# exemplary, or consequential damages (including, but not limited to, #
# procurement of substitute goods or services; loss of use, data, or profits; #
# or business interruption) however caused and on any theory of liability, #
# whether in contract, strict liability, or tort (including negligence or #
# otherwise) arising in any way out of the use of this software, even if #
# advised of the possibility of such damage. #
# #
# You should have received a copy of the GNU General Public License #
# along with IFC. If not, see <http://www.gnu.org/licenses/>. #
################################################################################
#' @title IFC_pops Object Numbers
#' @description
#' Retrieves objects ids belonging to a population.
#' @param obj an `IFC_data` object extracted with features extracted.
#' @param pop a population name from 'obj'. Default is "".
#' If left as is or not found an error is thrown displaying all available population in 'obj'.
#' @examples
#' if(requireNamespace("IFCdata", quietly = TRUE)) {
#' ## use a daf file
#' file_daf <- system.file("extdata", "example.daf", package = "IFCdata")
#' daf <- ExtractFromDAF(fileName = file_daf)
#' obj <- popsGetObjectsIds(obj = daf, pop = names(daf$pops)[length(daf$pops)])
#' } else {
#' message(sprintf('Please run `install.packages("IFCdata", repos = "%s", type = "source")` %s',
#' 'https://gitdemont.github.io/IFCdata/',
#' 'to install extra files required to run this example.'))
#' }
#' @return An integer vector is returned
#' @export
popsGetObjectsIds <- function(obj, pop = "") {
  # Return the integer ids of the objects belonging to population 'pop'
  # of an `IFC_data` object. Validate the inputs before any extraction.
  if (missing(obj)) stop("'obj' can't be missing")
  if (!inherits(obj, "IFC_data")) stop("'obj' is not of class `IFC_data`")
  if (length(obj$pops) == 0) stop("please use argument 'extract_features' = TRUE with ExtractFromDAF() or ExtractFromXIF() and ensure that features were correctly extracted")
  if (length(pop) != 1) stop("'pop' should be of length 1")
  pop_names <- names(obj$pops)
  if (!(pop %in% pop_names)) {
    stop(paste0("pop:[", pop, "] was not found in 'obj', valid names are:\n",
                paste0(paste("-", pop_names), collapse = "\n")))
  }
  # Logical membership vector of the requested population.
  membership <- obj$pops[[pop]][["obj"]]
  if ("Object Number" %in% names(obj$features)) {
    # Preferred: use the recorded "Object Number" feature values.
    as.integer(obj$features[membership, "Object Number"])
  } else {
    # Fallback: derive 0-based ids from positions in the membership vector.
    as.integer(which(membership) - 1)
  }
}
| /IFC/R/popsGetObjecstIds.R | no_license | akhikolla/InformationHouse | R | false | false | 3,962 | r | ################################################################################
# This file is released under the GNU General Public License, Version 3, GPL-3 #
# Copyright (C) 2020 Yohann Demont #
# #
# It is part of IFC package, please cite: #
# -IFC: An R Package for Imaging Flow Cytometry #
# -YEAR: 2020 #
# -COPYRIGHT HOLDERS: Yohann Demont, Gautier Stoll, Guido Kroemer, #
# Jean-Pierre Marolleau, Loïc Garçon, #
# INSERM, UPD, CHU Amiens #
# #
# DISCLAIMER: #
# -You are using this package on your own risk! #
# -We do not guarantee privacy nor confidentiality. #
# -This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. In no event shall the copyright holders or #
# contributors be liable for any direct, indirect, incidental, special, #
# exemplary, or consequential damages (including, but not limited to, #
# procurement of substitute goods or services; loss of use, data, or profits; #
# or business interruption) however caused and on any theory of liability, #
# whether in contract, strict liability, or tort (including negligence or #
# otherwise) arising in any way out of the use of this software, even if #
# advised of the possibility of such damage. #
# #
# You should have received a copy of the GNU General Public License #
# along with IFC. If not, see <http://www.gnu.org/licenses/>. #
################################################################################
#' @title IFC_pops Object Numbers
#' @description
#' Retrieves objects ids belonging to a population.
#' @param obj an `IFC_data` object extracted with features extracted.
#' @param pop a population name from 'obj'. Default is "".
#' If left as is or not found an error is thrown displaying all available population in 'obj'.
#' @examples
#' if(requireNamespace("IFCdata", quietly = TRUE)) {
#' ## use a daf file
#' file_daf <- system.file("extdata", "example.daf", package = "IFCdata")
#' daf <- ExtractFromDAF(fileName = file_daf)
#' obj <- popsGetObjectsIds(obj = daf, pop = names(daf$pops)[length(daf$pops)])
#' } else {
#' message(sprintf('Please run `install.packages("IFCdata", repos = "%s", type = "source")` %s',
#' 'https://gitdemont.github.io/IFCdata/',
#' 'to install extra files required to run this example.'))
#' }
#' @return An integer vector is returned
#' @export
# Return the integer ids of the objects belonging to population 'pop'
# of an `IFC_data` object (duplicate copy of the definition above).
popsGetObjectsIds <- function(obj, pop = "") {
  # Input validation: obj must be a fully extracted `IFC_data` object.
  if(missing(obj)) stop("'obj' can't be missing")
  if(!("IFC_data"%in%class(obj))) stop("'obj' is not of class `IFC_data`")
  if(length(obj$pops)==0) stop("please use argument 'extract_features' = TRUE with ExtractFromDAF() or ExtractFromXIF() and ensure that features were correctly extracted")
  if(length(pop) != 1) stop("'pop' should be of length 1")
  N = names(obj$pops)
  # Unknown population: report every valid name in the error message.
  if(!all(pop%in%N)) stop(paste0("pop:[",pop,"] was not found in 'obj', valid names are:\n", paste0(paste("-", N), collapse = "\n")))
  if("Object Number" %in% names(obj$features)) {
    # Preferred: use the recorded "Object Number" feature values.
    return(as.integer(obj$features[obj$pops[[pop]][["obj"]] ,"Object Number"]))
  } else {
    # Fallback: derive 0-based ids from positions in the membership vector.
    return(as.integer(which(obj$pops[[pop]][["obj"]])-1))
  }
}
|
#' Standardize.
#'
#' Standardize objects. See the documentation for your object's class:
#' \itemize{
#' \item{\link[=standardize.numeric]{standardize.numeric}}
#' \item{\link[=standardize.data.frame]{standardize.data.frame}}
#' \item{\link[=standardize.stanreg]{standardize.stanreg}}
#' \item{\link[=standardize.lm]{standardize.lm}}
#' \item{\link[=standardize.glm]{standardize.glm}}
#' }
#'
#' @param x Object.
#' @param ... Arguments passed to or from other methods.
#'
#' @author \href{https://dominiquemakowski.github.io/}{Dominique Makowski}
#'
#' @export
# S3 generic: dispatches on class(x) to one of the standardize.* methods.
standardize <- function(x, ...) {
  UseMethod("standardize")
}
#' Standardize (scale and reduce) numeric variables.
#'
#' Standardize (Z-score, "normalize") a vector.
#'
#' @param x Numeric vector.
#' @param normalize Will perform a normalization instead of a standardization. This scales all numeric variables in the range 0 - 1.
#' @param ... Arguments passed to or from other methods.
#'
#' @examples
#' standardize(x = c(1, 4, 6, 2))
#' standardize(x = c(1, 4, 6, 2), normalize = TRUE)
#' @author \href{https://dominiquemakowski.github.io/}{Dominique Makowski}
#'
#'
#' @export
standardize.numeric <- function(x, normalize = FALSE, ...) {
  # Leave the vector untouched when it is entirely NA or has exactly two
  # distinct values (treated as a binary/dummy variable).
  if (all(is.na(x)) | length(unique(x)) == 2) {
    return(x)
  }
  if (normalize) {
    # Min-max normalization into the [0, 1] range.
    rng <- range(x, na.rm = TRUE)
    as.vector((x - rng[1]) / diff(rng))
  } else {
    # Z-score via base::scale(); extra arguments are forwarded to scale().
    as.vector(scale(x, ...))
  }
}
#' Standardize (scale and reduce) Dataframe.
#'
#' Selects numeric variables and standardize (Z-score, "normalize") them.
#'
#' @param x Dataframe.
#' @param subset Character or list of characters of column names to be
#' standardized.
#' @param except Character or list of characters of column names to be excluded
#' from standardization.
#' @param normalize Will perform a normalization instead of a standardization. This scales all numeric variables in the range 0 - 1.
#' @param ... Arguments passed to or from other methods.
#'
#' @return Dataframe.
#'
#' @examples
#' \dontrun{
#' df <- data.frame(
#' Participant = as.factor(rep(1:25, each = 4)),
#' Condition = base::rep_len(c("A", "B", "C", "D"), 100),
#' V1 = rnorm(100, 30, .2),
#' V2 = runif(100, 3, 5),
#' V3 = rnorm(100, 100, 10)
#' )
#'
#' dfZ <- standardize(df)
#' dfZ <- standardize(df, except = "V3")
#' dfZ <- standardize(df, except = c("V1", "V2"))
#' dfZ <- standardize(df, subset = "V3")
#' dfZ <- standardize(df, subset = c("V1", "V2"))
#' dfZ <- standardize(df, normalize = TRUE)
#'
#' # Respects grouping
#' dfZ <- df %>%
#' dplyr::group_by(Participant) %>%
#' standardize(df)
#' }
#'
#' @author \href{https://dominiquemakowski.github.io/}{Dominique Makowski}
#'
#'
#' @importFrom purrr keep discard
#' @import dplyr
#' @export
standardize.data.frame <- function(x, subset = NULL, except = NULL, normalize = FALSE, ...) {
  # Grouped tibbles are standardized within each group; plain data frames
  # are handed directly to the internal helper.
  if (inherits(x, "grouped_df")) {
    # NOTE(review): dplyr::do_() is deprecated; consider group_modify() —
    # confirm against the dplyr version this package targets.
    dfZ <- x %>% dplyr::do_(".standardize_df(., subset=subset, except=except, normalize=normalize, ...)")
  } else {
    dfZ <- .standardize_df(x, subset = subset, except = except, normalize = normalize, ...)
  }
  return(dfZ)
}
#' @keywords internal
.standardize_df <- function(x, subset = NULL, except = NULL, normalize = FALSE, ...) {
  # Standardize the numeric columns of a data frame.
  #
  # x         data frame to transform.
  # subset    optional character vector: only these columns are standardized.
  # except    optional character vector: these columns are left untouched.
  # normalize passed on to standardize(): TRUE rescales to [0, 1]
  #           instead of z-scoring.
  #
  # Returns a data frame with the same columns, in the original order.
  df <- x
  # Remember the original column order so it can be restored at the end.
  var_order <- names(df)
  # Keep subset: set aside every column NOT listed in 'subset'.
  # any() guards against length > 1 'subset' vectors, which would make
  # `&&` error on R >= 4.3 (or silently use only the first element before).
  if (!is.null(subset) && any(subset %in% names(df))) {
    to_keep <- as.data.frame(df[!names(df) %in% c(subset)])
    df <- df[names(df) %in% c(subset)]
  } else {
    to_keep <- NULL
  }
  # Remove exceptions: set aside the 'except' columns as well. Only the
  # names actually present are indexed, so a partially-matching vector
  # cannot trigger an "undefined columns" error.
  if (!is.null(except) && any(except %in% names(df))) {
    except_found <- except[except %in% names(df)]
    if (is.null(to_keep)) {
      to_keep <- as.data.frame(df[except_found])
    } else {
      to_keep <- cbind(to_keep, as.data.frame(df[except_found]))
    }
    df <- df[!names(df) %in% c(except)]
  }
  # Split numeric from non-numeric columns; only numerics are standardized.
  dfother <- purrr::discard(df, is.numeric)
  dfnum <- purrr::keep(df, is.numeric)
  # Scale column by column. lapply() (rather than sapply()) keeps the
  # result a list of columns even for single-row input, where sapply()
  # would collapse it to a plain vector and corrupt the frame's shape.
  dfnum <- as.data.frame(lapply(dfnum, standardize, normalize = normalize))
  # Reassemble: non-numeric columns first, then the standardized numerics.
  if (is.null(ncol(dfother))) {
    df <- dfnum
  } else {
    df <- dplyr::bind_cols(dfother, dfnum)
  }
  # Re-attach the columns that were set aside. Testing to_keep directly
  # replaces the original `a | b && c` condition, which parsed as
  # `a | (b && c)` due to operator precedence.
  if (!is.null(to_keep)) {
    df <- dplyr::bind_cols(df, to_keep)
  }
  # Restore the original column order.
  df <- df[var_order]
  return(df)
}
#' Standardize Posteriors.
#'
#' Compute standardized posteriors from which to get standardized coefficients.
#'
#' @param x A stanreg model.
#' @param method "refit" (default) will entirely refit the model based on standardized data. Can take a long time. Other post-hoc methods are "posterior" (based on estimated SD) or "sample" (based on the sample SD).
#' @param ... Arguments passed to or from other methods.
#'
#' @examples
#' \dontrun{
#' library(psycho)
#' library(rstanarm)
#'
#' fit <- rstanarm::stan_glm(Sepal.Length ~ Sepal.Width * Species, data = iris)
#' fit <- rstanarm::stan_glm(Sepal.Length ~ Sepal.Width * Species, data = standardize(iris))
#' posteriors <- standardize(fit)
#' posteriors <- standardize(fit, method = "posterior")
#' }
#'
#' @author \href{https://github.com/jgabry}{Jonah Gabry}, \href{https://github.com/bgoodri}{bgoodri}
#'
#' @seealso https://github.com/stan-dev/rstanarm/issues/298
#'
#' @importFrom utils capture.output
#' @export
standardize.stanreg <- function(x, method = "refit", ...) {
  # Standardized posterior draws for a stanreg model.
  # method = "sample":    scale coefficients by sample SDs (sd(x)/sd(y));
  # method = "posterior": scale by posterior-predictive SDs;
  # anything else:        refit the model on standardized data (default).
  fit <- x
  predictors <- get_info(fit)$predictors
  predictors <- c("(Intercept)", predictors)
  if (method == "sample") {
    # By jgabry
    predictors <- all.vars(as.formula(fit$formula))
    outcome <- predictors[[1]]
    X <- as.matrix(model.matrix(fit)[, -1]) # -1 to drop column of 1s for intercept
    sd_X_over_sd_y <- apply(X, 2, sd) / sd(fit$data[[outcome]])
    beta <- as.matrix(fit, pars = colnames(X)) # posterior distribution of regression coefficients
    posteriors_std <- sweep(beta, 2, sd_X_over_sd_y, "*") # multiply each row of b by sd_X_over_sd_y
  } else if (method == "posterior") {
    # By bgoordi
    X <- model.matrix(fit)
    # if(preserve_factors == TRUE){
    #   X <- as.data.frame(X)
    #   X[!names(as.data.frame(X)) %in% predictors] <- scale(X[!names(as.data.frame(X)) %in% predictors])
    #   X <- as.matrix(X)
    # }
    # Per-predictor SDs ([-1] drops the intercept column) and per-draw
    # SDs of the posterior predictive distribution.
    sd_X <- apply(X, MARGIN = 2, FUN = sd)[-1]
    sd_Y <- apply(rstanarm::posterior_predict(fit), MARGIN = 1, FUN = sd)
    beta <- as.matrix(fit)[, 2:ncol(X), drop = FALSE]
    posteriors_std <- sweep(
      sweep(beta, MARGIN = 2, STATS = sd_X, FUN = `*`),
      MARGIN = 1, STATS = sd_Y, FUN = `/`
    )
  } else {
    # Default "refit": re-estimate on standardized data; capture.output()
    # silences the sampler's console chatter.
    useless_output <- capture.output(fit_std <- update(fit, data = standardize(fit$data)))
    posteriors_std <- as.data.frame(fit_std)
  }
  return(posteriors_std)
}
#' Standardize Coefficients.
#'
#' Compute standardized coefficients.
#'
#' @param x A linear model.
#' @param method The standardization method. Can be "refit" (will entirely refit the model based on standardized data. Can take some time) or "agresti".
#' @param ... Arguments passed to or from other methods.
#'
#' @examples
#' \dontrun{
#' library(psycho)
#' fit <- glm(Sex ~ Adjusting, data = psycho::affective, family = "binomial")
#' fit <- lme4::glmer(Sex ~ Adjusting + (1 | Sex), data = psycho::affective, family = "binomial")
#'
#' standardize(fit)
#' }
#'
#' @author Kamil Barton
#' @importFrom stats model.frame model.response model.matrix
#'
#' @seealso https://think-lab.github.io/d/205/
#'
#' @export
standardize.glm <- function(x, method = "refit", ...) {
  # Standardized coefficients (and SEs) for a GLM.
  # method = "agresti": rescale the fitted coefficients by predictor SD;
  # anything else:      refit the model on standardized data (default).
  fit <- x
  if (method == "agresti") {
    coefs <- MuMIn::coefTable(fit)[, 1:2]
    X <- as.matrix(model.matrix(fit)[, -1]) # -1 to drop column of 1s for intercept
    # NOTE(review): sd() on a matrix pools ALL entries into one scalar, so
    # every coefficient is scaled by the same value; Agresti's method is
    # usually per-predictor — confirm whether this is intended.
    sd_X <- sd(X, na.rm = TRUE)
    coefs <- coefs * sd_X
  } else {
    # refit method
    data <- get_data(fit)
    fit_std <- update(fit, data = standardize(data))
    coefs <- MuMIn::coefTable(fit_std)[, 1:2]
  }
  coefs <- as.data.frame(coefs)
  names(coefs) <- c("Coef_std", "SE_std")
  return(coefs)
}
#' @export
standardize.glmerMod <- standardize.glm
#' Standardize Coefficients.
#'
#' Compute standardized coefficients.
#'
#' @param x A linear model.
#' @param method The standardization method. Can be "refit" (will entirely refit the model based on standardized data. Can take some time) or "posthoc".
#' @param partial_sd Logical, if set to TRUE, model coefficients are multiplied by partial SD, otherwise they are multiplied by the ratio of the standard deviations of the independent variable and dependent variable.
#' @param preserve_factors Standardize factors-related coefs only by the dependent variable (i.e., do not standardize the dummies generated by factors).
#' @param ... Arguments passed to or from other methods.
#'
#' @examples
#' \dontrun{
#' library(psycho)
#'
#' df <- mtcars %>%
#' mutate(cyl = as.factor(cyl))
#'
#' fit <- lm(wt ~ mpg * cyl, data = df)
#' fit <- lmerTest::lmer(wt ~ mpg * cyl + (1 | gear), data = df)
#'
#' summary(fit)
#' standardize(fit)
#' }
#'
#' @author Kamil Barton
#' @importFrom stats model.frame model.response model.matrix
#'
#' @export
standardize.lm <- function(x, method = "refit", partial_sd = FALSE, preserve_factors = TRUE, ...) {
  # Standardized coefficients (and SEs) for a linear model.
  # method = "posthoc": rescale the fitted coefficients in place;
  # anything else:      refit the model on standardized data (default).
  fit <- x
  if (method == "posthoc") {
    coefs <- .standardize_coefs(fit, partial_sd = partial_sd, preserve_factors = preserve_factors)
  } else {
    # Refit on a standardized copy of the model's data.
    data <- get_data(fit)
    fit_std <- update(fit, data = standardize(data))
    coefs <- MuMIn::coefTable(fit_std)[, 1:2]
  }
  coefs <- as.data.frame(coefs)
  names(coefs) <- c("Coef_std", "SE_std")
  return(coefs)
}
#' @export
standardize.lmerMod <- standardize.lm
#' @keywords internal
# Partial standard deviation of each predictor: the plain SD shrunk by its
# variance inflation factor and a degrees-of-freedom correction.
#   x   coefficient vector (only its length is used for the default p)
#   sd  predictor standard deviations
#   vif variance inflation factors
#   n   number of observations
#   p   number of predictors (defaults to length(x) - 1)
.partialsd <- function(x, sd, vif, n, p = length(x) - 1) {
  adjustment <- sqrt(1 / vif) * sqrt((n - 1) / (n - p))
  sd * adjustment
}
#' @importFrom stats vcov
#' @keywords internal
# Variance inflation factors derived from the coefficient covariance
# matrix: convert it to a correlation matrix, invert, and read the
# diagonal. Intercept terms are assigned VIF = 1.
.vif <-
  function(x) {
    v <- vcov(x)
    nam <- dimnames(v)[[1L]]
    # Degenerate case: fewer than two coefficients, VIF is 1 by definition.
    if (dim(v)[1L] < 2L) {
      return(structure(rep_len(1, dim(v)[1L]),
        names = dimnames(v)[[1L]]
      ))
    }
    # Aliased (NA) coefficients would make the covariance matrix singular.
    if ((ndef <- sum(is.na(MuMIn::coeffs(x)))) > 0L) {
      stop(sprintf(ngettext(
        ndef, "one coefficient is not defined",
        "%d coefficients are not defined"
      ), ndef))
    }
    # Drop the intercept column(s) (assign attribute == 0) before inverting.
    o <- attr(model.matrix(x), "assign")
    if (any(int <- (o == 0))) {
      v <- v[!int, !int, drop = FALSE]
    } else {
      warning("no intercept: VIFs may not be sensible")
    }
    # v / (d %o% d) rescales covariance to correlation; the diagonal of
    # its inverse is the VIF of each non-intercept coefficient.
    d <- sqrt(diag(v))
    rval <- numeric(length(nam))
    names(rval) <- nam
    rval[!int] <- diag(solve(v / (d %o% d)))
    rval[int] <- 1
    rval
  }
#' @importFrom stats nobs vcov
#' @keywords internal
.standardize_coefs <- function(fit, partial_sd = FALSE, preserve_factors = TRUE, ...) {
  # Post-hoc standardized coefficients: multiply each coefficient (and SE)
  # by sd(predictor) / sd(response), or by the partial SD when
  # partial_sd = TRUE. With preserve_factors = TRUE, factor dummy columns
  # are rescaled by 1 / sd(response) only.
  # coefs <- MuMIn::coefTable(fit, ...)
  coefs <- as.data.frame(MuMIn::coefTable(fit))
  model_matrix <- model.matrix(fit)
  predictors <- get_info(fit)$predictors
  predictors <- c("(Intercept)", predictors)
  if (preserve_factors == TRUE) {
    # Split factor-generated dummy columns from the "true" predictors;
    # dummies get the multiplier 1 / sd(response).
    response_sd <- sd(model.response(model.frame(fit)))
    factors <- as.data.frame(model_matrix)[!names(as.data.frame(model_matrix)) %in% predictors]
    bx_factors <- rep(1 / response_sd, length(names(factors)))
    bx_factors <- data.frame(t(bx_factors))
    names(bx_factors) <- names(factors)
    coefs_factors <- coefs[names(factors), ]
    model_matrix_factors <- as.matrix(factors)
    # Restrict the remaining computation to the non-factor columns.
    coefs <- coefs[!rownames(coefs) %in% names(factors), ]
    model_matrix <- as.matrix(as.data.frame(model_matrix)[names(as.data.frame(model_matrix)) %in% predictors])
  }
  if (partial_sd == TRUE) {
    # Partial-SD multipliers (VIF- and df-adjusted).
    bx <- .partialsd(
      coefs[, 1L],
      apply(model_matrix, 2L, sd),
      .vif(fit),
      nobs(fit),
      sum(attr(model_matrix, "assign") != 0)
    )
  } else {
    # Classic multipliers: sd(predictor) / sd(response).
    response_sd <- sd(model.response(model.frame(fit)))
    bx <- apply(model_matrix, 2L, sd) / response_sd
  }
  bx <- as.data.frame(t(bx))
  names(bx) <- row.names(coefs)
  if (preserve_factors == TRUE) {
    bx <- cbind(bx, bx_factors)
  }
  # Re-read the full coefficient table and apply the matching multipliers.
  # coefs <- MuMIn::coefTable(fit, ...)
  coefs <- as.data.frame(MuMIn::coefTable(fit))
  multiplier <- as.numeric(bx[row.names(coefs)])
  coefs[, 1L:2L] <- coefs[, 1L:2L] * multiplier
  colnames(coefs)[1L:2L] <- c("Coef.std", "SE.std")
  return(coefs)
}
| /R/standardize.R | permissive | anhnguyendepocen/psycho.R | R | false | false | 12,200 | r | #' Standardize.
#'
#' Standardize objects. See the documentation for your object's class:
#' \itemize{
#' \item{\link[=standardize.numeric]{standardize.numeric}}
#' \item{\link[=standardize.data.frame]{standardize.data.frame}}
#' \item{\link[=standardize.stanreg]{standardize.stanreg}}
#' \item{\link[=standardize.lm]{standardize.lm}}
#' \item{\link[=standardize.glm]{standardize.glm}}
#' }
#'
#' @param x Object.
#' @param ... Arguments passed to or from other methods.
#'
#' @author \href{https://dominiquemakowski.github.io/}{Dominique Makowski}
#'
#' @export
# S3 generic: dispatches on class(x) to one of the standardize.* methods.
standardize <- function(x, ...) {
  UseMethod("standardize")
}
#' Standardize (scale and reduce) numeric variables.
#'
#' Standardize (Z-score, "normalize") a vector.
#'
#' @param x Numeric vector.
#' @param normalize Will perform a normalization instead of a standardization. This scales all numeric variables in the range 0 - 1.
#' @param ... Arguments passed to or from other methods.
#'
#' @examples
#' standardize(x = c(1, 4, 6, 2))
#' standardize(x = c(1, 4, 6, 2), normalize = TRUE)
#' @author \href{https://dominiquemakowski.github.io/}{Dominique Makowski}
#'
#'
#' @export
standardize.numeric <- function(x, normalize = FALSE, ...) {
  # Return the input unchanged when it is entirely NA or has exactly two
  # distinct values (treated as a binary/dummy variable).
  if (all(is.na(x)) | length(unique(x)) == 2) {
    return(x)
  }
  if (normalize == FALSE) {
    # Z-score via base::scale(); extra arguments are forwarded.
    return(as.vector(scale(x, ...)))
  } else {
    # Min-max normalization into the [0, 1] range.
    return(as.vector((x - min(x, na.rm = TRUE)) / diff(range(x, na.rm = TRUE), na.rm = TRUE)))
  }
}
#' Standardize (scale and reduce) Dataframe.
#'
#' Selects numeric variables and standardize (Z-score, "normalize") them.
#'
#' @param x Dataframe.
#' @param subset Character or list of characters of column names to be
#' standardized.
#' @param except Character or list of characters of column names to be excluded
#' from standardization.
#' @param normalize Will perform a normalization instead of a standardization. This scales all numeric variables in the range 0 - 1.
#' @param ... Arguments passed to or from other methods.
#'
#' @return Dataframe.
#'
#' @examples
#' \dontrun{
#' df <- data.frame(
#' Participant = as.factor(rep(1:25, each = 4)),
#' Condition = base::rep_len(c("A", "B", "C", "D"), 100),
#' V1 = rnorm(100, 30, .2),
#' V2 = runif(100, 3, 5),
#' V3 = rnorm(100, 100, 10)
#' )
#'
#' dfZ <- standardize(df)
#' dfZ <- standardize(df, except = "V3")
#' dfZ <- standardize(df, except = c("V1", "V2"))
#' dfZ <- standardize(df, subset = "V3")
#' dfZ <- standardize(df, subset = c("V1", "V2"))
#' dfZ <- standardize(df, normalize = TRUE)
#'
#' # Respects grouping
#' dfZ <- df %>%
#' dplyr::group_by(Participant) %>%
#' standardize(df)
#' }
#'
#' @author \href{https://dominiquemakowski.github.io/}{Dominique Makowski}
#'
#'
#' @importFrom purrr keep discard
#' @import dplyr
#' @export
standardize.data.frame <- function(x, subset = NULL, except = NULL, normalize = FALSE, ...) {
  # Grouped tibbles are standardized within each group; plain data frames
  # are handed directly to the internal helper.
  if (inherits(x, "grouped_df")) {
    # NOTE(review): dplyr::do_() is deprecated; consider group_modify() —
    # confirm against the dplyr version this package targets.
    dfZ <- x %>% dplyr::do_(".standardize_df(., subset=subset, except=except, normalize=normalize, ...)")
  } else {
    dfZ <- .standardize_df(x, subset = subset, except = except, normalize = normalize, ...)
  }
  return(dfZ)
}
#' @keywords internal
# Standardize the numeric columns of a plain data frame; 'subset' limits
# the columns touched, 'except' excludes columns, and column order is
# preserved. Returns a data frame with the same columns.
.standardize_df <- function(x, subset = NULL, except = NULL, normalize = FALSE, ...) {
  df <- x
  # Variable order (restored at the very end)
  var_order <- names(df)
  # Keep subset
  # NOTE(review): `subset %in% names(df)` has length > 1 when 'subset' is a
  # vector, which makes `&&` error on R >= 4.3 — confirm intended usage.
  if (!is.null(subset) && subset %in% names(df)) {
    to_keep <- as.data.frame(df[!names(df) %in% c(subset)])
    df <- df[names(df) %in% c(subset)]
  } else {
    to_keep <- NULL
  }
  # Remove exceptions (same length-> 1 caveat as above applies to 'except')
  if (!is.null(except) && except %in% names(df)) {
    if (is.null(to_keep)) {
      to_keep <- as.data.frame(df[except])
    } else {
      to_keep <- cbind(to_keep, as.data.frame(df[except]))
    }
    df <- df[!names(df) %in% c(except)]
  }
  # Remove non-numerics
  dfother <- purrr::discard(df, is.numeric)
  dfnum <- purrr::keep(df, is.numeric)
  # Scale
  dfnum <- as.data.frame(sapply(dfnum, standardize, normalize = normalize))
  # Add non-numerics
  if (is.null(ncol(dfother))) {
    df <- dfnum
  } else {
    df <- dplyr::bind_cols(dfother, dfnum)
  }
  # Add exceptions
  # NOTE(review): `a | b && c` parses as `a | (b && c)` — the intent was
  # probably `(a | b) && c`; verify before relying on this branch.
  if (!is.null(subset) | !is.null(except) && exists("to_keep")) {
    df <- dplyr::bind_cols(df, to_keep)
  }
  # Reorder
  df <- df[var_order]
  return(df)
}
#' Standardize Posteriors.
#'
#' Compute standardized posteriors from which to get standardized coefficients.
#'
#' @param x A stanreg model.
#' @param method "refit" (default) will entirely refit the model based on standardized data. Can take a long time. Other post-hoc methods are "posterior" (based on estimated SD) or "sample" (based on the sample SD).
#' @param ... Arguments passed to or from other methods.
#'
#' @examples
#' \dontrun{
#' library(psycho)
#' library(rstanarm)
#'
#' fit <- rstanarm::stan_glm(Sepal.Length ~ Sepal.Width * Species, data = iris)
#' fit <- rstanarm::stan_glm(Sepal.Length ~ Sepal.Width * Species, data = standardize(iris))
#' posteriors <- standardize(fit)
#' posteriors <- standardize(fit, method = "posterior")
#' }
#'
#' @author \href{https://github.com/jgabry}{Jonah Gabry}, \href{https://github.com/bgoodri}{bgoodri}
#'
#' @seealso https://github.com/stan-dev/rstanarm/issues/298
#'
#' @importFrom utils capture.output
#' @export
standardize.stanreg <- function(x, method = "refit", ...) {
  # Standardized posterior draws for a stanreg model.
  # method = "sample":    scale coefficients by sample SDs (sd(x)/sd(y));
  # method = "posterior": scale by posterior-predictive SDs;
  # anything else:        refit the model on standardized data (default).
  fit <- x
  predictors <- get_info(fit)$predictors
  predictors <- c("(Intercept)", predictors)
  if (method == "sample") {
    # By jgabry
    predictors <- all.vars(as.formula(fit$formula))
    outcome <- predictors[[1]]
    X <- as.matrix(model.matrix(fit)[, -1]) # -1 to drop column of 1s for intercept
    sd_X_over_sd_y <- apply(X, 2, sd) / sd(fit$data[[outcome]])
    beta <- as.matrix(fit, pars = colnames(X)) # posterior distribution of regression coefficients
    posteriors_std <- sweep(beta, 2, sd_X_over_sd_y, "*") # multiply each row of b by sd_X_over_sd_y
  } else if (method == "posterior") {
    # By bgoordi
    X <- model.matrix(fit)
    # if(preserve_factors == TRUE){
    #   X <- as.data.frame(X)
    #   X[!names(as.data.frame(X)) %in% predictors] <- scale(X[!names(as.data.frame(X)) %in% predictors])
    #   X <- as.matrix(X)
    # }
    # Per-predictor SDs ([-1] drops the intercept column) and per-draw
    # SDs of the posterior predictive distribution.
    sd_X <- apply(X, MARGIN = 2, FUN = sd)[-1]
    sd_Y <- apply(rstanarm::posterior_predict(fit), MARGIN = 1, FUN = sd)
    beta <- as.matrix(fit)[, 2:ncol(X), drop = FALSE]
    posteriors_std <- sweep(
      sweep(beta, MARGIN = 2, STATS = sd_X, FUN = `*`),
      MARGIN = 1, STATS = sd_Y, FUN = `/`
    )
  } else {
    # Default "refit": re-estimate on standardized data; capture.output()
    # silences the sampler's console chatter.
    useless_output <- capture.output(fit_std <- update(fit, data = standardize(fit$data)))
    posteriors_std <- as.data.frame(fit_std)
  }
  return(posteriors_std)
}
#' Standardize Coefficients.
#'
#' Compute standardized coefficients.
#'
#' @param x A linear model.
#' @param method The standardization method. Can be "refit" (will entirely refit the model based on standardized data. Can take some time) or "agresti".
#' @param ... Arguments passed to or from other methods.
#'
#' @examples
#' \dontrun{
#' library(psycho)
#' fit <- glm(Sex ~ Adjusting, data = psycho::affective, family = "binomial")
#' fit <- lme4::glmer(Sex ~ Adjusting + (1 | Sex), data = psycho::affective, family = "binomial")
#'
#' standardize(fit)
#' }
#'
#' @author Kamil Barton
#' @importFrom stats model.frame model.response model.matrix
#'
#' @seealso https://think-lab.github.io/d/205/
#'
#' @export
standardize.glm <- function(x, method = "refit", ...) {
  # Standardized coefficients (and SEs) for a GLM.
  # method = "agresti": rescale the fitted coefficients by predictor SD;
  # anything else:      refit the model on standardized data (default).
  fit <- x
  if (method == "agresti") {
    coefs <- MuMIn::coefTable(fit)[, 1:2]
    X <- as.matrix(model.matrix(fit)[, -1]) # -1 to drop column of 1s for intercept
    # NOTE(review): sd() on a matrix pools ALL entries into one scalar, so
    # every coefficient is scaled by the same value; Agresti's method is
    # usually per-predictor — confirm whether this is intended.
    sd_X <- sd(X, na.rm = TRUE)
    coefs <- coefs * sd_X
  } else {
    # refit method
    data <- get_data(fit)
    fit_std <- update(fit, data = standardize(data))
    coefs <- MuMIn::coefTable(fit_std)[, 1:2]
  }
  coefs <- as.data.frame(coefs)
  names(coefs) <- c("Coef_std", "SE_std")
  return(coefs)
}
#' @export
standardize.glmerMod <- standardize.glm
#' Standardize Coefficients.
#'
#' Compute standardized coefficients.
#'
#' @param x A linear model.
#' @param method The standardization method. Can be "refit" (will entirely refit the model based on standardized data. Can take some time) or "posthoc".
#' @param partial_sd Logical, if set to TRUE, model coefficients are multiplied by partial SD, otherwise they are multiplied by the ratio of the standard deviations of the independent variable and dependent variable.
#' @param preserve_factors Standardize factors-related coefs only by the dependent variable (i.e., do not standardize the dummies generated by factors).
#' @param ... Arguments passed to or from other methods.
#'
#' @examples
#' \dontrun{
#' library(psycho)
#'
#' df <- mtcars %>%
#' mutate(cyl = as.factor(cyl))
#'
#' fit <- lm(wt ~ mpg * cyl, data = df)
#' fit <- lmerTest::lmer(wt ~ mpg * cyl + (1 | gear), data = df)
#'
#' summary(fit)
#' standardize(fit)
#' }
#'
#' @author Kamil Barton
#' @importFrom stats model.frame model.response model.matrix
#'
#' @export
standardize.lm <- function(x, method = "refit", partial_sd = FALSE, preserve_factors = TRUE, ...) {
  # Standardized coefficients (and SEs) for a linear model.
  # method = "posthoc": rescale the fitted coefficients in place;
  # anything else:      refit the model on standardized data (default).
  fit <- x
  if (method == "posthoc") {
    coefs <- .standardize_coefs(fit, partial_sd = partial_sd, preserve_factors = preserve_factors)
  } else {
    # Refit on a standardized copy of the model's data.
    data <- get_data(fit)
    fit_std <- update(fit, data = standardize(data))
    coefs <- MuMIn::coefTable(fit_std)[, 1:2]
  }
  coefs <- as.data.frame(coefs)
  names(coefs) <- c("Coef_std", "SE_std")
  return(coefs)
}
#' @export
standardize.lmerMod <- standardize.lm
#' @keywords internal
# Partial standard deviation: the plain SD shrunk by its variance
# inflation factor and a degrees-of-freedom correction.
#   x: coefficient vector (length used for default p); sd: predictor SDs;
#   vif: variance inflation factors; n: observations; p: predictor count.
.partialsd <-
  function(x, sd, vif, n, p = length(x) - 1) {
    sd * sqrt(1 / vif) * sqrt((n - 1) / (n - p))
  }
#' @importFrom stats vcov
#' @keywords internal
.vif <-
function(x) {
v <- vcov(x)
nam <- dimnames(v)[[1L]]
if (dim(v)[1L] < 2L) {
return(structure(rep_len(1, dim(v)[1L]),
names = dimnames(v)[[1L]]
))
}
if ((ndef <- sum(is.na(MuMIn::coeffs(x)))) > 0L) {
stop(sprintf(ngettext(
ndef, "one coefficient is not defined",
"%d coefficients are not defined"
), ndef))
}
o <- attr(model.matrix(x), "assign")
if (any(int <- (o == 0))) {
v <- v[!int, !int, drop = FALSE]
} else {
warning("no intercept: VIFs may not be sensible")
}
d <- sqrt(diag(v))
rval <- numeric(length(nam))
names(rval) <- nam
rval[!int] <- diag(solve(v / (d %o% d)))
rval[int] <- 1
rval
}
#' @importFrom stats nobs vcov
#' @keywords internal
.standardize_coefs <- function(fit, partial_sd = FALSE, preserve_factors = TRUE, ...) {
  # Post-hoc standardized coefficients: multiply each coefficient (and SE)
  # by sd(predictor) / sd(response), or by the partial SD when
  # partial_sd = TRUE. With preserve_factors = TRUE, factor dummy columns
  # are rescaled by 1 / sd(response) only.
  # coefs <- MuMIn::coefTable(fit, ...)
  coefs <- as.data.frame(MuMIn::coefTable(fit))
  model_matrix <- model.matrix(fit)
  predictors <- get_info(fit)$predictors
  predictors <- c("(Intercept)", predictors)
  if (preserve_factors == TRUE) {
    # Split factor-generated dummy columns from the "true" predictors;
    # dummies get the multiplier 1 / sd(response).
    response_sd <- sd(model.response(model.frame(fit)))
    factors <- as.data.frame(model_matrix)[!names(as.data.frame(model_matrix)) %in% predictors]
    bx_factors <- rep(1 / response_sd, length(names(factors)))
    bx_factors <- data.frame(t(bx_factors))
    names(bx_factors) <- names(factors)
    coefs_factors <- coefs[names(factors), ]
    model_matrix_factors <- as.matrix(factors)
    # Restrict the remaining computation to the non-factor columns.
    coefs <- coefs[!rownames(coefs) %in% names(factors), ]
    model_matrix <- as.matrix(as.data.frame(model_matrix)[names(as.data.frame(model_matrix)) %in% predictors])
  }
  if (partial_sd == TRUE) {
    # Partial-SD multipliers (VIF- and df-adjusted).
    bx <- .partialsd(
      coefs[, 1L],
      apply(model_matrix, 2L, sd),
      .vif(fit),
      nobs(fit),
      sum(attr(model_matrix, "assign") != 0)
    )
  } else {
    # Classic multipliers: sd(predictor) / sd(response).
    response_sd <- sd(model.response(model.frame(fit)))
    bx <- apply(model_matrix, 2L, sd) / response_sd
  }
  bx <- as.data.frame(t(bx))
  names(bx) <- row.names(coefs)
  if (preserve_factors == TRUE) {
    bx <- cbind(bx, bx_factors)
  }
  # Re-read the full coefficient table and apply the matching multipliers.
  # coefs <- MuMIn::coefTable(fit, ...)
  coefs <- as.data.frame(MuMIn::coefTable(fit))
  multiplier <- as.numeric(bx[row.names(coefs)])
  coefs[, 1L:2L] <- coefs[, 1L:2L] * multiplier
  colnames(coefs)[1L:2L] <- c("Coef.std", "SE.std")
  return(coefs)
}
|
# q3.1 : select housing records with ACR == 3 and AGS == 6 and inspect
# the first matching rows (their row names answer the question).
mydata <- read.csv("http://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06hid.csv")
mydata2 <- subset(mydata, mydata$ACR == 3 & mydata$AGS == 6)
head(mydata2, 3)
# q3.2 : read a JPEG and report quantiles of its raw (native) values.
# The 'jpeg' package needs libjpeg-turbo-devel on Linux.
install.packages("jpeg")
library(jpeg)
myurl <- "http://d396qusza40orc.cloudfront.net/getdata%2Fjeff.jpg"
download.file(myurl, destfile = "./myimg.jpg", method = "curl")
myimg <- readJPEG("./myimg.jpg", native = TRUE)
quantile(myimg, probs = c(0.3, 0.8))
# q3.3-5 : GDP-ranking questions.
urlgdp <- "http://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FGDP.csv"
urledu <- "http://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FEDSTATS_Country.csv"
mygdp <- read.csv(urlgdp, colClasses = "character") # need to skip the few header rows
myedu <- read.csv(urledu, colClasses = "character")
# The rank column arrives as text; coerce it, silencing the
# "NAs introduced by coercion" warnings for non-numeric rows.
mygdp$Gross.domestic.product.2012 <- suppressWarnings(as.numeric(mygdp$Gross.domestic.product.2012))
# Keep only rows with a valid, positive rank.
mygdp2 <- mygdp[mygdp$Gross.domestic.product.2012 > 0 & !is.na(mygdp$Gross.domestic.product.2012), ]
mygdp2[c(12:14), ] # print spain as rank 13th entry
# 13th from the last: sort descending and look at the head.
mybad <- mygdp2[order(mygdp2[, 2], decreasing = TRUE), ]
head(mybad[, c(1, 2)], 15)
mygdpcode <- unique(mygdp2[, 1]) # 190
myeducode <- unique(myedu[, 1]) # 234
length(intersect(mygdpcode, myeducode)) # country codes present in both files
mycode <- myedu[, c(1, 3)] # code, incomeGroup
myoecd <- mycode[mycode$Income.Group == "High income: OECD", ] # 30,2
mynonoecd <- mycode[mycode$Income.Group == "High income: nonOECD", ] # 37,2
# Average GDP rank within each high-income group. %in% is equivalent to
# the original match(...) > 0 (subset() drops the NA rows match produced).
myY <- subset(mygdp2, mygdp2$X %in% myoecd$CountryCode)
myN <- subset(mygdp2, mygdp2$X %in% mynonoecd$CountryCode)
mean(myY$Gross.domestic.product.2012)
mean(myN$Gross.domestic.product.2012)
# Lower-middle-income countries among the 38 highest-ranked GDPs.
mylowermiddlecode <- mycode[mycode$Income.Group == "Lower middle income", ]
mylowermiddle <- subset(mygdp2, mygdp2$X %in% mylowermiddlecode$CountryCode)
# BUG FIX: the original filtered on `Gross.product.2012`, a column that
# does not exist ($ returned NULL, so the subset was always empty).
mybest <- subset(mylowermiddle, mylowermiddle$Gross.domestic.product.2012 <= 38)
mybest
| /coursera/getdata/q3.R | no_license | billtang/sandbox | R | false | false | 2,121 | r | # q3.1 : get row numbers for data meeting condition
mydata=read.csv('http://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06hid.csv')
mydata2=subset(mydata, mydata$ACR==3 & mydata$AGS==6 )
head(mydata2, 3)
# need libjpeg-turbo-devel
install.packages('jpeg')
library(jpeg)
myurl="http://d396qusza40orc.cloudfront.net/getdata%2Fjeff.jpg"
download.file(myurl, destfile='./myimg.jpg',method='curl')
myimg=readJPEG('./myimg.jpg', native=T)
quantile(myimg, probs=c(0.3,0.8))
#
urlgdp='http://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FGDP.csv'
urledu='http://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FEDSTATS_Country.csv'
mygdp=read.csv(urlgdp, colClasses='character') # need to skip the few header rows
myedu=read.csv(urledu, colClasses='character')
mygdp$Gross.domestic.product.2012=suppressWarnings(as.numeric(mygdp$Gross.domestic.product.2012))
mygdp2=mygdp[mygdp$Gross.domestic.product.2012 > 0 & !is.na(mygdp$Gross.domestic.product.2012), ]
mygdp2[ c(12:14),] # print spain as rank 13th entry
# 13 from the last
mybad=mygdp2[order( mygdp2[,2], decreasing=T),]
head(mybad[, c(1,2)],15)
mygdpcode=unique(mygdp2[,1]) # 190
myeducode=unique(myedu[,1]) # 234
length( intersect( mygdpcode, myeducode)) #
mycode=myedu[,c(1,3)] # code, incomeGroup
myoecd=mycode[ mycode$Income.Group=='High income: OECD',] #30,2
mynonoecd=mycode[ mycode$Income.Group=='High income: nonOECD',] #37,2
#foo=cbind( mygdp2$X, mygdp2$Gross.domestic.product.2012 )
#colnames(foo)<-c('code','rank')
myY=subset(mygdp2, match( mygdp2$X, myoecd$CountryCode ) > 0)
myN=subset(mygdp2, match( mygdp2$X, mynonoecd$CountryCode ) > 0)
mean(myY$Gross.domestic.product.2012)
mean(myN$Gross.domestic.product.2012)
#mylowermiddlecode=mycode[ mycode$Income.Group=='Lower middle income' |mycode$Income.Group=='Low income' ,]
#mylowermiddle=subset(mygdp2, match( mygdp2$X, mylowermiddlecode$CountryCode ) > 0)
mylowermiddlecode=mycode[ mycode$Income.Group=='Lower middle income',]
mylowermiddle=subset(mygdp2, match( mygdp2$X, mylowermiddlecode$CountryCode ) > 0 )
mybest=subset(mylowermiddle, mylowermiddle$Gross.product.2012 <=38)
mybest
|
testlist <- list(phi = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), x = c(1.36656529241717e-311, -1.65791256519293e+82, 1.29418168595419e-228, -1.85353502606261e+293, 8.08855267383463e-84, -4.03929894096111e-178, 6.04817943207006e-103, -1.66738461804717e-220, -8.8217241872956e-21, -7.84828807007467e-146, -7.48864562038427e+21, -1.00905374512e-187, 5.22970923741951e-218, 2.77992264324548e-197, -5.29147138128251e+140, -1.71332436886848e-93, -1.52261021137076e-52, 2.0627472502345e-21, 1.07149136185465e+184, 4.41748962512848e+47, -4.05885894997926e-142))
result <- do.call(dcurver:::ddc,testlist)
str(result) | /dcurver/inst/testfiles/ddc/AFL_ddc/ddc_valgrind_files/1609867293-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 831 | r | testlist <- list(phi = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), x = c(1.36656529241717e-311, -1.65791256519293e+82, 1.29418168595419e-228, -1.85353502606261e+293, 8.08855267383463e-84, -4.03929894096111e-178, 6.04817943207006e-103, -1.66738461804717e-220, -8.8217241872956e-21, -7.84828807007467e-146, -7.48864562038427e+21, -1.00905374512e-187, 5.22970923741951e-218, 2.77992264324548e-197, -5.29147138128251e+140, -1.71332436886848e-93, -1.52261021137076e-52, 2.0627472502345e-21, 1.07149136185465e+184, 4.41748962512848e+47, -4.05885894997926e-142))
result <- do.call(dcurver:::ddc,testlist)
str(result) |
#' Summarize group comparisons
#'
#' Summarize a group comparisons object or a ddo of group comparisons objects. This function
#' applies a summary function to the columns of \code{compData$e_data} corresponding to each
#' column to calculate a summary column for each group.
#'
#' Currently this function does not allow executing the same summary function multiple times
#' with different parameters.
#'
#' @param compData a groupComparison object or a ddo of groupComparison objects, i.e. the output
#' of \code{\link{divideByGroupComparisons}}.
#' @param summary_functions vector of summary function names to apply to each row of \code{ftmsObj$e_data} for each group. Valid
#' summary function names are given by \code{\link{getGroupComparisonSummaryFunctionNames}}.
#' @param summary_function_params named list of list of other parameters to pass to the summary functions. Names should
#' match values in \code{summary_functions}, each value should be a list of name/value parameters, e.g.
#' \code{list(uniqueness_gtest=list(pval_threshold=0.01))}.
#'
#' @return a comparisonSummary object or a ddo of comparisonSummary objects
#' @export
summarizeGroupComparisons <- function(compData, summary_functions, summary_function_params=NULL) {
  ## --- validate arguments -------------------------------------------------
  if (missing(compData)) stop("compData is missing")
  ## BUG FIX: this check previously re-tested missing(compData), so a call
  ## without 'summary_functions' slipped past validation and failed later
  ## with an obscure error.
  if (missing(summary_functions)) stop("summary_functions is missing")
  #if (length(summary_functions) != 1) stop("summary_functions must have length 1")
  if (!(inherits(compData, "groupComparison") | inherits(compData, "ddo") ) )
    stop("compData must be of type groupComparison or a ddo containing groupComparisons")
  if (!is.null(summary_function_params)) {
    if (!is.list(summary_function_params)) {
      stop("summary_function_params must be a list")
    }
    if (!all(names(summary_function_params) %in% summary_functions)) {
      stop("all names(summary_function_params) must appear in summary_functions")
    }
  }
  ## --- dispatch: per-division transform for a ddo, direct call otherwise --
  if (inherits(compData, "ddo")) {
    res <- drPersist(addTransform(compData, function(v) {
      ftmsRanalysis:::.summarizeGroupComparisonsInternal(v, summary_functions, summary_function_params)
    }))
  } else {
    res <- .summarizeGroupComparisonsInternal(compData, summary_functions, summary_function_params)
  }
  return(res)
}
#' @title Group comparison summary functions
#' @description \code{getGroupComparisonSummaryFunctionNames} returns the names of valid group comparison
#' summary functions that may be used with the \code{\link{summarizeGroups}} function.
#' @export
getGroupComparisonSummaryFunctionNames <- function() {
  # The fixed set of comparison-summary methods implemented by this package.
  c("uniqueness_gtest", "uniqueness_nsamps", "uniqueness_prop")
}
## Workhorse behind summarizeGroupComparisons(): summarizes ONE
## groupComparison object (never a ddo).
##   1. Resolve summary function names to functions in this package.
##   2. Apply each summary function to the sample columns of e_data; each
##      call returns one or more summary columns plus per-column metadata.
##   3. Reassemble e_data/f_data and re-wrap the result as the same concrete
##      data type, re-classed as 'comparisonSummary'.
.summarizeGroupComparisonsInternal <- function(compData, summary_functions, summary_function_params=NULL) {
  # Get function objects from names
  summary_func_names <- as.vector(unlist(summary_functions))
  validNames <- getGroupComparisonSummaryFunctionNames()
  summary_functions <- lapply(summary_functions, function(nn) {
    nn <- as.character(nn)
    if (!(nn %in% validNames)) stop(sprintf("'%s' is not a valid function name, see getGroupSummaryFunctionNames() for valid options", nn))
    return(get(nn, envir=asNamespace("ftmsRanalysis"), mode="function"))
  })
  names(summary_functions) <- summary_func_names
  groupDF <- getGroupDF(compData)
  data_scale <- getDataScale(compData)
  # for each group of sample columns, apply all summary functions and recombine columns
  edata_cols <- lapply(summary_func_names, function(fname) {
    # Positional arguments every summary function receives: the e_data
    # minus its ID column, the group membership table, and the data scale.
    parms <- list(edata_df=dplyr::select(compData$e_data, -dplyr::matches(getEDataColName(compData))),
                  group_df=dplyr::select(groupDF, dplyr::one_of("Group", getFDataColName(compData))),
                  data_scale=data_scale)
    # Strip names so user-supplied extras can be appended and do.call
    # matches everything positionally.
    names(parms)<- NULL
    if (!is.null(summary_function_params[[fname]])) {
      parms <- c(parms, summary_function_params[[fname]])
    }
    # tmp_result <- f(dplyr::select(compData$e_data, -dplyr::matches(getEDataColName(compData))),
    # dplyr::select(groupDF, dplyr::one_of("Group", getFDataColName(compData))),
    # data_scale)
    tmp_result <- do.call(summary_functions[[fname]], parms)
    # Record in f_data which parameters (if any) produced each column.
    if (is.null(summary_function_params[[fname]])) {
      summary_params <- NA
    } else {
      summary_params <- list(summary_function_params[[fname]])
    }
    tmp_fdata <- tibble::tibble(Comparison_Summary_Column=colnames(tmp_result),
                                Summary_Function_Name=fname,
                                Parameters=summary_params)
    # Stash the metadata on the result; pulled off and row-bound below.
    attr(tmp_result, "f_data") <- tmp_fdata
    return(tmp_result)
  })
  # Assemble the new f_data and e_data (ID column goes first).
  new_fdata <- do.call(rbind, lapply(edata_cols, function(x) attr(x, "f_data")))
  new_edata <- data.frame(compData$e_data[, getEDataColName(compData)], do.call(cbind, edata_cols))
  colnames(new_edata)[1] <- getEDataColName(compData)
  # Re-wrap as the same concrete type as the input.
  # NOTE(review): if compData is none of these four types, 'res' is never
  # assigned and the attr() call below errors -- confirm whether an
  # explicit stop() is wanted here.
  if (inherits(compData, "peakData")) {
    res <- as.peakData(new_edata, new_fdata, compData$e_meta, getEDataColName(compData), "Comparison_Summary_Column",
                       getMassColName(compData), mf_cname=getMFColName(compData), instrument_type=getInstrumentType(compData) )
  } else if (inherits(compData, "compoundData")) {
    res <- as.compoundData(new_edata, new_fdata, compData$e_meta, getEDataColName(compData), "Comparison_Summary_Column",
                           mass_cname=getMassColName(compData), getCompoundColName(compData), instrument_type=getInstrumentType(compData) )
  } else if (inherits(compData, "reactionData")) {
    res <- as.reactionData(new_edata, new_fdata, compData$e_meta, getEDataColName(compData), "Comparison_Summary_Column",
                           getReactionColName(compData), instrument_type=getInstrumentType(compData), db=getDatabase(compData) )
  } else if (inherits(compData, "moduleData")) {
    res <- as.moduleData(new_edata, new_fdata, compData$e_meta, getEDataColName(compData), "Comparison_Summary_Column",
                         getModuleColName(compData), getModuleNodeColName(compData),
                         instrument_type=getInstrumentType(compData) )
  }
  # copy other attributes to new object (ID-related cnames come from the
  # freshly constructed object; everything else is inherited from the input)
  cnames.new <- attr(res, "cnames")
  cnames.old <- attr(compData, "cnames")
  for (cc in setdiff(names(cnames.old), c("edata_cname", "fdata_cname", "mass_cname", "mf_cname", "compound_cname"))) {
    if (!is.null(cnames.old[[cc]]))
      cnames.new[[cc]] <- cnames.old[[cc]]
  }
  attr(res, "cnames") <- cnames.new
  # set class to include 'comparisonSummary' in place of 'groupComparison'
  class(res) <- c("comparisonSummary", setdiff(class(res), "groupComparison"))
  # copy other attributes
  diffAttrNames <- c("cnames", "class", "names", "split") #attribute names that should not be the same in the result object
  for (attr_name in setdiff(names(attributes(compData)), diffAttrNames)) {
    attr(res, attr_name) <- attr(compData, attr_name)
  }
  res <- ftmsRanalysis:::setDataScale(res, "summary")
  if (!is.null(getDatabase(compData))) {
    res <- ftmsRanalysis:::setDatabase(res, getDatabase(compData))
  }
  return(res)
} | /R/summarizeGroupComparisons.R | permissive | EMSL-Computing/ftmsRanalysis | R | false | false | 7,031 | r | #' Summarize group comparisons
#'
#' Summarize a group comparisons object or a ddo of group comparisons objects. This function
#' applies a summary function to the columns of \code{compData$e_data} corresponding to each
#' column to calculate a summary column for each group.
#'
#' Currently this function does not allow executing the same summary function multiple times
#' with different parameters.
#'
#' @param compData a groupComparison object or a ddo of groupComparison objects, i.e. the output
#' of \code{\link{divideByGroupComparisons}}.
#' @param summary_functions vector of summary function names to apply to each row of \code{ftmsObj$e_data} for each group. Valid
#' summary function names are given by \code{\link{getGroupComparisonSummaryFunctionNames}}.
#' @param summary_function_params named list of list of other parameters to pass to the summary functions. Names should
#' match values in \code{summary_functions}, each value should be a list of name/value parameters, e.g.
#' \code{list(uniqueness_gtest=list(pval_threshold=0.01))}.
#'
#' @return a comparisonSummary object or a ddo of comparisonSummary objects
#' @export
summarizeGroupComparisons <- function(compData, summary_functions, summary_function_params=NULL) {
  ## --- validate arguments -------------------------------------------------
  if (missing(compData)) stop("compData is missing")
  ## BUG FIX: this check previously re-tested missing(compData), so a call
  ## without 'summary_functions' slipped past validation and failed later
  ## with an obscure error.
  if (missing(summary_functions)) stop("summary_functions is missing")
  #if (length(summary_functions) != 1) stop("summary_functions must have length 1")
  if (!(inherits(compData, "groupComparison") | inherits(compData, "ddo") ) )
    stop("compData must be of type groupComparison or a ddo containing groupComparisons")
  if (!is.null(summary_function_params)) {
    if (!is.list(summary_function_params)) {
      stop("summary_function_params must be a list")
    }
    if (!all(names(summary_function_params) %in% summary_functions)) {
      stop("all names(summary_function_params) must appear in summary_functions")
    }
  }
  ## --- dispatch: per-division transform for a ddo, direct call otherwise --
  if (inherits(compData, "ddo")) {
    res <- drPersist(addTransform(compData, function(v) {
      ftmsRanalysis:::.summarizeGroupComparisonsInternal(v, summary_functions, summary_function_params)
    }))
  } else {
    res <- .summarizeGroupComparisonsInternal(compData, summary_functions, summary_function_params)
  }
  return(res)
}
#' @title Group comparison summary functions
#' @description \code{getGroupComparisonSummaryFunctionNames} returns the names of valid group comparison
#' summary functions that may be used with the \code{\link{summarizeGroups}} function.
#' @export
getGroupComparisonSummaryFunctionNames <- function() {
return(c("uniqueness_gtest", "uniqueness_nsamps", "uniqueness_prop"))
}
.summarizeGroupComparisonsInternal <- function(compData, summary_functions, summary_function_params=NULL) {
# Get function objects from names
summary_func_names <- as.vector(unlist(summary_functions))
validNames <- getGroupComparisonSummaryFunctionNames()
summary_functions <- lapply(summary_functions, function(nn) {
nn <- as.character(nn)
if (!(nn %in% validNames)) stop(sprintf("'%s' is not a valid function name, see getGroupSummaryFunctionNames() for valid options", nn))
return(get(nn, envir=asNamespace("ftmsRanalysis"), mode="function"))
})
names(summary_functions) <- summary_func_names
groupDF <- getGroupDF(compData)
data_scale <- getDataScale(compData)
# for each group of sample columns, apply all summary functions and recombine columns
edata_cols <- lapply(summary_func_names, function(fname) {
parms <- list(edata_df=dplyr::select(compData$e_data, -dplyr::matches(getEDataColName(compData))),
group_df=dplyr::select(groupDF, dplyr::one_of("Group", getFDataColName(compData))),
data_scale=data_scale)
names(parms)<- NULL
if (!is.null(summary_function_params[[fname]])) {
parms <- c(parms, summary_function_params[[fname]])
}
# tmp_result <- f(dplyr::select(compData$e_data, -dplyr::matches(getEDataColName(compData))),
# dplyr::select(groupDF, dplyr::one_of("Group", getFDataColName(compData))),
# data_scale)
tmp_result <- do.call(summary_functions[[fname]], parms)
if (is.null(summary_function_params[[fname]])) {
summary_params <- NA
} else {
summary_params <- list(summary_function_params[[fname]])
}
tmp_fdata <- tibble::tibble(Comparison_Summary_Column=colnames(tmp_result),
Summary_Function_Name=fname,
Parameters=summary_params)
attr(tmp_result, "f_data") <- tmp_fdata
return(tmp_result)
})
new_fdata <- do.call(rbind, lapply(edata_cols, function(x) attr(x, "f_data")))
new_edata <- data.frame(compData$e_data[, getEDataColName(compData)], do.call(cbind, edata_cols))
colnames(new_edata)[1] <- getEDataColName(compData)
if (inherits(compData, "peakData")) {
res <- as.peakData(new_edata, new_fdata, compData$e_meta, getEDataColName(compData), "Comparison_Summary_Column",
getMassColName(compData), mf_cname=getMFColName(compData), instrument_type=getInstrumentType(compData) )
} else if (inherits(compData, "compoundData")) {
res <- as.compoundData(new_edata, new_fdata, compData$e_meta, getEDataColName(compData), "Comparison_Summary_Column",
mass_cname=getMassColName(compData), getCompoundColName(compData), instrument_type=getInstrumentType(compData) )
} else if (inherits(compData, "reactionData")) {
res <- as.reactionData(new_edata, new_fdata, compData$e_meta, getEDataColName(compData), "Comparison_Summary_Column",
getReactionColName(compData), instrument_type=getInstrumentType(compData), db=getDatabase(compData) )
} else if (inherits(compData, "moduleData")) {
res <- as.moduleData(new_edata, new_fdata, compData$e_meta, getEDataColName(compData), "Comparison_Summary_Column",
getModuleColName(compData), getModuleNodeColName(compData),
instrument_type=getInstrumentType(compData) )
}
# copy other attributes to new object
cnames.new <- attr(res, "cnames")
cnames.old <- attr(compData, "cnames")
for (cc in setdiff(names(cnames.old), c("edata_cname", "fdata_cname", "mass_cname", "mf_cname", "compound_cname"))) {
if (!is.null(cnames.old[[cc]]))
cnames.new[[cc]] <- cnames.old[[cc]]
}
attr(res, "cnames") <- cnames.new
# set class to include 'comparisonSummary'
class(res) <- c("comparisonSummary", setdiff(class(res), "groupComparison"))
# copy other attributes
diffAttrNames <- c("cnames", "class", "names", "split") #attribute names that should not be the same in the result object
for (attr_name in setdiff(names(attributes(compData)), diffAttrNames)) {
attr(res, attr_name) <- attr(compData, attr_name)
}
res <- ftmsRanalysis:::setDataScale(res, "summary")
if (!is.null(getDatabase(compData))) {
res <- ftmsRanalysis:::setDatabase(res, getDatabase(compData))
}
return(res)
} |
/faq/tk/general.rd | no_license | wanabe/rubydoc | R | false | false | 136 | rd | ||
# Fuzz-harness fixture: replay a pathological argument list (zero bounds,
# extreme negative integers) against the routine under test.
testlist <- list(a = 0L, b = 0L, x = c(-1L, -1L, -14024705L, -16384000L, 0L, 0L, 0L, 0L))
# NOTE(review): grattan:::anyOutside is an unexported internal; this file
# only makes sense inside the package's fuzz-testing setup.
result <- do.call(grattan:::anyOutside,testlist)
str(result) | /grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610131950-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 151 | r | testlist <- list(a = 0L, b = 0L, x = c(-1L, -1L, -14024705L, -16384000L, 0L, 0L, 0L, 0L))
result <- do.call(grattan:::anyOutside,testlist)
str(result) |
## Read in and annotate spatial data set; from Cumming1995
## the spreadsheet we have is for late summer. but could also redo analysis for spring using
## Cumming1995
## units: nutrients; elements; DIC + DOC: mg/L; conductivity uS (assuming cm-1); note that
## specific conductance is reported in Cumming1995, and we are using conductivity;
## alt m;

## load necessary packages
library("sp")
library("rgdal")

## read in data; the csv is my revamp of the xls kerri sent; it now has lat longs and altitude
## of each site
if (!file.exists("../data/piscesallorgh.csv")) {
  stop("Get piscesallorgh.csv from emma")
}
pisces <- read.csv("../data/piscesallorgh.csv")

## coordinates currently as lat longs in separate columns. need to make into decimal degrees
## Build "deg d min" strings that are later split on "." to recover pieces.
## NOTE(review): the '\\ "N"' suffix literal (escaped backslash plus quoted N)
## looks odd -- confirm it is the intended hemisphere marker.
lats <- data.frame(latdegmin = paste0(pisces$latdeg,"d", sprintf("%.1f", pisces$latmin), '\\ "N"'))
# coerce all instances of even number minutes to have trailing zeroes with sprintf
longs <- data.frame(longdegmin = paste0(pisces$longdeg, "d", sprintf("%.1f", pisces$longmin), "W"))

## Convert the degree/minute pieces to decimal degrees.
latsdec <- within(lats, { # see https://stat.ethz.ch/pipermail/r-help/2010-August/249374.html
  latdegmins <- do.call(rbind, strsplit(as.character(latdegmin), ".", fixed = TRUE))
  latdec <- as.numeric(latdegmins[,1]) +
    (as.numeric(latdegmins[,2]) + as.numeric(latdegmins[,3])/60)/60
  rm(latdegmins)
})
## Longitudes are west of Greenwich, hence the sign flip to negative.
longsdec <- within(longs, { # see https://stat.ethz.ch/pipermail/r-help/2010-August/249374.html
  longdegmins <- do.call(rbind, strsplit(as.character(longdegmin), ".", fixed = TRUE))
  longdec <- abs(as.numeric(longdegmins[,1])) +
    (as.numeric(longdegmins[,2]) + as.numeric(longdegmins[,3])/60)/60
  longdec = -longdec
  rm(longdegmins)
})

## Collect lake ids plus both coordinate representations in one data frame.
googlecoords <- data.frame(LAKE = pisces$LAKE, lakename = pisces$lakename,
                           latdegminsec = lats$latdegmin, longdegminsec = longs$longdegmin,
                           latdecim = latsdec$latdec, longdecim = longsdec$longdec)

## Sanity check: try parsing the first three deg/min strings with char2dms.
testing <- cbind(as.character(googlecoords$latdegminsec[1:3]),
                 as.character(googlecoords$longdegminsec[1:3]))
testing[,2] <- gsub("-", "", testing[,2])
testing[,1] <- paste(testing[,1], "N")
testing[,2] <- paste(testing[,2], "E")
char2dms(testing, chd = ".", chm = ".", chs = "")

## NOTE(review): data was read from "../data" above but is written to "data/"
## here -- confirm which path is intended.
write.csv(googlecoords, "data/googlecoords.csv")

## Promote the data frame to a SpatialPointsDataFrame in WGS84 lon/lat.
coordinates(googlecoords) <- c("longdecim", "latdecim")
proj4string(googlecoords) <- CRS("+proj=longlat +datum=WGS84")
#then coordinates(dfProj) will give you back projected coordinates.
## NOTE(review): 'states' is not defined anywhere in this script, so the next
## line will fail -- confirm where it was meant to come from.
state.ll83 <- spTransform(states, CRS("+proj=longlat +ellps=GRS80"))
## Export the points as KML for inspection in Google Earth.
writeOGR(googlecoords, dsn = "data/testing.kml", layer = "wtf", driver = "KML")

head(pisces)
| /scripts/co2flux_spatial.R | no_license | ewiik/spatial | R | false | false | 2,668 | r | ## Read in and annotate spatial data set; from Cumming1995
## the spreadsheet we have is for late summer. but could also redo analysis for spring using
## Cumming1995
## units: nutrients; elements; DIC + DOC: mg/L; conductivity uS (assuming cm-1); note that
## specific conductance is reported in Cumming1995, and we are using conductivity;
## alt m;
## load necessary packages
library("sp")
library("rgdal")
## read in data; the csv is my revamp of the xls kerri sent; it now has lat longs and altitude
## of each site
if (!file.exists("../data/piscesallorgh.csv")) {
stop("Get piscesallorgh.csv from emma")
}
pisces <- read.csv("../data/piscesallorgh.csv")
## coordinates currently as lat longs in separate columns. need to make into decimal degrees
lats <- data.frame(latdegmin = paste0(pisces$latdeg,"d", sprintf("%.1f", pisces$latmin), '\\ "N"'))
# coerce all instances of even number minutes to have trailing zeroes with sprintf
longs <- data.frame(longdegmin = paste0(pisces$longdeg, "d", sprintf("%.1f", pisces$longmin), "W"))
latsdec <- within(lats, { # see https://stat.ethz.ch/pipermail/r-help/2010-August/249374.html
latdegmins <- do.call(rbind, strsplit(as.character(latdegmin), ".", fixed = TRUE))
latdec <- as.numeric(latdegmins[,1]) +
(as.numeric(latdegmins[,2]) + as.numeric(latdegmins[,3])/60)/60
rm(latdegmins)
})
longsdec <- within(longs, { # see https://stat.ethz.ch/pipermail/r-help/2010-August/249374.html
longdegmins <- do.call(rbind, strsplit(as.character(longdegmin), ".", fixed = TRUE))
longdec <- abs(as.numeric(longdegmins[,1])) +
(as.numeric(longdegmins[,2]) + as.numeric(longdegmins[,3])/60)/60
longdec = -longdec
rm(longdegmins)
})
googlecoords <- data.frame(LAKE = pisces$LAKE, lakename = pisces$lakename,
latdegminsec = lats$latdegmin, longdegminsec = longs$longdegmin,
latdecim = latsdec$latdec, longdecim = longsdec$longdec)
testing <- cbind(as.character(googlecoords$latdegminsec[1:3]),
as.character(googlecoords$longdegminsec[1:3]))
testing[,2] <- gsub("-", "", testing[,2])
testing[,1] <- paste(testing[,1], "N")
testing[,2] <- paste(testing[,2], "E")
char2dms(testing, chd = ".", chm = ".", chs = "")
write.csv(googlecoords, "data/googlecoords.csv")
coordinates(googlecoords) <- c("longdecim", "latdecim")
proj4string(googlecoords) <- CRS("+proj=longlat +datum=WGS84")
#then coordinates(dfProj) will give you back projected coordinates.
state.ll83 <- spTransform(states, CRS("+proj=longlat +ellps=GRS80"))
writeOGR(googlecoords, dsn = "data/testing.kml", layer = "wtf", driver = "KML")
head(pisces)
|
# Library: packages shared by this project's analysis scripts.
library(ukbtools)   # UK Biobank data helpers
library(tidyverse)
library(XML)
library(stringr)    # NOTE(review): likely redundant (core tidyverse) but harmless
library(rbgen)      # reading .bgen genotype files
| /munge/01_lib.R | permissive | sdufault15/breast-cancer | R | false | false | 93 | r | # Library
library(ukbtools)
library(tidyverse)
library(XML)
library(stringr)
library(rbgen)
|
#'
#' kernel2d.R
#'
#' Two-dimensional smoothing kernels
#'
#' $Revision: 1.11 $ $Date: 2016/11/13 01:54:57 $
#'
## Lookup table of the built-in 2D smoothing kernels, keyed by kernel name.
## Each entry describes the kernel in "standardised" (unit-scale) form;
## evaluate2Dkernel() rescales it to the requested bandwidth.
.Spatstat.2D.KernelTable <- list(
  #' table entries:
  #'   d  = density of standardised kernel
  #'   sd = standard deviation of x coordinate, for standardised kernel
  #'   hw = halfwidth of support of standardised kernel
  ## Product of two standard normals; hw = 8 is an effective cutoff
  ## (the Gaussian has unbounded support).
  gaussian=list(
    d = function(x,y, ...) { dnorm(x) * dnorm(y) },
    sd = 1,
    hw = 8,
    symmetric = TRUE),
  ## Paraboloid supported on the unit disc.
  epanechnikov=list(
    d = function(x,y, ...) { (2/pi) * pmax(1 - (x^2+y^2), 0) },
    sd = 1/sqrt(6),
    hw = 1,
    symmetric = TRUE),
  ## Quartic (biweight) kernel on the unit disc.
  quartic=list(
    d = function(x,y, ...) { (3/pi) * pmax(1 - (x^2+y^2), 0)^2 },
    sd = 1/sqrt(8),
    hw = 1,
    symmetric = TRUE),
  ## Uniform density on the unit disc.
  disc=list(
    d = function(x,y,...) { (1/pi) * as.numeric(x^2 + y^2 <= 1) },
    sd = 1/2,
    hw = 1,
    symmetric = TRUE)
  )
validate2Dkernel <- function(kernel, fatal=TRUE) {
  ## Check that 'kernel' is an acceptable 2D kernel specification: a
  ## recognised kernel name, a pixel image, or a function(x, y).
  ## Returns TRUE on success; on failure either stops (fatal=TRUE)
  ## or returns FALSE.
  ok <- is.character(match2DkernelName(kernel)) ||
        is.im(kernel) ||
        is.function(kernel)
  if(ok) return(TRUE)
  if(!fatal) return(FALSE)
  ## Invalid input: raise an informative error.
  if(is.character(kernel)) {
    valid <- commasep(sQuote(names(.Spatstat.2D.KernelTable)))
    stop(paste("Unrecognised choice of kernel", sQuote(kernel),
               paren(paste("options are", valid))),
         call.=FALSE)
  }
  stop(paste("kernel should be a character string,",
             "a pixel image, or a function (x,y)"),
       call.=FALSE)
}
match2DkernelName <- function(kernel) {
  ## Resolve a (possibly abbreviated) kernel name to its canonical form.
  ## Returns NULL unless 'kernel' is a single string that partially matches
  ## exactly one entry of the kernel table.
  if(!is.character(kernel) || length(kernel) != 1) return(NULL)
  known <- names(.Spatstat.2D.KernelTable)
  hit <- pmatch(kernel, known)
  if(is.na(hit)) NULL else known[hit]
}
lookup2DkernelInfo <- function(kernel) {
  ## Fetch the table entry (density, sd, halfwidth, symmetry) for a named
  ## kernel; NULL for image/function kernels. Errors on invalid input.
  validate2Dkernel(kernel)
  key <- match2DkernelName(kernel)
  if(is.null(key)) NULL else .Spatstat.2D.KernelTable[[key]]
}
evaluate2Dkernel <- function(kernel, x, y, sigma=NULL, varcov=NULL, ...,
                             scalekernel=is.character(kernel)) {
  ## Evaluate a 2D smoothing kernel at the points (x, y).
  ## 'kernel' may be a kernel name, a function(x, y, ...), or a pixel image.
  ## If scalekernel=TRUE the kernel is rescaled to bandwidth 'sigma'
  ## (or to the covariance matrix 'varcov').
  info <- lookup2DkernelInfo(kernel)
  if(scalekernel) {
    ## kernel adjustment factor
    sdK <- if(is.character(kernel)) info$sd else 1
    ## transform coordinates to x',y' such that kerfun(x', y')
    ## yields density k(x,y) at desired bandwidth
    if(is.null(varcov)) {
      rr <- sdK/sigma
      x <- x * rr
      y <- y * rr
      const <- rr^2        # Jacobian of the isotropic rescaling
    } else {
      SinvH <- matrixinvsqrt(varcov)
      rSinvH <- sdK * SinvH
      XY <- cbind(x, y) %*% rSinvH
      x <- XY[,1]
      y <- XY[,2]
      const <- det(rSinvH) # Jacobian of the linear transformation
    }
  }
  ## now evaluate kernel
  if(is.character(kernel)) {
    kerfun <- info$d
    result <- kerfun(x, y)
    if(scalekernel)
      result <- const * result
    return(result)
  }
  if(is.function(kernel)) {
    ## Forward only those '...' arguments the user's function accepts.
    argh <- list(...)
    if(length(argh) > 0)
      argh <- argh[names(argh) %in% names(formals(kernel))]
    result <- do.call(kernel, append(list(x, y), argh))
    if(anyNA(result))
      stop("NA values returned from kernel function")
    if(length(result) != length(x))
      stop("Kernel function returned the wrong number of values")
    if(scalekernel)
      result <- const * result
    return(result)
  }
  if(is.im(kernel)) {
    result <- kernel[list(x=x, y=y)]
    if(anyNA(result))
      stop("Domain of kernel image is not large enough")
    ## BUG FIX: the rescaling formerly sat *after* return(result) and was
    ## unreachable, so scalekernel=TRUE was silently ignored for image
    ## kernels. Scale first, then return.
    if(scalekernel)
      result <- const * result
    return(result)
  }
  # never reached
  stop("Unrecognised format for kernel")
}
| /R/kernel2d.R | no_license | mirca/spatstat | R | false | false | 3,438 | r | #'
#' kernel2d.R
#'
#' Two-dimensional smoothing kernels
#'
#' $Revision: 1.11 $ $Date: 2016/11/13 01:54:57 $
#'
.Spatstat.2D.KernelTable <- list(
#' table entries:
#' d = density of standardised kernel
#' sd = standard deviation of x coordinate, for standardised kernel
#' hw = halfwidth of support of standardised kernel
gaussian=list(
d = function(x,y, ...) { dnorm(x) * dnorm(y) },
sd = 1,
hw = 8,
symmetric = TRUE),
epanechnikov=list(
d = function(x,y, ...) { (2/pi) * pmax(1 - (x^2+y^2), 0) },
sd = 1/sqrt(6),
hw = 1,
symmetric = TRUE),
quartic=list(
d = function(x,y, ...) { (3/pi) * pmax(1 - (x^2+y^2), 0)^2 },
sd = 1/sqrt(8),
hw = 1,
symmetric = TRUE),
disc=list(
d = function(x,y,...) { (1/pi) * as.numeric(x^2 + y^2 <= 1) },
sd = 1/2,
hw = 1,
symmetric = TRUE)
)
validate2Dkernel <- function(kernel, fatal=TRUE) {
if(is.character(match2DkernelName(kernel))) return(TRUE)
if(is.im(kernel) || is.function(kernel)) return(TRUE)
if(!fatal) return(FALSE)
if(is.character(kernel))
stop(paste("Unrecognised choice of kernel", sQuote(kernel),
paren(paste("options are",
commasep(sQuote(names(.Spatstat.2D.KernelTable)))))),
call.=FALSE)
stop(paste("kernel should be a character string,",
"a pixel image, or a function (x,y)"),
call.=FALSE)
}
match2DkernelName <- function(kernel) {
if(!is.character(kernel) || length(kernel) != 1) return(NULL)
nama <- names(.Spatstat.2D.KernelTable)
m <- pmatch(kernel, nama)
if(is.na(m)) return(NULL)
return(nama[m])
}
lookup2DkernelInfo <- function(kernel) {
validate2Dkernel(kernel)
kernel <- match2DkernelName(kernel)
if(is.null(kernel)) return(NULL)
return(.Spatstat.2D.KernelTable[[kernel]])
}
evaluate2Dkernel <- function(kernel, x, y, sigma=NULL, varcov=NULL, ...,
                             scalekernel=is.character(kernel)) {
  ## Evaluate a 2D smoothing kernel at the points (x, y).
  ## 'kernel' may be a kernel name, a function(x, y, ...), or a pixel image.
  ## If scalekernel=TRUE the kernel is rescaled to bandwidth 'sigma'
  ## (or to the covariance matrix 'varcov').
  info <- lookup2DkernelInfo(kernel)
  if(scalekernel) {
    ## kernel adjustment factor
    sdK <- if(is.character(kernel)) info$sd else 1
    ## transform coordinates to x',y' such that kerfun(x', y')
    ## yields density k(x,y) at desired bandwidth
    if(is.null(varcov)) {
      rr <- sdK/sigma
      x <- x * rr
      y <- y * rr
      const <- rr^2        # Jacobian of the isotropic rescaling
    } else {
      SinvH <- matrixinvsqrt(varcov)
      rSinvH <- sdK * SinvH
      XY <- cbind(x, y) %*% rSinvH
      x <- XY[,1]
      y <- XY[,2]
      const <- det(rSinvH) # Jacobian of the linear transformation
    }
  }
  ## now evaluate kernel
  if(is.character(kernel)) {
    kerfun <- info$d
    result <- kerfun(x, y)
    if(scalekernel)
      result <- const * result
    return(result)
  }
  if(is.function(kernel)) {
    ## Forward only those '...' arguments the user's function accepts.
    argh <- list(...)
    if(length(argh) > 0)
      argh <- argh[names(argh) %in% names(formals(kernel))]
    result <- do.call(kernel, append(list(x, y), argh))
    if(anyNA(result))
      stop("NA values returned from kernel function")
    if(length(result) != length(x))
      stop("Kernel function returned the wrong number of values")
    if(scalekernel)
      result <- const * result
    return(result)
  }
  if(is.im(kernel)) {
    result <- kernel[list(x=x, y=y)]
    if(anyNA(result))
      stop("Domain of kernel image is not large enough")
    ## BUG FIX: the rescaling formerly sat *after* return(result) and was
    ## unreachable, so scalekernel=TRUE was silently ignored for image
    ## kernels. Scale first, then return.
    if(scalekernel)
      result <- const * result
    return(result)
  }
  # never reached
  stop("Unrecognised format for kernel")
}
|
# This code is written for Predicting Fraud Txn
# Author: Zhe Consulting
# July 2017

# Load all the required libraries here
library(party)            # NOTE(review): not referenced below -- confirm it is needed
library(rpart)            # decision trees
library(rpart.plot)
library(randomForest)     # random forests
library(rattle)           # fancyRpartPlot()
library(caTools)          # sample.split()
library(InformationValue) # optimalCutoff()
library(ROCR)             # prediction()/performance() for ROC, AUC, lift
library(e1071)            # svm()/tune()
# clear the memory
# NOTE(review): rm(list=ls()) plus an absolute setwd() make the script
# machine-specific; consider removing for portability.
rm(list=ls())
# load data
setwd("D:/Vaibhav/Zhe Consulting/Real Time Fraud Detection - Suraj PPH/FinalDelivery")
train <- read.csv('DataSetToBeUsed.csv', header = T)
# create training and validation data from given data
set.seed(88)   # fixed seed so the 75/25 split is reproducible
split <- sample.split(train$Fraud, SplitRatio = 0.75)
# get training and test data
trainData <- subset(train, split == TRUE)
testData <- subset(train, split == FALSE)
# The EDA is already done
# Here we build the logistic Regression model
#------------------------------------------------------------------
#------------------------------------------------------------------
##### logistic regression model (all predictors)
model <- glm (Fraud ~ ., data = trainData, family = binomial)
summary(model)
# In-sample predicted probabilities.
# NOTE(review): naming this 'predict' shadows stats::predict(); calls such
# as predict(model, ...) still work (R skips non-function objects when
# resolving the function in a call) but the reuse is confusing.
predict <- predict(model, type = 'response')
#predict
# confusion matrix on Train Data Set (0.5 probability cutoff)
table(trainData$Fraud, predict > 0.5)
predictTest <- predict(model,newdata=testData, type = 'response')
# confusion matrix on Test Data Set
table(testData$Fraud, predictTest > 0.5)
#0.27
# NOTE(review): optimalCutoff() is given TRAIN actuals with TEST
# predictions (mismatched vectors) -- probably should pair testData$Fraud
# with predictTest (or trainData$Fraud with predict); confirm the intent.
optCutOff <- optimalCutoff(trainData$Fraud, predictTest)[1]
#optCutOff
#0.1256477
# Confusion matrices again at the tuned cutoff.
table(trainData$Fraud, predict > 0.1256)
table(testData$Fraud, predictTest > 0.1256)
# again with significant variables only
model <- glm (Fraud ~ Status.of.existing.checking.account
+Duration.in.months
+Savings.account.bonds
+Present.employment.since
+Other.installment.plans
, data = trainData, family = binomial)
summary(model)
predict <- predict(model, type = 'response')
# confusion matrix
table(trainData$Fraud, predict > 0.5)
predictTest <- predict(model,newdata=testData, type = 'response')
table(testData$Fraud, predictTest > 0.5)
optCutOff <- optimalCutoff(trainData$Fraud, predictTest)[1]
optCutOff
#0.18874
table(trainData$Fraud, predict > 0.1887)
table(testData$Fraud, predictTest > 0.1887)
# ROCR Curve: train (green) vs test (red)
ROCRpred <- prediction(predict, trainData$Fraud)
ROCRperf <- performance(ROCRpred, 'tpr','fpr')
plot(ROCRperf, col = "green", lty=2)
ROCRpredTest <- prediction(predictTest, testData$Fraud)
ROCRperfTest <- performance(ROCRpredTest, 'tpr','fpr')
plot(ROCRperfTest, col = "red", lty=2)
# Overlay both ROC curves on one device.
plot(ROCRperf,main="ROC Curve",col="green")
par(new=TRUE)
plot(ROCRperfTest,col="red",lty=2)
legend("bottomright", c("Train","Test"), cex=0.8,
col=c("green","red"),lty=1:2)
# AUC (area under the ROC curve) for train and test
performance (ROCRpred,"auc")
performance (ROCRpredTest,"auc")
# Lift Chart
lifttrain<- performance(ROCRpred, "lift", "rpp")
lifttest<- performance(ROCRpredTest, "lift", "rpp")
# Plot the lift chart (train green, test red, overlaid).
plot(lifttrain, col="green", lty=1, xlab="Caseload (%)", add=FALSE,main="Lift Chart")
par(new=TRUE)
plot(lifttest, col="red", lty=2, xlab="Caseload (%)", add=FALSE)
legend("topright",c("Train","Test"), cex=0.8,
col=c("green","red"),lty=1:2)
# logistic regression ends here
#-------------------------------------------------------------------------------------------
### apply decision tree here (CART via rpart)
rtree_fit <- rpart(Fraud ~ .,method= "class", data= trainData)
print(rtree_fit)
printcp(rtree_fit) # display the results
plotcp(rtree_fit) # visualize cross-validation results
summary(rtree_fit) # detailed summary of splits
# plot tree
plot(rtree_fit, uniform=TRUE,
main="Classification Tree for Fraud Detection")
text(rtree_fit, use.n=TRUE, all=TRUE, cex=.8)
fancyRpartPlot(rtree_fit)
# Confusion matrices on train and test.
predTrain <- predict(rtree_fit, newdata=trainData, type="class")
pred.probTrain <- predict(rtree_fit, newdata=trainData, type="prob")
table(predTrain, trainData$Fraud)
predTest <- predict(rtree_fit, newdata=testData, type="class")
pred.probTest <- predict(rtree_fit, newdata=testData, type="prob")
table(predTest, testData$Fraud)
# significant variables only
# NOTE(review): unlike the first fit, method="class" is not given here, so
# rpart infers the method from the type of Fraud -- confirm Fraud is a
# factor if a classification tree is intended.
rtree_fit <- rpart(Fraud ~ Status.of.existing.checking.account
+Duration.in.months
+Savings.account.bonds
+Purpose
, trainData)
printcp(rtree_fit) # display the results
plotcp(rtree_fit) # visualize cross-validation results
summary(rtree_fit) # detailed summary of splits
# plot tree
plot(rtree_fit, uniform=TRUE,
main="Classification Tree for Fraud Detection")
text(rtree_fit, use.n=TRUE, all=TRUE, cex=.8)
fancyRpartPlot(rtree_fit)
# Confusion matrices on train and test for the reduced tree.
pred <- predict(rtree_fit, newdata=trainData, type="class")
pred.prob <- predict(rtree_fit, newdata=trainData, type="prob")
table(pred, trainData$Fraud)
pred <- predict(rtree_fit, newdata=testData, type="class")
pred.prob <- predict(rtree_fit, newdata=testData, type="prob")
table(pred, testData$Fraud)
########################
#######################Decion Tree Ends Here
#random forest starts here
# Fit a random forest on all predictors (randomForest defaults: ntree = 500).
randomModel <- randomForest(Fraud~., data=trainData)
# In-sample class predictions, class probabilities and confusion matrix.
pred <- predict(randomModel, newdata=trainData, type="class")
pred.prob <- predict(randomModel, newdata=trainData, type="prob")
summary(randomModel)
table(pred, trainData$Fraud)
# Out-of-sample predictions and confusion matrix on the held-out test set.
pred <- predict(randomModel, newdata=testData, type="class")
pred.prob <- predict(randomModel, newdata=testData, type="prob")
table(pred, testData$Fraud)
##making with some selected variables
# Refit on a reduced predictor set with a 0.7 voting cutoff.
# NOTE: the correct argument name is 'ntree' (singular); the original code
# passed 'ntrees=1000', which randomForest silently absorbs via '...',
# so the intended 1000-tree forest was never actually grown.
randomModel <- randomForest(Fraud~Status.of.existing.checking.account
                            +Credit.History+Savings.account.bonds
                            +Present.employment.since
                            +Other.installment.plans, data=trainData
                            ,ntree=1000, cutoff = c(0.7,1-0.7))
pred <- predict(randomModel, newdata=trainData, type="class")
pred.prob <- predict(randomModel, newdata=trainData, type="prob")
table(pred, trainData$Fraud)
pred <- predict(randomModel, newdata=testData, type="class")
pred.prob <- predict(randomModel, newdata=testData, type="prob")
table(pred, testData$Fraud)
##########################################
##Develop SVM Model
svm.model <- svm(Fraud ~ ., data = trainData, cost = 100, gamma = 1)
svm.pred <- predict(svm.model, trainData)
y<-trainData$Fraud
table(svm.pred,y)
svm.pred <- predict(svm.model, testData)
y<-testData$Fraud
table(svm.pred,y)
#tune the model
tuneResult <- tune(svm, Fraud ~ ., data = trainData,
ranges = list(epsilon = seq(0,1,0.1), cost = 2^(2:9))
)
print(tuneResult)
plot(tuneResult)
tunedModel <- tuneResult$best.model
tunedModelY <- predict(tunedModel, trainData)
y<-trainData$Fraud
table(tunedModelY,y)
tunedModelY <- predict(tunedModel, testData)
y<-testData$Fraud
table(tunedModelY,y)
################################
| /PredictionCode.R | no_license | srikar156/R-Codes | R | false | false | 7,017 | r |
#This code is written for Predicting Fraud Txn
#Author: Zhe Consulting
#July 2017
#Load all the required librarires here
library(party)
library(rpart)
library(rpart.plot)
library(randomForest)
library(rattle)
library(caTools)
library(InformationValue)
library(ROCR)
library(e1071)
#clear the memory
rm(list=ls())
#load data
setwd("D:/Vaibhav/Zhe Consulting/Real Time Fraud Detection - Suraj PPH/FinalDelivery")
train <- read.csv('DataSetToBeUsed.csv', header = T)
#create training and validation data from given data
set.seed(88)
split <- sample.split(train$Fraud, SplitRatio = 0.75)
#get training and test data
trainData <- subset(train, split == TRUE)
testData <- subset(train, split == FALSE)
#The EDA is already done
#Here we build the logistic Regression model
#------------------------------------------------------------------
#------------------------------------------------------------------
#####logistic regression model
model <- glm (Fraud ~ ., data = trainData, family = binomial)
summary(model)
predict <- predict(model, type = 'response')
#predict
#confusion matrix on Train Data Set
table(trainData$Fraud, predict > 0.5)
predictTest <- predict(model,newdata=testData, type = 'response')
#confusion matrix on Test Data Set
table(testData$Fraud, predictTest > 0.5)
#0.27
optCutOff <- optimalCutoff(trainData$Fraud, predictTest)[1]
#optCutOff
#0.1256477
table(trainData$Fraud, predict > 0.1256)
table(testData$Fraud, predictTest > 0.1256)
#again with significant variables
model <- glm (Fraud ~ Status.of.existing.checking.account
+Duration.in.months
+Savings.account.bonds
+Present.employment.since
+Other.installment.plans
, data = trainData, family = binomial)
summary(model)
predict <- predict(model, type = 'response')
#confusion matrix
table(trainData$Fraud, predict > 0.5)
predictTest <- predict(model,newdata=testData, type = 'response')
table(testData$Fraud, predictTest > 0.5)
optCutOff <- optimalCutoff(trainData$Fraud, predictTest)[1]
optCutOff
#0.18874
table(trainData$Fraud, predict > 0.1887)
table(testData$Fraud, predictTest > 0.1887)
#ROCR Curve
ROCRpred <- prediction(predict, trainData$Fraud)
ROCRperf <- performance(ROCRpred, 'tpr','fpr')
plot(ROCRperf, col = "green", lty=2)
ROCRpredTest <- prediction(predictTest, testData$Fraud)
ROCRperfTest <- performance(ROCRpredTest, 'tpr','fpr')
plot(ROCRperfTest, col = "red", lty=2)
plot(ROCRperf,main="ROC Curve",col="green")
par(new=TRUE)
plot(ROCRperfTest,col="red",lty=2)
legend("bottomright", c("Train","Test"), cex=0.8,
col=c("green","red"),lty=1:2)
#AUC
performance (ROCRpred,"auc")
performance (ROCRpredTest,"auc")
#Lift Chart
# Plot the lift chart.
lifttrain<- performance(ROCRpred, "lift", "rpp")
lifttest<- performance(ROCRpredTest, "lift", "rpp")
# Plot the lift chart.
plot(lifttrain, col="green", lty=1, xlab="Caseload (%)", add=FALSE,main="Lift Chart")
par(new=TRUE)
plot(lifttest, col="red", lty=2, xlab="Caseload (%)", add=FALSE)
legend("topright",c("Train","Test"), cex=0.8,
col=c("green","red"),lty=1:2)
#logistic regression ends here
#-------------------------------------------------------------------------------------------
###apply decision tree here
rtree_fit <- rpart(Fraud ~ .,method= "class", data= trainData)
print(rtree_fit)
printcp(rtree_fit) # display the results
plotcp(rtree_fit) # visualize cross-validation results
summary(rtree_fit) # detailed summary of splits
# plot tree
plot(rtree_fit, uniform=TRUE,
main="Classification Tree for Fraud Detection")
text(rtree_fit, use.n=TRUE, all=TRUE, cex=.8)
fancyRpartPlot(rtree_fit)
predTrain <- predict(rtree_fit, newdata=trainData, type="class")
pred.probTrain <- predict(rtree_fit, newdata=trainData, type="prob")
table(predTrain, trainData$Fraud)
predTest <- predict(rtree_fit, newdata=testData, type="class")
pred.probTest <- predict(rtree_fit, newdata=testData, type="prob")
table(predTest, testData$Fraud)
#significant variables
rtree_fit <- rpart(Fraud ~ Status.of.existing.checking.account
+Duration.in.months
+Savings.account.bonds
+Purpose
, trainData)
printcp(rtree_fit) # display the results
plotcp(rtree_fit) # visualize cross-validation results
summary(rtree_fit) # detailed summary of splits
# plot tree
plot(rtree_fit, uniform=TRUE,
main="Classification Tree for Fraud Detection")
text(rtree_fit, use.n=TRUE, all=TRUE, cex=.8)
fancyRpartPlot(rtree_fit)
pred <- predict(rtree_fit, newdata=trainData, type="class")
pred.prob <- predict(rtree_fit, newdata=trainData, type="prob")
table(pred, trainData$Fraud)
pred <- predict(rtree_fit, newdata=testData, type="class")
pred.prob <- predict(rtree_fit, newdata=testData, type="prob")
table(pred, testData$Fraud)
########################
#######################Decion Tree Ends Here
#random forest starts here
# Fit a random forest on all predictors (randomForest defaults: ntree = 500).
randomModel <- randomForest(Fraud~., data=trainData)
# In-sample class predictions, class probabilities and confusion matrix.
pred <- predict(randomModel, newdata=trainData, type="class")
pred.prob <- predict(randomModel, newdata=trainData, type="prob")
summary(randomModel)
table(pred, trainData$Fraud)
# Out-of-sample predictions and confusion matrix on the held-out test set.
pred <- predict(randomModel, newdata=testData, type="class")
pred.prob <- predict(randomModel, newdata=testData, type="prob")
table(pred, testData$Fraud)
##making with some selected variables
# Refit on a reduced predictor set with a 0.7 voting cutoff.
# NOTE: the correct argument name is 'ntree' (singular); the original code
# passed 'ntrees=1000', which randomForest silently absorbs via '...',
# so the intended 1000-tree forest was never actually grown.
randomModel <- randomForest(Fraud~Status.of.existing.checking.account
                            +Credit.History+Savings.account.bonds
                            +Present.employment.since
                            +Other.installment.plans, data=trainData
                            ,ntree=1000, cutoff = c(0.7,1-0.7))
pred <- predict(randomModel, newdata=trainData, type="class")
pred.prob <- predict(randomModel, newdata=trainData, type="prob")
table(pred, trainData$Fraud)
pred <- predict(randomModel, newdata=testData, type="class")
pred.prob <- predict(randomModel, newdata=testData, type="prob")
table(pred, testData$Fraud)
##########################################
##Develop SVM Model
svm.model <- svm(Fraud ~ ., data = trainData, cost = 100, gamma = 1)
svm.pred <- predict(svm.model, trainData)
y<-trainData$Fraud
table(svm.pred,y)
svm.pred <- predict(svm.model, testData)
y<-testData$Fraud
table(svm.pred,y)
#tune the model
tuneResult <- tune(svm, Fraud ~ ., data = trainData,
ranges = list(epsilon = seq(0,1,0.1), cost = 2^(2:9))
)
print(tuneResult)
plot(tuneResult)
tunedModel <- tuneResult$best.model
tunedModelY <- predict(tunedModel, trainData)
y<-trainData$Fraud
table(tunedModelY,y)
tunedModelY <- predict(tunedModel, testData)
y<-testData$Fraud
table(tunedModelY,y)
################################
|
## This pair of functions is used to cache the inverse of a square matrix.
## They allow to save time by looking up cache rather than recomputing.
## The function "makeCacheMatrix" creates a cacheable matrix used as an input
## into the "cacheSolve" function.
makeCacheMatrix <- function(x = matrix()) {
  ## Holds the memoised inverse; NULL means "not computed yet".
  inv_cache <- NULL
  ## Return four closures sharing this environment: set/get for the
  ## matrix itself, setinv/getinv for its cached inverse. Replacing
  ## the matrix via set() invalidates the cache. The resulting list
  ## is the expected input to cacheSolve().
  list(
    set = function(y) {
      x <<- y
      inv_cache <<- NULL
    },
    get = function() x,
    setinv = function(inverse) inv_cache <<- inverse,
    getinv = function() inv_cache
  )
}
## "cacheSolve" function uses the output of the previous "makeCacheMatrix"
## function to compute the inverse of the original matrix.
cacheSolve <- function(x, ...) {
  ## Return the inverse of the matrix wrapped by 'x' (a list built by
  ## makeCacheMatrix), computing it at most once: a cached result is
  ## returned immediately; otherwise solve() is called, stored in the
  ## cache, and returned. Extra arguments are forwarded to solve().
  cached <- x$getinv()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  result <- solve(x$get(), ...)
  x$setinv(result)
  result
}
| /cachematrix.R | no_license | eegupova/ProgrammingAssignment2 | R | false | false | 1,233 | r | ## This pair of functions is used to cache the inverse of a square matrix.
## They allow to save time by looking up cache rather than recomputing.
## The function "makeCacheMatrix" creates a cacheable matrix used as an input
## into the "cacheSolve" function.
## Construct a special "matrix" object able to cache its inverse.
## Returns a list of closures (set/get/setinv/getinv) that share one
## environment, suitable as input to cacheSolve().
makeCacheMatrix <- function(x = matrix()) {
## 'm' holds the cached inverse; NULL means not yet computed.
m <- NULL
## Set value of matrix; also invalidates any stale cached inverse.
set <- function(y) {
x <<- y
m <<- NULL
}
## Get value of matrix
get <- function() x
## Set value of inverse matrix (store it in the shared cache 'm')
setinv <- function(solve) m <<- solve
## Get value of inverse matrix (NULL if nothing has been cached)
getinv <- function() m
## Create input for cacheSolve (cacheable matrix)
list(set = set, get = get,
setinv = setinv,
getinv = getinv)
}
## "cacheSolve" function uses the output of the previous "makeCacheMatrix"
## function to compute the inverse of the original matrix.
## Compute (or retrieve) the inverse of a cacheable matrix created by
## makeCacheMatrix(). Extra arguments in '...' are forwarded to solve().
cacheSolve <- function(x, ...) {
m <- x$getinv()
## Check if the inverse matrix is available; if so, skip recomputation
if(!is.null(m)) {
message("getting cached data")
return(m)
}
## Compute the inverse if not available, then store it in the cache
data <- x$get()
m <- solve(data, ...)
x$setinv(m)
m
}
|
## first R script for getting and cleaning data | /first_r_script.R | no_license | niloynibhochaudhury/coursera_getting_and_cleaning_data | R | false | false | 47 | r | ## first R script for getting and cleaning data |
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 48766
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 48765
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 48765
c
c Input Parameter (command line, file):
c input filename QBFLIB/Sauer-Reimer/ITC99/b22_PR_4_90.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 16791
c no.of clauses 48766
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 48765
c
c QBFLIB/Sauer-Reimer/ITC99/b22_PR_4_90.qdimacs 16791 48766 E1 [1] 0 332 16401 48765 RED
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Sauer-Reimer/ITC99/b22_PR_4_90/b22_PR_4_90.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 719 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 48766
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 48765
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 48765
c
c Input Parameter (command line, file):
c input filename QBFLIB/Sauer-Reimer/ITC99/b22_PR_4_90.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 16791
c no.of clauses 48766
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 48765
c
c QBFLIB/Sauer-Reimer/ITC99/b22_PR_4_90.qdimacs 16791 48766 E1 [1] 0 332 16401 48765 RED
|
###########################################################################
# #
# Computing Lab Project: Churn prediction project #
# #
###########################################################################
#-------------------------------------------------------------------------#
#
# December 18
# Jordi
#
#-------------------------------------------------------------------------#
# Load libraries.
library("caret")
library("randomForest")
library("pROC")
library("doMC")
# The feature-engineering and plotting code below also uses dplyr
# (select, %>%), purrr (pmap_dbl) and ggplot2, none of which are
# attached by the packages above; attach them explicitly so the
# script is self-contained.
library("dplyr")
library("purrr")
library("ggplot2")
# Load source files.
source("./Scripts/io.R")
source("./Scripts/classDistribution.R")
# Set seed.
set.seed(321)
# Set path to data set.
path <- "./Data/churn.csv"
# Set column names
column.names <- c("Cookie", "Time_Stamp",
"Active_d1","Active_d2","Active_d3","Active_d4","Active_d5", "Active_d6", "Active_d7",
"Dwell_d1", "Dwell_d2","Dwell_d3","Dwell_d4","Dwell_d5","Dwell_d6","Dwell_d7",
"Sessions_d1","Sessions_d2","Sessions_d3","Sessions_d4","Sessions_d5","Sessions_d6","Sessions_d7",
"Views_d1","Views_d2","Views_d3","Views_d4","Views_d5","Views_d6","Views_d7",
"Clicks_d1", "Clicks_d2", "Clicks_d3","Clicks_d4","Clicks_d5","Clicks_d6","Clicks_d7",
"Cluster")
# Count number of lines in input file.
lines <- readChar(path, file.info(path)$size)
total.rows <- length(gregexpr("\n",lines)[[1L]])
rm(lines)
# Load data.
df <- load.data(p.path = path,
p.header = TRUE,
p.dec = ".",
p.sep = ",",
p.blank.lines.skip = TRUE,
p.stringsAsFactors = FALSE,
p.comment.char = "",
p.initial.rows = 100,
p.total.nrows = total.rows,
p.column.names = column.names,
p.id = FALSE)
# Start stop-watch
start.time <- as.numeric(as.POSIXct(Sys.time()))
# Remove missing cases.
df <- df[complete.cases(df),]
# Tranform the class values to factors.
df$Cluster <- as.factor(df$Cluster)
# Feature engineering.
# Get mean and standard deviation for each user.
# I tried to engineer the features in order to check if the user is a heavy/light user via mean
# and i he/she is a consistent/sporadic user via the standard deviation. The reason behind is that
# a heavy user with consistent use will be less likely to churn.
# -----------------------------------------------------------------------------------------------------#
# I also tried to capture the trend of the user over the 7 observed days fitting a line to value vs day and
# returning the line slope. However the results were worse than the ones obtained with the finally
# selected features. See an example below
#lin_reg <- function(d1,d2,d3,d4,d5,d6,d7){
# x <- c(1:7)
# y <- c(d1,d2,d3,d4,d5,d6,d7)
# model <- lm(y~x)
#
# return(summary(model)$coefficients[2])
#}
#
#df$dwell_trend <- pmap_dbl(select(df,Dwell_d1:Dwell_d7),~lin_reg(..1,..2,..3,..4,..5,..6,..7))
# -----------------------------------------------------------------------------------------------------#
df$active_sd <- pmap_dbl(select(df,Active_d1:Active_d7),~sd(c(..1,..2,..3,..4,..5,..6,..7)))
df$active_m <- pmap_dbl(select(df,Active_d1:Active_d7),~mean(c(..1,..2,..3,..4,..5,..6,..7)))
df$dwell_m <- pmap_dbl(select(df,Dwell_d1:Dwell_d7),~mean(c(..1,..2,..3,..4,..5,..6,..7)))
df$dwell_sd <- pmap_dbl(select(df,Dwell_d1:Dwell_d7),~sd(c(..1,..2,..3,..4,..5,..6,..7)))
df$sessions_m <- pmap_dbl(select(df,Sessions_d1:Sessions_d7),~mean(c(..1,..2,..3,..4,..5,..6,..7)))
df$sessions_sd <- pmap_dbl(select(df,Sessions_d1:Sessions_d7),~sd(c(..1,..2,..3,..4,..5,..6,..7)))
df$views_m <- pmap_dbl(select(df,Views_d1:Views_d7),~mean(c(..1,..2,..3,..4,..5,..6,..7)))
df$views_sd <- pmap_dbl(select(df,Views_d1:Views_d7),~sd(c(..1,..2,..3,..4,..5,..6,..7)))
df$clicks_m <- pmap_dbl(select(df,Clicks_d1:Clicks_d7),~mean(c(..1,..2,..3,..4,..5,..6,..7)))
df$clicks_sd <- pmap_dbl(select(df,Clicks_d1:Clicks_d7),~sd(c(..1,..2,..3,..4,..5,..6,..7)))
# Set the class.
# length(df) is the index of the LAST column; after appending the 10
# engineered features the target "Cluster" sits at class - 10 (this is
# what the classDistribution() calls below rely on).
class <- length(df)
# Perform stratified bootstrapping (keep 70% of observations for training and 30% for testing).
# Stratify on the class label itself. The original code passed df[,class],
# i.e. the last engineered column (clicks_sd), so the split was not
# stratified by the outcome at all.
indices.training <- createDataPartition(df$Cluster,
                                        times = 1,
                                        p = .70,
                                        list = FALSE)
# Get training and test set.
training <- df[indices.training[,1],]
test <- df[-indices.training[,1],]
# Print class distribution.
cat("\n\n")
classDistribution(dataset.name = "df",
table = df,
class = class-10)
classDistribution(dataset.name = "training",
table = training,
class = class-10)
classDistribution(dataset.name = "test",
table = test,
class = class-10)
# Setting the formula to introduce to the xgBoost.
formula <- as.formula(paste("Cluster ~", paste(names(df)[(ncol(df)-8):ncol(df)], collapse = '+')))
# Tuned parameters (Tunning grid commented)
xgboostGrid <- expand.grid(nrounds = 6,
#nrounds = seq(5,10,1),
eta = 0.2,
#eta = c(0.1,0.2),
gamma = 1,
#gamma = c(0.8,0.9,1),
colsample_bytree = 0.7,
#colsample_bytree = c(0.5,0.7,1.0),
max_depth = 4,
#max_depth = c(2,4,6),
min_child_weight = 5,
#min_child_weight = seq(1,8,1),
subsample = 1)
xgboostControl = trainControl(method = "cv",
number = 10,
classProbs = TRUE,
search = "grid",
allowParallel = TRUE)
# Number of threads for parallelised computing
i = 4
# Model training
model.training <- train(formula,
data = training,
method = "xgbTree",
trControl = xgboostControl,
tuneGrid = xgboostGrid,
verbose = TRUE,
metric = "Accuracy",
nthread = i)
# Stop stop-watch
end.time <- as.numeric(as.POSIXct(Sys.time()))
print(c("Elapsed time: ",round(end.time-start.time,4), "seconds"),quote=FALSE)
# Print training results
model.training
model.training$results
# Predicting test fold class (30% remaining data)
model.test.pred <- predict(model.training,
test,
type = "raw",
norm.votes = TRUE)
# Predicting test fold probability (30% remaining data)
model.test.prob <- predict(model.training,
test,
type = "prob",
norm.votes = TRUE)
# Print confusion matrix
performance <- confusionMatrix(model.test.pred, test$Cluster)
print(performance)
print(performance$byClass)
# Compute AUC for the model.
model.roc <- plot.roc(predictor = model.test.prob[,2],
test$Cluster,
levels = rev(levels(test$Cluster)),
legacy.axes = FALSE,
percent = TRUE,
mar = c(4.1,4.1,0.2,0.3),
identity.col = "red",
identity.lwd = 2,
smooth = FALSE,
ci = TRUE,
print.auc = TRUE,
auc.polygon.border=NULL,
lwd = 2,
cex.lab = 2.0,
cex.axis = 1.6,
font.lab = 2,
font.axis = 2,
col = "blue")
# Compute and plot confidence interval for ROC curve
ciobj <- ci.se(model.roc, specificities = seq(0, 100, 5))
plot(ciobj, type = "shape", col = "#1c61b6AA")
plot(ci(model.roc, of = "thresholds", thresholds = "best"))
| /Jordi_Morera_Churn_Prediction.r | no_license | jrdmose/BGSE_Computing_Lab | R | false | false | 8,246 | r | ###########################################################################
# #
# Computing Lab Project: Churn prediction project #
# #
###########################################################################
#-------------------------------------------------------------------------#
#
# December 18
# Jordi
#
#-------------------------------------------------------------------------#
# Load libraries.
library("caret")
library("randomForest")
library("pROC")
library("doMC")
# The feature-engineering and plotting code below also uses dplyr
# (select, %>%), purrr (pmap_dbl) and ggplot2, none of which are
# attached by the packages above; attach them explicitly so the
# script is self-contained.
library("dplyr")
library("purrr")
library("ggplot2")
# Load source files.
source("./Scripts/io.R")
source("./Scripts/classDistribution.R")
# Set seed.
set.seed(321)
# Set path to data set.
path <- "./Data/churn.csv"
# Set column names
column.names <- c("Cookie", "Time_Stamp",
"Active_d1","Active_d2","Active_d3","Active_d4","Active_d5", "Active_d6", "Active_d7",
"Dwell_d1", "Dwell_d2","Dwell_d3","Dwell_d4","Dwell_d5","Dwell_d6","Dwell_d7",
"Sessions_d1","Sessions_d2","Sessions_d3","Sessions_d4","Sessions_d5","Sessions_d6","Sessions_d7",
"Views_d1","Views_d2","Views_d3","Views_d4","Views_d5","Views_d6","Views_d7",
"Clicks_d1", "Clicks_d2", "Clicks_d3","Clicks_d4","Clicks_d5","Clicks_d6","Clicks_d7",
"Cluster")
# Count number of lines in input file.
lines <- readChar(path, file.info(path)$size)
total.rows <- length(gregexpr("\n",lines)[[1L]])
rm(lines)
# Load data.
df <- load.data(p.path = path,
p.header = TRUE,
p.dec = ".",
p.sep = ",",
p.blank.lines.skip = TRUE,
p.stringsAsFactors = FALSE,
p.comment.char = "",
p.initial.rows = 100,
p.total.nrows = total.rows,
p.column.names = column.names,
p.id = FALSE)
# Start stop-watch
start.time <- as.numeric(as.POSIXct(Sys.time()))
# Remove missing cases.
df <- df[complete.cases(df),]
# Tranform the class values to factors.
df$Cluster <- as.factor(df$Cluster)
# Feature engineering.
# Get mean and standard deviation for each user.
# I tried to engineer the features in order to check if the user is a heavy/light user via mean
# and i he/she is a consistent/sporadic user via the standard deviation. The reason behind is that
# a heavy user with consistent use will be less likely to churn.
# -----------------------------------------------------------------------------------------------------#
# I also tried to capture the trend of the user over the 7 observed days fitting a line to value vs day and
# returning the line slope. However the results were worse than the ones obtained with the finally
# selected features. See an example below
#lin_reg <- function(d1,d2,d3,d4,d5,d6,d7){
# x <- c(1:7)
# y <- c(d1,d2,d3,d4,d5,d6,d7)
# model <- lm(y~x)
#
# return(summary(model)$coefficients[2])
#}
#
#df$dwell_trend <- pmap_dbl(select(df,Dwell_d1:Dwell_d7),~lin_reg(..1,..2,..3,..4,..5,..6,..7))
# -----------------------------------------------------------------------------------------------------#
df$active_sd <- pmap_dbl(select(df,Active_d1:Active_d7),~sd(c(..1,..2,..3,..4,..5,..6,..7)))
df$active_m <- pmap_dbl(select(df,Active_d1:Active_d7),~mean(c(..1,..2,..3,..4,..5,..6,..7)))
df$dwell_m <- pmap_dbl(select(df,Dwell_d1:Dwell_d7),~mean(c(..1,..2,..3,..4,..5,..6,..7)))
df$dwell_sd <- pmap_dbl(select(df,Dwell_d1:Dwell_d7),~sd(c(..1,..2,..3,..4,..5,..6,..7)))
df$sessions_m <- pmap_dbl(select(df,Sessions_d1:Sessions_d7),~mean(c(..1,..2,..3,..4,..5,..6,..7)))
df$sessions_sd <- pmap_dbl(select(df,Sessions_d1:Sessions_d7),~sd(c(..1,..2,..3,..4,..5,..6,..7)))
df$views_m <- pmap_dbl(select(df,Views_d1:Views_d7),~mean(c(..1,..2,..3,..4,..5,..6,..7)))
df$views_sd <- pmap_dbl(select(df,Views_d1:Views_d7),~sd(c(..1,..2,..3,..4,..5,..6,..7)))
df$clicks_m <- pmap_dbl(select(df,Clicks_d1:Clicks_d7),~mean(c(..1,..2,..3,..4,..5,..6,..7)))
df$clicks_sd <- pmap_dbl(select(df,Clicks_d1:Clicks_d7),~sd(c(..1,..2,..3,..4,..5,..6,..7)))
# Set the class.
# length(df) is the index of the LAST column; after appending the 10
# engineered features the target "Cluster" sits at class - 10 (this is
# what the classDistribution() calls below rely on).
class <- length(df)
# Perform stratified bootstrapping (keep 70% of observations for training and 30% for testing).
# Stratify on the class label itself. The original code passed df[,class],
# i.e. the last engineered column (clicks_sd), so the split was not
# stratified by the outcome at all.
indices.training <- createDataPartition(df$Cluster,
                                        times = 1,
                                        p = .70,
                                        list = FALSE)
# Get training and test set.
training <- df[indices.training[,1],]
test <- df[-indices.training[,1],]
# Print class distribution.
cat("\n\n")
classDistribution(dataset.name = "df",
table = df,
class = class-10)
classDistribution(dataset.name = "training",
table = training,
class = class-10)
classDistribution(dataset.name = "test",
table = test,
class = class-10)
# Setting the formula to introduce to the xgBoost.
formula <- as.formula(paste("Cluster ~", paste(names(df)[(ncol(df)-8):ncol(df)], collapse = '+')))
# Tuned parameters (Tunning grid commented)
xgboostGrid <- expand.grid(nrounds = 6,
#nrounds = seq(5,10,1),
eta = 0.2,
#eta = c(0.1,0.2),
gamma = 1,
#gamma = c(0.8,0.9,1),
colsample_bytree = 0.7,
#colsample_bytree = c(0.5,0.7,1.0),
max_depth = 4,
#max_depth = c(2,4,6),
min_child_weight = 5,
#min_child_weight = seq(1,8,1),
subsample = 1)
xgboostControl = trainControl(method = "cv",
number = 10,
classProbs = TRUE,
search = "grid",
allowParallel = TRUE)
# Number of threads for parallelised computing
i = 4
# Model training
model.training <- train(formula,
data = training,
method = "xgbTree",
trControl = xgboostControl,
tuneGrid = xgboostGrid,
verbose = TRUE,
metric = "Accuracy",
nthread = i)
# Stop stop-watch
end.time <- as.numeric(as.POSIXct(Sys.time()))
print(c("Elapsed time: ",round(end.time-start.time,4), "seconds"),quote=FALSE)
# Print training results
model.training
model.training$results
# Predicting test fold class (30% remaining data)
model.test.pred <- predict(model.training,
test,
type = "raw",
norm.votes = TRUE)
# Predicting test fold probability (30% remaining data)
model.test.prob <- predict(model.training,
test,
type = "prob",
norm.votes = TRUE)
# Print confusion matrix
performance <- confusionMatrix(model.test.pred, test$Cluster)
print(performance)
print(performance$byClass)
# Compute AUC for the model.
model.roc <- plot.roc(predictor = model.test.prob[,2],
test$Cluster,
levels = rev(levels(test$Cluster)),
legacy.axes = FALSE,
percent = TRUE,
mar = c(4.1,4.1,0.2,0.3),
identity.col = "red",
identity.lwd = 2,
smooth = FALSE,
ci = TRUE,
print.auc = TRUE,
auc.polygon.border=NULL,
lwd = 2,
cex.lab = 2.0,
cex.axis = 1.6,
font.lab = 2,
font.axis = 2,
col = "blue")
# Compute and plot confidence interval for ROC curve
ciobj <- ci.se(model.roc, specificities = seq(0, 100, 5))
plot(ciobj, type = "shape", col = "#1c61b6AA")
plot(ci(model.roc, of = "thresholds", thresholds = "best"))
|
# ---- Libraries ----
library(tidyr)
library(dplyr)
library(ggplot2) ; theme_set(theme_bw())
library(devtools)
library(profvis)
# Load (and download if needed) my libraries
use.local.GI <- TRUE
if(use.local.GI){
library(GI, lib.loc = './lib')
}
if(!use.local.GI){
lib.GI <- try(library(GI))
if(class(lib.GI)=='try-error'){
install_github("davidchampredon/GI",
build_vignettes = FALSE, force=TRUE)
library(GI)
}
}
lib.seminribm <- try(library(seminribm))
if(class(lib.seminribm)=='try-error'){
install_github("davidchampredon/seminribm",
build_vignettes = FALSE, force=TRUE)
library(seminribm)
}
set.seed(1234)
# ---- Generate data from an individual-based model ----
horizon <- 100  # simulation horizon (calendar time units)
popSize <- 2e4 # warning: above 5e3 takes long time if nE and nI large!
initInfectious <- 2  # number of initially infectious individuals
R0 <- 3.0  # basic reproduction number used to generate the data
latent_mean <- 2  # mean latent period
infectious_mean <- 4  # mean infectious period
nE <- 1  # number of latent (E) sub-compartments
nI <- 1  # number of infectious (I) sub-compartments
calc_WIW_Re <- FALSE  # skip who-infected-whom / effective-R computation
doExact <- FALSE  # use tau-leap approximation rather than exact simulation
timeStepTauLeap <- 0.1  # tau-leap time step
rnd_seed <- 1234  # RNG seed passed to the simulator
# True mean generation interval implied by the parameters above
# (latent mean plus a term from the Erlang-split infectious period).
gi.mean.true <- latent_mean + (nI+1)/2/nI * infectious_mean
target.val <- c(R0, gi.mean.true)  # true (R0, mean GI) pair the fit should recover
# See: ?seminribm_run
sim <- seminribm_run(horizon,
popSize ,
R0 ,
latent_mean ,
infectious_mean,
nE ,
nI ,
initInfectious ,
doExact ,
timeStepTauLeap,
rnd_seed ,
calc_WIW_Re)
# Retrieve backward generation intervals from simulation:
gi.true <- sim$GI_bck
at <- sim$acq_times
df <- data.frame(at=at, gi.true=gi.true, rt = round(at))
df2 <- df %>%
group_by(rt) %>%
summarise(bb = mean(gi.true))
# ---- Sampled imperfectly observed GIs ----
# We assume that :
# - not all GIs are observed
# - there is an observation error
# Sample the GIs observed:
prop.observed <- 0.999 # proportion of GIs observed
n.obs <- min(length(gi.true), round(prop.observed*popSize) ) # number of bckwd GIs observed
idx.obs <- sample(x = 1:length(gi.true), size = n.obs, replace = FALSE)
gi.obs.true <- gi.true[idx.obs]
at.obs <- at[idx.obs]
# Add observation error:
sd.err <- 0.001
gi.obs <- rnorm(n = n.obs, mean = gi.obs.true, sd = sd.err)
gi.obs[gi.obs<1] <- 1
gi.obs <- round(gi.obs)
df.gi.obs <- data.frame(t = at.obs,
gi.obs.true = gi.obs.true,
gi.obs = gi.obs)
df.gi.obs <- df.gi.obs[order(df.gi.obs$t),]
# Visualize 'true' vs observed:
plot(gi.obs.true, gi.obs, las=1,
main = 'observation error for backward GI')
grid()
abline(a = 0,b=1, lty=2)
# plot observed GIs as function of infectee's acquisition time:
df.b <- data.frame(at.obs, gi.obs) %>%
mutate(rt = round(at.obs))
df.b2 <- df.b %>%
group_by(rt) %>%
summarise(gi.obs_mean = mean(gi.obs))
df.b2%>%
ggplot(aes(x=rt,y=gi.obs_mean)) +
geom_point(data = df.b,
aes(x=at.obs,y=gi.obs),
alpha=0.15, colour="orange",
pch=16,size=4) +
geom_abline(slope = 1, intercept = 0, linetype=2, colour = "grey")+
geom_line(size=1.5) +
geom_point(size=2) +
ggtitle('Backward Generation Intervals (line: daily mean)')+
xlab('calendar time')+ylab('days')
# ---- Fit model from GIs ----
fxd.prm.resude <- list(horizon=horizon,
alpha=0,
kappa=0,
GI_span = 20,
GI_var = 5,
GI_type = 'pois',
dt = 1.0)
fxd.prm.seminr <- list(horizon=horizon,
nE=nE,
nI=nI,
latent_mean=latent_mean,
dt = 0.5)
R0.rng <- seq(1.5, 6, by=0.25)
gimean.rng <- seq(2, 10, by=0.5)
CI <- 0.90
do.plot <- TRUE
# See: ?gi_ct_fit
if(FALSE){
fit.resude <- gi_ct_fit(t.obs = at.obs,
gi.obs = gi.obs,
model.epi = 'resude',
fxd.prm = fxd.prm.resude,
R0.rng = R0.rng,
gimean.rng = gimean.rng,
CI = CI,
do.plot = do.plot,
R0.true = R0,
gimean.true = gi.mean.true)
}
if(TRUE){
# STOPPED HERE
# Tidy everything.
# Fit with SEmInR only, as the data were generated with this model.
# (although there is a link with ReSuDe)
# Make a function of all this, with the goal of increasing
# 'fit.first.n.obs' to show how the fit is better as it increases.
#
# Also, try to speed up further... (although running on HPC should be quick).
cal.t.bck <- 1:horizon
z.true.det <- GI.seminr(latent_mean = latent_mean,
infectious_mean = infectious_mean,
R0 = R0,
nE = nE,
nI = nI,
cal.times.fwdbck = cal.t.bck,
horizon = horizon,
calc.fwd = FALSE)
# Fit on only the n first observations:
fit.first.n.obs <- 15 # (30: 8min on 4 cpus)
gi.1st.obs <- df.gi.obs[df.gi.obs$t< fit.first.n.obs,]
gi.1st.obs$t.obs <- round(gi.1st.obs$t)
# Fitting to observed bckwd GIs:
fit.seminr <- gi_ct_fit(t.obs = gi.1st.obs$t.obs, # at.obs,cal.t.bck, #
gi.obs = gi.1st.obs$gi.obs, # gi.obs,round(z.true.det$bck.mean), #
model.epi = 'seminr',
fxd.prm = fxd.prm.seminr,
R0.rng = R0.rng,
gimean.rng = gimean.rng,
CI = CI,
R0.true = R0,
gimean.true = gi.mean.true,
do.plot = TRUE)
z.fit <- GI.seminr(latent_mean = latent_mean,
infectious_mean = infectious_mean,
R0 = fit.seminr$R0.best,
nE = nE,
nI = nI,
cal.times.fwdbck = cal.t.bck,
horizon = horizon,
calc.fwd = FALSE)
plot(x=df$at,
y=df$gi.true,
col=rgb(0,0.3,0,0.1),
las=1,
xlab='calendar time',
ylab='Backward GI',
log='y')
lines(df2$rt, df2$bb, lwd=3)
lines(cal.t.bck, z.true.det$bck.mean, col='black', lwd=2, lty=2)
lines(cal.t.bck, z.fit$bck.mean, col=rgb(0,0,1,0.5), lwd=6, lty=1)
abline(v=fit.first.n.obs, lty=4)
}
if(FALSE){ # TESTING....
profvis(expr =
{
z <- nllk(R0 = 3,
gimean = 3,
t.obs = cal.t.bck, #at.obs,
gi.obs = round(z.true.det$bck.mean), #gi.obs,
model.epi = 'seminr',
fxd.prm = fxd.prm.seminr)
})
}
if(0){
library(bbmle)
fr2 <- gi_ct_fit_mle2(t.obs = gi.1st.obs$t.obs, #at.obs,
gi.obs = gi.1st.obs$gi.obs, #gi.obs,
model.epi = 'seminr',
fxd.prm = fxd.prm.seminr,
start.optim = c(R0=2, gimean=5),
CI = CI,
do.plot = FALSE)
fr2 <- gi_ct_fit_mle2(t.obs = at.obs,
gi.obs = gi.obs,
model.epi = 'resude',
fxd.prm = fxd.prm.resude,
start.optim = c(R0=2, gimean=5),
CI = CI,
do.plot = FALSE)
} | /test-fit-ibm.R | no_license | davidchampredon/GI-dev | R | false | false | 7,879 | r |
# ---- Libraries ----
library(tidyr)
library(dplyr)
library(ggplot2) ; theme_set(theme_bw())
library(devtools)
library(profvis)
# Load (and download if needed) my libraries
use.local.GI <- TRUE
if(use.local.GI){
library(GI, lib.loc = './lib')
}
if(!use.local.GI){
lib.GI <- try(library(GI))
if(class(lib.GI)=='try-error'){
install_github("davidchampredon/GI",
build_vignettes = FALSE, force=TRUE)
library(GI)
}
}
lib.seminribm <- try(library(seminribm))
if(class(lib.seminribm)=='try-error'){
install_github("davidchampredon/seminribm",
build_vignettes = FALSE, force=TRUE)
library(seminribm)
}
set.seed(1234)
# ---- Generate data from an individual-based model ----
horizon <- 100
popSize <- 2e4 # warning: above 5e3 takes long time if nE and nI large!
initInfectious <- 2
R0 <- 3.0
latent_mean <- 2
infectious_mean <- 4
nE <- 1
nI <- 1
calc_WIW_Re <- FALSE
doExact <- FALSE
timeStepTauLeap <- 0.1
rnd_seed <- 1234
gi.mean.true <- latent_mean + (nI+1)/2/nI * infectious_mean
target.val <- c(R0, gi.mean.true)
# See: ?seminribm_run
sim <- seminribm_run(horizon,
popSize ,
R0 ,
latent_mean ,
infectious_mean,
nE ,
nI ,
initInfectious ,
doExact ,
timeStepTauLeap,
rnd_seed ,
calc_WIW_Re)
# Retrieve backward generation intervals from simulation:
gi.true <- sim$GI_bck
at <- sim$acq_times
df <- data.frame(at=at, gi.true=gi.true, rt = round(at))
df2 <- df %>%
group_by(rt) %>%
summarise(bb = mean(gi.true))
# ---- Sampled imperfectly observed GIs ----
# We assume that :
# - not all GIs are observed
# - there is an observation error
# Sample the GIs observed:
prop.observed <- 0.999 # proportion of GIs observed
# NOTE(review): the target count is prop.observed * popSize, not
# prop.observed * length(gi.true); whenever fewer than ~popSize individuals
# were infected, effectively *all* GIs end up observed — confirm intent.
n.obs <- min(length(gi.true), round(prop.observed*popSize) ) # number of bckwd GIs observed
# Random subset of the simulated GIs (sampled without replacement):
idx.obs <- sample(x = 1:length(gi.true), size = n.obs, replace = FALSE)
gi.obs.true <- gi.true[idx.obs]
at.obs <- at[idx.obs]
# Add observation error:
# Gaussian noise on each observed GI, floored at 1 and rounded to whole days.
sd.err <- 0.001
gi.obs <- rnorm(n = n.obs, mean = gi.obs.true, sd = sd.err)
gi.obs[gi.obs<1] <- 1
gi.obs <- round(gi.obs)
# One row per observed GI, ordered by the infectee's acquisition time:
df.gi.obs <- data.frame(t = at.obs,
                        gi.obs.true = gi.obs.true,
                        gi.obs = gi.obs)
df.gi.obs <- df.gi.obs[order(df.gi.obs$t),]
# Visualize 'true' vs observed:
plot(gi.obs.true, gi.obs, las=1,
     main = 'observation error for backward GI')
grid()
abline(a = 0,b=1, lty=2)
# plot observed GIs as function of infectee's acquisition time:
df.b <- data.frame(at.obs, gi.obs) %>%
  mutate(rt = round(at.obs))
# Daily mean of the observed backward GI:
df.b2 <- df.b %>%
  group_by(rt) %>%
  summarise(gi.obs_mean = mean(gi.obs))
# Scatter of raw observations (orange) with the daily-mean line on top:
df.b2%>%
  ggplot(aes(x=rt,y=gi.obs_mean)) +
  geom_point(data = df.b,
             aes(x=at.obs,y=gi.obs),
             alpha=0.15, colour="orange",
             pch=16,size=4) +
  geom_abline(slope = 1, intercept = 0, linetype=2, colour = "grey")+
  geom_line(size=1.5) +
  geom_point(size=2) +
  ggtitle('Backward Generation Intervals (line: daily mean)')+
  xlab('calendar time')+ylab('days')
# ---- Fit model from GIs ----
# Fixed (not estimated) parameters for the two candidate epidemic models
# implemented in the 'GI' package:
# - 'resude': the ReSuDe model (see the GI package documentation)
# - 'seminr': SEmInR compartmental model (matches the data-generating IBM)
fxd.prm.resude <- list(horizon=horizon,
                       alpha=0,
                       kappa=0,
                       GI_span = 20,
                       GI_var = 5,
                       GI_type = 'pois',
                       dt = 1.0)
fxd.prm.seminr <- list(horizon=horizon,
                       nE=nE,
                       nI=nI,
                       latent_mean=latent_mean,
                       dt = 0.5)
# Grid of candidate (R0, mean GI) values explored by the fit,
# and the confidence level reported:
R0.rng <- seq(1.5, 6, by=0.25)
gimean.rng <- seq(2, 10, by=0.5)
CI <- 0.90
do.plot <- TRUE
# See: ?gi_ct_fit
# Disabled: ReSuDe fit on the full set of observations (kept for reference).
if(FALSE){
  fit.resude <- gi_ct_fit(t.obs = at.obs,
                          gi.obs = gi.obs,
                          model.epi = 'resude',
                          fxd.prm = fxd.prm.resude,
                          R0.rng = R0.rng,
                          gimean.rng = gimean.rng,
                          CI = CI,
                          do.plot = do.plot,
                          R0.true = R0,
                          gimean.true = gi.mean.true)
}
# Main experiment: fit the SEmInR model to the *early* backward generation
# intervals only, then compare the fitted backward-GI curve with the
# simulated truth.
if(TRUE){
  # STOPPED HERE
  # Tidy everything.
  # Fit with SEmInR only, as the data were generated with this model.
  # (although there is a link with ReSuDe)
  # Make a function of all this, with the goal of increasing
  # 'fit.first.n.obs' to show how the fit is better as it increases.
  #
  # Also, try to speed up further... (although running on HPC should be quick).
  # Calendar times at which the backward GI is evaluated:
  cal.t.bck <- 1:horizon
  # Deterministic "truth": backward GI curve implied by the true parameters
  # (calc.fwd = FALSE => only the backward GI is computed).
  z.true.det <- GI.seminr(latent_mean = latent_mean,
                          infectious_mean = infectious_mean,
                          R0 = R0,
                          nE = nE,
                          nI = nI,
                          cal.times.fwdbck = cal.t.bck,
                          horizon = horizon,
                          calc.fwd = FALSE)
  # Fit on only the n first observations:
  fit.first.n.obs <- 15 # (30: 8min on 4 cpus)
  gi.1st.obs <- df.gi.obs[df.gi.obs$t< fit.first.n.obs,]
  gi.1st.obs$t.obs <- round(gi.1st.obs$t)
  # Fitting to observed bckwd GIs:
  fit.seminr <- gi_ct_fit(t.obs = gi.1st.obs$t.obs, # at.obs,cal.t.bck, #
                          gi.obs = gi.1st.obs$gi.obs, # gi.obs,round(z.true.det$bck.mean), #
                          model.epi = 'seminr',
                          fxd.prm = fxd.prm.seminr,
                          R0.rng = R0.rng,
                          gimean.rng = gimean.rng,
                          CI = CI,
                          R0.true = R0,
                          gimean.true = gi.mean.true,
                          do.plot = TRUE)
  # Backward GI curve implied by the *fitted* R0:
  z.fit <- GI.seminr(latent_mean = latent_mean,
                     infectious_mean = infectious_mean,
                     R0 = fit.seminr$R0.best,
                     nE = nE,
                     nI = nI,
                     cal.times.fwdbck = cal.t.bck,
                     horizon = horizon,
                     calc.fwd = FALSE)
  # Overlay: simulated GIs (faint points), their daily mean (solid line),
  # deterministic truth (dashed black) and the fitted curve (thick blue).
  plot(x=df$at,
       y=df$gi.true,
       col=rgb(0,0.3,0,0.1),
       las=1,
       xlab='calendar time',
       ylab='Backward GI',
       log='y')
  lines(df2$rt, df2$bb, lwd=3)
  lines(cal.t.bck, z.true.det$bck.mean, col='black', lwd=2, lty=2)
  lines(cal.t.bck, z.fit$bck.mean, col=rgb(0,0,1,0.5), lwd=6, lty=1)
  # Vertical marker: last calendar time used for fitting.
  abline(v=fit.first.n.obs, lty=4)
}
# Disabled scratch code, kept for development.
if(FALSE){ # TESTING....
  # Profile one evaluation of the negative log-likelihood to find hot spots.
  profvis(expr =
            {
              z <- nllk(R0 = 3,
                        gimean = 3,
                        t.obs = cal.t.bck, #at.obs,
                        gi.obs = round(z.true.det$bck.mean), #gi.obs,
                        model.epi = 'seminr',
                        fxd.prm = fxd.prm.seminr)
            })
}
# Disabled: alternative fits via bbmle::mle2 (gi_ct_fit_mle2).
# NOTE(review): the second call overwrites 'fr2' from the first one, so only
# the 'resude' result would survive if this block ran — confirm intent.
if(0){
  library(bbmle)
  fr2 <- gi_ct_fit_mle2(t.obs = gi.1st.obs$t.obs, #at.obs,
                        gi.obs = gi.1st.obs$gi.obs, #gi.obs,
                        model.epi = 'seminr',
                        fxd.prm = fxd.prm.seminr,
                        start.optim = c(R0=2, gimean=5),
                        CI = CI,
                        do.plot = FALSE)
  fr2 <- gi_ct_fit_mle2(t.obs = at.obs,
                        gi.obs = gi.obs,
                        model.epi = 'resude',
                        fxd.prm = fxd.prm.resude,
                        start.optim = c(R0=2, gimean=5),
                        CI = CI,
                        do.plot = FALSE)
}
# Fit a logistic regression (rms::lrm) and evaluate discrimination (ROC)
# on a held-out test set.
# The train/test split is roughly 66/34 by row order, but the boundary is
# pushed forward until it falls on an 'id' boundary so that no subject's
# rows are split across the two sets.
library(rms)
library(pROC)

# Read in the data and set up train/test sets
data <- read.table("../anontfmodel2_R_grouped.csv", header = TRUE, sep = ",")
#data<-data[with(data, order(id)), ]
splitIndex <- trunc(nrow(data) * 0.66)
# Advance the split point while the row after it belongs to the same id.
# The bound check prevents an NA comparison (and an error) when the
# trailing rows all share a single id.
while (splitIndex < nrow(data) &&
       data$id[splitIndex] == data$id[splitIndex + 1]) {
  splitIndex <- splitIndex + 1
}
trainset <- data[1:splitIndex, ]
testset <- data[(splitIndex + 1):nrow(data), ]

# Build the logistic regression model and calculate ROC curve.
# 'data = trainset' replaces the previous attach(trainset), which left the
# training columns on the search path for the rest of the session.
optimal <- lrm(Class ~ age + HR + SPO2_perc + SPO2_R + SD_HR + SD_SPO2_perc +
                 SD_SPO2_R + HR_SPO2 + COSEn + LDS + Density_Score +
                 BP_S + BP_D + BP_M,
               data = trainset, y = TRUE, x = TRUE)
#optimal<-robcov(optimal,cluster=id)
print(optimal)

# Linear predictor ("lp") on the held-out rows, then the ROC of prob
# against the binary outcome:
prob <- predict(optimal, newdata = testset, type = "lp")
testset$prob <- prob
ROC <- roc(Class == 1 ~ prob, data = testset)
plot(ROC)
| /LogisticRegression.R.REMOTE.11936.R | no_license | skarusala/MachineLearningScripts | R | false | false | 780 | r | library(rms)
library(pROC)
# Read in the data and set up train/test sets
data<-read.table("../anontfmodel2_R_grouped.csv",header=T,sep=",")
#data<-data[with(data, order(id)), ]
# ~66/34 chronological split; the boundary is advanced so that all rows
# sharing the same 'id' stay on the same side of the split.
splitIndex <- trunc(nrow(data)*0.66)
# NOTE(review): if the trailing rows all share one id, splitIndex+1 runs
# past the last row and the comparison yields NA, which errors the loop.
while(data$id[splitIndex]==data$id[splitIndex+1]) {
splitIndex <- splitIndex + 1
}
trainset <- data[1:splitIndex,]
testset <- data[(splitIndex+1):nrow(data),]
# NOTE(review): attach() puts the training columns on the search path for
# the whole session; passing 'data = trainset' to lrm() would be safer.
attach(trainset)
# Build the logistic regression model and calculate ROC curve
optimal<-lrm(Class ~ age+ HR + SPO2_perc+ SPO2_R+ SD_HR+ SD_SPO2_perc+ SD_SPO2_R+ HR_SPO2+ COSEn+ LDS+ Density_Score+ BP_S+ BP_D+ BP_M,
y=T,x=T)
#optimal<-robcov(optimal,cluster=id)
print(optimal)
# Linear predictor on the held-out set, then ROC of prob vs. the outcome:
prob=predict(optimal,type=c("lp"),testset)
testset$prob = prob
ROC <- roc(Class==1 ~ prob, data = testset)
plot(ROC)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/resHelpStrFns.R
\name{tStr}
\alias{tStr}
\title{Constructs an APA formatted string for a t-value.}
\usage{
tStr(param, modObj, ...)
}
\arguments{
\item{param}{The parameter of interest. Can be "int" as a shortcut
for "(Intercept)".}
\item{modObj}{Either an \code{lm} or \code{summary.lm} (faster) object.}
\item{...}{Additional options.}
}
\description{
Constructs an APA formatted string for a t-value.
}
| /man/tStr.Rd | no_license | Cmell/ResultsHelper | R | false | true | 476 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/resHelpStrFns.R
\name{tStr}
\alias{tStr}
\title{Constructs an APA formatted string for a t-value.}
\usage{
tStr(param, modObj, ...)
}
\arguments{
\item{param}{The parameter of interest. Can be "int" as a shortcut
for "(Intercept)".}
\item{modObj}{Either an \code{lm} or \code{summary.lm} (faster) object.}
\item{...}{Options.}
}
\description{
Constructs an APA formatted string for a t-value.
}
|
# IDs_gen.R
# Author: Nicolas Loucheu - ULB (nicolas.loucheu@ulb.ac.be)
# Date: 28th April 2020
# Generate a csv file linking the sample name with the IDs shown in figures
#
# Positional arguments (plain commandArgs(), so indices include the
# interpreter's own arguments): args[6] = input csv whose row names are the
# sample identifiers, args[7] = output folder.
args <- commandArgs()
new_sample <- read.csv(args[6], row.names = 1)
out_folder <- args[7]
# Getting IDs from cell proportions file (row names = sample identifiers)
IDs <- rownames(new_sample)
# seq_along() is safe for an empty input, unlike c(1:length(IDs)),
# which yields c(1, 0) when there are no rows.
IDs_index <- seq_along(IDs)
# Making a dataframe pairing each ID with its numeric index
linkin <- data.frame(IDs, IDs_index)
# Saving that dataframe
write.csv(linkin, paste0(out_folder, "/link_IDs.csv"))
| /bin/IDs_gen.R | no_license | nicolasloucheu/SampleAnalysis | R | false | false | 527 | r | # IDs_gen.R
# Author: Nicolas Loucheu - ULB (nicolas.loucheu@ulb.ac.be)
# Date: 28th April 2020
# Generate a csv file linking the sample name with the IDs shown in figures
# Positional arguments (plain commandArgs()): args[6] = input csv whose row
# names are the sample identifiers, args[7] = output folder.
args <- commandArgs()
new_sample <- read.csv(args[6], row.names = 1)
out_folder <- args[7]
#Getting IDs from cell proportions file
IDs <- rownames(new_sample)
# NOTE(review): seq_along(IDs) would be safer than c(1:length(IDs)),
# which yields c(1, 0) for an empty input.
IDs_index <- c(1:length(IDs))
#Making a dataframe with IDs and Sample names
linkin <- data.frame(IDs, IDs_index)
# Saving that dataframe
write.csv(linkin, paste0(out_folder, "/link_IDs.csv"))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.