content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
### modify of plotHistogram ###
# Script version of CONICSmat-style plotHistogram(): z-scores and clips the
# CNV posterior matrix `pmat`, draws a pheatmap with optional patient/celltype
# annotation columns, prints the left-to-right cluster IDs, and yields the
# cluster assignment per cell.
# Inputs `l` (posterior matrix) and `suva_expr` (expression matrix) must exist
# in the calling environment -- they are not defined here.
# NOTE(review): the function header is commented out, so this is a function
# body pasted at top level; the bare `else` branches and the final `return()`
# will not parse when sourced as a file -- presumably meant for interactive,
# line-by-line execution; confirm.
#Pre-set
pmat <- l
expmat <- suva_expr
zscoreThreshold=4
patients=NULL
celltypes=NULL
clusters=2
#function (pmat,expmat,clusters,zscoreThreshold=4,patients=NULL,celltypes=NULL){
# z-score columns of pmat and clip symmetrically at +/- t
t=zscoreThreshold
pmat=scale(pmat)
if (max(pmat)>t){
  pmat[which(pmat>t)]=t
  pmat[which(pmat<(-t))]=(-t)
}
else {
  # no value reaches the threshold: stretch so the smaller of the two
  # extremes maps onto t, then clip
  mx=min(max(pmat),abs(min(pmat)))
  sc=t/mx
  pmat=pmat*sc
  pmat[which(pmat>t)]=t
  pmat[which(pmat<(-t))]=(-t)
}
# heatmap with cells as columns; annotation depends on which of
# patients/celltypes were supplied
if(is.null(patients) & is.null(celltypes)){
  p=pheatmap::pheatmap(t(pmat),cluster_rows=F, cutree_cols = clusters, col=squash::bluered(100),gaps_col=50,show_colnames = F,clustering_distance_cols="euclidean")
}
else if (is.null(celltypes)){
  # annotate columns by patient
  patientcolors =data.frame(patients)
  rownames(patientcolors)=colnames(expmat)
  pmat=pmat[colnames(expmat),]
  p=pheatmap::pheatmap(t(pmat),cluster_rows=F,silent=F, cutree_cols = clusters, col=squash::bluered(100),gaps_col=50,annotation=patientcolors,show_colnames = F,clustering_distance_cols="euclidean")
}
else if(is.null(patients)){
  # annotate columns by cell type
  patientcolors =data.frame(celltypes)
  rownames(patientcolors)=colnames(expmat)
  pmat=pmat[colnames(expmat),]
  p=pheatmap::pheatmap(t(pmat),cluster_rows=F, cutree_cols = clusters, col=squash::bluered(100),gaps_col=50,annotation=patientcolors,show_colnames = F,clustering_distance_cols="euclidean")
}
else {
  # annotate columns by both cell type and patient
  patientcolors =data.frame(celltypes)
  patientcolors=cbind(patientcolors,patients)
  rownames(patientcolors)=colnames(expmat)
  pmat=pmat[colnames(expmat),]
  p=pheatmap::pheatmap(t(pmat),cluster_rows=F, cutree_cols = clusters, col=squash::bluered(100),gaps_col=50,annotation=patientcolors,show_colnames = F,clustering_distance_cols="euclidean")
}
# cluster IDs in the left-to-right order they appear in the dendrogram
ord=unique(cutree(p$tree_col, k = clusters)[p$tree_col[["order"]]])
numb=table(cutree(p$tree_col, k = clusters))[ord]
n=length(numb)
# print the ordered cluster IDs into the margin of the heatmap
grid::grid.text(expression(bold("Cluster ID \n(left to right)")),x=rep(0.92),y=c(n*0.03+0.03),gp=grid::gpar(fontsize=8, col="grey"))
grid::grid.text(ord,x=rep(0.92,length(numb)),y=seq(n*0.03, 0.03, -0.03),gp=grid::gpar(fontsize=8, col="grey"))
# cluster assignment per cell (NOTE(review): return() outside a function errors)
return(cutree(p$tree_col, k = clusters))
} | /Bioloical_characterization/COpy_Number_analysis_CONCISmat/Modify_plotHistogram.R | no_license | haojiang9999/HCA_script | R | false | false | 2,184 | r | ### modify of plotHistogram ###
#Pre-set
pmat <- l
expmat <- suva_expr
zscoreThreshold=4
patients=NULL
celltypes=NULL
clusters=2
#function (pmat,expmat,clusters,zscoreThreshold=4,patients=NULL,celltypes=NULL){
t=zscoreThreshold
pmat=scale(pmat)
if (max(pmat)>t){
pmat[which(pmat>t)]=t
pmat[which(pmat<(-t))]=(-t)
}
else {
mx=min(max(pmat),abs(min(pmat)))
sc=t/mx
pmat=pmat*sc
pmat[which(pmat>t)]=t
pmat[which(pmat<(-t))]=(-t)
}
if(is.null(patients) & is.null(celltypes)){
p=pheatmap::pheatmap(t(pmat),cluster_rows=F, cutree_cols = clusters, col=squash::bluered(100),gaps_col=50,show_colnames = F,clustering_distance_cols="euclidean")
}
else if (is.null(celltypes)){
patientcolors =data.frame(patients)
rownames(patientcolors)=colnames(expmat)
pmat=pmat[colnames(expmat),]
p=pheatmap::pheatmap(t(pmat),cluster_rows=F,silent=F, cutree_cols = clusters, col=squash::bluered(100),gaps_col=50,annotation=patientcolors,show_colnames = F,clustering_distance_cols="euclidean")
}
else if(is.null(patients)){
patientcolors =data.frame(celltypes)
rownames(patientcolors)=colnames(expmat)
pmat=pmat[colnames(expmat),]
p=pheatmap::pheatmap(t(pmat),cluster_rows=F, cutree_cols = clusters, col=squash::bluered(100),gaps_col=50,annotation=patientcolors,show_colnames = F,clustering_distance_cols="euclidean")
}
else {
patientcolors =data.frame(celltypes)
patientcolors=cbind(patientcolors,patients)
rownames(patientcolors)=colnames(expmat)
pmat=pmat[colnames(expmat),]
p=pheatmap::pheatmap(t(pmat),cluster_rows=F, cutree_cols = clusters, col=squash::bluered(100),gaps_col=50,annotation=patientcolors,show_colnames = F,clustering_distance_cols="euclidean")
}
ord=unique(cutree(p$tree_col, k = clusters)[p$tree_col[["order"]]])
numb=table(cutree(p$tree_col, k = clusters))[ord]
n=length(numb)
grid::grid.text(expression(bold("Cluster ID \n(left to right)")),x=rep(0.92),y=c(n*0.03+0.03),gp=grid::gpar(fontsize=8, col="grey"))
grid::grid.text(ord,x=rep(0.92,length(numb)),y=seq(n*0.03, 0.03, -0.03),gp=grid::gpar(fontsize=8, col="grey"))
return(cutree(p$tree_col, k = clusters))
} |
testlist <- list(id = NULL, id = NULL, booklet_id = c(8168473L, 2127314835L, 171177770L, -1938565335L, -1815221204L, 601253144L, -804651186L, 2094281728L, 860713787L, -971707632L, -1475044502L, 870040598L, -1182814578L, -1415711445L, 1901326755L, -1882837573L, 1340545259L, 1156041943L, 823641812L, -1106109928L, -1048157941L), person_id = c(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(dexterMST:::is_person_booklet_sorted,testlist)
str(result) | /dexterMST/inst/testfiles/is_person_booklet_sorted/AFL_is_person_booklet_sorted/is_person_booklet_sorted_valgrind_files/1615937940-test.R | no_license | akhikolla/updatedatatype-list1 | R | false | false | 826 | r | testlist <- list(id = NULL, id = NULL, booklet_id = c(8168473L, 2127314835L, 171177770L, -1938565335L, -1815221204L, 601253144L, -804651186L, 2094281728L, 860713787L, -971707632L, -1475044502L, 870040598L, -1182814578L, -1415711445L, 1901326755L, -1882837573L, 1340545259L, 1156041943L, 823641812L, -1106109928L, -1048157941L), person_id = c(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(dexterMST:::is_person_booklet_sorted,testlist)
str(result) |
# Lower confidence bound for the number of false null-hypotheses among a
# selected set, via closed testing with Fisher combination tests.
#
# Args:
#   p:      vector of p-values (no NAs allowed).
#   select: indices of the hypotheses of interest (default: all).
#   alpha:  significance level; the bound holds with confidence 1 - alpha.
#   silent: if FALSE, print a summary and return the bound invisibly.
# Returns: (1 - alpha)-confidence lower bound for the number of false
#   null-hypotheses among the selected ones.
pickFisher <- function(p, select = seq_along(p), alpha = 0.05, silent = FALSE) {
  if (any(is.na(p))) {
    stop("missing values in input p-values")
  }
  chosen <- p[select]
  if (any(is.na(chosen))) {
    stop("invalid selection or NA in p-values")
  }
  # Non-selected p-values; only those above the smallest selected p-value
  # matter for the worst-case intersections of the shortcut.
  # NOTE(review): setdiff() is value-based, so tied p-values collapse -- kept
  # exactly as in the original implementation.
  rest <- setdiff(p, chosen)
  rest <- rest[rest > min(chosen)]
  # Fisher combination terms (-2 log p), largest p-value first.
  fisher_sel <- -2 * log(sort(chosen, decreasing = TRUE))
  fisher_rest <- -2 * log(sort(rest, decreasing = TRUE))
  cum_sel <- cumsum(fisher_sel)
  cum_rest <- c(0, cumsum(fisher_rest))
  # Worst-case chi-squared critical value for each intersection size st.
  crit_val <- vapply(
    seq_along(cum_sel),
    function(st) {
      dfs <- 2 * (seq_along(cum_rest) - 1 + st)
      max(qchisq(1 - alpha, df = dfs, lower.tail = TRUE) - cum_rest)
    },
    numeric(1)
  )
  accepted <- max(c(0, which(cum_sel <= crit_val)))
  bound <- length(chosen) - accepted
  if (silent) {
    bound
  } else {
    cat(length(chosen), " hypotheses selected. At confidence level ", 1 - alpha, ":\n", sep = "")
    cat("False null-hypotheses >= ", bound, "; ", sep = "")
    cat("True null-hypotheses <= ", accepted, ".\n", sep = "")
    invisible(bound)
  }
}
# Incremental version of pickFisher(): for each k, a (1-alpha)-confidence
# lower bound for the number of false null-hypotheses among the first k
# selected/ordered hypotheses, optionally drawn as a step curve.
#
# Args:
#   p:      vector of p-values (no NAs).
#   select: subset of hypotheses; the curve then follows increasing p-values.
#   order:  explicit hypothesis order; mutually exclusive with select.
#   alpha:  significance level.
#   plot:   if TRUE, plot the curve and return the result invisibly.
# Returns: vector res with res[k] = lower bound among the first k hypotheses.
curveFisher <- function(p, select = seq_along(p), order, alpha=0.05, plot = TRUE) {
  if (any(is.na(p))) stop("missing values in input p-values")
  # exactly one of select/order may be supplied; select is the default mode
  selected <- !missing(select) || missing(order)
  ordered <- !missing(order)
  if (ordered & selected)
    stop("please provide either select or order, but not both")
  if (selected)
    ranks <- sort(rank(p, ties.method="first")[select])
  else
    ranks <- rank(p, ties.method="first")[order]
  if (length(ranks)==0 || any(is.na(ranks)))
    stop("invalid selection or NA in p-values")
  others <- setdiff(length(p):1, ranks)
  # Fisher combination terms -2*log(p), ascending in p
  lpv <- -2*log(sort(p))
  res <- numeric(length(ranks))
  # chi-squared critical values for every possible degrees-of-freedom count
  chisqs <- qchisq(1-alpha, df=2*1:length(lpv), lower.tail=TRUE)
  st <- 1
  # closed-testing shortcut: st-1 is the number of rejections certified so far
  for (ed in 1:length(ranks)) {
    if (selected)
      ins <- ranks[seq(st,ed)]
    else
      ins <- sort(ranks[1:ed])[seq(st,ed)]
    # non-selected hypotheses that can still enter a worst-case intersection
    outs <- setdiff(length(lpv):min(ins), ins)
    # worst-case critical value over all intersection augmentations
    cr.v <- max(chisqs[ed-st+1+0:length(outs)] - cumsum(c(0,lpv[outs])))
    rej <- (sum(lpv[ins]) >= cr.v)
    if (rej)
      st <- st+1
    res[ed] <- st-1
  }
  names(res) <- names(ranks)
  if (plot) {
    # dashed step: total count; solid step: lower bound on false nulls
    false <- c(0, res)
    xs <- 1:length(false)-.5
    tots <- 0:length(res)
    plot(xs, tots, type="S", xlab="number of hypotheses", ylab="number of false null-hypotheses", lty=2)
    lines(xs, false, type="S")
    legend("topleft", c(paste("false null-hypotheses (", 100*(1-alpha), "% conf.)", sep=""),"others"), lty=1:2)
    invisible(res)
  } else
    res
}
# S4 container for the result of hommelFast_old(): the p-values together with
# the precomputed jump representation of the function j(alpha) and the
# adjusted p-values derived from it.
# Uses the modern slots= argument; representation() is deprecated in the
# methods package. Class name and slot names/types are unchanged.
setClass("hommel_old",
  slots = c(
    pvalues = "numeric",   # input p-values (possibly named), original order
    jvalues = "numeric",   # values of j(alpha) at each jump
    jumpalpha = "numeric", # alpha values at which j(alpha) jumps
    adjusted = "numeric",  # adjusted p-values, aligned with pvalues
    simes = "logical"      # TRUE: Simes test; FALSE: Hommel's 1983 test
  )
)
# Generic accessor intended to return (adjusted) p-values of a "hommel_old"
# object; the corresponding method is kept below for reference but disabled.
setGeneric("pvalue", function(object, ...) standardGeneric("pvalue"))
#setMethod("pvalue", "hommel_old", function(object, indicator) {
#  if(missing(indicator))
#    indicator <- 1: length(object@adjusted)
#  object@adjusted[indicator]
#})
#own ceil function, needed because small rounding errors can be influential
# Ceiling that is robust to floating-point noise: a value that is numerically
# equal to an integer (per all.equal()'s default tolerance) is not bumped up
# to the next integer.
#
# Fix: the original used isTRUE(all.equal(y - 1, x)) inside ifelse(), which
# applies ONE whole-vector comparison to all elements -- correct only for
# scalar input. This version performs the same tolerance test per element,
# preserving scalar behavior and generalizing correctly to vectors.
#
# Args:
#   x: numeric vector.
# Returns: numeric vector: ceiling(x), with near-integer x mapped back down.
ceil <- function(x)
{
  y <- ceiling(x)
  for (i in seq_along(x)) {
    # if x[i] is within tolerance of the integer y[i]-1, undo the round-up
    if (isTRUE(all.equal(y[i] - 1, x[i]))) {
      y[i] <- y[i] - 1
    }
  }
  y
}
#own smaller function, needed because small rounding errors can be influential
# Tolerant strict "less than": x < y, except that values that are numerically
# equal (per all.equal()) never count as smaller. Shields the closed-testing
# shortcuts from floating-point rounding. Intended for scalar use.
smaller <- function(x, y)
{
  if (isTRUE(all.equal(x, y))) {
    return(FALSE)
  }
  x < y
}
#given matrix: find middle column, find minimum, store minimum in array, split in two matrices.
# for each sub matrix: give left, right, lower, upper bound.
#recursive procedure, stops when no splits are possible anymore
#function that finds minimum in certain column
# Minimum of column `col` of the implicit matrix with entries
# pvalues[r] / (r - col + 1), restricted to rows max(col, lower)..upper
# (only rows at or below the diagonal are defined).
#
# Args:
#   pvalues: sorted p-values (rows of the implicit matrix).
#   col:     column index to scan.
#   lower, upper: row range to consider.
# Returns: list(min = column minimum, location = row where it is attained).
findMin <- function(pvalues, col, lower, upper)
{
  first_row <- max(col, lower)
  rows <- first_row:upper
  ratios <- pvalues[rows] / (rows - col + 1)
  best <- which.min(ratios)
  list(min = ratios[best], location = rows[best])
}
# Divide-and-conquer over the columns of the implicit matrix
# pvalues[r] / (r - col + 1): take the middle column of [left, right], find
# its minimum with findMin(), then recurse on both halves. Because the row of
# the minimum is monotone in the column index, each half can restrict its row
# range to [lower, location] and [location, upper] respectively.
# Returns the per-column minima in column order.
# NOTE(review): this masks base::split() inside the package namespace.
split <- function(pvalues, left, right, lower, upper)
{
  if (left > right) {
    return(numeric(0))
  }
  mid <- left + (right - left) %/% 2
  best <- findMin(pvalues, mid, lower, upper)
  c(
    split(pvalues, left, mid - 1, lower, best$location),
    best$min,
    split(pvalues, mid + 1, right, best$location, upper)
  )
}
# calculates jumps in function j(alpha)
# Locate the jumps of the non-increasing step function j(alpha), given the
# vector of critical alphas (one per sorted p-value, as produced in
# hommelFast_old). A new jump is recorded whenever alphas[i] strictly exceeds
# the running maximum; smaller() is used so floating-point ties do not create
# spurious jumps.
# Returns: list(jumpalpha = alphas where j jumps, jvalues = n+1-i there),
#   both trimmed to the actual number of jumps.
jumps <- function(alphas) {
  n <- length(alphas)
  jumpalpha <- numeric(n) # alpha
  jvalues <- numeric(n) # j(alpha)
  index <- 1
  # the first alpha always starts a jump, with j = n
  jumpalpha[index] <- alphas[1]
  jvalues[index] <- n
  if(n >= 2)
  {
    for(i in 2:n) {
      #if(alphas[i] > jumpalpha[index]) #new jump
      if(smaller(jumpalpha[index],alphas[i])) #new jump
      {
        index <- index+1
        jumpalpha[index] <- alphas[i]
        jvalues[index] <- n+1-i
      }
    }
  }
  # drop the unused tail of the preallocated vectors
  list(jumpalpha=jumpalpha[1:index], jvalues=jvalues[1:index])
}
# now calculate smallest alphas for each pvalue by looping simultaneously over jumps and pvalues
# Adjusted p-values from the jump representation of j(alpha), computed by
# walking once through the (increasingly sorted) p-values and the jumps
# simultaneously: the adjusted value is pvalues[i] * j for the first jump
# interval whose alpha is not exceeded, floored by the lower end of that
# interval.
#
# Args:
#   pvalues:   p-values sorted increasingly.
#   jumpalpha: jump locations of j(alpha), from jumps().
#   jvalues:   corresponding j values, from jumps().
# Returns: adjusted p-values in the same (sorted) order.
adjpvalues <- function(pvalues, jumpalpha, jvalues)
{
  n <- length(pvalues)
  adjusted <- numeric(n)
  j <- 1
  for(i in 1:n)
  {
    # advance to the first interval where p*j does not overshoot its jump alpha
    while(j <= length(jvalues) && smaller(jumpalpha[j], pvalues[i] * jvalues[j]))
      j <- j + 1
    if(j <= length(jvalues))
      adjusted[i] <- pvalues[i] * jvalues[j]
    if(j > 1) #make sure alpha is in the right interval by taking the maximum over alpha found and the minimal alpha of the corresponding interval
      adjusted[i] <- max(adjusted[i], jumpalpha[j-1])
    #NB: if j > length(jvalues), i.e. j=length(jvalues) + 1, adjusted[i] = jumpalpha[length(jvalues)], i.e. the alpha on which j(alpha) goes to 0
  }
  adjusted
}
#hommel = F means standard Simes test, hommel = T means test of Hommel 1983
#hommel = F means standard Simes test, hommel = T means test of Hommel 1983
# Construct a hommel object from p-values (simes = TRUE: Simes local tests;
# FALSE: Hommel's 1983 test).
# NOTE(review): delegates to hommel(), which is not defined in this file --
# presumably hommel::hommel from the hommel package; verify the import. The
# local hommelFast_old() below is the retired in-package implementation.
hommelFast <- function(pvalues, simes = TRUE) {
  hommel(p=pvalues, simes=simes)
}
# Legacy construction of a "hommel_old" object.
# Sorts the p-values, computes the per-column minima of the implicit
# Simes/Hommel matrix via split(), rescales them by (n-i+1) (plus the
# harmonic factor for Hommel's 1983 test when simes = FALSE), derives the
# jump function j(alpha) and the adjusted p-values, and finally restores the
# original order and names.
#
# Args:
#   pvalues: vector of p-values, possibly named; must be non-empty, no NAs.
#   simes:   TRUE for Simes local tests, FALSE for Hommel (1983).
# Returns: a "hommel_old" S4 object.
hommelFast_old <- function(pvalues, simes = TRUE)
{
  if(any(is.na(pvalues)))
    stop("missing values in input p-values")
  if(length(pvalues)==0)
    stop("the pvalues vector is empty")
  #save names in right order
  names <- names(pvalues)
  perm <- order(pvalues)
  pvalues <- pvalues[perm]
  n <- length(pvalues)
  # per-column minima of pvalues[r]/(r - col + 1) over the whole matrix
  alphas <- split(pvalues, 1, n, 1, n) #full matrix as input
  # find true values for minima by multiplying with the right factor. this should depend on hommel1983 or simes
  sums <- rep(1,n)
  if(!simes) #extra factor needed
  {
    # sums[i] = 1 + 1/2 + ... + 1/i (harmonic scaling of Hommel 1983)
    if(n >= 2)
    {
      for(i in 2:n)
        sums[i] <- sums[i-1] + 1/i
    }
  }
  # critical alpha per column: minimum times (n - col + 1) (and scaling)
  alphas <- alphas*(n:1)*rev(sums)
  result <- jumps(alphas)
  jumpalpha <- result$jumpalpha
  jvalues <- result$jvalues
  adjusted <- adjpvalues(pvalues, jumpalpha, jvalues)
  # undo the sorting permutation
  adjusted[perm] <- adjusted
  pvalues[perm] <- pvalues #names are still in permuted order here
  names(pvalues) <- names
  names(adjusted) <- names
  # Hommel's test can yield adjusted values above 1; cap them
  if (!simes)
    adjusted <- pmin(adjusted, 1)
  out <- new("hommel_old",
    pvalues = pvalues,
    jvalues = jvalues,
    jumpalpha = jumpalpha,
    adjusted = adjusted,
    simes = simes)
  return(out)
}
# Count rejections using a union-find structure (path compression + union by
# rank). cats[i] is the "category" of the i-th p-value (the smallest slot it
# may occupy in a rejected intersection, from makeCats()); j is the current
# j(alpha). A hypothesis is rejected once its category's component reaches
# slot 1; otherwise its component is merged with the one directly below.
# Returns: cumulative number of rejections after each of the n hypotheses.
# NOTE(review): the local bindings `find`, `merge` and `rank` shadow
# base::merge and base::rank inside this function; the closures mutate
# parent/rank/leftmost in the enclosing frame via <<-.
findRejections <- function(cats, j) {
  n <- length(cats)
  #maximum value of category that can ever lead to a rejection
  maxcats <- min(n,max(cats))
  if(j>0)
    maxcats <- min(maxcats,j)
  numRej <- rep(0,n)
  # union-find state: parent links, union-by-rank heights, and the smallest
  # slot reachable within each component
  parent <- integer(maxcats)
  rank <- integer(maxcats)
  leftmost <- 1:maxcats
  # find root of cat's component, compressing the path as it goes
  find <- function(cat) {
    if(parent[cat] != 0)
      parent[cat] <<- find(parent[cat])
    else
      cat
  }
  # union by rank; keeps leftmost of the merged component up to date
  merge <- function(root1, root2) {
    if(rank[root1] > rank[root2])
      merge(root2, root1)
    if(rank[root1] <= rank[root2])
    {
      parent[root1] <<- root2
      leftmost[root2] <<- min(leftmost[root1], leftmost[root2])
      if(rank[root1] == rank[root2])
        rank[root2] <<- rank[root2]+1
    }
  }
  for(i in 1:n) {
    # carry the running count forward
    numRej[i] <- ifelse(i > 1, numRej[i-1], 0)
    if(cats[i]<= maxcats)
    {
      root1 <- find(cats[i])
      if(leftmost[root1] == 1)
        numRej[i] <- numRej[i]+1
      else
      {
        # not yet rejectable: merge with the component just below
        root2 <- find(leftmost[root1] - 1)
        merge(root1, root2)
      }
    }
  }
  numRej
}
# Assign each p-value its "category": the smallest rank r at which it can be
# rejected at level alpha inside an intersection of size j (Simes local test,
# or Hommel's 1983 test with harmonic scaling). j is j(alpha), looked up from
# the jump representation; category j+1 marks "never rejectable" and j == 0
# means everything could be rejected.
#
# Args:
#   jumpalpha, jvalues: jump representation of j(alpha), from jumps().
#   alpha:   significance level.
#   pvalues: p-values of the (selected/ordered) hypotheses.
#   simes:   TRUE for Simes, FALSE for Hommel (1983).
# Returns: list(cats = category per p-value, j = j(alpha)).
makeCats <- function(jumpalpha, jvalues, alpha, pvalues, simes)
{
  #find j for this alpha, can be made faster by using binary search for example if ever necessary
  i=1
  while(i <= length(jumpalpha) && !smaller(alpha, jumpalpha[i]))
    i <- i + 1
  #if i = length(jumpalpha) + 1, j must be 0. Elseway, j = jvalues[i]
  # (scalar ifelse: selects 0 when alpha lies beyond the last jump)
  j <- ifelse(i == (length(jumpalpha) + 1), 0, jvalues[i])
  #make categories, based on j
  cats <- rep(j+1,length(pvalues)) # j+1 is highest possible value
  if(j == 0) #everything could get rejected
    cats <- rep(1, length(pvalues))
  else
  {
    if(!simes)
      scaling <- sum(1/(1:j))
    for(i in 1:length(pvalues))
    {
      # smallest slot r with pvalues[i] <= r*alpha/(j*scaling), via robust ceil()
      if(!simes)
        r <- ceil(pvalues[i]*j*scaling/alpha)
      else
        r <- ceil(pvalues[i]*j/alpha)
      if(r < cats[i])
        cats[i] <- max(r,1) #to avoid problems with p-values that are exactly zero
    }
  }
  return(list(cats=cats, j=j))
}
# Lower confidence bound for the number of false null-hypotheses among
# `select`, delegating the closed-testing shortcut to the hommel package.
#
# Args:
#   hommel: object with slot @p -- presumably a hommel::hommel result; the
#           discoveries() generic is expected from that package (TODO confirm
#           the dependency; neither is defined in this file).
#   select: indices of the hypotheses of interest (default: all).
#   alpha:  significance level.
#   silent: if FALSE, print a summary and return the bound invisibly.
# Returns: (1-alpha)-confidence lower bound on the false null-hypotheses.
# NOTE(review): discoveries() is called before the missing-select default is
# filled in -- this relies on lazy evaluation / discoveries() handling a
# missing selection argument.
pickSimes <- function(hommel, select, alpha=0.05, silent=FALSE) {
  res <- discoveries(hommel, select, alpha=alpha)
  n <- length(hommel@p)
  if(missing(select))
    select <- 1:n
  pvalues <- hommel@p[select]
  if (!silent) {
    cat(length(pvalues), " hypotheses selected. At confidence level ", 1-alpha, ":\n", sep="")
    cat("False null-hypotheses >= ", res, "; ", sep="")
    cat("True null-hypotheses <= ", length(pvalues) - res, ".\n", sep="")
    invisible(res)
  } else
    res
}
# Legacy counterpart of pickSimes(), built on the in-file helpers
# makeCats()/findRejections() and a precomputed "hommel_old" object.
# NOTE(review): this definition is overwritten by a second pickSimes_old()
# (p-value based interface) further down in this file; after sourcing the
# whole file only the later definition is visible.
#
# Args:
#   hommel: a "hommel_old" object from hommelFast_old().
#   select: indices of the hypotheses of interest (default: all).
#   alpha:  significance level.
#   silent: if FALSE, print a summary and return the bound invisibly.
# Returns: lower confidence bound on the number of false null-hypotheses.
pickSimes_old <- function(hommel, select, alpha=0.05, silent=FALSE) {
  n <- length(hommel@pvalues)
  if(missing(select))
    select <- 1:n
  pvalues <- hommel@pvalues[select]
  jvalues <- hommel@jvalues
  jumpalpha <- hommel@jumpalpha
  simes <- hommel@simes
  # categorize the selected p-values at this alpha, then count rejections
  res <- makeCats(jumpalpha, jvalues, alpha, pvalues, simes)
  j <- res$j
  cats <- res$cats
  numRej <- findRejections(cats,j)
  corRej <- numRej[length(numRej)] #total number of rejections
  if (!silent) {
    cat(length(pvalues), " hypotheses selected. At confidence level ", 1-alpha, ":\n", sep="")
    cat("False null-hypotheses >= ", corRej, "; ", sep="")
    cat("True null-hypotheses <= ", length(pvalues) - corRej, ".\n", sep="")
    invisible(corRej)
  } else
    corRej
}
#TODO: check whether select really works correctly here; there seems to be an error with the names..
# Incremental version of pickSimes(): for each k, the (1-alpha)-confidence
# lower bound on the number of false nulls among the first k hypotheses of
# the chosen order, optionally plotted as a step curve. Delegates to the
# hommel package's discoveries(..., incremental=TRUE).
#
# Args:
#   hommel: object with slot @p (presumably hommel::hommel -- TODO confirm).
#   select: subset of hypotheses; order then follows increasing p-values.
#   order:  explicit hypothesis order; mutually exclusive with select.
#   alpha:  significance level.
#   plot:   if TRUE, plot the curve and return the result invisibly.
# Returns: vector res with res[k] = bound among the first k hypotheses.
curveSimes <- function(hommel, select, order, alpha=0.05, plot = TRUE) {
  if (!missing(order) & !missing(select))
    stop("please provide either select or order, but not both")
  # if(missing(order))
  #   stop("No order specified.")
  n <- length(hommel@p)
  if(missing(order) && missing(select))
    select = 1:n
  if(!missing(select)) #find the order based on increasing p-values
  {
    p <- hommel@p[select]
    perm <- base::order(p, decreasing = FALSE)
    order <- select[perm]
  }
  res <- discoveries(hommel, ix=order, incremental=TRUE, alpha=alpha)
  pvalues <- hommel@p[order]
  names(res) <- names(pvalues)
  if (plot) {
    # dashed step: total count; solid step: lower bound on false nulls
    false <- c(0, res)
    xs <- 1:length(false)-.5
    tots <- 0:length(res)
    plot(xs, tots, type="S", xlab="number of hypotheses", ylab="number of false null-hypotheses", lty=2)
    lines(xs, false, type="S")
    legend("topleft", c(paste("false null-hypotheses (", 100*(1-alpha), "% conf.)", sep=""),"others"), lty=1:2)
    invisible(res)
  } else
    res
}
# Legacy counterpart of curveSimes(), built on makeCats()/findRejections()
# and a precomputed "hommel_old" object.
# NOTE(review): this definition is overwritten by a second curveSimes_old()
# (p-value based interface) further down in this file.
#
# Args:
#   hommel: a "hommel_old" object from hommelFast_old().
#   select: subset of hypotheses; order then follows increasing p-values.
#   order:  explicit hypothesis order; mutually exclusive with select.
#   alpha:  significance level.
#   plot:   if TRUE, plot the step curve and return the result invisibly.
# Returns: cumulative rejection counts along the chosen order.
curveSimes_old <- function(hommel, select, order, alpha=0.05, plot = TRUE)
{
  if (!missing(order) & !missing(select))
    stop("please provide either select or order, but not both")
  # if(missing(order))
  #   stop("No order specified.")
  n <- length(hommel@pvalues)
  if(missing(order) && missing(select))
    select = 1:n
  if(!missing(select)) #find the order based on increasing p-values
  {
    p <- hommel@pvalues[select]
    perm <- base::order(p, decreasing = FALSE)
    order <- select[perm]
  }
  pvalues <- hommel@pvalues[order]
  jvalues <- hommel@jvalues
  jumpalpha <- hommel@jumpalpha
  simes <- hommel@simes
  # categorize the ordered p-values at this alpha, then count rejections
  res <- makeCats(jumpalpha, jvalues, alpha, pvalues, simes)
  j <- res$j
  cats <- res$cats
  res <- findRejections(cats,j)
  names(res) <- names(pvalues)
  if (plot) {
    # dashed step: total count; solid step: lower bound on false nulls
    false <- c(0, res)
    xs <- 1:length(false)-.5
    tots <- 0:length(res)
    plot(xs, tots, type="S", xlab="number of hypotheses", ylab="number of false null-hypotheses", lty=2)
    lines(xs, false, type="S")
    legend("topleft", c(paste("false null-hypotheses (", 100*(1-alpha), "% conf.)", sep=""),"others"), lty=1:2)
    invisible(res)
  } else
    res
}
# Legacy p-value-based closed-testing shortcut for the Simes test (hommel =
# FALSE) or Hommel's 1983 test (hommel = TRUE): a (1-alpha)-confidence lower
# bound on the number of correct rejections among `select`.
# NOTE(review): redefines the pickSimes_old() declared earlier in this file.
#
# Args:
#   p:      vector of p-values (no NAs).
#   select: indices of the hypotheses of interest.
#   alpha:  significance level.
#   hommel: use Hommel's 1983 harmonic scaling instead of plain Simes.
#   silent: if FALSE, print a summary and return the bound invisibly.
# Returns: lower confidence bound on the number of correct rejections.
pickSimes_old <- function(p, select = seq_along(p), alpha=0.05, hommel=FALSE, silent=FALSE) {
  if (any(is.na(p))) stop("missing values in input p-values")
  ranks <- sort(rank(p, ties.method="first")[select])
  p <- sort(p)
  others <- setdiff(length(p):1, ranks)
  # try to certify rejection of the st..ed selected hypotheses; shrink from
  # the left while the worst-case intersection cannot be rejected
  st <- 1
  ed <- length(ranks)
  ready <- FALSE
  while (!ready) {
    # ins: selected hypotheses under test; outs: non-selected hypotheses that
    # may be added to an intersection (those above the smallest selected one)
    ins <- seq_along(p) %in% ranks[seq(st,ed)]
    outs <- (!logical(length(p))) & (cummax(ins)==1) & (!ins)
    participate <- numeric(length(p))
    participate[ins] <- 1+sum(outs)
    participate[outs] <- seq_len(sum(outs))
    maxlag <- cumsum(outs)
    rej <- TRUE
    i <- 0
    # lag-counting search for a non-rejected worst-case intersection
    while (rej && (i <= sum(outs))) {
      bottom.is <- (participate > i)
      K <- sum(bottom.is)
      # slack of each p-value w.r.t. its Simes/Hommel critical value
      if (hommel)
        lag <- floor(1:K - p[bottom.is]/(alpha/(K*sum(1/1:K))))
      else
        lag <- floor(1:K - p[bottom.is]/(alpha/K))
      if (any(lag >= 0 & lag >= maxlag[bottom.is] - i & ins[bottom.is]))
        i <- Inf
      else if (any(lag >= 0))
        i <- i + 1 + max(pmin(lag, maxlag[bottom.is] - i))
      else
        rej <- FALSE
    }
    if (rej) {
      st <- st+1
      ready <- st > ed
    } else
      ready <- TRUE
  }
  # ed-st+1 selected hypotheses could not be certified as rejections
  out <- ed-st+1
  if (!silent) {
    cat("Rejected ", length(ranks), " hypotheses. At confidence level ", 1-alpha, ":\n", sep="")
    cat("Correct rejections >= ", length(ranks)-out, "; ", sep="")
    cat("False rejections <= ", out, ".\n", sep="")
    invisible(length(ranks)-out)
  } else
    length(ranks)-out
}
# Legacy p-value-based incremental version of pickSimes_old(): for each k, a
# (1-alpha)-confidence lower bound on the number of correct rejections among
# the first k selected/ordered hypotheses, optionally drawn as a step curve.
# Uses pickSimes_old() (the p-value version defined just above) to obtain the
# final endpoint so the loop can stop early.
# NOTE(review): redefines the curveSimes_old() declared earlier in this file.
#
# Args:
#   p:      vector of p-values (no NAs).
#   select: subset of hypotheses; curve then follows increasing p-values.
#   order:  explicit hypothesis order; mutually exclusive with select.
#   alpha:  significance level.
#   hommel: use Hommel's 1983 harmonic scaling instead of plain Simes.
#   plot:   if TRUE, plot the curve and return the result invisibly.
# Returns: vector res with res[k] = bound among the first k hypotheses.
curveSimes_old <- function(p, select = seq_along(p), order, alpha=0.05, hommel=FALSE, plot = TRUE) {
  if (any(is.na(p))) stop("missing values in input p-values")
  # exactly one of select/order may be supplied; select is the default mode
  selected <- !missing(select) || missing(order)
  ordered <- !missing(order)
  if (ordered & selected)
    stop("please provide either select or order, but not both")
  if (selected) {
    ranks <- sort(rank(p, ties.method="first")[select])
    endpoint <- pickSimes_old(p, select, alpha, hommel, silent=TRUE)
  } else {
    ranks <- rank(p, ties.method="first")[order]
    endpoint <- pickSimes_old(p, order, alpha, hommel, silent=TRUE)
  }
  if (length(ranks)==0 || any(is.na(ranks)))
    stop("invalid selection or NA in p-values")
  p <- sort(p)
  others <- setdiff(length(p):1, ranks)
  res <- numeric(length(ranks))
  # st-1 counts the rejections certified so far (same shortcut as above)
  st <- 1
  for (ed in 1:length(ranks)) {
    if (selected)
      ins <- seq_along(p) %in% ranks[seq(st,ed)]
    else
      ins <- seq_along(p) %in% sort(ranks[1:ed])[seq(st,ed)]
    outs <- (!logical(length(p))) & (cummax(ins)==1) & (!ins)
    participate <- numeric(length(p))
    participate[ins] <- 1+sum(outs)
    participate[outs] <- seq_len(sum(outs))
    maxlag <- cumsum(outs)
    rej <- TRUE
    i <- 0
    # lag-counting search for a non-rejected worst-case intersection
    while (rej && (i <= sum(outs))) {
      bottom.is <- (participate > i)
      K <- sum(bottom.is)
      if (hommel)
        lag <- floor(1:K - p[bottom.is]/(alpha/(K*sum(1/1:K))))
      else
        lag <- floor(1:K - p[bottom.is]/(alpha/K))
      if (any(lag >= 0 & lag >= maxlag[bottom.is] - i & ins[bottom.is]))
        i <- Inf
      else if (any(lag >= 0))
        i <- i + 1 + max(pmin(lag, maxlag[bottom.is] - i))
      else
        rej <- FALSE
    }
    if (rej)
      st <- st+1
    res[ed] <- st-1
    # once the endpoint bound is reached, the curve is flat from here on
    if (st > endpoint) {
      res[ed:length(ranks)] <- endpoint
      break
    }
  }
  names(res) <- names(p[ranks])
  if (plot) {
    # dashed step: total count; solid step: certified correct rejections
    # NOTE(review): xlab and ylab are both "number of rejections" -- the ylab
    # looks like a copy-paste slip (compare curveSimes/curveFisher); TODO
    # confirm before changing the plotted output.
    false <- c(0, res)
    xs <- 1:length(false)-.5
    tots <- 0:length(res)
    plot(xs, tots, type="S", xlab="number of rejections", ylab="number of rejections", lty=2)
    lines(xs, false, type="S")
    legend("topleft", c(paste("correct rejections (", 100*(1-alpha), "% conf.)", sep=""),"others"), lty=1:2)
    invisible(res)
  } else
    res
}
| /R/shortcuts.R | no_license | cran/cherry | R | false | false | 17,260 | r | pickFisher <- function(p, select = seq_along(p), alpha=0.05, silent=FALSE) {
if (any(is.na(p))) stop("missing values in input p-values")
rej <- p[select]
if (any(is.na(rej))) stop("invalid selection or NA in p-values")
nr <- setdiff(p, rej)
nr <- nr[nr > min(rej)]
lrej <- -2*log(sort(rej, decreasing=TRUE))
lnr <- -2*log(sort(nr, decreasing=TRUE))
cum.r <- cumsum(lrej)
cum.nr <- c(0, cumsum(lnr))
crit.val <- sapply(1:length(cum.r), function(st) {
max(qchisq(1-alpha, df=2*(0:(length(cum.nr)-1) + st), lower.tail=TRUE) - cum.nr)
})
out <- max(c(0,which(cum.r <= crit.val)))
if (!silent) {
cat(length(rej), " hypotheses selected. At confidence level ", 1-alpha, ":\n", sep="")
cat("False null-hypotheses >= ", length(rej)-out, "; ", sep="")
cat("True null-hypotheses <= ", out, ".\n", sep="")
invisible(length(rej)-out)
} else
length(rej)-out
}
curveFisher <- function(p, select = seq_along(p), order, alpha=0.05, plot = TRUE) {
if (any(is.na(p))) stop("missing values in input p-values")
selected <- !missing(select) || missing(order)
ordered <- !missing(order)
if (ordered & selected)
stop("please provide either select or order, but not both")
if (selected)
ranks <- sort(rank(p, ties.method="first")[select])
else
ranks <- rank(p, ties.method="first")[order]
if (length(ranks)==0 || any(is.na(ranks)))
stop("invalid selection or NA in p-values")
others <- setdiff(length(p):1, ranks)
lpv <- -2*log(sort(p))
res <- numeric(length(ranks))
chisqs <- qchisq(1-alpha, df=2*1:length(lpv), lower.tail=TRUE)
st <- 1
for (ed in 1:length(ranks)) {
if (selected)
ins <- ranks[seq(st,ed)]
else
ins <- sort(ranks[1:ed])[seq(st,ed)]
outs <- setdiff(length(lpv):min(ins), ins)
cr.v <- max(chisqs[ed-st+1+0:length(outs)] - cumsum(c(0,lpv[outs])))
rej <- (sum(lpv[ins]) >= cr.v)
if (rej)
st <- st+1
res[ed] <- st-1
}
names(res) <- names(ranks)
if (plot) {
false <- c(0, res)
xs <- 1:length(false)-.5
tots <- 0:length(res)
plot(xs, tots, type="S", xlab="number of hypotheses", ylab="number of false null-hypotheses", lty=2)
lines(xs, false, type="S")
legend("topleft", c(paste("false null-hypotheses (", 100*(1-alpha), "% conf.)", sep=""),"others"), lty=1:2)
invisible(res)
} else
res
}
setClass("hommel_old",
representation(
pvalues = "numeric", # store vector of p-values (possibly with names)
jvalues = "numeric", # stores vector of jvalues
jumpalpha = "numeric", # stores vector of alphas where j(alpha) jumps
adjusted = "numeric", # stores adjusted p-values
simes = "logical" # stores whether adjusted p-values ar calculated based on a Simes test (if TRUE) or on Hommel's test from 1983 (if FALSE)
)
)
setGeneric("pvalue", function(object, ...) standardGeneric("pvalue"))
#setMethod("pvalue", "hommel_old", function(object, indicator) {
# if(missing(indicator))
# indicator <- 1: length(object@adjusted)
# object@adjusted[indicator]
#})
#own ceil function, needed because small rounding errors can be influential
ceil <- function(x)
{
y <- ceiling(x)
ifelse(isTRUE(all.equal(y-1,x)),y-1,y)
}
#own smaller function, needed because small rounding errors can be influential
smaller <- function(x,y)
{
x<y && !(isTRUE(all.equal(x,y)))
}
#given matrix: find middle column, find minimum, store minimum in array, split in two matrices.
# for each sub matrix: give left, right, lower, upper bound.
#recursive procedure, stops when no splits are possible anymore
#function that finds minimum in certain column
findMin <- function(pvalues, col, lower, upper)
{
minrow <- max(col,lower) #only take pvalues larger than col and lower
pval <- pvalues[minrow:upper]
values <- pval/(minrow:upper - col + 1)
location <- which.min(values)
min <- values[location]
location <- minrow + location -1 #true location in full matrix
return(list(min=min,location=location))
}
split <- function(pvalues, left,right,lower, upper)
{
if(left > right)
{
numeric(0) # empty list
}
else
{
#find middle column of sub matrix
mid <- left + floor((right-left)/2)
#find minimum of this column + location
result <- findMin(pvalues, mid, lower, upper)
c(split(pvalues, left, mid-1, lower, result$location),
result$min,
split(pvalues, mid+1, right, result$location, upper))
}
}
# calculates jumps in function j(alpha)
jumps <- function(alphas) {
n <- length(alphas)
jumpalpha <- numeric(n) # alpha
jvalues <- numeric(n) # j(alpha)
index <- 1
jumpalpha[index] <- alphas[1]
jvalues[index] <- n
if(n >= 2)
{
for(i in 2:n) {
#if(alphas[i] > jumpalpha[index]) #new jump
if(smaller(jumpalpha[index],alphas[i])) #new jump
{
index <- index+1
jumpalpha[index] <- alphas[i]
jvalues[index] <- n+1-i
}
}
}
list(jumpalpha=jumpalpha[1:index], jvalues=jvalues[1:index])
}
# now calculate smallest alphas for each pvalue by looping simultaneously over jumps and pvalues
adjpvalues <- function(pvalues, jumpalpha, jvalues)
{
n <- length(pvalues)
adjusted <- numeric(n)
j <- 1
for(i in 1:n)
{
while(j <= length(jvalues) && smaller(jumpalpha[j], pvalues[i] * jvalues[j]))
j <- j + 1
if(j <= length(jvalues))
adjusted[i] <- pvalues[i] * jvalues[j]
if(j > 1) #make sure alpha is in the right interval by taking the maximum over alpha found and the minimal alpha of the corresponding interval
adjusted[i] <- max(adjusted[i], jumpalpha[j-1])
#NB: if j > length(jvalues), i.e. j=length(jvalues) + 1, adjusted[i] = jumpalpha[length(jvalues)], i.e. the alpha on which j(alpha) goes to 0
}
adjusted
}
#hommel = F means standard Simes test, hommel = T means test of Hommel 1983
#hommel = F means standard Simes test, hommel = T means test of Hommel 1983
hommelFast <- function(pvalues, simes = TRUE) {
hommel(p=pvalues, simes=simes)
}
hommelFast_old <- function(pvalues, simes = TRUE)
{
if(any(is.na(pvalues)))
stop("missing values in input p-values")
if(length(pvalues)==0)
stop("the pvalues vector is empty")
#save names in right order
names <- names(pvalues)
perm <- order(pvalues)
pvalues <- pvalues[perm]
n <- length(pvalues)
alphas <- split(pvalues, 1, n, 1, n) #full matrix as input
# find true values for minima by multiplying with the right factor. this should depend on hommel1983 or simes
sums <- rep(1,n)
if(!simes) #extra factor needed
{
if(n >= 2)
{
for(i in 2:n)
sums[i] <- sums[i-1] + 1/i
}
}
alphas <- alphas*(n:1)*rev(sums)
result <- jumps(alphas)
jumpalpha <- result$jumpalpha
jvalues <- result$jvalues
adjusted <- adjpvalues(pvalues, jumpalpha, jvalues)
adjusted[perm] <- adjusted
pvalues[perm] <- pvalues #names are still in permuted order here
names(pvalues) <- names
names(adjusted) <- names
if (!simes)
adjusted <- pmin(adjusted, 1)
out <- new("hommel_old",
pvalues = pvalues,
jvalues = jvalues,
jumpalpha = jumpalpha,
adjusted = adjusted,
simes = simes)
return(out)
}
findRejections <- function(cats, j) {
n <- length(cats)
#maximum value of category that can ever lead to a rejection
maxcats <- min(n,max(cats))
if(j>0)
maxcats <- min(maxcats,j)
numRej <- rep(0,n)
parent <- integer(maxcats)
rank <- integer(maxcats)
leftmost <- 1:maxcats
find <- function(cat) {
if(parent[cat] != 0)
parent[cat] <<- find(parent[cat])
else
cat
}
merge <- function(root1, root2) {
if(rank[root1] > rank[root2])
merge(root2, root1)
if(rank[root1] <= rank[root2])
{
parent[root1] <<- root2
leftmost[root2] <<- min(leftmost[root1], leftmost[root2])
if(rank[root1] == rank[root2])
rank[root2] <<- rank[root2]+1
}
}
for(i in 1:n) {
numRej[i] <- ifelse(i > 1, numRej[i-1], 0)
if(cats[i]<= maxcats)
{
root1 <- find(cats[i])
if(leftmost[root1] == 1)
numRej[i] <- numRej[i]+1
else
{
root2 <- find(leftmost[root1] - 1)
merge(root1, root2)
}
}
}
numRej
}
makeCats <- function(jumpalpha, jvalues, alpha, pvalues, simes)
{
#find j for this alpha, can be made faster by using binary search for example if ever necessary
i=1
while(i <= length(jumpalpha) && !smaller(alpha, jumpalpha[i]))
i <- i + 1
#if i = length(jumpalpha) + 1, j must be 0. Elseway, j = jvalues[i]
j <- ifelse(i == (length(jumpalpha) + 1), 0, jvalues[i])
#make categories, based on j
cats <- rep(j+1,length(pvalues)) # j+1 is highest possible value
if(j == 0) #everything could get rejected
cats <- rep(1, length(pvalues))
else
{
if(!simes)
scaling <- sum(1/(1:j))
for(i in 1:length(pvalues))
{
if(!simes)
r <- ceil(pvalues[i]*j*scaling/alpha)
else
r <- ceil(pvalues[i]*j/alpha)
if(r < cats[i])
cats[i] <- max(r,1) #to avoid problems with p-values that are exactly zero
}
}
return(list(cats=cats, j=j))
}
pickSimes <- function(hommel, select, alpha=0.05, silent=FALSE) {
res <- discoveries(hommel, select, alpha=alpha)
n <- length(hommel@p)
if(missing(select))
select <- 1:n
pvalues <- hommel@p[select]
if (!silent) {
cat(length(pvalues), " hypotheses selected. At confidence level ", 1-alpha, ":\n", sep="")
cat("False null-hypotheses >= ", res, "; ", sep="")
cat("True null-hypotheses <= ", length(pvalues) - res, ".\n", sep="")
invisible(res)
} else
res
}
pickSimes_old <- function(hommel, select, alpha=0.05, silent=FALSE) {
n <- length(hommel@pvalues)
if(missing(select))
select <- 1:n
pvalues <- hommel@pvalues[select]
jvalues <- hommel@jvalues
jumpalpha <- hommel@jumpalpha
simes <- hommel@simes
res <- makeCats(jumpalpha, jvalues, alpha, pvalues, simes)
j <- res$j
cats <- res$cats
numRej <- findRejections(cats,j)
corRej <- numRej[length(numRej)] #total number of rejections
if (!silent) {
cat(length(pvalues), " hypotheses selected. At confidence level ", 1-alpha, ":\n", sep="")
cat("False null-hypotheses >= ", corRej, "; ", sep="")
cat("True null-hypotheses <= ", length(pvalues) - corRej, ".\n", sep="")
invisible(corRej)
} else
corRej
}
#TODO: check wheter select really goes well, seems to be an error with the names..
# Confidence curve for the number of false null-hypotheses among the first
# k hypotheses of an ordering, for k = 1..length(order).
#
# hommel: a hommel object
# select: indices of interest (ordering then follows increasing p-values)
# order:  an explicit ordering of hypotheses (mutually exclusive with select)
# alpha:  significance level; the whole curve holds with confidence 1 - alpha
# plot:   if TRUE, draw the step curve and return the result invisibly
#
# Returns the incremental lower confidence bounds, named after the p-values.
curveSimes <- function(hommel, select, order, alpha=0.05, plot = TRUE) {
  # && (scalar, short-circuiting) rather than vectorized & for this check
  if (!missing(order) && !missing(select))
    stop("please provide either select or order, but not both")
  n <- length(hommel@p)
  if (missing(order) && missing(select))
    select <- 1:n
  if (!missing(select)) {
    # derive the order from increasing p-values within the selection
    p <- hommel@p[select]
    perm <- base::order(p, decreasing = FALSE)
    order <- select[perm]
  }
  res <- discoveries(hommel, ix=order, incremental=TRUE, alpha=alpha)
  pvalues <- hommel@p[order]
  names(res) <- names(pvalues)
  if (plot) {
    false <- c(0, res)
    xs <- seq_along(false) - 0.5  # step plot: jumps halfway between indices
    tots <- 0:length(res)
    plot(xs, tots, type="S", xlab="number of hypotheses", ylab="number of false null-hypotheses", lty=2)
    lines(xs, false, type="S")
    legend("topleft", c(paste("false null-hypotheses (", 100*(1-alpha), "% conf.)", sep=""),"others"), lty=1:2)
    invisible(res)
  } else
    res
}
# Legacy confidence-curve implementation working on the precomputed hommel
# slots (pvalues/jvalues/jumpalpha/simes) via makeCats()/findRejections();
# superseded by curveSimes().
curveSimes_old <- function(hommel, select, order, alpha=0.05, plot = TRUE)
{
  # && (scalar, short-circuiting) rather than vectorized & for this check
  if (!missing(order) && !missing(select))
    stop("please provide either select or order, but not both")
  n <- length(hommel@pvalues)
  if (missing(order) && missing(select))
    select <- 1:n
  if (!missing(select)) {
    # derive the order from increasing p-values within the selection
    p <- hommel@pvalues[select]
    perm <- base::order(p, decreasing = FALSE)
    order <- select[perm]
  }
  pvalues <- hommel@pvalues[order]
  jvalues <- hommel@jvalues
  jumpalpha <- hommel@jumpalpha
  simes <- hommel@simes
  res <- makeCats(jumpalpha, jvalues, alpha, pvalues, simes)
  j <- res$j
  cats <- res$cats
  res <- findRejections(cats, j)
  names(res) <- names(pvalues)
  if (plot) {
    false <- c(0, res)
    xs <- seq_along(false) - 0.5  # step plot: jumps halfway between indices
    tots <- 0:length(res)
    plot(xs, tots, type="S", xlab="number of hypotheses", ylab="number of false null-hypotheses", lty=2)
    lines(xs, false, type="S")
    legend("topleft", c(paste("false null-hypotheses (", 100*(1-alpha), "% conf.)", sep=""),"others"), lty=1:2)
    invisible(res)
  } else
    res
}
# Lower confidence bound for the number of correct rejections when all
# hypotheses in 'select' are rejected, by closed testing with Simes
# (hommel = FALSE) or Hommel's robust variant (hommel = TRUE) local tests.
#
# p:      vector of p-values for all hypotheses (NAs are rejected with an error)
# select: indices of the rejected hypotheses
# alpha:  significance level of the local tests
# hommel: use Hommel's robust variant of the Simes inequality?
# silent: if TRUE, return the bound without printing
#
# Returns the number of correct rejections (invisibly when not silent).
pickSimes_old <- function(p, select = seq_along(p), alpha=0.05, hommel=FALSE, silent=FALSE) {
  if (any(is.na(p))) stop("missing values in input p-values")
  ranks <- sort(rank(p, ties.method="first")[select])
  p <- sort(p)
  st <- 1
  ed <- length(ranks)
  ready <- FALSE
  # Shrink the candidate window of false rejections from the left until the
  # corresponding intersection hypothesis can no longer be rejected.
  while (!ready) {
    ins <- seq_along(p) %in% ranks[seq(st,ed)]   # still-candidate selected hypotheses
    # non-selected hypotheses interleaved after the first candidate
    # (the original "!logical(n) &" all-TRUE factor was a no-op and is dropped)
    outs <- (cummax(ins)==1) & (!ins)
    participate <- numeric(length(p))
    participate[ins] <- 1+sum(outs)
    participate[outs] <- seq_len(sum(outs))
    maxlag <- cumsum(outs)
    rej <- TRUE
    i <- 0
    while (rej && (i <= sum(outs))) {
      bottom.is <- (participate > i)
      K <- sum(bottom.is)
      # lag >= 0 means the p-value falls under its Simes/Hommel critical
      # value in an intersection of size K
      if (hommel)
        lag <- floor(1:K - p[bottom.is]/(alpha/(K*sum(1/1:K))))
      else
        lag <- floor(1:K - p[bottom.is]/(alpha/K))
      if (any(lag >= 0 & lag >= maxlag[bottom.is] - i & ins[bottom.is]))
        i <- Inf   # intersection rejected through a selected hypothesis
      else if (any(lag >= 0))
        i <- i + 1 + max(pmin(lag, maxlag[bottom.is] - i))
      else
        rej <- FALSE
    }
    if (rej) {
      st <- st+1
      ready <- st > ed
    } else
      ready <- TRUE
  }
  out <- ed-st+1  # maximal number of false rejections not excluded
  if (silent)
    return(length(ranks)-out)
  cat("Rejected ", length(ranks), " hypotheses. At confidence level ", 1-alpha, ":\n", sep="")
  cat("Correct rejections >= ", length(ranks)-out, "; ", sep="")
  cat("False rejections <= ", out, ".\n", sep="")
  invisible(length(ranks)-out)
}
# Legacy confidence curve (superseded by curveSimes()): for each prefix of
# the ordering, a lower confidence bound on the number of correct
# rejections, working directly on a p-value vector.
#
# p:      vector of p-values (NAs rejected with an error)
# select: indices of interest (curve follows increasing p-values), or
# order:  an explicit ordering of the hypotheses (mutually exclusive)
# alpha:  significance level; hommel = TRUE selects the robust Simes variant
# plot:   if TRUE, draw a step plot and return the curve invisibly
curveSimes_old <- function(p, select = seq_along(p), order, alpha=0.05, hommel=FALSE, plot = TRUE) {
if (any(is.na(p))) stop("missing values in input p-values")
# 'selected' is also TRUE for the all-hypotheses default (both args missing)
selected <- !missing(select) || missing(order)
ordered <- !missing(order)
# NOTE(review): scalar `&` works here but `&&` would be idiomatic
if (ordered & selected)
stop("please provide either select or order, but not both")
if (selected) {
ranks <- sort(rank(p, ties.method="first")[select])
# endpoint: the bound when the whole selection is rejected at once
endpoint <- pickSimes_old(p, select, alpha, hommel, silent=TRUE)
} else {
ranks <- rank(p, ties.method="first")[order]
endpoint <- pickSimes_old(p, order, alpha, hommel, silent=TRUE)
}
if (length(ranks)==0 || any(is.na(ranks)))
stop("invalid selection or NA in p-values")
p <- sort(p)
others <- setdiff(length(p):1, ranks)  # NOTE(review): computed but never used below
res <- numeric(length(ranks))
st <- 1
# Grow the rejected prefix one hypothesis at a time, reusing the window
# logic of pickSimes_old() for each prefix.
for (ed in 1:length(ranks)) {
if (selected)
ins <- seq_along(p) %in% ranks[seq(st,ed)]
else
ins <- seq_along(p) %in% sort(ranks[1:ed])[seq(st,ed)]
# non-selected hypotheses interleaved after the first candidate
outs <- (!logical(length(p))) & (cummax(ins)==1) & (!ins)
participate <- numeric(length(p))
participate[ins] <- 1+sum(outs)
participate[outs] <- seq_len(sum(outs))
maxlag <- cumsum(outs)
rej <- TRUE
i <- 0
while (rej && (i <= sum(outs))) {
bottom.is <- (participate > i)
K <- sum(bottom.is)
# lag >= 0: p-value falls under its Simes/Hommel critical value at size K
if (hommel)
lag <- floor(1:K - p[bottom.is]/(alpha/(K*sum(1/1:K))))
else
lag <- floor(1:K - p[bottom.is]/(alpha/K))
if (any(lag >= 0 & lag >= maxlag[bottom.is] - i & ins[bottom.is]))
i <- Inf
else if (any(lag >= 0))
i <- i + 1 + max(pmin(lag, maxlag[bottom.is] - i))
else
rej <- FALSE
}
if (rej)
st <- st+1
res[ed] <- st-1
# once the overall endpoint is reached the curve is flat; stop early
if (st > endpoint) {
res[ed:length(ranks)] <- endpoint
break
}
}
names(res) <- names(p[ranks])
if (plot) {
false <- c(0, res)
xs <- 1:length(false)-.5
tots <- 0:length(res)
# NOTE(review): xlab and ylab are both "number of rejections" -- the ylab
# was probably meant to say "number of correct rejections"; verify.
plot(xs, tots, type="S", xlab="number of rejections", ylab="number of rejections", lty=2)
lines(xs, false, type="S")
legend("topleft", c(paste("correct rejections (", 100*(1-alpha), "% conf.)", sep=""),"others"), lty=1:2)
invisible(res)
} else
res
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DBDriver.R
\docType{class}
\name{DBIDriver-class}
\alias{DBIDriver-class}
\title{DBIDriver class}
\description{
Base class for all DBMS drivers (e.g., RSQLite, MySQL, PostgreSQL).
The virtual class \code{DBIDriver} defines the operations for creating
connections and defining data type mappings. Actual driver classes, for
instance \code{RPgSQL}, \code{RMySQL}, etc. implement these operations in a
DBMS-specific manner.
}
\seealso{
Other DBI classes: \code{\link{DBIConnection-class}},
\code{\link{DBIObject-class}},
\code{\link{DBIResult-class}}
Other DBIDriver generics: \code{\link{dbConnect}},
\code{\link{dbDataType}}, \code{\link{dbDriver}},
\code{\link{dbGetInfo}}, \code{\link{dbIsValid}},
\code{\link{dbListConnections}}
}
\concept{DBI classes}
\concept{DBIDriver generics}
| /man/DBIDriver-class.Rd | no_license | jimhester/DBI | R | false | true | 874 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DBDriver.R
\docType{class}
\name{DBIDriver-class}
\alias{DBIDriver-class}
\title{DBIDriver class}
\description{
Base class for all DBMS drivers (e.g., RSQLite, MySQL, PostgreSQL).
The virtual class \code{DBIDriver} defines the operations for creating
connections and defining data type mappings. Actual driver classes, for
instance \code{RPgSQL}, \code{RMySQL}, etc. implement these operations in a
DBMS-specific manner.
}
\seealso{
Other DBI classes: \code{\link{DBIConnection-class}},
\code{\link{DBIObject-class}},
\code{\link{DBIResult-class}}
Other DBIDriver generics: \code{\link{dbConnect}},
\code{\link{dbDataType}}, \code{\link{dbDriver}},
\code{\link{dbGetInfo}}, \code{\link{dbIsValid}},
\code{\link{dbListConnections}}
}
\concept{DBI classes}
\concept{DBIDriver generics}
|
#This code allows users to take the output from morphological clines fitted in CFit 7 (Gay et al 2008*) and plot allele frequency and morphological clines, including the ability to make heat map style representations of the trait variance expected under the fitted cline.
#*Gay et al 2008 Comparing clines on molecular and phenotypic traits in hybrid zones: a window on tension zone models. Evolution 62:2789)
# Input data (for example plots)
#the data are four morphological traits through a hybrid zone between stream and anadromous sticklebacks in Bonsall Creek, Vancouver Island. The anadromous population is to the left in the graph, the x axis is centred 2.3 km from the sea
cfitdata<-read.table(file.choose(), header=T) #choose cfitdata.txt
attach(cfitdata)
#****plotting unimodal clines****
#p plots just the allele frequency cline as a function of x
p<-function(x,cen,w) exp(w*(x-cen))/(1+exp(w*(x-cen)))
#here's the cline data for Eda (a gene controlling stickleback armour) in the hybrid zone
centreEda<-0.008;widthEda<--2.98;
#here's plot of this cline (I just plot the points in white and then use points(type="l") to make it a smooth curve); I'm sure you can do this more elegantly.
dist<-seq(-0.8,2,by=0.01)
plot(dist,p(dist,centreEda,widthEda), col="white", axes=T, xlab="distance from sea (km)", ylab="freq of marine allele/trait")
lines(dist,p(dist,centreEda,widthEda),lty=1,col="blue")
#moy plots the a morphological trait average cline as a function of x
moy<-function(x,cen,w,min,delta) delta*p(x,cen,w)+min
#varp describes the expected variance around the cline as a function of x. s1, s2 and s3 are the variances for the left, centre and right hand parts of the cline, respectively.
varp<-function(x,cen,w,s1,s2,s3) (s1*p(x,cen,w)^2)+(2*s2*p(x,cen,w)*(1-p(x,cen,w)))+(s3*(1-p(x,cen,w))^2)
#unimodal generates a probability density for the fitted cline (the lighter the color, the more likely you are to observe datapoints at that point). min and delta are the trait minimum and the amount it changes across the cline, respectively.
unimodal<-function(x,y,cen,w,min,delta,s1,s2,s3) (exp(-((y-moy(x,cen,w,min,delta))^2)/(2*(varp(x,cen,w,s1,s2,s3)^2))))/(sqrt(2*pi)*varp(x,cen,w,s1,s2,s3))
#in the unimodal.array function, xmin, xmax, ymin ymax set the range of values for which you wish to generate the plot, and the remaining input comes from CFit 7. i.e.
# cen w 0 0 0 0
# min delta s1 s2 s3 0 0
# 0 0 0 0 0
# Evaluate the unimodal cline density on a 51 x 51 grid covering
# [xmin, xmax] x [ymin, ymax]; rows index x, columns index y.
unimodal.array <- function(xmin, xmax, ymin, ymax, cen, w, min, delta, s1, s2, s3) {
  grid_x <- seq(xmin, xmax, (xmax - xmin) / 50)
  grid_y <- seq(ymin, ymax, (ymax - ymin) / 50)
  dens <- array(0, c(length(grid_x), length(grid_y)))
  for (col in seq_along(grid_y)) {
    for (row in seq_along(grid_x)) {
      dens[row, col] <- unimodal(grid_x[row], grid_y[col], cen, w, min, delta, s1, s2, s3)
    }
  }
  dens
}
#this is the best fit data for the keel width cline
keelwidthcline<-unimodal.array(-1,2,-1.6,1.6,0.32,-33.56,-0.19,0.31,0.374,0.374,0.374)
#this plots the contours and the data
quartz()
filled.contour(
x = seq(-1,2,length.out=nrow(keelwidthcline)),
y = seq(-1.6,1.6,length.out=ncol(keelwidthcline)),
keelwidthcline, color = heat.colors, plot.title = title(main = "keel width cline"),nlevels=10,plot.axes={ axis(1); axis(2); points(site/1000,keelres)})
#****trimodal plotting functions****
#these functions allow for a three part cline (the below are called by trimodal.array)
pentefreqfunc<-function(slopetrait,slopemoymix,slopemoy) (4*slopetrait*slopemoy*exp(slopetrait*slopemoymix)-((1+exp(slopetrait*slopemoymix))^2)*slopetrait)/(-1+exp(2*slopetrait*slopemoymix))
# Three-part (stepped) frequency cline: exponential tails beyond
# centre - xpos2 (left) and centre + xpos1 (right), logistic in between.
#
# BUG FIX: the original stacked two `if`s without `else`, so the value of
# the left-tail branch was computed and then discarded, and the function
# always returned the result of the second if/else. The branches are now
# properly chained with else-if.
f <- function(x, centre, pentefreq, xpos1, xpos2, tslope1, tslope2) {
  if (x <= (centre - xpos2)) {
    # left exponential tail
    1 - ((1 - exp(-pentefreq * xpos2)) / (1 + exp(-pentefreq * xpos2))) *
      exp(-(tslope2 * pentefreq) / (1 + exp(xpos2 * pentefreq)) * (x - centre + xpos2))
  } else if (x >= (centre + xpos1)) {
    # right exponential tail
    exp(pentefreq * xpos1) / (1 + exp(pentefreq * xpos1)) *
      exp(-(-(tslope1 * pentefreq) / (1 + exp(xpos1 * pentefreq))) * (x - centre - xpos1))
  } else {
    # central logistic section
    exp(pentefreq * (x - centre)) / (1 + exp(pentefreq * (x - centre)))
  }
}
# Mean trait values of the three modes: m1 shifts the frequency cline left
# by slopemoymix, m3 shifts it right (both with the tails disabled via the
# huge xpos of 1000), and m2 is the hybrid mode halfway between them.
m1 <- function(x, centre, slopetrait, tslope1, tslope2, min, delta, slopemoymix, slopemoy) {
  f(x - slopemoymix, centre, slopemoy * slopetrait, 1000, 1000, tslope1, tslope2) * delta + min
}
m3 <- function(x, centre, slopetrait, tslope1, tslope2, min, delta, slopemoymix, slopemoy) {
  f(x + slopemoymix, centre, slopemoy * slopetrait, 1000, 1000, tslope1, tslope2) * delta + min
}
# midpoint of the two parental means
m2 <- function(x, centre, slopetrait, tslope1, tslope2, min, delta, slopemoymix, slopemoy) {
  (m1(x, centre, slopetrait, tslope1, tslope2, min, delta, slopemoymix, slopemoy) +
     m3(x, centre, slopetrait, tslope1, tslope2, min, delta, slopemoymix, slopemoy)) / 2
}
# Piecewise-linear interpolation of the mixing coefficient as a function of
# the allele frequency p, through the knot values f1..f5 (and 0 at both
# p = 0 and p = 1).
#
# BUG FIX: the original stacked six `if`s without `else`, so every branch
# value except the final if/else was computed and discarded, and the
# function returned the wrong segment for all p < 2/3. The branches are
# now chained with else-if.
ff <- function(p, f1, f2, f3, f4, f5) {
  if (p < (1/6)) {
    6*p*f1
  } else if (p < (1/3)) {
    6*(f1-f2)*p+2*f1-f2
  } else if (p < (1/2)) {
    6*(f3-f2)*p+3*f2-2*f3
  } else if (p < (2/3)) {
    6*(f4-f3)*p+4*f3-3*f4
  } else if (p < (5/6)) {
    6*(f5-f4)*p+5*f4-4*f5
  } else {
    6*(1-p)*f5
  }
}
# Probability density of observing trait value y at position x under the
# trimodal cline model: a mixture of normals around the two parental-like
# means (m1, m3) and the hybrid mean (m2), with mixture weights derived
# from the local allele frequency p and the mixing spline ff().
trimodal<-function(x,y,centre,slopetrait,xpos1,xpos2,tslope1,tslope2,min,delta,s1,s2,s3,slopemoymix,slopemoy,f1,f2,f3,f4,f5) {
# local allele frequency from the three-part cline
p<-f(x,centre,pentefreqfunc(slopetrait,slopemoymix,slopemoy),xpos1,xpos2,tslope1,tslope2)
# weights of the two parental modes; the hybrid mode gets 1 - p1 - p3
p1<-p^2+p*(1-p)*ff(p,f1,f2,f3,f4,f5)
p3<-(1-p)^2+p*(1-p)*ff(p,f1,f2,f3,f4,f5)
# NOTE(review): the (2*s)^2 denominators and sqrt(2*pi*s) factors do not
# match the textbook normal density -- presumably CFit's parameterization;
# verify against the CFit 7 documentation before changing.
p1*(exp(-(y-m1(x,centre,slopetrait,tslope1,tslope2,min,delta,slopemoymix,slopemoy))^2/((2*s1)^2))/(sqrt(2*pi*s1)))+p3*(exp(-(y-m3(x,centre,slopetrait,tslope1,tslope2,min,delta,slopemoymix,slopemoy))^2/((2*s3)^2))/(sqrt(2*pi*s3)))+(1-p1-p3)*(exp(-(y-m2(x,centre,slopetrait,tslope1,tslope2,min,delta,slopemoymix,slopemoy))^2/((2*s2)^2))/(sqrt(2*pi*s2)))
}
#this is the only function you need to run
# Evaluate the trimodal cline density on a 51 x 51 grid covering
# [xmin, xmax] x [ymin, ymax]. var_list holds the 18 CFit parameters, in
# the order documented in the CFit 7 manual (p. 10), which are forwarded
# positionally to trimodal() after the grid coordinates.
trimodal.array <- function(xmin, xmax, ymin, ymax, var_list) {
  grid_x <- seq(xmin, xmax, (xmax - xmin) / 50)
  grid_y <- seq(ymin, ymax, (ymax - ymin) / 50)
  dens <- array(0, c(length(grid_x), length(grid_y)))
  pars <- as.list(unname(var_list[1:18]))
  for (row in seq_along(grid_x)) {
    for (col in seq_along(grid_y)) {
      dens[row, col] <- do.call(trimodal, c(list(grid_x[row], grid_y[col]), pars))
    }
  }
  dens
}
#trial finres data (the length of the dorsal fin through the HZ)
#the data come from the CFit maximum model:
# centre width xpos1 xpos2 tslope1 tslope2 coeffslopemean
# min delta sigma1 sigma2 sigma3 mix
# phi1 phi2 phi3 phi4 phi5
#I recommend entering your values as a list like this (the example below is for my trait finres) and using trimodal.array() to plot the clines. The variables are in the order a-r as in the table on page 10 of the CFit 7 documents,it's important to maintain this order.
finresdata<-c(centre<--0.34,slope<--2.04,d1<-1.17,d2<-0.0001,t1<-0.250,t2<-0.0001,min_mu<--0.630,delta_mu<-2.254,sigma1<-1.596,sigma2<-0.197,sigma3<-0.909,mix<-0.726,a_m<-0.555,f1<-0.968,f2<-0.882,f3<--0.581,f4<--0.013,f5<-0.675)
#the finres full model gives a really strange result
finresplot<-trimodal.array(-0.8,2,-3,3,finresdata)
xlimits<-seq(-0.8,2,length.out=nrow(finresplot))
ylimits<-seq(-3,3,length.out=ncol(finresplot))
filled.contour(xlimits,ylimits,finresplot, color = heat.colors, plot.title = title(main = "dorsal fin length"),nlevels=10,plot.axes={ axis(1); axis(2); points(site/1000,finres)})
#trial pectres data (the length of the pectoral fin through the HZ)
pectresdata<-c(centre_pectres<--0.172321142,pente_pectres<--35.19,xpos1_pectres<-0.0001,xpos2_pectres<-43.506,tslope1_pectres<-0.000,tslope2_pectres<-0.765,coeffslopemoy_pectres<-0.291,min_pectres<--0.742,delta_pectres<-1.327,sigma1_pectres<-0.737,sigma2_pectres<-0.545,sigma3_pectres<-1.698,mixpectres<-1.284,f1_pectres<-0.817,f2_pectres<--0.499,f3_pectres<--0.864,f4_pectres<--0.497,f5_pectres<--0.188)
#the pectres full model gives a two step cline (which happens because it tries to accommodate the strangely high points at 1.7)
pectresplot<-trimodal.array(-0.8,2,-3,3,pectresdata)
xlimits<-seq(-0.8,2,length.out=nrow(pectresplot))
ylimits<-seq(-3,3,length.out=ncol(pectresplot))
test.plot<-filled.contour(xlimits,ylimits,pectresplot, color = heat.colors, plot.title = title(main = "pectoral fin length"),nlevels=10,plot.axes={ axis(1); axis(2); points(site/1000,pectres)})
| /Plotting_CFit7_Clines_Aug2013.R | no_license | TheMolecularEcologist/MiscellaneousCode | R | false | false | 8,075 | r | #This code allows users to take the output from morphological clines fitted in CFit 7 (Gay et al 2008*) and plot allele frequency and morphological clines, including the ability to make heat map style representations of the trait variance expected under the fitted cline.
#*Gay et al 2008 Comparing clines on molecular and phenotypic traits in hybrid zones: a window on tension zone models. Evolution 62:2789)
# Input data (for example plots)
#the data are four morphological traits through a hybrid zone between stream and anadromous sticklebacks in Bonsall Creek, Vancouver Island. The anadromous population is to the left in the graph, the x axis is centred 2.3 km from the sea
cfitdata<-read.table(file.choose(), header=T) #choose cfitdata.txt
attach(cfitdata)
#****plotting unimodal clines****
#p plots just the allele frequency cline as a function of x
p<-function(x,cen,w) exp(w*(x-cen))/(1+exp(w*(x-cen)))
#here's the cline data for Eda (a gene controlling stickleback armour) in the hybrid zone
centreEda<-0.008;widthEda<--2.98;
#here's plot of this cline (I just plot the points in white and then use points(type="l") to make it a smooth curve); I'm sure you can do this more elegantly.
dist<-seq(-0.8,2,by=0.01)
plot(dist,p(dist,centreEda,widthEda), col="white", axes=T, xlab="distance from sea (km)", ylab="freq of marine allele/trait")
lines(dist,p(dist,centreEda,widthEda),lty=1,col="blue")
#moy plots the a morphological trait average cline as a function of x
moy<-function(x,cen,w,min,delta) delta*p(x,cen,w)+min
#varp describes the expected variance around the cline as a function of x. s1, s2 and s3 are the variances for the left, centre and right hand parts of the cline, respectively.
varp<-function(x,cen,w,s1,s2,s3) (s1*p(x,cen,w)^2)+(2*s2*p(x,cen,w)*(1-p(x,cen,w)))+(s3*(1-p(x,cen,w))^2)
#unimodal generates a probability density for the fitted cline (the lighter the color, the more likely you are to observe datapoints at that point). min and delta are the trait minimum and the amount it changes across the cline, respectively.
unimodal<-function(x,y,cen,w,min,delta,s1,s2,s3) (exp(-((y-moy(x,cen,w,min,delta))^2)/(2*(varp(x,cen,w,s1,s2,s3)^2))))/(sqrt(2*pi)*varp(x,cen,w,s1,s2,s3))
#in the unimodal.array function, xmin, xmax, ymin ymax set the range of values for which you wish to generate the plot, and the remaining input comes from CFit 7. i.e.
# cen w 0 0 0 0
# min delta s1 s2 s3 0 0
# 0 0 0 0 0
unimodal.array<-function(xmin,xmax,ymin,ymax,cen,w,min,delta,s1,s2,s3) {
xx<-seq(xmin,xmax,(xmax-xmin)/50)
yy<-seq(ymin,ymax,(ymax-ymin)/50)
arr<-array(0,c(length(xx),length(yy)))
for(i in 1:length(xx)) {
for(j in 1:length(yy))
{arr[i,j]<-unimodal(xx[i],yy[j],cen,w,min,delta,s1,s2,s3)
}}
arr
}
#this is the best fit data for the keel width cline
keelwidthcline<-unimodal.array(-1,2,-1.6,1.6,0.32,-33.56,-0.19,0.31,0.374,0.374,0.374)
#this plots the contours and the data
quartz()
filled.contour(
x = seq(-1,2,length.out=nrow(keelwidthcline)),
y = seq(-1.6,1.6,length.out=ncol(keelwidthcline)),
keelwidthcline, color = heat.colors, plot.title = title(main = "keel width cline"),nlevels=10,plot.axes={ axis(1); axis(2); points(site/1000,keelres)})
#****trimodal plotting functions****
#these functions allow for a three part cline (the below are called by trimodal.array)
pentefreqfunc<-function(slopetrait,slopemoymix,slopemoy) (4*slopetrait*slopemoy*exp(slopetrait*slopemoymix)-((1+exp(slopetrait*slopemoymix))^2)*slopetrait)/(-1+exp(2*slopetrait*slopemoymix))
# Three-part (stepped) frequency cline: exponential tails beyond
# centre - xpos2 (left) and centre + xpos1 (right), logistic in between.
#
# BUG FIX: the original stacked two `if`s without `else`, so the value of
# the left-tail branch was computed and then discarded, and the function
# always returned the result of the second if/else. The branches are now
# properly chained with else-if.
f <- function(x, centre, pentefreq, xpos1, xpos2, tslope1, tslope2) {
  if (x <= (centre - xpos2)) {
    # left exponential tail
    1 - ((1 - exp(-pentefreq * xpos2)) / (1 + exp(-pentefreq * xpos2))) *
      exp(-(tslope2 * pentefreq) / (1 + exp(xpos2 * pentefreq)) * (x - centre + xpos2))
  } else if (x >= (centre + xpos1)) {
    # right exponential tail
    exp(pentefreq * xpos1) / (1 + exp(pentefreq * xpos1)) *
      exp(-(-(tslope1 * pentefreq) / (1 + exp(xpos1 * pentefreq))) * (x - centre - xpos1))
  } else {
    # central logistic section
    exp(pentefreq * (x - centre)) / (1 + exp(pentefreq * (x - centre)))
  }
}
m1<-function(x,centre,slopetrait,tslope1,tslope2,min,delta,slopemoymix,slopemoy) {f(x-slopemoymix,centre,slopemoy*slopetrait,1000,1000,tslope1,tslope2)*delta+min}
m3<-function(x,centre,slopetrait,tslope1,tslope2,min,delta,slopemoymix,slopemoy) {f(x+slopemoymix,centre,slopemoy*slopetrait,1000,1000,tslope1,tslope2)*delta+min}
m2<-function(x,centre,slopetrait,tslope1,tslope2,min,delta,slopemoymix,slopemoy) {(m1(x,centre,slopetrait,tslope1,tslope2,min,delta,slopemoymix,slopemoy)+m3(x,centre,slopetrait,tslope1,tslope2,min,delta,slopemoymix,slopemoy))/2}
# Piecewise-linear interpolation of the mixing coefficient as a function of
# the allele frequency p, through the knot values f1..f5 (and 0 at both
# p = 0 and p = 1).
#
# BUG FIX: the original stacked six `if`s without `else`, so every branch
# value except the final if/else was computed and discarded, and the
# function returned the wrong segment for all p < 2/3. The branches are
# now chained with else-if.
ff <- function(p, f1, f2, f3, f4, f5) {
  if (p < (1/6)) {
    6*p*f1
  } else if (p < (1/3)) {
    6*(f1-f2)*p+2*f1-f2
  } else if (p < (1/2)) {
    6*(f3-f2)*p+3*f2-2*f3
  } else if (p < (2/3)) {
    6*(f4-f3)*p+4*f3-3*f4
  } else if (p < (5/6)) {
    6*(f5-f4)*p+5*f4-4*f5
  } else {
    6*(1-p)*f5
  }
}
trimodal<-function(x,y,centre,slopetrait,xpos1,xpos2,tslope1,tslope2,min,delta,s1,s2,s3,slopemoymix,slopemoy,f1,f2,f3,f4,f5) {
p<-f(x,centre,pentefreqfunc(slopetrait,slopemoymix,slopemoy),xpos1,xpos2,tslope1,tslope2)
p1<-p^2+p*(1-p)*ff(p,f1,f2,f3,f4,f5)
p3<-(1-p)^2+p*(1-p)*ff(p,f1,f2,f3,f4,f5)
p1*(exp(-(y-m1(x,centre,slopetrait,tslope1,tslope2,min,delta,slopemoymix,slopemoy))^2/((2*s1)^2))/(sqrt(2*pi*s1)))+p3*(exp(-(y-m3(x,centre,slopetrait,tslope1,tslope2,min,delta,slopemoymix,slopemoy))^2/((2*s3)^2))/(sqrt(2*pi*s3)))+(1-p1-p3)*(exp(-(y-m2(x,centre,slopetrait,tslope1,tslope2,min,delta,slopemoymix,slopemoy))^2/((2*s2)^2))/(sqrt(2*pi*s2)))
}
#this is the only function you need to run
trimodal.array<-function(xmin,xmax,ymin,ymax,var_list) {
xx<-seq(xmin,xmax,(xmax-xmin)/50)
yy<-seq(ymin,ymax,(ymax-ymin)/50)
arr<-array(0,c(length(xx),length(yy)))
for(i in 1:length(xx)) {
for(j in 1:length(yy))
{arr[i,j]<-trimodal(xx[i],yy[j],var_list[1],var_list[2],var_list[3],var_list[4],var_list[5],var_list[6],var_list[7],var_list[8],var_list[9],var_list[10],var_list[11],var_list[12],var_list[13],var_list[14],var_list[15],var_list[16],var_list[17],var_list[18])
}}
arr
}
#trial finres data (the length of the dorsal fin through the HZ)
#the data come from the CFit maximum model:
# centre width xpos1 xpos2 tslope1 tslope2 coeffslopemean
# min delta sigma1 sigma2 sigma3 mix
# phi1 phi2 phi3 phi4 phi5
#I recommend entering your values as a list like this (the example below is for my trait finres) and using trimodal.array() to plot the clines. The variables are in the order a-r as in the table on page 10 of the CFit 7 documents,it's important to maintain this order.
finresdata<-c(centre<--0.34,slope<--2.04,d1<-1.17,d2<-0.0001,t1<-0.250,t2<-0.0001,min_mu<--0.630,delta_mu<-2.254,sigma1<-1.596,sigma2<-0.197,sigma3<-0.909,mix<-0.726,a_m<-0.555,f1<-0.968,f2<-0.882,f3<--0.581,f4<--0.013,f5<-0.675)
#the finres full model gives a really strange result
finresplot<-trimodal.array(-0.8,2,-3,3,finresdata)
xlimits<-seq(-0.8,2,length.out=nrow(finresplot))
ylimits<-seq(-3,3,length.out=ncol(finresplot))
filled.contour(xlimits,ylimits,finresplot, color = heat.colors, plot.title = title(main = "dorsal fin length"),nlevels=10,plot.axes={ axis(1); axis(2); points(site/1000,finres)})
#trial pectres data (the length of the pectoral fin through the HZ)
pectresdata<-c(centre_pectres<--0.172321142,pente_pectres<--35.19,xpos1_pectres<-0.0001,xpos2_pectres<-43.506,tslope1_pectres<-0.000,tslope2_pectres<-0.765,coeffslopemoy_pectres<-0.291,min_pectres<--0.742,delta_pectres<-1.327,sigma1_pectres<-0.737,sigma2_pectres<-0.545,sigma3_pectres<-1.698,mixpectres<-1.284,f1_pectres<-0.817,f2_pectres<--0.499,f3_pectres<--0.864,f4_pectres<--0.497,f5_pectres<--0.188)
#the pectres full model gives a two step cline (which happens because it tries to accommodate the strangely high points at 1.7)
pectresplot<-trimodal.array(-0.8,2,-3,3,pectresdata)
xlimits<-seq(-0.8,2,length.out=nrow(pectresplot))
ylimits<-seq(-3,3,length.out=ncol(pectresplot))
test.plot<-filled.contour(xlimits,ylimits,pectresplot, color = heat.colors, plot.title = title(main = "pectoral fin length"),nlevels=10,plot.axes={ axis(1); axis(2); points(site/1000,pectres)})
|
###IMPORT LIBRARIES####
library(dplyr)
library(magrittr)
###READ IN DATA####
# Read the raw smoking data: V1 = smoking-category code (1-4), V2 = FEV1.
f <-read.table("data/smoking-data.dat",header=FALSE)
names(f)
#RENAME COLUMNS
# NOTE(review): cbind() builds a 1x2 matrix here; a plain character vector
# c("cat", "fev1") is the conventional argument for colnames<-.
colnames(f)<-cbind("cat","fev1")
#RENAME VARIABLES
# Recode the numeric category as a labelled, ordered factor.
# NOTE(review): this mixes dplyr::if_else with base ifelse, uses T instead
# of TRUE, and factor(..., ordered=T) sorts levels alphabetically
# (current < early < non-smoker < recent) -- probably not the intended
# ordering. The levels/labels approach used for g_data below is cleaner.
f_clean <- f %>%
mutate(cat.f= factor(if_else(cat=="1","non-smoker",
ifelse(cat=="2","early",
ifelse(cat=="3","recent",
ifelse(cat=="4","current","")))),
ordered=T),
ident=order(cat))
# print the cleaned data
f_clean
###I found a better way to do this after I had finished####
# NOTE(review): `g` is never defined in this script -- the data were read
# into `f` above, so this section errors as written. Also, f's columns were
# already renamed to cat/fev1, so rename(cat=V1, fev1=V2) would fail even on
# a copy of f; `g` should be a fresh read.table() of the raw file.
g_data <- g %>%
rename(cat=V1,fev1=V2)%>%
mutate(cat.f = factor(cat,
levels=c("1","2","3","4"),
labels=c("non","early","former","current"),
ordered=T),
ident=order(cat))
g_data
| /assignment01/scripts/cleaning.R | no_license | JeremyYeaton/cogmaster_as | R | false | false | 867 | r | ###IMPORT LIBRARIES####
library(dplyr)
library(magrittr)
###READ IN DATA####
f <-read.table("data/smoking-data.dat",header=FALSE)
names(f)
#RENAME COLUMNS
colnames(f)<-cbind("cat","fev1")
#RENAME VARIABLES
f_clean <- f %>%
mutate(cat.f= factor(if_else(cat=="1","non-smoker",
ifelse(cat=="2","early",
ifelse(cat=="3","recent",
ifelse(cat=="4","current","")))),
ordered=T),
ident=order(cat))
f_clean
###I found a better way to do this after I had finished####
g_data <- g %>%
rename(cat=V1,fev1=V2)%>%
mutate(cat.f = factor(cat,
levels=c("1","2","3","4"),
labels=c("non","early","former","current"),
ordered=T),
ident=order(cat))
g_data
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/STRUCTURE_interface.R
\name{write.structure.bin}
\alias{write.structure.bin}
\title{writing an executable file to run the 'structure' commands}
\usage{
write.structure.bin(object, fn = "structure_sim", k.range = c(1, 10),
n.runs = 20, path = "Structure", parameter_list = list())
}
\arguments{
\item{object}{\code{Genotype}. The object containing the samples and markers
to run STRUCTURE on.}
\item{fn}{\code{character}. The file name for the file with the STRUCTURE commands.}
\item{k.range}{\code{numeric}. A vector with 2 values: the minimum and
maximum 'K's to run. The program will run all the values of K between and
including the minimum and maximum.}
\item{n.runs}{\code{numeric}. The number of runs to run each K. In any case,
there will not be more than one run with K=1, because for this K the results
are degenerated and deterministic.}
\item{path}{\code{character}. The directory to write the STRUCTURE files in.}
\item{parameter_list}{\code{list}. An optional list of additional parameters
for STRUCTURE.}
}
\description{
Writing an 'bash' file with the command to run a series of STRUCTURE
simulations, and files with parameters for STRUCTURE. This allows you to run
a number of simulations for each K (the number of ancestry populations),
which is recommended for the analysis of STRUCTURE results.
}
\details{
After running \code{write.structure.bin}, a new file named
\code{fn} is formed in your working directory, with the calls to STRUCTURE.
to run it, you first need to turn it to an executable file, for example by
typing \code{chmod a=rwx <fn>} in your Unix command line terminal. You can
then run it by typing in your Unix command line terminal
\code{./<fn>}.
}
\examples{
data("citrus_clean")
Alleles = getMarkerInfo(MxS, Masked=FALSE)$Alleles
names(Alleles) <- MarkerNames(MxS, Masked=F)
nucMxS=xy2atgc(MxS, alleles=Alleles)
write.structure.bin(MxS, fn="structure_sim", k.range = c(1, 10), n.runs=20)
}
| /man/write.structure.bin.Rd | no_license | mor-rubinstein/collectiongenetics | R | false | true | 2,020 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/STRUCTURE_interface.R
\name{write.structure.bin}
\alias{write.structure.bin}
\title{writing an executable file to run the 'structure' commands}
\usage{
write.structure.bin(object, fn = "structure_sim", k.range = c(1, 10),
n.runs = 20, path = "Structure", parameter_list = list())
}
\arguments{
\item{object}{\code{Genotype}. The object containing the samples and markers
to run STRUCTURE on.}
\item{fn}{\code{character}. The file name for the file with the STRUCTURE commands.}
\item{k.range}{\code{numeric}. A vector with 2 values: the minimum and
maximum 'K's to run. The program will run all the values of K between and
including the minimum and maximum.}
\item{n.runs}{\code{numeric}. The number of runs to run each K. In any case,
there will not be more than one run with K=1, because for this K the results
are degenerated and deterministic.}
\item{path}{\code{character}. The directory to write the STRUCTURE files in.}
\item{parameter_list}{\code{list}. An optional list of additional parameters
for STRUCTURE.}
}
\description{
Writing an 'bash' file with the command to run a series of STRUCTURE
simulations, and files with parameters for STRUCTURE. This allows you to run
a number of simulations for each K (the number of ancestry populations),
which is recommended for the analysis of STRUCTURE results.
}
\details{
After running \code{write.structure.bin}, a new file named
\code{fn} is formed in your working directory, with the calls to STRUCTURE.
to run it, you first need to turn it to an executable file, for example by
typing \code{chmod a=rwx <fn>} in your Unix command line terminal. You can
then run it by typing in your Unix command line terminal
\code{./<fn>}.
}
\examples{
data("citrus_clean")
Alleles = getMarkerInfo(MxS, Masked=FALSE)$Alleles
names(Alleles) <- MarkerNames(MxS, Masked=F)
nucMxS=xy2atgc(MxS, alleles=Alleles)
write.structure.bin(MxS, fn="structure_sim", k.range = c(1, 10), n.runs=20)
}
|
# READ NBB codes - XBRL Mapping file
# Clear all environment variables
# NOTE(review): rm(list = ls()) in a script wipes the user's workspace and
# is generally discouraged; prefer running the script in a fresh session.
rm(list = ls(all.names = TRUE))
#set working directory
# NOTE(review): hard-coded absolute personal path -- not portable; consider
# a project-relative path instead.
setwd("/Users/fvangool/Dropbox (Personal)/R projects/data/")
# monitor performance
ptm <- proc.time()
#for cbind
#output <- df <- data.frame(matrix(ncol = 0, nrow = 37))
#for rbind
# Accumulator for the per-file results (rows are appended inside the loop).
output <- df <- data.frame(matrix(ncol = 6, nrow = 0))
## MAPPING Retrieval
#read NBB codes formula in dataframe
# Keep only the formula name and its NBB code; empty strings count as NA.
formulaNBB <- read.table("finance_formulascodes.csv", stringsAsFactors=FALSE,
header=TRUE,
sep=";", na.strings = c("NA", "")
)[ ,c("formula_name", "NBB_code")]
# remove NA entries
formulaNBB <- na.omit(formulaNBB)
# READ NBB codes - XBRL Mapping file
# Mapping table from NBB codes to XBRL element (Xlink) labels.
data <- read.table("160310-XBRL Mapping NBB codes comma.csv", stringsAsFactors=FALSE,
header=TRUE,
sep=";"
)
# function to retrieve all Xlink labels based on NBB code
# Return all rows of the global mapping table `data` whose NBB_Code matches
# the given code and whose Publisher is "NBB".
#
# FIX: subset via which() so rows with NA in NBB_Code are dropped instead
# of turning into all-NA rows in the result (logical subsetting with `==`
# keeps NA indices).
retrieveXlink <- function(nbbcode) {
  hit <- which(data$NBB_Code == as.character(nbbcode) & data$Publisher == "NBB")
  data[hit, ]
}
# lookup matching Xlink Label
# xlinklist <- lapply(formulaNBB$NBB_code, function (x) retrieveXlink(x))
# One data frame of mapping rows per unique NBB code.
xlinklist <- lapply(unique(formulaNBB$NBB_code), function (x) retrieveXlink(x))
# merge list of dataframes in 1 dataframe
xlinks <- do.call("rbind", xlinklist)
#look up all xbrl files directory
#read all the filenames with XBRL extension from a directory
# NOTE(review): list.files() takes a regex, not a glob -- "\\.xbrl$" would
# be the precise pattern. The empty first argument defaults to the current
# working directory.
filenames <- list.files(, pattern="*.xbrl", full.names=FALSE)
#start looping trough files
# Main extraction loop: parse each XBRL file, pull the values for every
# mapped NBB code plus the reporting period and entity info, and append one
# block of rows per file to `output`.
# NOTE(review): seq_along(filenames) would be safer than 1:length(filenames)
# (the latter yields c(1, 0) when no files are found).
for (i in 1:length(filenames)) {
## XPATH part
# install.packages("xml2")
# NOTE(review): library(xml2) could be hoisted above the loop.
library(xml2)
#x <- read_xml("virtus.xbrl")
x <- read_xml(filenames[i])
# xlinks$Xlink_label
# Look up one XBRL element by its label; returns c(nbb_code, label, values).
getXBRLvalue <- function( nbb, xbrl){
b <- nbb
a <- xbrl
# NOTE(review): missing space before "or" in the XPath predicate below --
# "...'CurrentInstant' or @..." would be the safer spelling; verify that
# libxml2 tolerates the current form.
xp <- sprintf(".//pfs:%s[@contextRef='CurrentInstant'or @contextRef='CurrentDuration']",a)
tt <- xml_find_all(x,xp, xml_ns(x))
out <- c(b,a,xml_text(tt))
}
#res <- lapply(xlinks, function (x) getXBRLvalue(x),simplify = FALSE, USE.NAMES = TRUE)
#res <- mapply(xlinks$NBB_Code, function (x) getXBRLvalue(x), MoreArgs = xlinks$Xlink_label)
#res <- sapply(xlinks$NBB_Code, function (x) getXBRLvalue(x),xbrl =xlinks$Xlink_label)
# One c(code, label, value...) result per mapping row.
qq <- mapply(getXBRLvalue, xlinks$NBB_Code, xlinks$Xlink_label)
# First element = NBB code, second = XBRL name, third = numeric value.
result <- data.frame( nbb_code = sapply( qq, "[", 1), xbrl_name = sapply( qq, "[", 2),
xbrl_value =as.numeric( sapply( qq, "[", 3) ) )
# GET General info company and annual report period start end
#start date of the annual report period
xp <- sprintf(".//xbrli:startDate")
tt <- xml_find_all(x,xp, xml_ns(x))
out <- xml_text(tt)
periodStartDate <- out[1]
#result$start_date <- periodStartDate
#WARNING: store the char input as date
# NOTE(review): as.POSIXlt in a data-frame column is unusual; POSIXct (or
# Date) is the recommended class for data-frame date columns.
result$start_date <- as.POSIXlt(periodStartDate, "%Y-%m-%d", tz = "CET")
#end date of the annual report period
xp <- sprintf(".//xbrli:endDate")
tt <- xml_find_all(x,xp, xml_ns(x))
out <- xml_text(tt)
periodEndDate <- out[1]
result$end_date <- periodEndDate
# EntityCurrentLegalName
xp <- sprintf(".//pfs-gcd:EntityCurrentLegalName[@contextRef='CurrentDuration']")
tt <- xml_find_all(x,xp, xml_ns(x))
entityCurrentLegalName <- xml_text(tt)
result$entity_name <- entityCurrentLegalName
#VAT Number
xp <- sprintf(".//pfs-gcd:IdentifierValue[@contextRef='CurrentDuration']")
tt <- xml_find_all(x,xp, xml_ns(x))
VATnumber <- xml_text(tt)
#result$vat <- VATnumber
# construct filename based on general info
#filename <- paste(entityCurrentLegalName, VATnumber, periodStartDate, periodEndDate,".xlsx", sep="_")
#print(filename[1])
# library(xlsx)
#
# write.xlsx(result, filename[1])
#cbind output
#print(head(result))
#cbind data
#output <- cbind(output,result)
#rbind data
# NOTE(review): rbind inside a loop is O(n^2); collecting the result blocks
# in a list and calling do.call(rbind, ...) once afterwards scales better.
output <- rbind(output,result)
#end for loop
}
#library(xlsx)
#head(output)
#str(output)
#write.xlsx(output, "Gert_results binded by column.xlsx")
#write.xlsx(output, "160414-Concurrentieanlayse-Virtus Shop in Shape_cbind.xlsx")
# # Print performance result
#print(proc.time() - ptm)
#add year column based on start_date of output variable
require(lubridate)
output$year <- year(output$start_date)
#remove the start and end date
output$start_date <- NULL
output$end_date <- NULL
#reorder output columns
output <-output[,c(4,2,1,3,5)]
# sort by entity name then by year
output <- output[ order(output[,1], output[,3], output[,5]),]
## testing transpose wiht reshape
#test <- head(output,200)
test <-output
test$xbrl_name <- NULL
library(reshape2)
#nn<-reshape(test,timevar="year",idvar="entity_name",direction="wide")
#molten = melt(test, id = c("xbrl_value"))
molten = melt(test, id.vars = c("entity_name","year","nbb_code") , measure.vars = c("xbrl_value"))
molten
pret <- molten
pret$variable <- NULL
nn<-reshape(pret,timevar="year",idvar=c("entity_name", "nbb_code"),direction="wide")
#write.xlsx(nn, "testy.xlsx")
#sort by nbbcode
#l <-nn[,c(2,1)]
#l <- nn[,c(ncol(nn),1:(ncol(nn)-1))]
l <- nn[,c(2,1,1:(ncol(nn)-2))]
#nn <-nn[,c(4,2,1,3,5)]
# sort by nbb code then by entity name
nn <- nn[ order(l[,1], l[,2]),]
head(nn)
nn[nn == "#N/A"] <- NA
nn[is.na(nn)] <- 0
print(proc.time() - ptm)
library(xlsx)
write.xlsx(nn, "160502-VIRTUS Concurrentieanalyse.xlsx")
# my_func <- function(x) {
# paste0(deparse(x), collapse="")
# }
# nn<-reshape(molten,timevar="cat",idvar="sample",direction="wide")
# happy <- dcast(molten, formula = entity_name + year ~ variable,value.var="value",
# fun.aggregate=my_func)
# happy
# require(reshape2)
# x = data.frame(subject = c("John", "Mary"),
# time = c(1,1),
# age = c(33,NA),
# weight = c(90, NA),
# height = c(2,2))
# x
# molten = melt(x, id = c("subject", "time"))
# molten
# dcast(molten, formula = time + subject ~ variable, value.var="value",
# fun.aggregate=my_func)
#
#
# # Melt French Fries dataset
# data(french_fries)
# ffm <- melt(french_fries, id = 1:4, na.rm = TRUE)
#
# # Aggregate examples - all 3 yield the same result
# dcast(ffm, treatment ~ .)
# dcast(ffm, treatment ~ ., function(x) length(x))
# dcast(ffm, treatment ~ ., length)
#
# # Passing further arguments through ...
# dcast(ffm, treatment ~ ., sum)
# dcast(ffm, treatment ~ ., sum, trim = 0.1)
#
# data(airquality)
# names(airquality) <- tolower(names(airquality))
#
# df <- data.frame(
# V1=rep(1:3, 14),
# V2=rep(paste0("A", 0:6), 6),
# V3=sample(1:100, 42),
# V4=paste0(sample(letters, 42, replace=TRUE), sample(letters, 42, replace=TRUE))
# )
print(proc.time() - ptm)
#split the dataframes
# nn$entity_name <- as.factor(nn$entity_name)
# spli <- split( nn,nn$entity_name)
# #get the third data.frame from list
# xx <-as.data.frame(spli[3])
# ebit1 <- xx[xx$Bossuyt.Winkelinrichting.nbb_code == "9903" ,]
# ebit2 <- xx[xx$Bossuyt.Winkelinrichting.nbb_code == "751" ,]
# ebit3 <- xx[xx$Bossuyt.Winkelinrichting.nbb_code == "752/9" ,]
# ebit4 <- xx[xx$Bossuyt.Winkelinrichting.nbb_code == "650" ,]
# ebit5 <- xx[xx$Bossuyt.Winkelinrichting.nbb_code == "652/9" ,]
#
#
# ebit2011 <- ebit1$Bossuyt.Winkelinrichting.value.2011 - ebit2$Bossuyt.Winkelinrichting.value.2011 - ebit3$Bossuyt.Winkelinrichting.value.2011 + ebit4$Bossuyt.Winkelinrichting.value.2011 + ebit5$Bossuyt.Winkelinrichting.value.2011
#
# EBIT calculation
# 9903-751-752/9+ 650+ 652/9
# "Winst (verlies) van het boekjaar voor belasting 9903
#
# – Opbrengsten uit vlottende activa 751
# – Andere financiële opbrengsten 752/9
# + Kosten van schulden 650
# + Andere financiële kosten 652/9"
wvvbel <- subset(output, nbb_code == "9903")
wvvbel[is.na(wvvbel)] <- 0
ova <- subset(output, nbb_code == "751")
ova[is.na(ova)] <- 0
afo <- subset(output, nbb_code == "752/9")
afo[is.na(afo)] <- 0
ks <- subset(output, nbb_code == "650")
ks[is.na(ks)] <- 0
afk <- subset(output, nbb_code == "652/9")
afk[is.na(afk)] <- 0
# 9903-751-752/9+ 650+ 652/9
#calculate EBIT
EBIT <- cbind(wvvbel$xbrl_value - ova$xbrl_value - afo$xbrl_value + ks$xbrl_value + afk$xbrl_value)
EBIT <- cbind(wvvbel, wvvbel$xbrl_value - ova$xbrl_value - afo$xbrl_value + ks$xbrl_value + afk$xbrl_value)
colnames(EBIT)[6] <- "EBIT"
EBIT$nbb_code <- NULL
EBIT$xbrl_value <- NULL
EBIT$xbrl_name <- NULL
library(xlsx)
write.xlsx(EBIT, "EBIT.xlsx")
write.xlsx(output, "ouput.xlsx")
# Acid test (liquiditeit enge zin)
# (29/58-29-3):(42/48+492/3)
t1 <- subset(output, nbb_code == "29/58")
t1[is.na(t1)] <- 0
t2 <- subset(output, nbb_code == "29")
t2[is.na(t2)] <- 0
t3 <- subset(output, nbb_code == "3")
t3[is.na(t3)] <- 0
t4 <- subset(output, nbb_code == "42/48")
t4[is.na(t4)] <- 0
t5 <- subset(output, nbb_code == "492/3")
t5[is.na(t5)] <- 0
# (29/58-29-3):(42/48+492/3)
acid <- EBIT <- cbind(t1,(t1$xbrl_value - t2$xbrl_value - t3$xbrl_value)/(t4$xbrl_value + t5$xbrl_value) )
colnames(EBIT)[6] <- "EBIT"
#### test
library(ggplot2)
ggplot(data = omzet, aes(x = year, y = xbrl_value/1000, color = entity_name)) +
geom_line(aes(group = entity_name)) + geom_point()
test <- as.Date(as.character(omzet$year), format="%Y")
mydate = strptime(omzet$year,format='%Y')
years(test)
write.csv(omzet, file = "omzet.csv")
nonsense <- read.csv("omzet.csv")
| /ipcore.R | no_license | fvangool/Shiny-Application | R | false | false | 9,732 | r | # READ NBB codes - XBRL Mapping file
# Clear all environment variables
rm(list = ls(all.names = TRUE))
#set working directory
setwd("/Users/fvangool/Dropbox (Personal)/R projects/data/")
# monitor performance
ptm <- proc.time()
#for cbind
#output <- df <- data.frame(matrix(ncol = 0, nrow = 37))
#for rbind
output <- df <- data.frame(matrix(ncol = 6, nrow = 0))
## MAPPING Retrieval
#read NBB codes formula in dataframe
formulaNBB <- read.table("finance_formulascodes.csv", stringsAsFactors=FALSE,
header=TRUE,
sep=";", na.strings = c("NA", "")
)[ ,c("formula_name", "NBB_code")]
# remove NA entries
formulaNBB <- na.omit(formulaNBB)
# READ NBB codes - XBRL Mapping file
data <- read.table("160310-XBRL Mapping NBB codes comma.csv", stringsAsFactors=FALSE,
header=TRUE,
sep=";"
)
# Retrieve all Xlink label rows for one NBB code.
#
# Args:
#   nbbcode: a single NBB code; coerced to character before matching.
#   mapping: mapping table to search (needs NBB_Code and Publisher columns).
#            Defaults to the global `data` frame read above, so existing
#            calls keep working unchanged.
# Returns: the subset of `mapping` whose NBB_Code equals `nbbcode` and whose
#          Publisher is "NBB" (possibly zero rows).
retrieveXlink <- function(nbbcode, mapping = data){
  mapping[mapping$NBB_Code == as.character(nbbcode) & mapping$Publisher == "NBB", ]
}
# lookup matching Xlink Label
# xlinklist <- lapply(formulaNBB$NBB_code, function (x) retrieveXlink(x))
xlinklist <- lapply(unique(formulaNBB$NBB_code), function (x) retrieveXlink(x))
# merge list of dataframes in 1 dataframe
xlinks <- do.call("rbind", xlinklist)
#look up all xbrl files directory
#read all the filenames with XBRL extension from a directory
filenames <- list.files(, pattern="*.xbrl", full.names=FALSE)
#start looping trough files
for (i in 1:length(filenames)) {
## XPATH part
# install.packages("xml2")
library(xml2)
#x <- read_xml("virtus.xbrl")
x <- read_xml(filenames[i])
# xlinks$Xlink_label
getXBRLvalue <- function(nbb, xbrl){
  # Look up one pfs: element (named by `xbrl`) in the XBRL document and
  # return c(NBB code, XBRL name, element text...).
  # NOTE(review): reads the xml document `x` from the enclosing environment
  # (set by read_xml in the surrounding loop) -- confirm this is intended.
  xpath <- sprintf(".//pfs:%s[@contextRef='CurrentInstant'or @contextRef='CurrentDuration']", xbrl)
  nodes <- xml_find_all(x, xpath, xml_ns(x))
  c(nbb, xbrl, xml_text(nodes))
}
#res <- lapply(xlinks, function (x) getXBRLvalue(x),simplify = FALSE, USE.NAMES = TRUE)
#res <- mapply(xlinks$NBB_Code, function (x) getXBRLvalue(x), MoreArgs = xlinks$Xlink_label)
#res <- sapply(xlinks$NBB_Code, function (x) getXBRLvalue(x),xbrl =xlinks$Xlink_label)
qq <- mapply(getXBRLvalue, xlinks$NBB_Code, xlinks$Xlink_label)
result <- data.frame( nbb_code = sapply( qq, "[", 1), xbrl_name = sapply( qq, "[", 2),
xbrl_value =as.numeric( sapply( qq, "[", 3) ) )
# GET General info company and annual report period start end
#start data annual report period
xp <- sprintf(".//xbrli:startDate")
tt <- xml_find_all(x,xp, xml_ns(x))
out <- xml_text(tt)
periodStartDate <- out[1]
#result$start_date <- periodStartDate
#WARNING: store the char input as date
result$start_date <- as.POSIXlt(periodStartDate, "%Y-%m-%d", tz = "CET")
#end date annual report period
xp <- sprintf(".//xbrli:endDate")
tt <- xml_find_all(x,xp, xml_ns(x))
out <- xml_text(tt)
periodEndDate <- out[1]
result$end_date <- periodEndDate
# EntityCurrentLegalName
xp <- sprintf(".//pfs-gcd:EntityCurrentLegalName[@contextRef='CurrentDuration']")
tt <- xml_find_all(x,xp, xml_ns(x))
entityCurrentLegalName <- xml_text(tt)
result$entity_name <- entityCurrentLegalName
#VAT Number
xp <- sprintf(".//pfs-gcd:IdentifierValue[@contextRef='CurrentDuration']")
tt <- xml_find_all(x,xp, xml_ns(x))
VATnumber <- xml_text(tt)
#result$vat <- VATnumber
# construct filename based on general info
#filename <- paste(entityCurrentLegalName, VATnumber, periodStartDate, periodEndDate,".xlsx", sep="_")
#print(filename[1])
# library(xlsx)
#
# write.xlsx(result, filename[1])
#cbind output
#print(head(result))
#cbind data
#output <- cbind(output,result)
#rbind data
output <- rbind(output,result)
#end for loop
}
#library(xlsx)
#head(output)
#str(output)
#write.xlsx(output, "Gert_results binded by column.xlsx")
#write.xlsx(output, "160414-Concurrentieanlayse-Virtus Shop in Shape_cbind.xlsx")
# # Print performance result
#print(proc.time() - ptm)
#add year column based on start_date of output variable
require(lubridate)
output$year <- year(output$start_date)
#remove the start and end date
output$start_date <- NULL
output$end_date <- NULL
#reorder output columns
output <-output[,c(4,2,1,3,5)]
# sort by entity name then by year
output <- output[ order(output[,1], output[,3], output[,5]),]
## testing transpose with reshape
#test <- head(output,200)
test <-output
test$xbrl_name <- NULL
library(reshape2)
#nn<-reshape(test,timevar="year",idvar="entity_name",direction="wide")
#molten = melt(test, id = c("xbrl_value"))
molten = melt(test, id.vars = c("entity_name","year","nbb_code") , measure.vars = c("xbrl_value"))
molten
pret <- molten
pret$variable <- NULL
nn<-reshape(pret,timevar="year",idvar=c("entity_name", "nbb_code"),direction="wide")
#write.xlsx(nn, "testy.xlsx")
#sort by nbbcode
#l <-nn[,c(2,1)]
#l <- nn[,c(ncol(nn),1:(ncol(nn)-1))]
l <- nn[,c(2,1,1:(ncol(nn)-2))]
#nn <-nn[,c(4,2,1,3,5)]
# sort by nbb code then by entity name
nn <- nn[ order(l[,1], l[,2]),]
head(nn)
nn[nn == "#N/A"] <- NA
nn[is.na(nn)] <- 0
print(proc.time() - ptm)
library(xlsx)
write.xlsx(nn, "160502-VIRTUS Concurrentieanalyse.xlsx")
# my_func <- function(x) {
# paste0(deparse(x), collapse="")
# }
# nn<-reshape(molten,timevar="cat",idvar="sample",direction="wide")
# happy <- dcast(molten, formula = entity_name + year ~ variable,value.var="value",
# fun.aggregate=my_func)
# happy
# require(reshape2)
# x = data.frame(subject = c("John", "Mary"),
# time = c(1,1),
# age = c(33,NA),
# weight = c(90, NA),
# height = c(2,2))
# x
# molten = melt(x, id = c("subject", "time"))
# molten
# dcast(molten, formula = time + subject ~ variable, value.var="value",
# fun.aggregate=my_func)
#
#
# # Melt French Fries dataset
# data(french_fries)
# ffm <- melt(french_fries, id = 1:4, na.rm = TRUE)
#
# # Aggregate examples - all 3 yield the same result
# dcast(ffm, treatment ~ .)
# dcast(ffm, treatment ~ ., function(x) length(x))
# dcast(ffm, treatment ~ ., length)
#
# # Passing further arguments through ...
# dcast(ffm, treatment ~ ., sum)
# dcast(ffm, treatment ~ ., sum, trim = 0.1)
#
# data(airquality)
# names(airquality) <- tolower(names(airquality))
#
# df <- data.frame(
# V1=rep(1:3, 14),
# V2=rep(paste0("A", 0:6), 6),
# V3=sample(1:100, 42),
# V4=paste0(sample(letters, 42, replace=TRUE), sample(letters, 42, replace=TRUE))
# )
print(proc.time() - ptm)
#split the dataframes
# nn$entity_name <- as.factor(nn$entity_name)
# spli <- split( nn,nn$entity_name)
# #get the third data.frame from list
# xx <-as.data.frame(spli[3])
# ebit1 <- xx[xx$Bossuyt.Winkelinrichting.nbb_code == "9903" ,]
# ebit2 <- xx[xx$Bossuyt.Winkelinrichting.nbb_code == "751" ,]
# ebit3 <- xx[xx$Bossuyt.Winkelinrichting.nbb_code == "752/9" ,]
# ebit4 <- xx[xx$Bossuyt.Winkelinrichting.nbb_code == "650" ,]
# ebit5 <- xx[xx$Bossuyt.Winkelinrichting.nbb_code == "652/9" ,]
#
#
# ebit2011 <- ebit1$Bossuyt.Winkelinrichting.value.2011 - ebit2$Bossuyt.Winkelinrichting.value.2011 - ebit3$Bossuyt.Winkelinrichting.value.2011 + ebit4$Bossuyt.Winkelinrichting.value.2011 + ebit5$Bossuyt.Winkelinrichting.value.2011
#
# EBIT calculation
# 9903-751-752/9+ 650+ 652/9
# "Winst (verlies) van het boekjaar voor belasting 9903
#
# – Opbrengsten uit vlottende activa 751
# – Andere financiële opbrengsten 752/9
# + Kosten van schulden 650
# + Andere financiële kosten 652/9"
wvvbel <- subset(output, nbb_code == "9903")
wvvbel[is.na(wvvbel)] <- 0
ova <- subset(output, nbb_code == "751")
ova[is.na(ova)] <- 0
afo <- subset(output, nbb_code == "752/9")
afo[is.na(afo)] <- 0
ks <- subset(output, nbb_code == "650")
ks[is.na(ks)] <- 0
afk <- subset(output, nbb_code == "652/9")
afk[is.na(afk)] <- 0
# 9903-751-752/9+ 650+ 652/9
# calculate EBIT = 9903 - 751 - 752/9 + 650 + 652/9, keeping the identifier
# columns of the 9903 rows alongside the derived value.
# (A first `EBIT <- cbind(...)` of the bare values was dead code -- it was
# immediately overwritten by the line below -- and has been removed.)
EBIT <- cbind(wvvbel, wvvbel$xbrl_value - ova$xbrl_value - afo$xbrl_value + ks$xbrl_value + afk$xbrl_value)
colnames(EBIT)[6] <- "EBIT"
# drop the raw code/value columns so only identifiers + EBIT remain
EBIT$nbb_code <- NULL
EBIT$xbrl_value <- NULL
EBIT$xbrl_name <- NULL
library(xlsx)
write.xlsx(EBIT, "EBIT.xlsx")
write.xlsx(output, "ouput.xlsx")
# Acid test (liquiditeit enge zin)
# (29/58-29-3):(42/48+492/3)
t1 <- subset(output, nbb_code == "29/58")
t1[is.na(t1)] <- 0
t2 <- subset(output, nbb_code == "29")
t2[is.na(t2)] <- 0
t3 <- subset(output, nbb_code == "3")
t3[is.na(t3)] <- 0
t4 <- subset(output, nbb_code == "42/48")
t4[is.na(t4)] <- 0
t5 <- subset(output, nbb_code == "492/3")
t5[is.na(t5)] <- 0
# Acid test ratio: (29/58 - 29 - 3) / (42/48 + 492/3)
# BUGFIX(review): the original chained assignment `acid <- EBIT <- cbind(...)`
# clobbered the EBIT table computed above and then labelled the new ratio
# column "EBIT"; assign only to `acid` and label the column accordingly.
acid <- cbind(t1, (t1$xbrl_value - t2$xbrl_value - t3$xbrl_value)/(t4$xbrl_value + t5$xbrl_value) )
colnames(acid)[6] <- "acid"
#### test
library(ggplot2)
ggplot(data = omzet, aes(x = year, y = xbrl_value/1000, color = entity_name)) +
geom_line(aes(group = entity_name)) + geom_point()
test <- as.Date(as.character(omzet$year), format="%Y")
mydate = strptime(omzet$year,format='%Y')
years(test)
write.csv(omzet, file = "omzet.csv")
nonsense <- read.csv("omzet.csv")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/OptPath_getter.R
\name{getOptPathEOL}
\alias{getOptPathEOL}
\title{Get end-of-life vector from the optimization path.}
\usage{
getOptPathEOL(op, dob, eol)
}
\arguments{
\item{op}{[\code{\link{OptPath}}]\cr
Optimization path.}
\item{dob}{[\code{integer}]\cr
Vector of date-of-birth values to further subset the result.
Only elements with a date-of-birth included in \code{dob} are selected.
Default is all.}
\item{eol}{[\code{integer}]\cr
Vector of end-of-life values to further subset the result.
Only elements with an end-of-life included in \code{eol} are selected.
Default is all.}
}
\value{
[\code{integer}].
}
\description{
Get end-of-life vector from the optimization path.
}
\seealso{
Other optpath: \code{\link{OptPath}},
\code{\link{addOptPathEl}},
\code{\link{getOptPathBestIndex}},
\code{\link{getOptPathCols}},
\code{\link{getOptPathCol}}, \code{\link{getOptPathDOB}},
\code{\link{getOptPathEl}},
\code{\link{getOptPathErrorMessages}},
\code{\link{getOptPathExecTimes}},
\code{\link{getOptPathLength}},
\code{\link{getOptPathParetoFront}},
\code{\link{getOptPathX}}, \code{\link{getOptPathY}},
\code{\link{setOptPathElDOB}},
\code{\link{setOptPathElEOL}}
}
| /man/getOptPathEOL.Rd | no_license | bklppr/ParamHelpers | R | false | true | 1,274 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/OptPath_getter.R
\name{getOptPathEOL}
\alias{getOptPathEOL}
\title{Get end-of-life vector from the optimization path.}
\usage{
getOptPathEOL(op, dob, eol)
}
\arguments{
\item{op}{[\code{\link{OptPath}}]\cr
Optimization path.}
\item{dob}{[\code{integer}]\cr
Vector of date-of-birth values to further subset the result.
Only elements with a date-of-birth included in \code{dob} are selected.
Default is all.}
\item{eol}{[\code{integer}]\cr
Vector of end-of-life values to further subset the result.
Only elements with an end-of-life included in \code{eol} are selected.
Default is all.}
}
\value{
[\code{integer}].
}
\description{
Get end-of-life vector from the optimization path.
}
\seealso{
Other optpath: \code{\link{OptPath}},
\code{\link{addOptPathEl}},
\code{\link{getOptPathBestIndex}},
\code{\link{getOptPathCols}},
\code{\link{getOptPathCol}}, \code{\link{getOptPathDOB}},
\code{\link{getOptPathEl}},
\code{\link{getOptPathErrorMessages}},
\code{\link{getOptPathExecTimes}},
\code{\link{getOptPathLength}},
\code{\link{getOptPathParetoFront}},
\code{\link{getOptPathX}}, \code{\link{getOptPathY}},
\code{\link{setOptPathElDOB}},
\code{\link{setOptPathElEOL}}
}
|
sigma.til.f<-function(beta.til,sigma2.x.til,sigma2.u.til,lambda.e){
resultcol1<-c(((beta.til^2)*(sigma2.x.til))+(lambda.e*sigma2.u.til),(beta.til*sigma2.x.til))
resultcol2<-c((beta.til*sigma2.x.til),(sigma2.x.til+sigma2.u.til));
result<-cbind(resultcol1,resultcol2);
return(result);
} | /sigma_til.R | no_license | cardorl/AlgoritmoR_Repository | R | false | false | 295 | r | sigma.til.f<-function(beta.til,sigma2.x.til,sigma2.u.til,lambda.e){
resultcol1<-c(((beta.til^2)*(sigma2.x.til))+(lambda.e*sigma2.u.til),(beta.til*sigma2.x.til))
resultcol2<-c((beta.til*sigma2.x.til),(sigma2.x.til+sigma2.u.til));
result<-cbind(resultcol1,resultcol2);
return(result);
} |
#setup
library(tidyverse)
library(tidytuesdayR)
library(skimr)
library(tigris)
library(rnaturalearth)
library(ggplot2)
library(janitor)
library(extrafont)
library(patchwork)
#load & organize data
tt_data <- tt_load(2021, week=20)
broadband <- tt_data$broadband
broadbandzip <- tt_data$broadband_zip
#take a look
skim(broadband)
skim(broadbandzip)
#let's work with broadband
#clean and organize data (thanks to M Henderson for some help on this)
broadband2 <- tt_data$broadband %>%
clean_names() %>%
rename(Availability = broadband_availability_per_fcc,
Usage = broadband_usage
) %>%
mutate(
Usage = as.numeric(Usage),
Availability = as.numeric(Availability)
)
#filter to state
PA_data <- broadband2 %>%
filter(st == 'PA')
#load & filter state counties & clean
PA_counties <- counties(state = 'PA') %>%
clean_names()
#join sets
PA_bdnd <- PA_counties %>%
left_join(PA_data, by = c("namelsad" = "county_name"))
#plot Usage
g <- ggplot() +
geom_sf(color = "#232229", data = PA_bdnd, aes(fill = Usage), size = 0.25) +
scale_fill_gradient("% Using", labels = scales::percent, low = "#440154FF", high = "#3CBB75FF") +
theme(
plot.margin = unit(c(0.6, 0.5, 0.5, 0.5), "cm"),
panel.background = element_rect(fill = "#232229"),
plot.background = element_rect(fill = "#232229"),
legend.background = element_blank(),
strip.background = element_rect(fill = "#232229"),
plot.title = element_text(color = "#e9e0cc", size = 25, hjust = 1, family = "mono", face = "bold"),
plot.subtitle = element_text(color = "#e9e0cc", size = 15, hjust = 1, family = "mono", face = "bold"),
plot.caption = element_text(color = "#e9e0cc", size = 10, hjust = 0, family = "mono", face = "bold"),
legend.title = element_text(color = "#e9e0cc", size = 10, hjust = 0, family = "mono", face = "bold"),
legend.text = element_text(color = "#e9e0cc", family = "mono"),
legend.justification = "left",
legend.position = "bottom",
legend.key.width = unit(1, "cm"),
axis.title.x = element_blank(),
axis.title.y = element_blank(),
axis.text.x = element_blank(),
axis.text.y = element_blank(),
axis.ticks.x = element_blank(),
axis.ticks.y = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
)
#plot Availability
a <- ggplot() +
geom_sf(color = "#232229", data = PA_bdnd, aes(fill = Availability), size = 0.25) +
scale_fill_gradient("% Available", labels = scales::percent, low = "#440154FF", high = "#3CBB75FF") +
theme(
plot.margin = unit(c(0.6, 0.5, 0.5, 0.5), "cm"),
panel.background = element_rect(fill = "#232229"),
plot.background = element_rect(fill = "#232229"),
legend.background = element_blank(),
strip.background = element_rect(fill = "#232229"),
plot.title = element_text(color = "#e9e0cc", size = 25, hjust = 1, family = "mono", face = "bold"),
plot.subtitle = element_text(color = "#e9e0cc", size = 15, hjust = 1, family = "mono", face = "bold"),
plot.caption = element_text(color = "#e9e0cc", size = 10, hjust = 0, family = "mono", face = "bold"),
legend.title = element_text(color = "#e9e0cc", size = 10, hjust = 0, family = "mono", face = "bold"),
legend.text = element_text(color = "#e9e0cc", family = "mono", face = "bold"),
legend.justification = "left",
legend.position = "bottom",
legend.key.width = unit(1, "cm"),
axis.title.x = element_blank(),
axis.title.y = element_blank(),
axis.text.x = element_blank(),
axis.text.y = element_blank(),
axis.ticks.x = element_blank(),
axis.ticks.y = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
)
#Patch plots
pa <- g + a + plot_layout(ncol = 2) +
plot_annotation(
title = "PENNSYLVANIA",
subtitle = "Less than 40% using broadband in many counties despite being widly available",
caption = "#TidyTuesday Week20 | Data: Microsoft & The Verge | Graphic: M. Jaffee"
) &
theme(panel.background = element_rect(fill = "#232229", colour="#232229"),
plot.background = element_rect(fill = "#232229", colour="#232229"),
plot.title = element_text(colour = "#e9e0cc", size=20, face="bold", hjust = 0, family="mono"),
plot.subtitle = element_text(colour = "#e9e0cc", size=15, hjust = 0, family="mono"),
plot.caption = element_text(colour = "#e9e0cc", size=10, face="bold", hjust = 0, family="mono")
)
pa
| /TidyTuesdays/W20/W20_2021.R | no_license | MJaffee/R-Projects | R | false | false | 4,764 | r | #setup
library(tidyverse)
library(tidytuesdayR)
library(skimr)
library(tigris)
library(rnaturalearth)
library(ggplot2)
library(janitor)
library(extrafont)
library(patchwork)
#load & organize data
tt_data <- tt_load(2021, week=20)
broadband <- tt_data$broadband
broadbandzip <- tt_data$broadband_zip
#take a look
skim(broadband)
skim(broadbandzip)
#let's work with broadband
#clean and organize data (thanks to M Henderson for some help on this)
broadband2 <- tt_data$broadband %>%
clean_names() %>%
rename(Availability = broadband_availability_per_fcc,
Usage = broadband_usage
) %>%
mutate(
Usage = as.numeric(Usage),
Availability = as.numeric(Availability)
)
#filter to state
PA_data <- broadband2 %>%
filter(st == 'PA')
#load & filter state counties & clean
PA_counties <- counties(state = 'PA') %>%
clean_names()
#join sets
PA_bdnd <- PA_counties %>%
left_join(PA_data, by = c("namelsad" = "county_name"))
# Plot helper: the Usage and Availability maps were two ~26-line blocks that
# differed only in the fill column and legend title, so build both from one
# function.
# NOTE(review): the two originals also differed in legend.text having
# face = "bold" for Availability but not Usage; normalised to bold here to
# match every other text element -- confirm the asymmetry was not intentional.
pa_choropleth <- function(fill_col, legend_title) {
  ggplot() +
    geom_sf(color = "#232229", data = PA_bdnd, aes(fill = {{ fill_col }}), size = 0.25) +
    scale_fill_gradient(legend_title, labels = scales::percent, low = "#440154FF", high = "#3CBB75FF") +
    theme(
      plot.margin = unit(c(0.6, 0.5, 0.5, 0.5), "cm"),
      panel.background = element_rect(fill = "#232229"),
      plot.background = element_rect(fill = "#232229"),
      legend.background = element_blank(),
      strip.background = element_rect(fill = "#232229"),
      plot.title = element_text(color = "#e9e0cc", size = 25, hjust = 1, family = "mono", face = "bold"),
      plot.subtitle = element_text(color = "#e9e0cc", size = 15, hjust = 1, family = "mono", face = "bold"),
      plot.caption = element_text(color = "#e9e0cc", size = 10, hjust = 0, family = "mono", face = "bold"),
      legend.title = element_text(color = "#e9e0cc", size = 10, hjust = 0, family = "mono", face = "bold"),
      legend.text = element_text(color = "#e9e0cc", family = "mono", face = "bold"),
      legend.justification = "left",
      legend.position = "bottom",
      legend.key.width = unit(1, "cm"),
      axis.title.x = element_blank(),
      axis.title.y = element_blank(),
      axis.text.x = element_blank(),
      axis.text.y = element_blank(),
      axis.ticks.x = element_blank(),
      axis.ticks.y = element_blank(),
      panel.grid.major = element_blank(),
      panel.grid.minor = element_blank()
    )
}
#plot Usage
g <- pa_choropleth(Usage, "% Using")
#plot Availability
a <- pa_choropleth(Availability, "% Available")
#Patch plots
pa <- g + a + plot_layout(ncol = 2) +
plot_annotation(
title = "PENNSYLVANIA",
subtitle = "Less than 40% using broadband in many counties despite being widly available",
caption = "#TidyTuesday Week20 | Data: Microsoft & The Verge | Graphic: M. Jaffee"
) &
theme(panel.background = element_rect(fill = "#232229", colour="#232229"),
plot.background = element_rect(fill = "#232229", colour="#232229"),
plot.title = element_text(colour = "#e9e0cc", size=20, face="bold", hjust = 0, family="mono"),
plot.subtitle = element_text(colour = "#e9e0cc", size=15, hjust = 0, family="mono"),
plot.caption = element_text(colour = "#e9e0cc", size=10, face="bold", hjust = 0, family="mono")
)
pa
|
#gvhbjbvbvhjbvhjb | /ohiohi.R | no_license | lincoln1202/R | R | false | false | 17 | r | #gvhbjbvbvhjbvhjb |
#' Calculate inclusion probabilities
#'
#' Computes each unit's probability of being included in the sample implied
#' by a sampling declaration, dispatching on the declared sampling type.
#' @param data A dataframe, often created by \code{\link{draw_population}} or \code{\link{draw_sample}}.
#' @param sampling A sampling object created by \code{\link{declare_sampling}}.
#' @return A numeric vector of probabilities of selection, one per row of \code{data}.
#' @examples
#' population <- declare_population(size = 850)
#' sampling <- declare_sampling(n=500)
#' pop_draw <- draw_population(population = population)
#' sampling_probabilities <- get_sampling_probabilities(data = pop_draw,
#'                                                      sampling = sampling)
#' head(sampling_probabilities)
#' @export
get_sampling_probabilities <- function(data, sampling){
  sampling <- clean_inputs(sampling, object_class = "sampling", accepts_list = FALSE)
  N <- nrow(data)
  # Pull the strata / cluster columns out of `data` when the declaration
  # names them; otherwise pass NULL through to the helpers.
  get_column <- function(name) if (is.null(name)) NULL else data[, name]
  strata_variable <- get_column(sampling$strata_variable_name)
  cluster_variable <- get_column(sampling$cluster_variable_name)
  probs <- switch(
    sampling$sampling_type,
    "simple" = simple_sampling_probabilities(
      N = N,
      n = sampling$n,
      probability = sampling$probability),
    "stratified" = stratified_sampling_probabilities(
      strata_variable = strata_variable,
      probability = sampling$probability,
      strata_n = sampling$strata_n,
      strata_probabilities = sampling$strata_probabilities),
    "clustered" = clustered_sampling_probabilities(
      cluster_variable = cluster_variable,
      n = sampling$n,
      probability = sampling$probability),
    "stratified and clustered" = stratified_and_clustered_sampling_probabilities(
      cluster_variable = cluster_variable,
      strata_variable = strata_variable,
      strata_n = sampling$strata_n,
      probability = sampling$probability,
      strata_probabilities = sampling$strata_probabilities),
    "none" = rep(1, N),
    # Previously an unrecognised type fell through to a confusing
    # "object 'probs' not found" error; fail explicitly instead.
    stop("unknown sampling_type: ", sampling$sampling_type, call. = FALSE)
  )
  return(probs)
}
simple_sampling_probabilities <- function(N, n = NULL, probability = NULL){
  # Recast the scalar inclusion probability as the (excluded, included) pair
  # taken by the assignment helper; NULL passes through so the helper uses
  # its own default.
  probability_each <- if (is.null(probability)) NULL else c(1 - probability, probability)
  complete_assignment_probabilities(
    N = N,
    m = n,
    probability_each = probability_each,
    condition_names = c(0, 1)
  )[, "prob_1"]
}
stratified_sampling_probabilities <- function(strata_variable,
                                              probability = NULL,
                                              strata_n = NULL,
                                              strata_probabilities = NULL){
  # Recast each scalar/vector spec into the two-column (excluded, included)
  # form taken by the blocked assignment helper; NULLs pass straight through.
  probability_each <- if (is.null(probability)) NULL else c(1 - probability, probability)
  block_probabilities <- if (is.null(strata_probabilities)) NULL else
    cbind(1 - strata_probabilities, strata_probabilities)
  strata_n_matrix <- NULL
  if (!is.null(strata_n)) {
    # Per-stratum counts: the remainder of each stratum alongside sampled n.
    strata_totals <- table(strata_variable)
    strata_n_matrix <- cbind(strata_totals - strata_n, strata_n)
  }
  blocked_assignment_probabilities(block_variable = strata_variable,
                                   block_m_each = strata_n_matrix,
                                   block_probabilities = block_probabilities,
                                   probability_each = probability_each,
                                   condition_names = c(0, 1))[, "prob_1"]
}
clustered_sampling_probabilities <- function(cluster_variable,
                                             n = NULL,
                                             probability = NULL){
  # Recast the scalar inclusion probability as the (excluded, included) pair
  # taken by the clustered assignment helper; NULL passes through.
  probability_each <- if (is.null(probability)) NULL else c(1 - probability, probability)
  clustered_assignment_probabilities(cluster_variable = cluster_variable,
                                     m = n,
                                     probability_each = probability_each,
                                     condition_names = c(0, 1))[, "prob_1"]
}
stratified_and_clustered_sampling_probabilities <- function(cluster_variable,
                                                            strata_variable,
                                                            strata_n = NULL,
                                                            probability = NULL,
                                                            strata_probabilities = NULL){
  # Recast each scalar/vector spec into the two-column (excluded, included)
  # form taken by the blocked-and-clustered assignment helper.
  probability_each <- if (is.null(probability)) NULL else c(1 - probability, probability)
  block_probabilities <- if (is.null(strata_probabilities)) NULL else
    cbind(1 - strata_probabilities, strata_probabilities)
  # TODO (kept from original "Must do someday" note): support a
  # block_m_each-style per-stratum count matrix; for now strata_n is
  # forwarded as the single per-block count block_m.
  blocked_and_clustered_assignment_probabilities(cluster_variable = cluster_variable,
                                                 block_variable = strata_variable,
                                                 block_m = strata_n,
                                                 probability_each = probability_each,
                                                 block_probabilities = block_probabilities,
                                                 condition_names = c(0, 1))[, "prob_1"]
}
| /R/sampling_probabilities.R | no_license | yadevi/DeclareDesign-1 | R | false | false | 6,220 | r |
#' Calculate inclusion probabilties
#'
#' Description
#' @param data A dataframe, often created by \code{\link{draw_population}} or \code{\link{draw_sample}}.
#' @param sampling A sampling object created by \code{\link{declare_sampling}}.
#' @return A matrix of probabilities of selection.
#' @examples
#' population <- declare_population(size = 850)
#' sampling <- declare_sampling(n=500)
#' pop_draw <- draw_population(population = population)
#' sampling_probabilities <- get_sampling_probabilities(data = pop_draw,
#' sampling = sampling)
#' head(sampling_probabilities)
#' @export
get_sampling_probabilities <- function(data, sampling) {
  # Validate the sampling declaration; exactly one declaration is accepted.
  sampling <- clean_inputs(sampling, object_class = "sampling", accepts_list = FALSE)

  N <- nrow(data)

  # Resolve the strata / cluster columns from the data when declared,
  # otherwise leave them NULL so the helpers fall back to their defaults.
  strata_variable <- NULL
  if (!is.null(sampling$strata_variable_name)) {
    strata_variable <- data[, sampling$strata_variable_name]
  }
  cluster_variable <- NULL
  if (!is.null(sampling$cluster_variable_name)) {
    cluster_variable <- data[, sampling$cluster_variable_name]
  }

  sampling_type <- sampling$sampling_type

  # Dispatch to the helper matching the declared sampling scheme.
  if (sampling_type == "simple") {
    probs <- simple_sampling_probabilities(N = N, n = sampling$n,
                                           probability = sampling$probability)
  } else if (sampling_type == "stratified") {
    probs <- stratified_sampling_probabilities(
      strata_variable = strata_variable,
      probability = sampling$probability,
      strata_n = sampling$strata_n,
      strata_probabilities = sampling$strata_probabilities
    )
  } else if (sampling_type == "clustered") {
    probs <- clustered_sampling_probabilities(
      cluster_variable = cluster_variable,
      n = sampling$n,
      probability = sampling$probability
    )
  } else if (sampling_type == "stratified and clustered") {
    probs <- stratified_and_clustered_sampling_probabilities(
      cluster_variable = cluster_variable,
      strata_variable = strata_variable,
      strata_n = sampling$strata_n,
      probability = sampling$probability,
      strata_probabilities = sampling$strata_probabilities
    )
  } else if (sampling_type == "none") {
    # No sampling step: every unit is included with certainty.
    probs <- rep(1, N)
  }

  return(probs)
}
# Inclusion probabilities for simple random sampling of n out of N units.
# Delegates to the two-condition (0 = not sampled, 1 = sampled) assignment
# helper and keeps only the probability of being sampled.
simple_sampling_probabilities <- function(N, n = NULL, probability = NULL) {
  # A single inclusion probability becomes an (excluded, included) pair.
  probability_each <- if (is.null(probability)) NULL else c(1 - probability, probability)

  prob_mat <- complete_assignment_probabilities(
    N = N, m = n,
    probability_each = probability_each,
    condition_names = c(0, 1)
  )
  prob_mat[, "prob_1"]
}
# Inclusion probabilities for stratified sampling: units are sampled within
# strata, either with a common probability, per-stratum probabilities, or
# per-stratum sample sizes.
stratified_sampling_probabilities <- function(strata_variable,
                                              probability = NULL,
                                              strata_n = NULL,
                                              strata_probabilities = NULL) {
  # A single inclusion probability becomes an (excluded, included) pair.
  probability_each <- if (is.null(probability)) NULL else c(1 - probability, probability)

  # Per-stratum probabilities become a two-column (excluded, included) matrix.
  block_probabilities <- NULL
  if (!is.null(strata_probabilities)) {
    block_probabilities <- cbind(1 - strata_probabilities, strata_probabilities)
  }

  # Per-stratum sample sizes become a (not sampled, sampled) count matrix,
  # computed against the total number of units in each stratum.
  strata_n_matrix <- NULL
  if (!is.null(strata_n)) {
    strata_totals <- table(strata_variable)
    strata_n_matrix <- cbind(strata_totals - strata_n, strata_n)
  }

  prob_mat <- blocked_assignment_probabilities(
    block_variable = strata_variable,
    block_m_each = strata_n_matrix,
    block_probabilities = block_probabilities,
    probability_each = probability_each,
    condition_names = c(0, 1)
  )
  prob_mat[, "prob_1"]
}
# Inclusion probabilities for cluster sampling: whole clusters are sampled,
# so every unit inherits its cluster's probability of selection.
clustered_sampling_probabilities <- function(cluster_variable,
                                             n = NULL,
                                             probability = NULL) {
  # A single inclusion probability becomes an (excluded, included) pair.
  probability_each <- if (is.null(probability)) NULL else c(1 - probability, probability)

  prob_mat <- clustered_assignment_probabilities(
    cluster_variable = cluster_variable,
    m = n,
    probability_each = probability_each,
    condition_names = c(0, 1)
  )
  prob_mat[, "prob_1"]
}
# Inclusion probabilities when clusters are sampled within strata.
stratified_and_clustered_sampling_probabilities <- function(cluster_variable,
                                                            strata_variable,
                                                            strata_n = NULL,
                                                            probability = NULL,
                                                            strata_probabilities = NULL) {
  # A single inclusion probability becomes an (excluded, included) pair.
  probability_each <- if (is.null(probability)) NULL else c(1 - probability, probability)

  # Per-stratum probabilities become a two-column (excluded, included) matrix.
  block_probabilities <- NULL
  if (!is.null(strata_probabilities)) {
    block_probabilities <- cbind(1 - strata_probabilities, strata_probabilities)
  }

  # TODO (carried over from the original): also support block_m_each-style
  # per-stratum count matrices in addition to strata_n.
  prob_mat <- blocked_and_clustered_assignment_probabilities(
    cluster_variable = cluster_variable,
    block_variable = strata_variable,
    block_m = strata_n,
    probability_each = probability_each,
    block_probabilities = block_probabilities,
    condition_names = c(0, 1)
  )
  prob_mat[, "prob_1"]
}
|
library(shiny)
library(ggplot2)
library(dplyr)
library(DT) # library to make interactive table
library(plotly)
# BUG FIX: the original called install.packages("shinythemes") unconditionally,
# which re-downloads the package on every app launch and fails outright on
# deployed servers without CRAN access. Install only when missing.
if (!requireNamespace("shinythemes", quietly = TRUE)) {
  install.packages("shinythemes")
}
library(shinythemes)

# Load the sightings dataset and coerce columns to the types the app relies
# on: Date for the range filter, plain character vectors for display.
usa_ufo_sightings <- read.csv("usa_ufo_sightings.csv")
usa_ufo_sightings$date_sighted <- as.Date(usa_ufo_sightings$date_sighted)
usa_ufo_sightings$city <- as.character(usa_ufo_sightings$city)
usa_ufo_sightings$state <- as.character(usa_ufo_sightings$state)
usa_ufo_sightings$shape <- as.character(usa_ufo_sightings$shape)
usa_ufo_sightings$comments <- as.character(usa_ufo_sightings$comments)
# UI: a sidebar with the two filter inputs (state selector, date range) and
# a main panel holding the bar chart and the summary table rendered by the
# server function.
ui <- fluidPage(
  titlePanel("UFO Sightings"),
  sidebarLayout(
    sidebarPanel(
      # Choices come straight from the loaded dataset, so every state that
      # appears in the data is selectable.
      selectInput("state", "Choose a U.S. state:", choices = unique(usa_ufo_sightings$state)),
      dateRangeInput("dates", "Choose a date range:",
                     start = "1920-01-01",
                     end = "1950-01-01")
    ),
    mainPanel(
      # Add plot output named 'shapes'
      plotOutput("shapes"),
      # Add table output named 'duration_table'
      tableOutput("duration_table")
    )
  )
)
server <- function(input, output) {
  # Bar chart of sighting counts per UFO shape, restricted to the chosen
  # state and date window.
  output$shapes <- renderPlot({
    usa_ufo_sightings %>%
      filter(
        state == input$state,
        date_sighted >= input$dates[1],
        date_sighted <= input$dates[2]
      ) %>%
      ggplot(aes(x = shape)) +
      geom_bar() +
      labs(x = "Shape", y = "# Sighted")
  })

  # Summary table by shape for the same filters: number of sightings plus
  # mean / median / min / max sighting duration.
  output$duration_table <- renderTable({
    usa_ufo_sightings %>%
      filter(
        state == input$state,
        date_sighted >= input$dates[1],
        date_sighted <= input$dates[2]
      ) %>%
      group_by(shape) %>%
      summarise(
        nb_sighted = n(),
        avg_duration = mean(duration_sec),
        median_duration = median(duration_sec),
        min_duration = min(duration_sec),
        max_duration = max(duration_sec)
      )
  })
}
shinyApp(ui, server) | /shiny21_ufo_app1.R | no_license | btindol178/Shiny | R | false | false | 2,270 | r | library(shiny)
library(ggplot2)
library(dplyr)
library(DT) # library to make interactive table
library(plotly)
install.packages("shinythemes")
library(shinythemes)
# load dataset
usa_ufo_sightings <- read.csv("usa_ufo_sightings.csv")
# make sure the data is in right data format
usa_ufo_sightings$date_sighted <- as.Date(usa_ufo_sightings$date_sighted)
usa_ufo_sightings$city <- as.character(usa_ufo_sightings$city)
usa_ufo_sightings$state<- as.character(usa_ufo_sightings$state)
usa_ufo_sightings$shape <- as.character(usa_ufo_sightings$shape)
usa_ufo_sightings$comments <- as.character(usa_ufo_sightings$comments)
ui <- fluidPage(
titlePanel("UFO Sightings"),
sidebarLayout(
sidebarPanel(
selectInput("state", "Choose a U.S. state:", choices = unique(usa_ufo_sightings$state)),
dateRangeInput("dates", "Choose a date range:",
start = "1920-01-01",
end = "1950-01-01")
),
mainPanel(
# Add plot output named 'shapes'
plotOutput("shapes"),
# Add table output named 'duration_table'
tableOutput("duration_table")
)
)
)
server <- function(input, output) {
# CODE BELOW: Create a plot output name 'shapes', of sightings by shape,
# For the selected inputs
output$shapes <- renderPlot({
usa_ufo_sightings %>%
filter(state == input$state,
date_sighted >= input$dates[1],
date_sighted <= input$dates[2]) %>%
ggplot(aes(shape)) +
geom_bar() +
labs(x = "Shape", y = "# Sighted")
})
# CODE BELOW: Create a table output named 'duration_table', by shape,
# of # sighted, plus mean, median, max, and min duration of sightings
# for the selected inputs
output$duration_table <- renderTable({
usa_ufo_sightings %>%
filter(
state == input$state,
date_sighted >= input$dates[1],
date_sighted <= input$dates[2]
) %>%
group_by(shape) %>%
summarize(
nb_sighted = n(),
avg_duration = mean(duration_sec),
median_duration = median(duration_sec),
min_duration = min(duration_sec),
max_duration = max(duration_sec)
)
})
}
shinyApp(ui, server) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/propose.R
\name{fit_permutations_and_graphs}
\alias{fit_permutations_and_graphs}
\title{Fit lots of graphs to data.}
\usage{
fit_permutations_and_graphs(data, permutations, graphs, cores)
}
\arguments{
\item{data}{The data table.}
\item{permutations}{List of population permutations.}
\item{graphs}{List of functions for producing graphs.}
\item{cores}{Number of cores used.}
}
\value{
A list of \code{\link{fast_fit}} results.
}
\description{
Combines a list of (population) permutations and a list of graph topologies
to a big list of graphs, then fits those graphs to given data using parallel
computation. This function needs \code{doParallel}, \code{foreach} and
\code{parallel} installed.
}
\examples{
\donttest{
# Let's experiment by fitting all the graphs with five leaves and at most one admixture
# event to a five population subset of the bear data. Note that with three data rows only
# we do wisely by not concluding too much about the actual bear family tree; this is to
# illustrate the function usage only!
data(bears)
data <- bears[16:18, ]
print(data)
permutations <- make_permutations(c("PB", "BLK", "Sweden", "Denali", "Kenai"))
graphs <- five_leaves_graphs
# We go with one core only as I don't know what kind of machine you are using.
fitted_graphs <- fit_permutations_and_graphs(data, permutations, graphs, 1)
# Now sort the fitted objects by best_error and see how the best graph looks like.
errors <- sapply(fitted_graphs, function(x) x$best_error)
best_graphs <- fitted_graphs[order(errors)]
plot(best_graphs[[1]]$graph, color = "goldenrod", title = best_graphs[[1]]$best_error)
# The same value for best_error actually occurs in the list 152 times because of our
# insufficient data.
}
}
\seealso{
\code{\link{make_permutations}}
\code{\link{four_leaves_graphs}}
\code{\link{five_leaves_graphs}}
\code{\link{six_leaves_graphs}}
\code{\link{seven_leaves_graphs}}
\code{\link{eight_leaves_trees}}
\code{\link{fit_graph_list}}
}
| /man/fit_permutations_and_graphs.Rd | no_license | guzhongru/admixture_graph | R | false | true | 2,048 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/propose.R
\name{fit_permutations_and_graphs}
\alias{fit_permutations_and_graphs}
\title{Fit lots of graphs to data.}
\usage{
fit_permutations_and_graphs(data, permutations, graphs, cores)
}
\arguments{
\item{data}{The data table.}
\item{permutations}{List of population permutations.}
\item{graphs}{List of functions for producing graphs.}
\item{cores}{Number of cores used.}
}
\value{
A list of \code{\link{fast_fit}} results.
}
\description{
Combines a list of (population) permutations and a list of graph topologies
to a big list of graphs, then fits those graphs to given data using parallel
computation. This function needs \code{doParallel}, \code{foreach} and
\code{parallel} installed.
}
\examples{
\donttest{
# Let's experiment by fitting all the graphs with five leaves and at most one admixture
# event to a five population subset of the bear data. Note that with three data rows only
# we do wisely by not concluding too much about the actual bear family tree; this is to
# illustrate the function usage only!
data(bears)
data <- bears[16:18, ]
print(data)
permutations <- make_permutations(c("PB", "BLK", "Sweden", "Denali", "Kenai"))
graphs <- five_leaves_graphs
# We go with one core only as I don't know what kind of machine you are using.
fitted_graphs <- fit_permutations_and_graphs(data, permutations, graphs, 1)
# Now sort the fitted objects by best_error and see how the best graph looks like.
errors <- sapply(fitted_graphs, function(x) x$best_error)
best_graphs <- fitted_graphs[order(errors)]
plot(best_graphs[[1]]$graph, color = "goldenrod", title = best_graphs[[1]]$best_error)
# The same value for best_error actually occurs in the list 152 times because of our
# insufficient data.
}
}
\seealso{
\code{\link{make_permutations}}
\code{\link{four_leaves_graphs}}
\code{\link{five_leaves_graphs}}
\code{\link{six_leaves_graphs}}
\code{\link{seven_leaves_graphs}}
\code{\link{eight_leaves_trees}}
\code{\link{fit_graph_list}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/HelpfullFunctions.R
\name{PID.controller}
\alias{PID.controller}
\title{PID controller, which generates output based on the error}
\usage{
PID.controller(
err.vec,
Kp,
Ki,
Kd,
Kp.db = 1,
Kp.disc = 0.75,
Ki.db = Inf,
Kd.db = 1,
Kd.disc = 0.75,
Ki.disc = 0.75
)
}
\arguments{
\item{err.vec}{A vector with the error terms (discrepancies of the result and the value)}
\item{Kp}{Factor measuring how strongly we want to turn based on proportional gain}
\item{Ki}{Factor measuring how strong we want to turn based on integral gain}
\item{Kd}{Factor measuring how strong we want to turn based on derivative gain}
\item{Kp.db}{How many data points should be used to calculate the proportional effect? By default the standard procedure is used, e.g. only the last data point. If there is a lot of noise in the environment it might be prudent to use a higher number.}
\item{Kp.disc}{When calculating the proportional effect based on multiple data.points with Kp.db it might be sensible to weight more recent data points higher. This parameter determines the discount factor.}
\item{Ki.db}{How many data points should be used to calculate the integral? By default the standard procedure is used, e.g. all data points. If there are strong changes in the environment it might be prudent to use a lower number.}
\item{Kd.db}{How many data points should be used to calculate the derivative? By default the standard procedure is used, e.g. the change between the last two data points. If there is a lot of noise one might want to use a higher number of points.}
\item{Kd.disc}{When calculating the derivative based on multiple data.points with Kd.db it might be sensible to weight more recent changes higher. This parameter determines the discount factor to determine the weights of former changes.}
\item{Ki.disc}{When calculating the integral based on multiple data.points with Kp.db it might be sensible to weight more recent data points higher. This parameter determines the discount factor.}
}
\description{
PID controller, which generates output based on the error
}
| /man/PID.controller.Rd | no_license | MartinKies/RLR | R | false | true | 2,162 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/HelpfullFunctions.R
\name{PID.controller}
\alias{PID.controller}
\title{PID controller, which generates output based on the error}
\usage{
PID.controller(
err.vec,
Kp,
Ki,
Kd,
Kp.db = 1,
Kp.disc = 0.75,
Ki.db = Inf,
Kd.db = 1,
Kd.disc = 0.75,
Ki.disc = 0.75
)
}
\arguments{
\item{err.vec}{A vector with the error terms (discrepancies of the result and the value)}
\item{Kp}{Factor measuring how strongly we want to turn based on proportional gain}
\item{Ki}{Factor measuring how strong we want to turn based on integral gain}
\item{Kd}{Factor measuring how strong we want to turn based on derivative gain}
\item{Kp.db}{How many data points should be used to calculate the proportional effect? By default the standard procedure is used, e.g. only the last data point. If there is a lot of noise in the environment it might be prudent to use a higher number.}
\item{Kp.disc}{When calculating the proportional effect based on multiple data.points with Kp.db it might be sensible to weight more recent data points higher. This parameter determines the discount factor.}
\item{Ki.db}{How many data points should be used to calculate the integral? By default the standard procedure is used, e.g. all data points. If there are strong changes in the environment it might be prudent to use a lower number.}
\item{Kd.db}{How many data points should be used to calculate the derivative? By default the standard procedure is used, e.g. the change between the last two data points. If there is a lot of noise one might want to use a higher number of points.}
\item{Kd.disc}{When calculating the derivative based on multiple data.points with Kd.db it might be sensible to weight more recent changes higher. This parameter determines the discount factor to determine the weights of former changes.}
\item{Ki.disc}{When calculating the integral based on multiple data.points with Kp.db it might be sensible to weight more recent data points higher. This parameter determines the discount factor.}
}
\description{
PID controller, which generates output based on the error
}
|
# Notes:
# 1. Enter your data.ca.gov portal username in the environment variables for you account, in a variable called 'portal_username'
# 2. Enter your data.ca.gov portal password in the environment variables for you account, in a variable called 'portal_password'
# load packages -----------------------------------------------------------
library(RSelenium)
library(methods) # it seems that this needs to be called explicitly to avoid an error for some reason
library(XML)
library(dplyr)
library(janitor)
library(readr)
library(lubridate)
library(readxl)
library(ckanr)
library(binman)
library(wdman)
library(stringr)
library(magrittr)
library(pingr)
library(glue)
library(tictoc)
library(here)
# enter variables ---------------------------------------------------------
## dataset names ----
# URL slug of the target dataset on data.ca.gov.
dataset_name <- 'surface-water-toxicity-results'
## list files / resources ----
# Named list of CKAN resource IDs (UUIDs) to update, keyed by a short label.
data_resource_id_list <- list('toxicity' = 'ac8bf4c8-0675-4764-92f1-b67bdb187ba1')
# set up selenium (automated browser) ---------------------------------
# Expected to create the `remDr` remote driver used below; the commented-out
# code that follows is the reference implementation of that setup.
source(here('start_selenium.R'))
## Note - for more information / examples on how the RSelenium package works, see:
# https://stackoverflow.com/questions/35504731/specify-download-folder-in-rselenium
# https://cran.r-project.org/web/packages/RSelenium/vignettes/RSelenium-basics.html
# https://stackoverflow.com/questions/32123248/submitting-form-from-r-to-mixed-html-and-javascript
# https://github.com/ropensci/RSelenium/issues/121
# ## define chrome browser options for the Selenium session ----
# eCaps <- list(
# chromeOptions =
# list(prefs = list(
# "profile.default_content_settings.popups" = 0L,
# "download.prompt_for_download" = FALSE,
# "download.default_directory" = gsub(pattern = '/', replacement = '\\\\', x = getwd()) # download.dir
# )
# )
# )
#
# ## check for open port ----
# for (port_check in 4567L:4577L) {
# port_test <- ping_port(destination = 'localhost', port = port_check)
# # print(all(is.na(port_test)))
# if (all(is.na(port_test)) == TRUE) {
# port_use <- port_check
# break
# }
# }
#
# ## get drivers ----
# selenium(check = TRUE,
# retcommand = TRUE,
# port = port_use)
# Sys.sleep(1)
#
# ## get current version of chrome browser ----
# chrome_browser_version <-
# system2(command = "wmic",
# args = 'datafile where name="C:\\\\Program Files (x86)\\\\Google\\\\Chrome\\\\Application\\\\chrome.exe" get Version /value',
# stdout = TRUE,
# stderr = TRUE) %>%
# str_extract(pattern = "(?<=Version=)(\\d+\\.){3}")
# if (sum(!is.na(chrome_browser_version)) == 0) {
# chrome_browser_version <-
# system2(command = "wmic",
# args = 'datafile where name="C:\\\\Program Files\\\\Google\\\\Chrome\\\\Application\\\\chrome.exe" get Version /value',
# stdout = TRUE,
# stderr = TRUE) %>%
# str_extract(pattern = "(?<=Version=)(\\d+\\.){3}")
# }
#
# ## get available chrome drivers ----
# chrome_driver_versions <- list_versions("chromedriver")
#
# ## match driver / version ----
# chrome_driver_current <- chrome_browser_version %>%
# extract(!is.na(.)) %>%
# str_replace_all(pattern = "\\.",
# replacement = "\\\\.") %>%
# paste0("^", .) %>%
# str_subset(string = dplyr::last(chrome_driver_versions)) %>%
# as.numeric_version() %>%
# max() %>%
# as.character()
#
# ### if no matching driver / version, use most recent driver ----
# if(is_empty(chrome_driver_current)) {
# chrome_driver_current <- tibble(version = last(chrome_driver_versions)) %>%
# separate_wider_delim(cols = version,
# delim = '.',
# names_sep = '',
# cols_remove = FALSE) %>%
# rename(version = versionversion) %>%
# mutate(across(num_range('version', 1:4), as.numeric)) %>%
# arrange(desc(version1), desc(version2), desc(version3), desc(version4)) %>%
# slice(1) %>%
# pull(version)
# }
#
# ## re-check for open port ----
# for (port_check in 4567L:4577L) {
# port_test <- ping_port(destination = 'localhost', port = port_check)
# # print(all(is.na(port_test)))
# if (all(is.na(port_test)) == TRUE) {
# port_use <- port_check
# break
# }
# }
#
# #### remove the 'LICENSE.chromedriver' file (if it exists)
# chrome_driver_dir <- paste0(app_dir("chromedriver", FALSE),
# '/win32/',
# chrome_driver_current)
# # list.files(chrome_driver_dir)
# if ('LICENSE.chromedriver' %in% list.files(chrome_driver_dir)) {
# file.remove(
# paste0(chrome_driver_dir, '/', 'LICENSE.chromedriver')
# )
# }
#
# ## set up selenium with the current chrome version ----
# selCommand <- selenium(jvmargs =
# c("-Dwebdriver.chrome.verboseLogging=true"),
# check = TRUE,
# retcommand = TRUE,
# chromever = chrome_driver_current,
# port = port_use)
#
# ## write selenium specifications to batch file ----
# writeLines(selCommand,
# 'Start_Server.bat')
# Sys.sleep(1) #### wait a few seconds
#
# ## start server ----
# shell.exec('Start_Server.bat')
# Sys.sleep(1) #### wait a few seconds
#
# ## open connection ----
# remDr <- remoteDriver(port = port_use, # 4567L,
# browserName = "chrome",
# extraCapabilities = eCaps)
# Sys.sleep(1) #### wait a few seconds
# remDr$open()
# load files to portal -----------------------------------------
## get portal username and password ----
# Credentials come from user environment variables (see the notes at the top
# of this file) so they are never hard-coded in the script.
portal_username <- Sys.getenv('portal_username')
portal_password <- Sys.getenv('portal_password')
## navigate to data.ca.gov log in page and log in ----
login_url <- 'https://data.ca.gov/user/login'
remDr$navigate(login_url)
# Fill in the username field...
webElem <- remDr$findElement(using = 'id', value = 'field-login')
webElem$sendKeysToElement(list(portal_username))
# ...then the password field...
webElem <- remDr$findElement(using = 'id', value = 'field-password')
webElem$sendKeysToElement(list(portal_password))
# ...and submit the login form.
webElem <- remDr$findElement(using = 'css selector', value = 'button.btn.btn-primary')
webElem$clickElement()
## loop through all resources and enter data ----
# For each resource: open its CKAN editor page, swap the attached file for
# the new upload, submit, and poll until the browser leaves the edit page
# (which signals that the upload finished).
for (id_number in seq_along(names(data_resource_id_list))) {
  data_resource_id <- data_resource_id_list[[id_number]]

  ### navigate to resource editor page ----
  edit_url <- paste0('https://data.ca.gov/dataset/', dataset_name,
                     '/resource_edit/', data_resource_id)
  remDr$navigate(edit_url)

  # click the 'Remove' button (to remove the old version of the file)
  webElem <- remDr$findElement(using = 'css selector', value = '.btn-remove-url')
  webElem$clickElement()
  Sys.sleep(1)

  # enter the path of the new file to be uploaded
  webElem <- remDr$findElement(using = 'css selector', value = '#field-image-upload')
  webElem$sendKeysToElement(list('C:\\David\\_CA_data_portal\\CEDEN\\CEDEN_Datasets\\2021-09-01\\ToxicityData_2021-09-01.zip'))
  Sys.sleep(1)

  # click the 'Update Resource' button to upload the new file
  webElem <- remDr$findElement(using = 'css selector', value = 'button.btn.btn-primary')
  webElem$clickElement()

  # Wait until the upload is complete before going to the next step (we
  # can't navigate away from the page while files are being uploaded).
  # The browser stays on the edit page until the upload finishes, so poll
  # the URL; give up after 120 polls of 10 s each (~20 minutes).
  ### see: https://stackoverflow.com/questions/27080920/how-to-check-if-page-finished-loading-in-rselenium
  ### see: https://stackoverflow.com/questions/11256732/how-to-handle-windows-file-upload-using-selenium-webdriver
  {
    i <- 0
    t1 <- Sys.time()
    while (remDr$getCurrentUrl() == edit_url && i <= 120) {
      print('uploading')
      Sys.sleep(10)
      # BUG FIX: the counter was never incremented in the original, so the
      # `i <= 120` timeout could never trigger and a stalled upload would
      # loop forever.
      i <- i + 1
    }
    t2 <- Sys.time()
    upload_time <- t2 - t1
    print(glue('upload complete -- upload time: {round(upload_time,1)} {units(upload_time)}'))
  }
  # go to the next file
}
# close server ------------------------------------------------------------
# Shut down the Selenium browser session, then run the stop script.
# NOTE(review): 'Stop.bat' is assumed to exist alongside this script (it is
# not created anywhere in the visible code) — confirm before relying on it.
remDr$close()
shell.exec(file = 'Stop.bat') # this closes the java window
| /ckan-data-file-upload-tool/ckan_data-file-upload-tool.R | no_license | daltare/CA-Data-Portal-Uploads | R | false | false | 8,694 | r | # Notes:
# 1. Enter your data.ca.gov portal username in the environment variables for you account, in a variable called 'portal_username'
# 2. Enter your data.ca.gov portal password in the environment variables for you account, in a variable called 'portal_password'
# load packages -----------------------------------------------------------
library(RSelenium)
library(methods) # it seems that this needs to be called explicitly to avoid an error for some reason
library(XML)
library(dplyr)
library(janitor)
library(readr)
library(lubridate)
library(readxl)
library(ckanr)
library(binman)
library(wdman)
library(stringr)
library(magrittr)
library(pingr)
library(glue)
library(tictoc)
library(here)
# enter variables ---------------------------------------------------------
## dataset names ----
dataset_name <- 'surface-water-toxicity-results'
## list files / resources ----
data_resource_id_list <- list('toxicity' = 'ac8bf4c8-0675-4764-92f1-b67bdb187ba1')
# set up selenium (automated browser) ---------------------------------
source(here('start_selenium.R'))
## Note - for more information / examples on how the RSelenium package works, see:
# https://stackoverflow.com/questions/35504731/specify-download-folder-in-rselenium
# https://cran.r-project.org/web/packages/RSelenium/vignettes/RSelenium-basics.html
# https://stackoverflow.com/questions/32123248/submitting-form-from-r-to-mixed-html-and-javascript
# https://github.com/ropensci/RSelenium/issues/121
# ## define chrome browser options for the Selenium session ----
# eCaps <- list(
# chromeOptions =
# list(prefs = list(
# "profile.default_content_settings.popups" = 0L,
# "download.prompt_for_download" = FALSE,
# "download.default_directory" = gsub(pattern = '/', replacement = '\\\\', x = getwd()) # download.dir
# )
# )
# )
#
# ## check for open port ----
# for (port_check in 4567L:4577L) {
# port_test <- ping_port(destination = 'localhost', port = port_check)
# # print(all(is.na(port_test)))
# if (all(is.na(port_test)) == TRUE) {
# port_use <- port_check
# break
# }
# }
#
# ## get drivers ----
# selenium(check = TRUE,
# retcommand = TRUE,
# port = port_use)
# Sys.sleep(1)
#
# ## get current version of chrome browser ----
# chrome_browser_version <-
# system2(command = "wmic",
# args = 'datafile where name="C:\\\\Program Files (x86)\\\\Google\\\\Chrome\\\\Application\\\\chrome.exe" get Version /value',
# stdout = TRUE,
# stderr = TRUE) %>%
# str_extract(pattern = "(?<=Version=)(\\d+\\.){3}")
# if (sum(!is.na(chrome_browser_version)) == 0) {
# chrome_browser_version <-
# system2(command = "wmic",
# args = 'datafile where name="C:\\\\Program Files\\\\Google\\\\Chrome\\\\Application\\\\chrome.exe" get Version /value',
# stdout = TRUE,
# stderr = TRUE) %>%
# str_extract(pattern = "(?<=Version=)(\\d+\\.){3}")
# }
#
# ## get available chrome drivers ----
# chrome_driver_versions <- list_versions("chromedriver")
#
# ## match driver / version ----
# chrome_driver_current <- chrome_browser_version %>%
# extract(!is.na(.)) %>%
# str_replace_all(pattern = "\\.",
# replacement = "\\\\.") %>%
# paste0("^", .) %>%
# str_subset(string = dplyr::last(chrome_driver_versions)) %>%
# as.numeric_version() %>%
# max() %>%
# as.character()
#
# ### if no matching driver / version, use most recent driver ----
# if(is_empty(chrome_driver_current)) {
# chrome_driver_current <- tibble(version = last(chrome_driver_versions)) %>%
# separate_wider_delim(cols = version,
# delim = '.',
# names_sep = '',
# cols_remove = FALSE) %>%
# rename(version = versionversion) %>%
# mutate(across(num_range('version', 1:4), as.numeric)) %>%
# arrange(desc(version1), desc(version2), desc(version3), desc(version4)) %>%
# slice(1) %>%
# pull(version)
# }
#
# ## re-check for open port ----
# for (port_check in 4567L:4577L) {
# port_test <- ping_port(destination = 'localhost', port = port_check)
# # print(all(is.na(port_test)))
# if (all(is.na(port_test)) == TRUE) {
# port_use <- port_check
# break
# }
# }
#
# #### remove the 'LICENSE.chromedriver' file (if it exists)
# chrome_driver_dir <- paste0(app_dir("chromedriver", FALSE),
# '/win32/',
# chrome_driver_current)
# # list.files(chrome_driver_dir)
# if ('LICENSE.chromedriver' %in% list.files(chrome_driver_dir)) {
# file.remove(
# paste0(chrome_driver_dir, '/', 'LICENSE.chromedriver')
# )
# }
#
# ## set up selenium with the current chrome version ----
# selCommand <- selenium(jvmargs =
# c("-Dwebdriver.chrome.verboseLogging=true"),
# check = TRUE,
# retcommand = TRUE,
# chromever = chrome_driver_current,
# port = port_use)
#
# ## write selenium specifications to batch file ----
# writeLines(selCommand,
# 'Start_Server.bat')
# Sys.sleep(1) #### wait a few seconds
#
# ## start server ----
# shell.exec('Start_Server.bat')
# Sys.sleep(1) #### wait a few seconds
#
# ## open connection ----
# remDr <- remoteDriver(port = port_use, # 4567L,
# browserName = "chrome",
# extraCapabilities = eCaps)
# Sys.sleep(1) #### wait a few seconds
# remDr$open()
# load files to portal -----------------------------------------
## get portal username and password ----
portal_username <- Sys.getenv('portal_username')
portal_password <- Sys.getenv('portal_password')
## navigate to data.ca.gov log in page and log in ----
login_url <- 'https://data.ca.gov/user/login'
remDr$navigate(login_url)
webElem <- remDr$findElement(using = 'id', value = 'field-login')
webElem$sendKeysToElement(list(portal_username))
webElem <- remDr$findElement(using = 'id', value = 'field-password')
webElem$sendKeysToElement(list(portal_password))
webElem <- remDr$findElement(using = 'css selector', value = 'button.btn.btn-primary')
webElem$clickElement()
## loop through all resources and enter data ----
# For each resource: open its CKAN editor page, swap the attached file for
# the new upload, submit, and poll until the browser leaves the edit page
# (which signals that the upload finished).
for (id_number in seq_along(names(data_resource_id_list))) {
  data_resource_id <- data_resource_id_list[[id_number]]

  ### navigate to resource editor page ----
  edit_url <- paste0('https://data.ca.gov/dataset/', dataset_name,
                     '/resource_edit/', data_resource_id)
  remDr$navigate(edit_url)

  # click the 'Remove' button (to remove the old version of the file)
  webElem <- remDr$findElement(using = 'css selector', value = '.btn-remove-url')
  webElem$clickElement()
  Sys.sleep(1)

  # enter the path of the new file to be uploaded
  webElem <- remDr$findElement(using = 'css selector', value = '#field-image-upload')
  webElem$sendKeysToElement(list('C:\\David\\_CA_data_portal\\CEDEN\\CEDEN_Datasets\\2021-09-01\\ToxicityData_2021-09-01.zip'))
  Sys.sleep(1)

  # click the 'Update Resource' button to upload the new file
  webElem <- remDr$findElement(using = 'css selector', value = 'button.btn.btn-primary')
  webElem$clickElement()

  # Wait until the upload is complete before going to the next step (we
  # can't navigate away from the page while files are being uploaded).
  # The browser stays on the edit page until the upload finishes, so poll
  # the URL; give up after 120 polls of 10 s each (~20 minutes).
  ### see: https://stackoverflow.com/questions/27080920/how-to-check-if-page-finished-loading-in-rselenium
  ### see: https://stackoverflow.com/questions/11256732/how-to-handle-windows-file-upload-using-selenium-webdriver
  {
    i <- 0
    t1 <- Sys.time()
    while (remDr$getCurrentUrl() == edit_url && i <= 120) {
      print('uploading')
      Sys.sleep(10)
      # BUG FIX: the counter was never incremented in the original, so the
      # `i <= 120` timeout could never trigger and a stalled upload would
      # loop forever.
      i <- i + 1
    }
    t2 <- Sys.time()
    upload_time <- t2 - t1
    print(glue('upload complete -- upload time: {round(upload_time,1)} {units(upload_time)}'))
  }
  # go to the next file
}
# close server ------------------------------------------------------------
remDr$close()
shell.exec(file = 'Stop.bat') # this closes the java window
|
# Data-frame tutorial script: build an employee data frame, extract rows and
# columns, then append new rows with rbind().

# Create example vectors for an employee data set
emp_int <- c(1:5)
emp_name <- c("rick", "bob", "pop", "garry", "larry")
emp_salary <- c(1234, 4565, 7898, 1000, 3455)
# BUG FIX: the original had only 4 dates for 5 employees; added the fifth.
Start_Date <- as.Date(c("2012-01-12", "2014-03-01", "2015-01-01",
                        "2017-02-01", "2013-02-03"))
Start_Date

# Create the data frame.
# BUG FIX: the argument is spelled 'stringsAsFactors' (lower-case s); the
# original 'StringsAsFactors = FALSE' silently created an extra column of
# FALSE values instead of controlling factor conversion.
emp.data <- data.frame(emp_int = c(1:5),
                       emp_name = c("rick", "bob", "pop", "garry", "larry"),
                       emp_salary = c(1234, 4565, 7898, 1000, 3455),
                       Start_Date = as.Date(c("2012-01-12", "2014-03-01",
                                              "2015-01-01", "2017-02-01",
                                              "2013-02-03")),
                       stringsAsFactors = FALSE)
emp.data

# Get the structure of the data frame.
# BUG FIX: moved after the data frame's creation; the original called
# str(emp.data) before emp.data existed.
str(emp.data)

# Extract specific columns
result <- data.frame(emp.data$emp_name, emp.data$emp_salary)
result

# Extract the first two rows.
# BUG FIX: emp.data[1:2] selects the first two COLUMNS; rows need the
# comma form emp.data[1:2, ].
result <- emp.data[1:2, ]
result

# Extract the 3rd and 5th rows with the 2nd and 4th columns.
# BUG FIX: row and column indices must be separated by a comma; the original
# emp.data[c(3,5,c(2,4))] selected columns 3, 5, 2, 4 instead.
result <- emp.data[c(3, 5), c(2, 4)]
result

# Add rows: to append rows permanently to an existing data frame, the new
# rows must have the same structure (column names and types) as the
# existing rows, or rbind() fails with "names do not match".
emp.data <- data.frame(emp_int = c(1:3),
                       emp_name = c("rick", "bob", "pop"),
                       emp_salary = c(1234, 4565, 7898),
                       Start_Date = as.Date(c("2012-01-12", "2014-03-01",
                                              "2015-01-01")),
                       stringsAsFactors = FALSE)

# Create the second data frame.
# BUG FIX: the original used mismatched column names (emp_id, salary,
# start_date) plus an extra 'dept' column, which makes rbind() error out.
emp.newdata <- data.frame(emp_int = c(4:6),
                          emp_name = c("Hansee", "Sujay", "vinay"),
                          emp_salary = c(576.9, 897, 678),
                          Start_Date = as.Date(c("2012-01-07", "2012-01-01",
                                                 "2012-09-08")),
                          stringsAsFactors = FALSE)

# Bind the two data frames
emp.finaldata <- rbind(emp.data, emp.newdata)
emp.finaldata
| /prig11.R | no_license | HanseeR/Practice- | R | false | false | 2,604 | r | #create the data frame
# Build a small employee data frame step by step (teaching example).

# Individual vectors (use <- for assignment, not =)
emp_int <- c(1:5)
emp_name <- c("rick", "bob", "pop", "garry", "larry")
emp_salary <- c(1234, 4565, 7898, 1000, 3455)
# One start date per employee (five dates to match the five employees;
# the original had only four, which is inconsistent with the example)
Start_Date <- as.Date(c("2012-01-12", "2014-03-01", "2015-01-01",
                        "2017-02-01", "2013-02-03"))
Start_Date

# Create the data frame.
# NOTE: the argument is stringsAsFactors (lower-case s). Spelling it
# StringsAsFactors would silently add a logical column of that name
# instead of controlling factor conversion.
emp.data <- data.frame(emp_int = c(1:5),
                       emp_name = c("rick", "bob", "pop", "garry", "larry"),
                       emp_salary = c(1234, 4565, 7898, 1000, 3455),
                       Start_Date = as.Date(c("2012-01-12", "2014-03-01",
                                              "2015-01-01", "2017-02-01",
                                              "2013-02-03")),
                       stringsAsFactors = FALSE)
emp.data
# Inspect the structure AFTER the object exists (calling str(emp.data)
# before the assignment, as the original did, errors with
# "object 'emp.data' not found")
str(emp.data)

# Extract specific columns
result <- data.frame(emp.data$emp_name, emp.data$emp_salary)
result

# Extract the first two ROWS (note the trailing comma:
# emp.data[1:2] would select the first two COLUMNS instead)
result <- emp.data[1:2, ]
result

# Extract 3rd and 5th rows with 2nd and 4th columns
# (rows go before the comma, columns after)
result <- emp.data[c(3, 5), c(2, 4)]
result

# Add rows: to rbind() successfully, the new rows must have the same
# column names and types as the existing data frame.
emp.data <- data.frame(emp_int = c(1:3),
                       emp_name = c("rick", "bob", "pop"),
                       emp_salary = c(1234, 4565, 7898),
                       Start_Date = as.Date(c("2012-01-12", "2014-03-01",
                                              "2015-01-01")),
                       stringsAsFactors = FALSE)
# Second data frame with columns matching emp.data exactly (the original
# used different names -- emp_id / salary / start_date / extra dept --
# which makes rbind() fail with a "names do not match" error)
emp.newdata <- data.frame(emp_int = c(4:6),
                          emp_name = c("Hansee", "Sujay", "vinay"),
                          emp_salary = c(576.9, 897, 678),
                          Start_Date = as.Date(c("2012-01-07", "2012-01-01",
                                                 "2012-09-08")),
                          stringsAsFactors = FALSE)
# Bind the two data frames row-wise
emp.finaldata <- rbind(emp.data, emp.newdata)
emp.finaldata
|
#Reading in the given data
rockland <- read.csv("Rockland.csv", header = TRUE)
#Basic one-sample t-test on List Prices to find out the confidence interval of mean
#Checking Assumptions:
# 1. Independence assumption: Since the houses are randomly selected, they are independent
#    of each other. Thus, this condition is met.
# 2. Randomization condition: Since the houses are randomly selected, this condition is
#    satisfied.
# 3. 10% condition: 169 homes are less than 10% of the total number of homes in Rockland.
#    Thus, this condition is satisfied.
# 4. Nearly Normal Condition:
hist(rockland$List.Price, col = 'slateblue3', border = 'violetred', xlab = 'List Prices',
     ylab = 'Number of homes',
     main = 'Histogram of List Prices vs Number of homes')
#From the histogram, we can see that the data for List Price is skewed to the right.
#This indicates that there are a few outliers. From the data, we can see that the prices
#for two of the houses in Rockland are $1289000 and $1338000. They seem to be the outliers.
#However, since the sample size is large, t-tests can be applied.
# One-sample t-test:
t.test(rockland$List.Price, conf.level = 0.95)
#From the t-test, we can be 95% confident that the real mean for the list prices in
#Rockland lies between $306133.7 and $355607.1.
#The budget of the family is $350000.
#A hypothesis test can be carried out to check whether the family can afford a home
#in Rockland.
#Hypotheses:
#Null Hypothesis, H0: mu(list.price) = 350,000
#Alternate Hypothesis, HA: mu(list.price) < 350,000
#The conditions have already been checked.
t.test(rockland$List.Price, mu = 350000, alternative = 'less', conf.level = 0.95)
#CORRECTED INTERPRETATION: the p-value is GREATER than 0.05, so we FAIL to
#reject the null hypothesis. There is no statistically significant evidence
#that the mean list price is below $350,000. (The original write-up said
#"we can reject the null"; with p > alpha that conclusion is inverted.)
#The one-sided test is therefore inconclusive about affordability, although
#the confidence interval above shows the plausible range for the mean
#includes values below the family's $350,000 budget.
#Comparing different types of homes
#There are four types of homes: Apartment, House, Mobile, Row/Townhouse
#In order to compare the mean prices of four homes, one-way anova can be used
#Hypotheses
#Null Hypothesis, H0: There is no difference in the prices of different types of homes.
#Alternate Hypothesis, HA: The list price of at least one type of home is different.
type.anova <- aov(List.Price~Type, data = rockland)
summary(type.anova)
#Here, the p-value is much less than alpha, so we reject the null.
#Thus, we can say that at least one of the types of home has a different mean list price.
#Checking the conditions for one-way anova:
#Independence assumption: The price of one home does not affect the price of others.
#Randomization condition: The homes are randomly selected. Hence, this condition is met.
#Independent group assumption: The list prices of one group do not affect that of others.
#Equal variance
boxplot(List.Price~Type, data = rockland,
        main = 'Comparison of Types of home',
        ylab = 'Number of homes', xlab = 'List Prices',
        col = c('violetred4', 'slateblue4', 'springgreen4', 'papayawhip'))
#All types seem to have pretty close means. Mobile homes seem to have a lower mean
#than the others. There is no extreme skewness.
plot(type.anova)
#The Residuals vs Predicted Plot shows the red line close to the equal spread line.
#The normal Q-Q plot follows a straight line with a few outliers.
hist(type.anova$residuals, main = 'Histogram of residuals', xlab = 'Residuals')
#The histogram is nearly normal with a few outliers.
#Thus, the one-way anova holds.
#Tukey's HSD test can be applied to find out which type of homes are different.
TukeyHSD(type.anova, conf.level = 0.95)
#Here, we can see that the p-values for Mobile-Apartment, Mobile-House, and
#Row/Townhouse-House are different. Thus, we can say that the mean list price of Mobile
#homes is different from the rest.
library(ggplot2)
library(Hmisc)
ggplot(rockland, aes(Type, List.Price, fill=Type))+
  # fun.y was deprecated in ggplot2 3.3.0; the replacement argument is fun
  stat_summary(fun=mean, geom="bar")+
  stat_summary(fun.data=mean_cl_normal, geom="errorbar", width=0.2)+
  labs(x="Type", y="List Price", title="List Prices of different types of home")+
  scale_fill_brewer(palette="Set3")
#Comparing prices in different areas
clr.rock <- subset(rockland, Area == 'Clarence-Rockland')
rock <- subset(rockland, Area == 'Rockland')
#The mean prices of different areas can be compared using t-test
#Hypotheses
#Null Hypothesis, H0: mu(List Prices of home in Clarence-Rock)-mu(List Prices of home in Rock) = 0
#Alternate Hypothesis, HA: mu(List Prices of home in Clarence-Rock)-mu(List Prices of home in Rock) != 0
#Assumptions and conditions
#Independence Assumption: The data is drawn independently for both areas.
#Randomization Condition: The homes are randomly selected in both areas.
#10% condition: Both 38, and 131 are less than the total number of homes in both areas.
#Nearly Normal condition:
hist(clr.rock$List.Price, col = 'slateblue3', border = 'violetred', xlab = 'List Prices',
     ylab = 'Number of homes',
     main = 'Histogram of List Prices of homes in Clarence-Rockland')
#The histogram is unimodal, symmetric, and nearly normal.
hist(rock$List.Price, col = 'slateblue3', border = 'violetred', xlab = 'List Prices',
     ylab = 'Number of homes',
     main = 'Histogram of List Prices of homes in Rockland')
#The histogram is nearly normal with a few outliers.
#Independent Groups Assumption: The two groups are independent of each other as the homes
#are randomly selected.
t.test(clr.rock$List.Price, rock$List.Price,
       alternative = "two.sided",
       var.equal = FALSE,
       paired = FALSE,
       conf.level = 0.95)
#Here, p-value is greater than alpha (0.05). Thus, we fail to reject null.
#Thus, it can be said that the mean list price in both the areas is not significantly
#different from each other.
#Assuming that the family of four includes parents and 2 children,
#they might need a three bedroom home.
#Checking for types of homes with 3 bedrooms.
three.bed <- subset(rockland, Number.of.Bedrooms == 3)
#A one-way anova needs to be conducted for this
#Hypotheses
#Null Hypothesis, H0: There is no difference in the prices of different types of homes with three bedrooms.
#Alternate Hypothesis, HA: The list price of at least one type of home with three bedrooms is different.
three.bed.anova <- aov(List.Price~Type, data = three.bed)
summary(three.bed.anova)
#Here, p-value is less than alpha. Thus, we reject the null.
#We have enough evidence to say that the price of at least one type of home is different with three bedrooms.
#Checking the conditions for one-way anova:
#Independence assumption: The price of one home does not affect the price of others.
#Randomization condition: The homes are randomly selected. Hence, this condition is met.
#Independent group assumption: The list prices of one group do not affect that of others.
#Equal variance
boxplot(List.Price~Type, data = three.bed,
        main = 'Comparison of Types of home with three bedrooms',
        ylab = 'Number of homes', xlab = 'List Prices',
        col = c('violetred4', 'slateblue4', 'springgreen4', 'papayawhip'))
#All types seem to have pretty close means. Mobile homes seem to have a lower mean
#than the others. There is no extreme skewness.
plot(three.bed.anova)
#The Residuals vs Fitted Plot shows the red line close to the equal spread line.
#The normal Q-Q plot follows a straight line with a few outliers.
hist(three.bed.anova$residuals, main = 'Histogram of residuals', xlab = 'Residuals')
#The histogram is nearly normal with a few outliers.
#Thus, the one-way anova holds.
#Tukey's HSD test can be applied to find out which type of homes are different.
TukeyHSD(three.bed.anova, conf.level = 0.95)
#Here, we can see that the p-values for Mobile-House and
#Row/Townhouse-House are different. Thus, we can say that the mean list price of House
#with three bedrooms is different from the rest.
#(ggplot2 and Hmisc were already loaded above; the duplicate library() calls
# were removed)
ggplot(three.bed, aes(Type, List.Price, fill=Type))+
  # fun.y deprecated since ggplot2 3.3.0; use fun
  stat_summary(fun=mean, geom="bar")+
  stat_summary(fun.data=mean_cl_normal, geom="errorbar", width=0.2)+
  labs(x="Type", y="List Price", title="List Prices of different types of home with 3 bedrooms")+
  scale_fill_brewer(palette="Set2")
#Thus, the houses with three bedrooms have a different price range than other types.
| /Dave, Himani Case Study 1.R | no_license | Himani-Dave/StatsProjectsInR | R | false | false | 8,634 | r | #Reading in the given data
#Reading in the given data
rockland <- read.csv("Rockland.csv", header = TRUE)
#Basic one-sample t-test on List Prices to find out the confidence interval of mean
#Checking Assumptions:
# 1. Independence assumption: Since the houses are randomly selected, they are independent
#    of each other. Thus, this condition is met.
# 2. Randomization condition: Since the houses are randomly selected, this condition is
#    satisfied.
# 3. 10% condition: 169 homes are less than 10% of the total number of homes in Rockland.
#    Thus, this condition is satisfied.
# 4. Nearly Normal Condition:
hist(rockland$List.Price, col = 'slateblue3', border = 'violetred', xlab = 'List Prices',
     ylab = 'Number of homes',
     main = 'Histogram of List Prices vs Number of homes')
#From the histogram, we can see that the data for List Price is skewed to the right.
#This indicates that there are a few outliers. From the data, we can see that the prices
#for two of the houses in Rockland are $1289000 and $1338000. They seem to be the outliers.
#However, since the sample size is large, t-tests can be applied.
# One-sample t-test:
t.test(rockland$List.Price, conf.level = 0.95)
#From the t-test, we can be 95% confident that the real mean for the list prices in
#Rockland lies between $306133.7 and $355607.1.
#The budget of the family is $350000.
#A hypothesis test can be carried out to check whether the family can afford a home
#in Rockland.
#Hypotheses:
#Null Hypothesis, H0: mu(list.price) = 350,000
#Alternate Hypothesis, HA: mu(list.price) < 350,000
#The conditions have already been checked.
t.test(rockland$List.Price, mu = 350000, alternative = 'less', conf.level = 0.95)
#CORRECTED INTERPRETATION: the p-value is GREATER than 0.05, so we FAIL to
#reject the null hypothesis. There is no statistically significant evidence
#that the mean list price is below $350,000. (The original write-up said
#"we can reject the null"; with p > alpha that conclusion is inverted.)
#The one-sided test is therefore inconclusive about affordability, although
#the confidence interval above shows the plausible range for the mean
#includes values below the family's $350,000 budget.
#Comparing different types of homes
#There are four types of homes: Apartment, House, Mobile, Row/Townhouse
#In order to compare the mean prices of four homes, one-way anova can be used
#Hypotheses
#Null Hypothesis, H0: There is no difference in the prices of different types of homes.
#Alternate Hypothesis, HA: The list price of at least one type of home is different.
type.anova <- aov(List.Price~Type, data = rockland)
summary(type.anova)
#Here, the p-value is much less than alpha, so we reject the null.
#Thus, we can say that at least one of the types of home has a different mean list price.
#Checking the conditions for one-way anova:
#Independence assumption: The price of one home does not affect the price of others.
#Randomization condition: The homes are randomly selected. Hence, this condition is met.
#Independent group assumption: The list prices of one group do not affect that of others.
#Equal variance
boxplot(List.Price~Type, data = rockland,
        main = 'Comparison of Types of home',
        ylab = 'Number of homes', xlab = 'List Prices',
        col = c('violetred4', 'slateblue4', 'springgreen4', 'papayawhip'))
#All types seem to have pretty close means. Mobile homes seem to have a lower mean
#than the others. There is no extreme skewness.
plot(type.anova)
#The Residuals vs Predicted Plot shows the red line close to the equal spread line.
#The normal Q-Q plot follows a straight line with a few outliers.
hist(type.anova$residuals, main = 'Histogram of residuals', xlab = 'Residuals')
#The histogram is nearly normal with a few outliers.
#Thus, the one-way anova holds.
#Tukey's HSD test can be applied to find out which type of homes are different.
TukeyHSD(type.anova, conf.level = 0.95)
#Here, we can see that the p-values for Mobile-Apartment, Mobile-House, and
#Row/Townhouse-House are different. Thus, we can say that the mean list price of Mobile
#homes is different from the rest.
library(ggplot2)
library(Hmisc)
ggplot(rockland, aes(Type, List.Price, fill=Type))+
  # fun.y was deprecated in ggplot2 3.3.0; the replacement argument is fun
  stat_summary(fun=mean, geom="bar")+
  stat_summary(fun.data=mean_cl_normal, geom="errorbar", width=0.2)+
  labs(x="Type", y="List Price", title="List Prices of different types of home")+
  scale_fill_brewer(palette="Set3")
#Comparing prices in different areas
clr.rock <- subset(rockland, Area == 'Clarence-Rockland')
rock <- subset(rockland, Area == 'Rockland')
#The mean prices of different areas can be compared using t-test
#Hypotheses
#Null Hypothesis, H0: mu(List Prices of home in Clarence-Rock)-mu(List Prices of home in Rock) = 0
#Alternate Hypothesis, HA: mu(List Prices of home in Clarence-Rock)-mu(List Prices of home in Rock) != 0
#Assumptions and conditions
#Independence Assumption: The data is drawn independently for both areas.
#Randomization Condition: The homes are randomly selected in both areas.
#10% condition: Both 38, and 131 are less than the total number of homes in both areas.
#Nearly Normal condition:
hist(clr.rock$List.Price, col = 'slateblue3', border = 'violetred', xlab = 'List Prices',
     ylab = 'Number of homes',
     main = 'Histogram of List Prices of homes in Clarence-Rockland')
#The histogram is unimodal, symmetric, and nearly normal.
hist(rock$List.Price, col = 'slateblue3', border = 'violetred', xlab = 'List Prices',
     ylab = 'Number of homes',
     main = 'Histogram of List Prices of homes in Rockland')
#The histogram is nearly normal with a few outliers.
#Independent Groups Assumption: The two groups are independent of each other as the homes
#are randomly selected.
t.test(clr.rock$List.Price, rock$List.Price,
       alternative = "two.sided",
       var.equal = FALSE,
       paired = FALSE,
       conf.level = 0.95)
#Here, p-value is greater than alpha (0.05). Thus, we fail to reject null.
#Thus, it can be said that the mean list price in both the areas is not significantly
#different from each other.
#Assuming that the family of four includes parents and 2 children,
#they might need a three bedroom home.
#Checking for types of homes with 3 bedrooms.
three.bed <- subset(rockland, Number.of.Bedrooms == 3)
#A one-way anova needs to be conducted for this
#Hypotheses
#Null Hypothesis, H0: There is no difference in the prices of different types of homes with three bedrooms.
#Alternate Hypothesis, HA: The list price of at least one type of home with three bedrooms is different.
three.bed.anova <- aov(List.Price~Type, data = three.bed)
summary(three.bed.anova)
#Here, p-value is less than alpha. Thus, we reject the null.
#We have enough evidence to say that the price of at least one type of home is different with three bedrooms.
#Checking the conditions for one-way anova:
#Independence assumption: The price of one home does not affect the price of others.
#Randomization condition: The homes are randomly selected. Hence, this condition is met.
#Independent group assumption: The list prices of one group do not affect that of others.
#Equal variance
boxplot(List.Price~Type, data = three.bed,
        main = 'Comparison of Types of home with three bedrooms',
        ylab = 'Number of homes', xlab = 'List Prices',
        col = c('violetred4', 'slateblue4', 'springgreen4', 'papayawhip'))
#All types seem to have pretty close means. Mobile homes seem to have a lower mean
#than the others. There is no extreme skewness.
plot(three.bed.anova)
#The Residuals vs Fitted Plot shows the red line close to the equal spread line.
#The normal Q-Q plot follows a straight line with a few outliers.
hist(three.bed.anova$residuals, main = 'Histogram of residuals', xlab = 'Residuals')
#The histogram is nearly normal with a few outliers.
#Thus, the one-way anova holds.
#Tukey's HSD test can be applied to find out which type of homes are different.
TukeyHSD(three.bed.anova, conf.level = 0.95)
#Here, we can see that the p-values for Mobile-House and
#Row/Townhouse-House are different. Thus, we can say that the mean list price of House
#with three bedrooms is different from the rest.
#(ggplot2 and Hmisc were already loaded above; the duplicate library() calls
# were removed)
ggplot(three.bed, aes(Type, List.Price, fill=Type))+
  # fun.y deprecated since ggplot2 3.3.0; use fun
  stat_summary(fun=mean, geom="bar")+
  stat_summary(fun.data=mean_cl_normal, geom="errorbar", width=0.2)+
  labs(x="Type", y="List Price", title="List Prices of different types of home with 3 bedrooms")+
  scale_fill_brewer(palette="Set2")
#Thus, the houses with three bedrooms have a different price range than other types.
|
#### cluster: average-linkage hierarchical clustering of the nutrient data ####
# Install only when missing: an unconditional install.packages() call
# re-downloads on every run and fails when no CRAN mirror is configured.
if (!requireNamespace("flexclust", quietly = TRUE)) install.packages("flexclust")
library(flexclust)
data(nutrient, package = "flexclust")
row.names(nutrient) <- tolower(row.names(nutrient))
# Standardise the variables so no single nutrient dominates the distances
nutrient.scaled <- scale(nutrient)
d <- dist(nutrient.scaled)  # Euclidean distance (dist() default)
fit.average <- hclust(d, method = "average")
plot(fit.average, hang = 1, cex = 0.8)
if (!requireNamespace("NbClust", quietly = TRUE)) install.packages("NbClust")
library(NbClust)
devAskNewPage(ask = TRUE)
# Let NbClust propose a number of clusters between 2 and 15
nc <- NbClust(nutrient.scaled, distance = "euclidean", min.nc = 2, max.nc = 15, method = "average")
barplot(table(nc$Best.nc[1, ]))
# Cut the dendrogram into 5 clusters and profile each cluster's medians
clusters <- cutree(fit.average, k = 5)
table(clusters)
aggregate(nutrient, by = list(cluster = clusters), median)
aggregate(as.data.frame(nutrient.scaled), by = list(cluster = clusters), median)
plot(fit.average, hang = -1, cex = 0.8)
rect.hclust(fit.average, k = 5)
# BUG FIX: floor() accepts a single argument, so floor(823.123, -1) errors
# ("2 arguments passed to 'floor'"). round(x, -1) -- rounding to the
# nearest 10 -- is presumably what was intended here.
round(823.123, -1)
| /r_exam/0724.R | no_license | Jinsung-Jeon/GyeonggiBigData | R | false | false | 744 | r | ####cluster####
#### cluster: average-linkage hierarchical clustering of the nutrient data ####
# Install only when missing: an unconditional install.packages() call
# re-downloads on every run and fails when no CRAN mirror is configured.
if (!requireNamespace("flexclust", quietly = TRUE)) install.packages("flexclust")
library(flexclust)
data(nutrient, package = "flexclust")
row.names(nutrient) <- tolower(row.names(nutrient))
# Standardise the variables so no single nutrient dominates the distances
nutrient.scaled <- scale(nutrient)
d <- dist(nutrient.scaled)  # Euclidean distance (dist() default)
fit.average <- hclust(d, method = "average")
plot(fit.average, hang = 1, cex = 0.8)
if (!requireNamespace("NbClust", quietly = TRUE)) install.packages("NbClust")
library(NbClust)
devAskNewPage(ask = TRUE)
# Let NbClust propose a number of clusters between 2 and 15
nc <- NbClust(nutrient.scaled, distance = "euclidean", min.nc = 2, max.nc = 15, method = "average")
barplot(table(nc$Best.nc[1, ]))
# Cut the dendrogram into 5 clusters and profile each cluster's medians
clusters <- cutree(fit.average, k = 5)
table(clusters)
aggregate(nutrient, by = list(cluster = clusters), median)
aggregate(as.data.frame(nutrient.scaled), by = list(cluster = clusters), median)
plot(fit.average, hang = -1, cex = 0.8)
rect.hclust(fit.average, k = 5)
# BUG FIX: floor() accepts a single argument, so floor(823.123, -1) errors
# ("2 arguments passed to 'floor'"). round(x, -1) -- rounding to the
# nearest 10 -- is presumably what was intended here.
round(823.123, -1)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sim_bern_mean.R
\name{sim_bern_mean}
\alias{sim_bern_mean}
\title{Simulate from a Bernoulli distribution}
\usage{
sim_bern_mean(n, prob)
}
\arguments{
\item{n}{number of observations. If length(n) > 1, the length is taken to be the number required.}
\item{prob}{probability of success on each trial.}
}
\description{
The sim_bern_mean function has the same general structure as sim_regression()
but simulates data from a Bernoulli distribution and returns the sample average.
}
| /man/sim_bern_mean.Rd | no_license | jackyan0320/example.package | R | false | true | 557 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sim_bern_mean.R
\name{sim_bern_mean}
\alias{sim_bern_mean}
\title{Simulate from a Bernoulli distribution}
\usage{
sim_bern_mean(n, prob)
}
\arguments{
\item{n}{number of observations. If length(n) > 1, the length is taken to be the number required.}
\item{prob}{probability of success on each trial.}
}
\description{
The sim_bern_mean function has the same general structure as sim_regression()
but simulates data from a Bernoulli distribution and returns the sample average.
}
|
#0) load libraries
library(docshop)
library(geosphere)
library(ggplot2)
library(reshape2)
#a) find main directory (machine-dependent paths)
if(grepl("briarons",getwd())){
  main.dir<-"C:/Users/briarons/Desktop/Analysis - Data/Postdoc"
  fig.dir<-"C:/Users/briarons/Desktop/Temp - Figures/"
}
if(grepl("bda13",getwd())){
  main.dir<-"C:/Users/bda13/Desktop/Analysis - Data/Postdoc"
  fig.dir<-"C:/Users/bda13/Desktop/Temp - Figures/"
}
#b) find file paths
mergeddir<-file.path(main.dir,"Merged files")
mergedpath<-file.path(mergeddir,"DEA PDMP ACS (county).fst")
dir.create(mergeddir,showWarnings = FALSE)  # use FALSE, not the reassignable alias F
#1) read data
# read.fst/read_fst is not in base R -- presumably re-exported by docshop
# (TODO confirm); TRUE spelled out instead of T
df<-read.fst(mergedpath,as.data.table = TRUE)
#2) create vars of interest
df$pMAT<-df$n_MAT_patients/df$n_people
df$pillsperperson<-df$pills/df$ind.pop
# division by a zero ind.pop yields Inf; treat those as missing
df$pillsperperson[is.infinite(df$pillsperperson)]<-NA
df$inchome<-df$income.median/df$homevalue.median
df$ins.pillsperperson<-df$opioid_pill_total/df$ind.pop
# drop all rows from 2006
df<-df[df$year!=2006,]
#3) aggregate variables across years of interest
#   data.table grouped aggregation: lapply over .SD applies base::mean to
#   every column, giving one county-level average per 3-year period
df.1<-df[df$year %in% 2007:2009, lapply(.SD, base::mean, na.rm=TRUE), by=c("county") ]
df.2<-df[df$year %in% 2010:2012, lapply(.SD, base::mean, na.rm=TRUE), by=c("county") ]
df.3<-df[df$year %in% 2013:2015, lapply(.SD, base::mean, na.rm=TRUE), by=c("county") ]
df.4<-df[df$year %in% 2016:2018, lapply(.SD, base::mean, na.rm=TRUE), by=c("county") ]
# Interactive sanity checks for 2010-2012: agreement between the two pill
# data sources (the "Q"/insurer-style columns vs the DEA columns -- exact
# provenance not visible here; confirm against the merge script), raw and
# per capita. Results print to the console and are not stored.
cor(df.2$MAT_pill_total,df.2$opioid_pill_total,use="complete")
cor(df.2$MAT_pill_total/df.2$n_people,df.2$opioid_pill_total/df.2$n_people,use="complete")
cor(df.2$n_opioid_prescriptions/df.2$n_people,df.2$opioid_pill_total/df.2$n_people,use="complete")
cor(df.2$pillsperperson,df.2$opioid_pill_total/df.2$ind.pop,use="complete")
cor(df.2$pills/df.2$ind.pop,df.2$opioid_pill_total/df.2$ind.pop,use="complete")
# Same comparison restricted to counties with population > 20,000
cor(df.2$ins.pillsperperson[df.2$ind.pop>20000],df.2$pillsperperson[df.2$ind.pop>20000],use="complete")
# Total pills (in millions) 2007-2012 in counties > 20,000 people, per source
sum(df$opioid_pill_total[df$year%in%2007:2012 & df$ind.pop>20000],na.rm = T)/1000000
sum(df$pills[df$year%in%2007:2012 & df$ind.pop>20000],na.rm = T)/1000000
# How many counties fall on each side of the 20,000 population cutoff
table(df.2$ind.pop>20000)
keepvars<-c(
"Q.pillsperperson",
"pillsperperson",
"no_has_opioid",
"no_high_mme",
"no_with_overdosed",
"no_has_MAT",
"no_has_overlap",
"no_n_unique_pharmacy4",
"no_n_unique_provider4",
"education.lessthanhs",
"education.masters",
"income.median",
"poverty.number",
"gini",
"inchome",
"insurance.public",
"insurance.private",
"insurance.none",
"age.median",
"sex.male",
"race.white",
"marital.married",
"ind.pop"
)
varnames<-c(
"Q.pillsperperson",
"DEA.pillsperperson",
"% on Opioids",
"% on High MME",
"% Overdosed",
"% on MAT",
"% Overlapping RX",
"% Pharma shopping",
"% Doctor shopping",
"% Education < HS",
"% Education > Masters",
"Median Income",
"% Poverty",
"Gini",
"Income / Home Value",
"% Public Insurance",
"% Private Insurance",
"% Uninsured",
"Median Age",
"% Male",
"% White",
"% Married",
"Population"
)
df.12 <-df.1[, keepvars,with=F]
df.22 <-df.2[, keepvars,with=F]
df.32 <-df.3[, keepvars,with=F]
df.42 <-df.4[, keepvars,with=F]
# df.22<-df.22[df.22$ind.pop>20000,]
dfl<-list(df.12,df.22,df.32,df.42)
# Select which period to plot: i in 1..4 picks a single period from dfl;
# i == length(dfl)+1 plots the CHANGE in correlations between the first
# and last periods. (The commented-out for-loop ran all values of i.)
i <- 2
# Winsorise a numeric vector at the given quantiles (defaults 2%/98%).
# Defined BEFORE the branches so that both can use it: originally it was
# defined only inside the first branch, so running the second branch on
# its own failed with "could not find function outlierfun".
outlierfun <- function(x, min = .02, max = .98){
  x[x < quantile(x, min, na.rm = TRUE)] <- quantile(x, min, na.rm = TRUE)
  x[x > quantile(x, max, na.rm = TRUE)] <- quantile(x, max, na.rm = TRUE)
  return(x)
}
# for(i in 1:(length(dfl)+1)){
if(i < (length(dfl) + 1)){
  df2 <- dfl[[i]]
  #c) limit outliers (winsorise every column)
  df2 <- as.data.frame(sapply(df2, outlierfun))
  #4) look at correlations
  cormat <- round(cor(df2, use = "pairwise.complete.obs"), 2)
  cormat <- cormat[, 1:5]
  cormat <- cormat[, 1:2]  # keep only the two pills-per-person columns
}
if(i == (length(dfl) + 1)){
  # Difference in correlation matrices: last period minus first period
  df2 <- dfl[[1]]
  df2 <- as.data.frame(sapply(df2, outlierfun))
  cormat <- round(cor(df2, use = "pairwise.complete.obs"), 2)
  cormat1 <- cormat[, 1:5]
  df2 <- dfl[[4]]
  df2 <- as.data.frame(sapply(df2, outlierfun))
  cormat <- round(cor(df2, use = "pairwise.complete.obs"), 2)
  cormat2 <- cormat[, 1:5]
  cormat <- cormat2 - cormat1
  diag(cormat) <- NA
}
#5) graph results
#a) Prep graph
#i) remove upper triangle
# Blank the upper triangle of a correlation matrix to NA so that the
# melted heatmap draws each correlation pair only once.
get_lower_tri <- function(cormat){
  cormat[upper.tri(cormat)] <- NA
  cormat
}
lower_tri <- get_lower_tri(cormat)
#ii) Put into three columns (Var1, Var2, value) for ggplot
melted_cormat <- melt(lower_tri, na.rm = F,variable.factor=F)
# melted_cormat$Var1<-as.character(melted_cormat$Var1)
# melted_cormat$Var2<-as.character(melted_cormat$Var2)
#iii) Format results: two-decimal string labels for each cell
melted_cormat$val2<-melted_cormat$value
melted_cormat$val2<-sprintf("%.2f", melted_cormat$val2)
#iv) Erase duplicates from melted values and format
# Cells with value == 1 (the diagonal) get an empty label and are zeroed
# so the fill scale is not dominated by the diagonal
melted_cormat$val2[melted_cormat$value==1]<-""
melted_cormat$value[melted_cormat$value==1]<-0
# melted_cormat[melted_cormat$Var1==melted_cormat$Var2,]$value<-0
# Wrap negative labels in parentheses, e.g. "-0.25" -> "(0.25)".
# NOTE(review): str_sub is from stringr, which is never library()'d in this
# script -- presumably attached via docshop; verify. Also as.numeric("") is
# NA, so this ifelse turns the blanked diagonal labels into NA -- confirm
# that is the intended rendering.
melted_cormat$val2<-ifelse(sign(as.numeric(melted_cormat$val2))==-1,paste("(",(str_sub(melted_cormat$val2,2,5)),")",sep=""),melted_cormat$val2)
melted_cormat$val2[melted_cormat$val2=="-0.00"]<-"0.00"
melted_cormat$value[is.na(melted_cormat$value)]<-0
# melted_cormat$value[melted_cormat$value==1]<-0
#v) Plot heatmap: blue-red diverging tiles over the lower-triangle values
ggheatmap<-ggplot(data = melted_cormat, aes(Var2, Var1, fill = value))+
  geom_tile(color = "white")+
  scale_fill_gradient2(low = "#1bc3e5", high = "#e53d1b",
                       midpoint = 0, limit = c(-1,1), space = "Lab",
                       name="Pearson\nCorrelation") +
  theme_minimal()+
  theme(
    axis.text.x = element_text(
      angle = 45,
      vjust = 1,
      size = 14, #axis label text size
      hjust = 1
    ),
    axis.text.y = element_text(size = 14) #axis label text size
  ) #+
#coord_fixed()
#b) graph with text, prettier labels. etc.
#i) change variable names
#ii) graph: overlay the val2 labels and relabel both axes with the
#    human-readable varnames defined earlier (positional mapping)
ggheatmap2<-ggheatmap +
  geom_text(aes(Var2, Var1, label = val2), color = "black",
            size = 4.5,hjust=.5) +
  theme(
    axis.title.x = element_blank(),
    axis.title.y = element_blank(),
    panel.grid = element_blank(),
    panel.border = element_blank(),
    panel.background = element_blank(),
    axis.ticks = element_blank(),
    legend.text = element_text(size = 15),
    legend.title = element_text(size = 15),
    plot.title = element_text(size=20, hjust=.38),
    plot.subtitle = element_text(size=20, hjust=.38),
    plot.margin = margin(.5, 4, 0, 0, "cm"))+
  guides(fill = guide_colorbar(barwidth = 3, barheight = 15))+
  ggtitle("Correlations among Q and DEA",subtitle="Pills per person (2010-2012)")+
  scale_y_discrete(labels= (varnames))+
  scale_x_discrete(labels= (varnames))
#iii)save
# ggsave's first argument doubles as an assignment: tsave1 holds the
# output path (prefixed with the period index i) and is reused by
# browseURL below to open the saved PNG
ggsave(
  tsave1 <-
    paste(
      fig.dir,
      i,"Correlations among Q and DEA.png",
      sep = ""
    ),
  ggheatmap2,
  width = 7,
  height = 8,
  units = "in"
)
browseURL(tsave1)
print(i)
# }
| /Code/7 - Assess correlations (county).R | no_license | BrianAronson/Indiana-MAT-Opioid | R | false | false | 8,758 | r | #0) load libraries
library(docshop)
library(geosphere)
library(ggplot2)
library(reshape2)
#a) find main directory
if(grepl("briarons",getwd())){
main.dir<-"C:/Users/briarons/Desktop/Analysis - Data/Postdoc"
fig.dir<-"C:/Users/briarons/Desktop/Temp - Figures/"
}
if(grepl("bda13",getwd())){
main.dir<-"C:/Users/bda13/Desktop/Analysis - Data/Postdoc"
fig.dir<-"C:/Users/bda13/Desktop/Temp - Figures/"
}
#b) find file paths
mergeddir<-file.path(main.dir,"Merged files")
mergedpath<-file.path(mergeddir,"DEA PDMP ACS (county).fst")
dir.create(mergeddir,showWarnings = F)
#1) read data
df<-read.fst(mergedpath,as.data.table = T)
#2) create vars of interest
df$pMAT<-df$n_MAT_patients/df$n_people
df$pillsperperson<-df$pills/df$ind.pop
df$pillsperperson[is.infinite(df$pillsperperson)]<-NA
df$inchome<-df$income.median/df$homevalue.median
df$ins.pillsperperson<-df$opioid_pill_total/df$ind.pop
df<-df[df$year!=2006,]
#3) aggregate variablesacross years of interest
df.1<-df[df$year %in% 2007:2009, lapply(.SD, base::mean, na.rm=TRUE), by=c("county") ]
df.2<-df[df$year %in% 2010:2012, lapply(.SD, base::mean, na.rm=TRUE), by=c("county") ]
df.3<-df[df$year %in% 2013:2015, lapply(.SD, base::mean, na.rm=TRUE), by=c("county") ]
df.4<-df[df$year %in% 2016:2018, lapply(.SD, base::mean, na.rm=TRUE), by=c("county") ]
cor(df.2$MAT_pill_total,df.2$opioid_pill_total,use="complete")
cor(df.2$MAT_pill_total/df.2$n_people,df.2$opioid_pill_total/df.2$n_people,use="complete")
cor(df.2$n_opioid_prescriptions/df.2$n_people,df.2$opioid_pill_total/df.2$n_people,use="complete")
cor(df.2$pillsperperson,df.2$opioid_pill_total/df.2$ind.pop,use="complete")
cor(df.2$pills/df.2$ind.pop,df.2$opioid_pill_total/df.2$ind.pop,use="complete")
cor(df.2$ins.pillsperperson[df.2$ind.pop>20000],df.2$pillsperperson[df.2$ind.pop>20000],use="complete")
sum(df$opioid_pill_total[df$year%in%2007:2012 & df$ind.pop>20000],na.rm = T)/1000000
sum(df$pills[df$year%in%2007:2012 & df$ind.pop>20000],na.rm = T)/1000000
table(df.2$ind.pop>20000)
#3) select interesting variables
keepvars<-c(
"Q.pillsperperson",
"pillsperperson",
"no_has_opioid",
"no_high_mme",
"no_with_overdosed",
"no_has_MAT",
"no_has_overlap",
"no_n_unique_pharmacy4",
"no_n_unique_provider4",
"education.lessthanhs",
"education.masters",
"income.median",
"poverty.number",
"gini",
"inchome",
"insurance.public",
"insurance.private",
"insurance.none",
"age.median",
"sex.male",
"race.white",
"marital.married",
"ind.pop"
)
varnames<-c(
"Q.pillsperperson",
"DEA.pillsperperson",
"% on Opioids",
"% on High MME",
"% Overdosed",
"% on MAT",
"% Overlapping RX",
"% Pharma shopping",
"% Doctor shopping",
"% Education < HS",
"% Education > Masters",
"Median Income",
"% Poverty",
"Gini",
"Income / Home Value",
"% Public Insurance",
"% Private Insurance",
"% Uninsured",
"Median Age",
"% Male",
"% White",
"% Married",
"Population"
)
df.12 <-df.1[, keepvars,with=F]
df.22 <-df.2[, keepvars,with=F]
df.32 <-df.3[, keepvars,with=F]
df.42 <-df.4[, keepvars,with=F]
# df.22<-df.22[df.22$ind.pop>20000,]
dfl<-list(df.12,df.22,df.32,df.42)
i=2
# for(i in 1:(length(dfl)+1)){
if(i<(length(dfl)+1)){
df2<-dfl[[i]]
#c) limit outliers
outlierfun<- function(x,min=.02,max=.98){
x[x<quantile(x,min,na.rm=T)]<-quantile(x,min,na.rm=T)
x[x>quantile(x,max,na.rm=T)]<-quantile(x,max,na.rm=T)
return(x)
}
df2<-as.data.frame(sapply(df2,outlierfun))
#4) look at correlations
cormat<-round(cor(df2,use="pairwise.complete.obs"),2)
cormat<-cormat[,1:5]
cormat<-cormat[,1:2]
}
if(i==(length(dfl)+1)){
df2<-dfl[[1]]
df2<-as.data.frame(sapply(df2,outlierfun))
cormat<-round(cor(df2,use="pairwise.complete.obs"),2)
cormat1<-cormat[,1:5]
df2<-dfl[[4]]
df2<-as.data.frame(sapply(df2,outlierfun))
cormat<-round(cor(df2,use="pairwise.complete.obs"),2)
cormat2<-cormat[,1:5]
cormat<-cormat2-cormat1
diag(cormat)<-NA
}
#5) graph results
#a) Prep graph
#i) blank out the upper triangle so each correlation appears only once
get_lower_tri <- function(cormat) {
  #return a copy of `cormat` with every element above the diagonal set to NA
  masked <- cormat
  masked[upper.tri(masked)] <- NA
  masked
}
lower_tri <- get_lower_tri(cormat)
#ii) Put into three columns (Var1, Var2, value); `melt` comes from reshape2/data.table
melted_cormat <- melt(lower_tri, na.rm = F,variable.factor=F)
# melted_cormat$Var1<-as.character(melted_cormat$Var1)
# melted_cormat$Var2<-as.character(melted_cormat$Var2)
#iii) Format results: val2 is the text label printed in each tile
melted_cormat$val2<-melted_cormat$value
melted_cormat$val2<-sprintf("%.2f", melted_cormat$val2)
#iv) Erase duplicates from melted values and format
#blank the label and zero the fill wherever the correlation is exactly 1 (diagonal)
melted_cormat$val2[melted_cormat$value==1]<-""
melted_cormat$value[melted_cormat$value==1]<-0
# melted_cormat[melted_cormat$Var1==melted_cormat$Var2,]$value<-0
#wrap negative values in parentheses, e.g. "-0.42" -> "(0.42)"; str_sub is stringr
#NOTE(review): as.numeric("") / as.numeric("NA") yield NA here, so blanked/NA labels
#become NA and are silently dropped by geom_text later -- apparently intentional; confirm
melted_cormat$val2<-ifelse(sign(as.numeric(melted_cormat$val2))==-1,paste("(",(str_sub(melted_cormat$val2,2,5)),")",sep=""),melted_cormat$val2)
melted_cormat$val2[melted_cormat$val2=="-0.00"]<-"0.00"
#upper-triangle NAs get a neutral fill of 0
melted_cormat$value[is.na(melted_cormat$value)]<-0
# melted_cormat$value[melted_cormat$value==1]<-0
#v) Plot heatmap: tiles filled by correlation on a blue-red diverging scale
#centered at 0 and clamped to [-1, 1]
ggheatmap<-ggplot(data = melted_cormat, aes(Var2, Var1, fill = value))+
geom_tile(color = "white")+
scale_fill_gradient2(low = "#1bc3e5", high = "#e53d1b",
midpoint = 0, limit = c(-1,1), space = "Lab",
name="Pearson\nCorrelation") +
theme_minimal()+
theme(
axis.text.x = element_text(
angle = 45,
vjust = 1,
size = 14, #axis label text size
hjust = 1
),
axis.text.y = element_text(size = 14) #axis label text size
) #+
#coord_fixed()
#b) graph with text, prettier labels. etc.
#i) change variable names
#ii) graph: overlay the formatted correlation labels and apply final theming;
#axis labels come from `varnames` (positionally matched to `keepvars`)
ggheatmap2<-ggheatmap +
geom_text(aes(Var2, Var1, label = val2), color = "black",
size = 4.5,hjust=.5) +
theme(
axis.title.x = element_blank(),
axis.title.y = element_blank(),
panel.grid = element_blank(),
panel.border = element_blank(),
panel.background = element_blank(),
axis.ticks = element_blank(),
legend.text = element_text(size = 15),
legend.title = element_text(size = 15),
plot.title = element_text(size=20, hjust=.38),
plot.subtitle = element_text(size=20, hjust=.38),
plot.margin = margin(.5, 4, 0, 0, "cm"))+
guides(fill = guide_colorbar(barwidth = 3, barheight = 15))+
ggtitle("Correlations among Q and DEA",subtitle="Pills per person (2010-2012)")+
scale_y_discrete(labels= (varnames))+
scale_x_discrete(labels= (varnames))
#iii)save to fig.dir, prefixing the filename with the subset index i;
#the path is assigned to tsave1 inside the ggsave call so it can be reused below
ggsave(
tsave1 <-
paste(
fig.dir,
i,"Correlations among Q and DEA.png",
sep = ""
),
ggheatmap2,
width = 7,
height = 8,
units = "in"
)
#side effect: opens the saved figure in the system browser/viewer
browseURL(tsave1)
print(i)
# }
|
#use this script to validate model and optimization
#also includes variance decomposition analysis
#Load workspace and save the summary statistics to summary table
#(the workspace provides param.keep, param.best, state, solvemodel, data.compare1, ...)
load("Step2_NEE_NDVI_UNBdata_MELstarting.Rdata") #load workspace
#column-wise summary statistics of the accepted MCMC parameter chain
q05=apply(param.keep, 2, quantile, 0.05) #calculate 5% quantile
q25=apply(param.keep, 2, quantile, 0.25) #calculate 25% quantile
means=apply(param.keep, 2, mean)
q75=apply(param.keep, 2, quantile, 0.75) #calculate 75% quantile
q95=apply(param.keep, 2, quantile, 0.95) #calculate 95%
summarytable=data.frame(q05 = q05, q25 = q25, mean = means,
q75 = q75, q95 = q95) #bind all of the information together in the proper order (same order as summarytable columns)
param.keep_NEE_NDVI_UNBdata = param.keep #save the table of accepted parameters under a new name
write.csv(param.keep_NEE_NDVI_UNBdata, "Params_NEE_NDVI_UNBdata.csv")
###comparison using data that was assimilated
data.compare1 = data.frame(data.compare1)
out=data.frame(solvemodel(param.best, state)) #with columns to match data.assim
#align model output rows to observation timestamps; keep time, NEE (col 7), NDVI (col 11)
out.compare1 = out[match(data.compare1$time, out$time),]
out.compare1=out.compare1[,c(1,7,11)]
head(out.compare1)
head(data.compare1)
#now calculate bias mean error, MAE, and R2 for each stock/flux of interest
#calculate RMSE (columns 2,3 = NEE, NDVI; column 1 is time)
error = (data.compare1[,c(2,3)]-out.compare1[,c(2,3)])
errorsquared = error^2
#NOTE(review): `mean` shadows base::mean as a variable name (function calls still
#resolve to base::mean, but this is fragile style); it is reassigned before reuse below
mean = apply(errorsquared,2,mean,na.rm=TRUE)
RMSE = sqrt(mean)
#calculate MAE; abs.error subtracts the full frames (incl. time), but only cols 2,3 are used
abs.error = abs(out.compare1-data.compare1)
MAE = apply(abs.error[,c(2,3)],2,mean,na.rm=TRUE)
#calculate r2 of modelled vs observed via simple linear regression
reg_NEE = lm(data.compare1[,2]~out.compare1[,2])
r2_NEE = summary(reg_NEE)$r.squared
reg_NDVI = lm(data.compare1[,3]~out.compare1[,3])
r2_NDVI = summary(reg_NDVI)$r.squared
##plot linear regression for assimilated data
#2x2 panel: scatter (obs vs model) with 1:1 line, plus residual densities
par(mfrow=c(2,2), mar=c(4,4,2,2))
plot(data.compare1$NEE, out.compare1$NEE, xlab= "Actual", ylab="Modelled", main = "NEE")
abline(0,1,col="red") #1:1 line: perfect agreement
plot(density(resid(reg_NEE)), main="Density of Residuals")
plot(data.compare1$NDVI, out.compare1$NDVI, xlab= "Actual", ylab="Modelled", main = "NDVI")
abline(0,1,col="red")
plot(density(resid(reg_NDVI)), main="Density of Residuals")
#time series overlay: model (black points) vs observations (red)
par(mfrow=c(2,1), mar=c(4,4,2,2))
plot(out.compare1$NEE~out.compare1$time, pch=16)
points(data.compare1$NEE~data.compare1$time, col="red")
plot(out.compare1$NDVI~out.compare1$time, pch=16)
points(data.compare1$NDVI~data.compare1$time, col="red")
###comparison using data that was NOT assimilated
data.compare=read.csv("Assimilation_data_ALL.csv")
data.compare1 = data.compare[data.compare$Year==2011,]
data.compare1 = data.frame(data.compare1)
out=data.frame(solvemodel(param.best, state)) #with columns to match data.assim
out.compare1 = out[match(data.compare1$Time, out$time),]
#model columns: time, NEE, GPP, Re, NDVI; observation columns reordered to match
#(positions 2:5 = NEE, GPP, Re, NDVI in both frames after this -- TODO confirm against the CSV)
out.compare1=out.compare1[,c(1,7,8,9,11)]
data.compare1=data.compare1[,c(3,6,7,8,9)]
head(out.compare1)
head(data.compare1)
#now calculate bias mean error, MAE, and R2 for each stock/flux of interest
#calculate RMSE over the four outputs (columns 2:5)
error = (data.compare1[,c(2:5)]-out.compare1[,c(2:5)])
errorsquared = error^2
#NOTE(review): `mean` again shadows base::mean (see assimilated-data section)
mean = apply(errorsquared,2,mean,na.rm=TRUE)
RMSE = sqrt(mean)
#calculate MAE
#NOTE(review): unlike the section above, MAE here is computed over ALL columns,
#including the time column (column 1) -- confirm this is intended
abs.error = abs(out.compare1-data.compare1)
MAE = apply(abs.error,2,mean,na.rm=TRUE)
#calculate r2 for each output
reg_NEE = lm(data.compare1[,2]~out.compare1[,2])
r2_NEE = summary(reg_NEE)$r.squared
reg_GPP = lm(data.compare1[,3]~out.compare1[,3])
r2_GPP = summary(reg_GPP)$r.squared
reg_Re = lm(data.compare1[,4]~out.compare1[,4])
r2_Re = summary(reg_Re)$r.squared
reg_NDVI = lm(data.compare1[,5]~out.compare1[,5])
r2_NDVI = summary(reg_NDVI)$r.squared
##plot modelled vs observed for the NOT-assimilated (validation) data
#(original header comment said "assimilated" -- corrected)
par(mfrow=c(2,2), mar=c(4,4,2,2))
plot(data.compare1[,2], out.compare1[,2], xlab= "Actual", ylab="Modelled", main = "NEE")
abline(0,1,col="red") #1:1 line: perfect agreement
plot(density(resid(reg_NEE)), main="Density of Residuals")
#fixed: the original plotted column 4 (Re) while titling the panel "NDVI" and
#pairing it with the NDVI residual density; NDVI is column 5 in both frames
plot(data.compare1[,5], out.compare1[,5], xlab= "Actual", ylab="Modelled", main = "NDVI")
abline(0,1,col="red")
plot(density(resid(reg_NDVI)), main="Density of Residuals")
#time series overlay: model (black points) vs observations (red)
par(mfrow=c(2,1), mar=c(4,4,2,2))
plot(out.compare1$NEE~out.compare1$time, pch=16)
points(data.compare1$NEE~data.compare1$Time, col="red")
plot(out.compare1$NDVI~out.compare1$time, pch=16)
points(data.compare1$NDVI~data.compare1$Time, col="red")
##########################VARIANCE DECOMPOSITION ANALYSIS#######################
load("Step2_NEE_NDVI_UNBdata_MELstarting.Rdata") #load best experiment (experiment 4)
head(param.keep) #view table of accepted parameters
means=apply(param.keep, 2, mean) #calculate parameter means
#to perform the variance decomposition analysis, you need to:
# 1) alter each parameter individually holding all other parameters constant at their means
# 2) run the model for each parameter set to obtain an ensemble of model runs
# 3) for each model run, calculate the monthly average of the output
# 4) Calculate the variance in monthly averages for each parameter - this gives you the contribution of that parameter to the model variance
head(out[,1:11])
####SEASONAL ANALYSIS####
#one monthly-average table per parameter, built in a loop instead of ten
#copy-pasted blocks; each table starts with a dummy row of 1s (dropped later)
#and has columns Month + the ten model outputs (out columns 2:11)
mvar.names = c("MVar_kplant", "MVar_LitterRateC", "MVar_LitterRateN", "MVar_RespRate",
               "MVar_UptakeRate", "MVar_propN_fol", "MVar_propN_roots", "MVar_q10",
               "MVar_netNrate", "MVar_cue")
for (nm in mvar.names) {
  tbl = data.frame(matrix(1, 1, 11))
  colnames(tbl) = c("Month", colnames(out[, 2:11]))
  assign(nm, tbl) #creates MVar_kplant, MVar_LitterRateC, ... in the global env
}
#need to create a vector of months (one entry per day) to append to model output
days.per.month = c(31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
days.per.month.leap = c(31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
months = rep(1:12, days.per.month)
months.leap = rep(1:12, days.per.month.leap)
#four simulated years: three non-leap years followed by one leap year
#(assumes the model run covers e.g. 2009-2012 -- TODO confirm against the driver data)
months = c(months, months, months, months.leap)
#run the 1000-member ensemble for each of the ten parameters in turn
#(kplant, LitterRateC, LitterRateN, RespRate, UptakeRate, propN_fol,
# propN_roots, q10, netNrate, cue): parameter j is drawn from the accepted
#chain while all other parameters are held at their means, and the monthly
#averages of every output are appended to the matching MVar_* table.
#Consolidates ten copy-pasted loops; assumes column j of param.keep
#corresponds to the j-th table in this list -- TODO confirm ordering.
mvar.names = c("MVar_kplant", "MVar_LitterRateC", "MVar_LitterRateN", "MVar_RespRate",
               "MVar_UptakeRate", "MVar_propN_fol", "MVar_propN_roots", "MVar_q10",
               "MVar_netNrate", "MVar_cue")
for (j in seq_along(mvar.names)) {
  tbl = get(mvar.names[j])
  for (i in 1:1000) {
    params.i = means #set parameters to mean values
    params.i[j] = unlist(c(param.keep[i, j])) #change the parameter value of interest
    out.i = data.frame(solvemodel(params.i, state)) #run model
    out.i = cbind(out.i, Month = months) #add month vector
    monthly.avg = aggregate(out.i[, 2:11], list(out.i$Month), mean) #monthly means
    names(monthly.avg) = names(tbl)
    tbl = rbind(tbl, monthly.avg)
  }
  assign(mvar.names[j], tbl)
}
#drop the dummy first row of each MVar_* table, then compute the
#across-ensemble variance of the monthly averages for every output column
#(result: var.kplant, var.LitterRateC, ..., var.cue -- each Month + 10 outputs)
param.tags = c("kplant", "LitterRateC", "LitterRateN", "RespRate", "UptakeRate",
               "propN_fol", "propN_roots", "q10", "netNrate", "cue")
for (tag in param.tags) {
  tbl = get(paste0("MVar_", tag))[-1, ] #remove the row of 1s used for initialization
  assign(paste0("MVar_", tag), tbl)
  assign(paste0("var.", tag), aggregate(tbl[, 2:11], list(tbl$Month), var))
}
#one parameter label per month row (assumes params has exactly 10 named entries;
#equivalent to rep(names(params), each = 12))
parameters = rep(names(params), c(12,12,12,12,12,12,12,12,12,12))
#stack the per-parameter variance tables (120 rows = 10 parameters x 12 months)
#NOTE(review): `all` shadows base::all -- consider renaming
all = rbind(var.kplant, var.LitterRateC, var.LitterRateN, var.RespRate, var.UptakeRate, var.propN_fol,
var.propN_roots, var.q10, var.netNrate, var.cue)
all=cbind(Parameters = parameters, all)
#calculate total variance per month: sum the 10 parameter contributions
#(columns 3:12 are the outputs; Group.1 is the month)
var.total = aggregate(all[3:12], list(all$Group.1), sum) #CHECK THIS
#now calculate percent variance: each parameter's share of the total monthly
#variance, per output. Consolidates ten copy-pasted pairs of lines; each
#perc.* table is Parameter + Month + 10 output columns, exactly as before.
param.tags = c("kplant", "LitterRateC", "LitterRateN", "RespRate", "UptakeRate",
               "propN_fol", "propN_roots", "q10", "netNrate", "cue")
for (tag in param.tags) {
  pv = (get(paste0("var.", tag))[, 2:11] / var.total[, 2:11]) * 100
  assign(paste0("perc.", tag),
         cbind(Parameter = rep(tag, 12), Month = var.total$Group.1, pv))
}
#create a table binding all together (120 rows = 10 parameters x 12 months)
perc.all = rbind(perc.kplant, perc.LitterRateC, perc.LitterRateN, perc.RespRate,
perc.UptakeRate, perc.propN_fol, perc.propN_roots, perc.q10, perc.netNrate,
perc.cue)
#drop column 11 (the 9th output column) -- presumably an output not wanted in
#the barplots; TODO confirm which output this is
perc.all = perc.all[,-11]
head(perc.all)
tail(perc.all)
####barplots####
#stacked barplot of percent variance per month, one panel per output column
par(mfrow=c(3,3), mar=c(4,4,2,2))
for (n in 3:11) { #for each model output column of perc.all
sub = perc.all[,c(1,2,n)]
sub1 = table(sub$Parameter, sub$Month) #parameter x month template, overwritten below
#fill each table row with the matching 12-month chunk of percent-variance values.
#The original hard-coded rows 1..11, but there are only 10 parameters, so
#sub1[11,] was out of bounds; loop over the rows that actually exist instead.
#NOTE(review): table() orders rows by factor level, while the chunks of `sub`
#are in rbind order -- this reproduces the original filling scheme; verify labels.
for (r in seq_len(nrow(sub1))) {
  idx = ((r - 1) * 12 + 1):(r * 12)
  if (max(idx) <= nrow(sub)) sub1[r, ] = sub[idx, 3]
}
barplot(sub1, col=c("chartreuse", "cadetblue", "aquamarine", "darkblue", "darkseagreen",
"deepskyblue", "dodgerblue3", "forestgreen", "darkslategray1", "purple"),
main=names(perc.all[n]), names.arg=seq(1:12), axisnames=TRUE, ylim=c(0,100)) #plot the data
} #end of for loop
par(mfrow=c(1,1), mar=c(4,4,2,2))
#NEE only (column 8 of perc.all), single panel with a legend
sub = perc.all[,c(1,2,8)]
sub1 = table(sub$Parameter, sub$Month) #parameter x month template, overwritten below
#same row-filling fix as the panel loop above: the original hard-coded rows
#1..11 and over-ran the 10 available parameter rows (sub1[11,] out of bounds)
for (r in seq_len(nrow(sub1))) {
  idx = ((r - 1) * 12 + 1):(r * 12)
  if (max(idx) <= nrow(sub)) sub1[r, ] = sub[idx, 3]
}
barplot(sub1, col=c("chartreuse", "cadetblue", "aquamarine", "darkblue", "darkseagreen",
"deepskyblue", "dodgerblue3", "forestgreen", "darkslategray1", "purple"),
main=names(perc.all[8]), names.arg=seq(1:12), axisnames=TRUE, ylim=c(0,100), legend=TRUE) #plot the data
####ANNUAL ANALYSIS####
#one annual-sum table per parameter (columns = out[,7:9]), built in a loop
#instead of ten copy-pasted blocks; each starts with a dummy row of 1s
#that is dropped after the ensemble runs
avar.names = c("AVar_kplant", "AVar_LitterRateC", "AVar_LitterRateN", "AVar_RespRate",
               "AVar_UptakeRate", "AVar_propN_fol", "AVar_propN_roots", "AVar_q10",
               "AVar_netNrate", "AVar_cue")
for (nm in avar.names) {
  tbl = data.frame(matrix(1, 1, 3))
  colnames(tbl) = colnames(out[, 7:9])
  assign(nm, tbl) #creates AVar_kplant, AVar_LitterRateC, ... in the global env
}
#run the 1000-member ensemble for each parameter (annual version): parameter j
#varies over its accepted chain while all others stay at their means; for each
#run the annual sums of outputs 7:9 are averaged across years and appended to
#the matching AVar_* table. Consolidates ten copy-pasted loops; assumes column
#j of param.keep corresponds to the j-th table in this list -- TODO confirm.
avar.names = c("AVar_kplant", "AVar_LitterRateC", "AVar_LitterRateN", "AVar_RespRate",
               "AVar_UptakeRate", "AVar_propN_fol", "AVar_propN_roots", "AVar_q10",
               "AVar_netNrate", "AVar_cue")
for (j in seq_along(avar.names)) {
  tbl = get(avar.names[j])
  for (i in 1:1000) {
    params.i = means #set parameters to mean values
    params.i[j] = unlist(c(param.keep[i, j])) #change the parameter value of interest
    out.i = data.frame(solvemodel(params.i, state)) #run model
    annual.sum = aggregate(out.i[, 7:9], list(out.i$year), sum) #calculate annual sum
    annual.avg = apply(annual.sum[, -1], 2, mean) #mean annual sum across years
    names(annual.avg) = names(tbl) #change names
    tbl = rbind(tbl, annual.avg) #add row to table
  }
  assign(avar.names[j], tbl)
}
#drop the dummy first row of each AVar_* table, then compute the
#across-ensemble variance of the mean annual sums for each output column
#(result: var.kplant, ..., var.cue -- named numeric vectors of length 3)
param.tags = c("kplant", "LitterRateC", "LitterRateN", "RespRate", "UptakeRate",
               "propN_fol", "propN_roots", "q10", "netNrate", "cue")
for (tag in param.tags) {
  tbl = get(paste0("AVar_", tag))[-1, ] #remove the row of 1s used for initialization
  assign(paste0("AVar_", tag), tbl)
  assign(paste0("var.", tag), apply(tbl, 2, var))
}
parameters = names(params)
#stack the per-parameter annual variances: 10 x 3 numeric matrix
all_1 = rbind(var.kplant, var.LitterRateC, var.LitterRateN, var.RespRate, var.UptakeRate, var.propN_fol,
              var.propN_roots, var.q10, var.netNrate, var.cue)
#calculate total variance per output: column sums over the 10 parameter rows.
#Fixes two bugs in the original: it bound `all` (the 120-row seasonal table)
#instead of `all_1`, and called apply(all_1[2:4], sum) with no MARGIN argument.
var.total = colSums(all_1)
#attach the parameter labels only after the numeric totals are computed
#(cbind-ing a character column first would coerce the matrix to character)
all_1 = data.frame(Parameters = parameters, all_1)
#now calculate percent variance: each parameter's share of the total annual
#variance, per output (all vectors are length 3: the outputs in out[,7:9])
perc.kplant = (var.kplant/var.total)*100
perc.LitterRateC = (var.LitterRateC/var.total)*100
perc.LitterRateN = (var.LitterRateN/var.total)*100
perc.RespRate = (var.RespRate/var.total)*100
perc.UptakeRate = (var.UptakeRate/var.total)*100
perc.propN_fol = (var.propN_fol/var.total)*100
perc.propN_roots = (var.propN_roots/var.total)*100
perc.q10 = (var.q10/var.total)*100
perc.netNrate = (var.netNrate/var.total)*100
perc.cue = (var.cue/var.total)*100
#create a table binding all together (10 rows, one per parameter)
perc.all_1 = rbind(perc.kplant, perc.LitterRateC, perc.LitterRateN, perc.RespRate,
perc.UptakeRate, perc.propN_fol, perc.propN_roots, perc.q10, perc.netNrate,
perc.cue)
#fixed: the original line was missing its closing parenthesis (syntax error)
perc.all_1 = cbind(Parameters = parameters, perc.all_1)
head(perc.all_1)
tail(perc.all_1)
####barplots####
#START EDITING HERE
#NOTE(review): this section is a verbatim copy of the seasonal barplot code and
#still reads the seasonal table `perc.all` rather than the annual `perc.all_1`;
#the author's marker above indicates it is unfinished -- left as-is.
#NOTE(review): sub1[11,] below indexes an 11th row of a table with one row per
#parameter (10 parameters) -- likely out of bounds; confirm before running.
par(mfrow=c(3,3), mar=c(4,4,2,2))
for (n in 3:11) { #for each parameter
sub = perc.all[,c(1,2,n)]
sub1 = table(sub$Parameter, sub$Month)
sub1[1,] = sub[1:12,3]
sub1[2,] = sub[13:24,3]
sub1[3,] = sub[25:36,3]
sub1[4,] = sub[37:48,3]
sub1[5,] = sub[49:60,3]
sub1[6,] = sub[61:72,3]
sub1[7,] = sub[73:84,3]
sub1[8,] = sub[85:96,3]
sub1[9,] = sub[97:108,3]
sub1[10,] = sub[109:120,3]
sub1[11,] = sub[121:132,3]
barplot(sub1, col=c("chartreuse", "cadetblue", "aquamarine", "darkblue", "darkseagreen",
"deepskyblue", "dodgerblue3", "forestgreen", "darkslategray1", "purple"),
main=names(perc.all[n]), names.arg=seq(1:12), axisnames=TRUE, ylim=c(0,100)) #plot the data
} #end of for loop
par(mfrow=c(1,1), mar=c(4,4,2,2))
#NEE
sub = perc.all[,c(1,2,8)]
sub1 = table(sub$Parameter, sub$Month)
sub1[1,] = sub[1:12,3]
sub1[2,] = sub[13:24,3]
sub1[3,] = sub[25:36,3]
sub1[4,] = sub[37:48,3]
sub1[5,] = sub[49:60,3]
sub1[6,] = sub[61:72,3]
sub1[7,] = sub[73:84,3]
sub1[8,] = sub[85:96,3]
sub1[9,] = sub[97:108,3]
sub1[10,] = sub[109:120,3]
sub1[11,] = sub[121:132,3]
barplot(sub1, col=c("chartreuse", "cadetblue", "aquamarine", "darkblue", "darkseagreen",
"deepskyblue", "dodgerblue3", "forestgreen", "darkslategray1", "purple"),
main=names(perc.all[8]), names.arg=seq(1:12), axisnames=TRUE, ylim=c(0,100), legend=TRUE) #plot the data
#persist the entire workspace (all MVar_*/AVar_*/perc.* objects) for later use
save.image(file="Variance_09252015.Rdata")
| /8_OptimizationAnalysis.R | no_license | keenmisty/C-N-model | R | false | false | 25,230 | r | #use this script to validate model and optimization
#also includes variance decomposition analysis
#Load workspace and save the summary statistics to summary table
load("Step2_NEE_NDVI_UNBdata_MELstarting.Rdata") #load workspace
q05=apply(param.keep, 2, quantile, 0.05) #calculate 5% quantile
q25=apply(param.keep, 2, quantile, 0.25) #calculate 25% quantile
means=apply(param.keep, 2, mean)
q75=apply(param.keep, 2, quantile, 0.75) #calculate 75% quantile
q95=apply(param.keep, 2, quantile, 0.95) #calculate 95%
summarytable=data.frame(q05 = q05, q25 = q25, mean = means,
q75 = q75, q95 = q95) #bind all of the information together in the proper order (same order as summarytable columns)
param.keep_NEE_NDVI_UNBdata = param.keep #save the table of accepted parameters under a new name
write.csv(param.keep_NEE_NDVI_UNBdata, "Params_NEE_NDVI_UNBdata.csv")
###comparison using data that was assimilated
data.compare1 = data.frame(data.compare1)
out=data.frame(solvemodel(param.best, state)) #with columns to match data.assim
out.compare1 = out[match(data.compare1$time, out$time),]
out.compare1=out.compare1[,c(1,7,11)]
head(out.compare1)
head(data.compare1)
#now calculate bias mean error, MAE, and R2 for each stock/flux of interest
#calculate RMSE
error = (data.compare1[,c(2,3)]-out.compare1[,c(2,3)])
errorsquared = error^2
mean = apply(errorsquared,2,mean,na.rm=TRUE)
RMSE = sqrt(mean)
#calculate MAE
abs.error = abs(out.compare1-data.compare1)
MAE = apply(abs.error[,c(2,3)],2,mean,na.rm=TRUE)
#calculate r2
reg_NEE = lm(data.compare1[,2]~out.compare1[,2])
r2_NEE = summary(reg_NEE)$r.squared
reg_NDVI = lm(data.compare1[,3]~out.compare1[,3])
r2_NDVI = summary(reg_NDVI)$r.squared
##plot linear regression for assimilated data
par(mfrow=c(2,2), mar=c(4,4,2,2))
plot(data.compare1$NEE, out.compare1$NEE, xlab= "Actual", ylab="Modelled", main = "NEE")
abline(0,1,col="red")
plot(density(resid(reg_NEE)), main="Density of Residuals")
plot(data.compare1$NDVI, out.compare1$NDVI, xlab= "Actual", ylab="Modelled", main = "NDVI")
abline(0,1,col="red")
plot(density(resid(reg_NDVI)), main="Density of Residuals")
par(mfrow=c(2,1), mar=c(4,4,2,2))
plot(out.compare1$NEE~out.compare1$time, pch=16)
points(data.compare1$NEE~data.compare1$time, col="red")
plot(out.compare1$NDVI~out.compare1$time, pch=16)
points(data.compare1$NDVI~data.compare1$time, col="red")
###comparison using data that was NOT assimilated
data.compare=read.csv("Assimilation_data_ALL.csv")
data.compare1 = data.compare[data.compare$Year==2011,]
data.compare1 = data.frame(data.compare1)
out=data.frame(solvemodel(param.best, state)) #with columns to match data.assim
out.compare1 = out[match(data.compare1$Time, out$time),]
out.compare1=out.compare1[,c(1,7,8,9,11)]
data.compare1=data.compare1[,c(3,6,7,8,9)]
head(out.compare1)
head(data.compare1)
#now calculate bias mean error, MAE, and R2 for each stock/flux of interest
#calculate RMSE
error = (data.compare1[,c(2:5)]-out.compare1[,c(2:5)])
errorsquared = error^2
mean = apply(errorsquared,2,mean,na.rm=TRUE)
RMSE = sqrt(mean)
#calculate MAE
abs.error = abs(out.compare1-data.compare1)
MAE = apply(abs.error,2,mean,na.rm=TRUE)
#calculate r2
reg_NEE = lm(data.compare1[,2]~out.compare1[,2])
r2_NEE = summary(reg_NEE)$r.squared
reg_GPP = lm(data.compare1[,3]~out.compare1[,3])
r2_GPP = summary(reg_GPP)$r.squared
reg_Re = lm(data.compare1[,4]~out.compare1[,4])
r2_Re = summary(reg_Re)$r.squared
reg_NDVI = lm(data.compare1[,5]~out.compare1[,5])
r2_NDVI = summary(reg_NDVI)$r.squared
##plot linear regression for assimilated data
par(mfrow=c(2,2), mar=c(4,4,2,2))
plot(data.compare1[,2], out.compare1[,2], xlab= "Actual", ylab="Modelled", main = "NEE")
abline(0,1,col="red")
plot(density(resid(reg_NEE)), main="Density of Residuals")
plot(data.compare1[,4], out.compare1[,4], xlab= "Actual", ylab="Modelled", main = "NDVI")
abline(0,1,col="red")
plot(density(resid(reg_NDVI)), main="Density of Residuals")
par(mfrow=c(2,1), mar=c(4,4,2,2))
plot(out.compare1$NEE~out.compare1$time, pch=16)
points(data.compare1$NEE~data.compare1$Time, col="red")
plot(out.compare1$NDVI~out.compare1$time, pch=16)
points(data.compare1$NDVI~data.compare1$Time, col="red")
##########################VARIANCE DECOMPOSITION ANALYSIS#######################
load("Step2_NEE_NDVI_UNBdata_MELstarting.Rdata") #load best experiment (experiment 4)
head(param.keep) #view table of accepted parameters
means=apply(param.keep, 2, mean) #calculate parameter means
#to perform the variance decomposition analysis, you need to:
# 1) alter each parameter individually holding all other parameters constant at their means
# 2) run the model for each parameter set to obtain an ensemble of model runs
# 3) for each model run, calculate the monthly average of the output
# 4) Calculate the variance in monthly averages for each parameter - this gives you the contribution of that parameter to the model variance
head(out[,1:11])
####SEASONAL ANALYSIS####
# One table of monthly averages per parameter (rows appended per ensemble
# run). Created in a loop instead of ten copy-pasted blocks; each table has
# a Month column plus model outputs 2:11.
MVar.names = c("MVar_kplant", "MVar_LitterRateC", "MVar_LitterRateN", "MVar_RespRate",
               "MVar_UptakeRate", "MVar_propN_fol", "MVar_propN_roots", "MVar_q10",
               "MVar_netNrate", "MVar_cue")
for (nm in MVar.names) {
  tmp = data.frame(matrix(1, 1, 11)) # dummy first row, dropped later
  colnames(tmp) = c("Month", colnames(out[,2:11]))
  assign(nm, tmp)
}
#vector of months to append to model output (three regular years + one leap year)
months = rep(1:12, c(31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31))
months.leap = rep(1:12, c(31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31))
months = c(months, months, months, months.leap)
# Ensemble runs for the seasonal analysis: for parameter p (column p of
# param.keep) take each of the 1000 accepted values in turn, hold every other
# parameter at its mean, run the model, and append the monthly averages of
# outputs 2:11 to the matching MVar_* table. This consolidates ten
# copy-pasted loops (kplant, LitterRateC, ..., cue) into one.
MVar.tabs = c("MVar_kplant", "MVar_LitterRateC", "MVar_LitterRateN", "MVar_RespRate",
              "MVar_UptakeRate", "MVar_propN_fol", "MVar_propN_roots", "MVar_q10",
              "MVar_netNrate", "MVar_cue")
for (p in seq_along(MVar.tabs)) {
  tab = get(MVar.tabs[p])
  for (i in 1:1000) {
    params.i = means #set parameters to mean values
    params.i[p] = unlist(c(param.keep[i,p])) #change the parameter value of interest
    out.i = data.frame(solvemodel(params.i, state)) #run model
    out.i = cbind(out.i, Month = months) #add month vector
    monthly.avg = aggregate(out.i[,2:11], list(out.i$Month), mean)
    names(monthly.avg) = names(tab)
    tab = rbind(tab, monthly.avg)
  }
  assign(MVar.tabs[p], tab)
}
# Drop the dummy initialization row from each monthly-average table.
MVar_kplant = MVar_kplant[-1,]
MVar_LitterRateC = MVar_LitterRateC[-1,]
MVar_LitterRateN = MVar_LitterRateN[-1,]
MVar_RespRate = MVar_RespRate[-1,]
MVar_UptakeRate = MVar_UptakeRate[-1,]
MVar_propN_fol = MVar_propN_fol[-1,]
MVar_propN_roots = MVar_propN_roots[-1,]
MVar_q10 = MVar_q10[-1,]
MVar_netNrate = MVar_netNrate[-1,]
MVar_cue = MVar_cue[-1,]
# Variance of the monthly averages across the ensemble, by month, for each
# output: the contribution of each parameter to the model variance.
var.kplant = aggregate(MVar_kplant[,2:11], list(MVar_kplant$Month), var)
var.LitterRateC = aggregate(MVar_LitterRateC[,2:11], list(MVar_LitterRateC$Month), var)
var.LitterRateN = aggregate(MVar_LitterRateN[,2:11], list(MVar_LitterRateN$Month), var)
var.RespRate = aggregate(MVar_RespRate[,2:11], list(MVar_RespRate$Month), var)
var.UptakeRate = aggregate(MVar_UptakeRate[,2:11], list(MVar_UptakeRate$Month), var)
var.propN_fol = aggregate(MVar_propN_fol[,2:11], list(MVar_propN_fol$Month), var)
var.propN_roots = aggregate(MVar_propN_roots[,2:11], list(MVar_propN_roots$Month), var)
var.q10 = aggregate(MVar_q10[,2:11], list(MVar_q10$Month), var)
var.netNrate = aggregate(MVar_netNrate[,2:11], list(MVar_netNrate$Month), var)
var.cue = aggregate(MVar_cue[,2:11], list(MVar_cue$Month), var)
# One parameter label per month row (12 rows per parameter).
# NOTE(review): assumes names(params) lists the 10 parameters in the same
# order as the rbind below -- confirm against the param.keep columns.
parameters = rep(names(params), c(12,12,12,12,12,12,12,12,12,12))
all = rbind(var.kplant, var.LitterRateC, var.LitterRateN, var.RespRate, var.UptakeRate, var.propN_fol,
var.propN_roots, var.q10, var.netNrate, var.cue)
all=cbind(Parameters = parameters, all)
#calculate total variance
# Columns 3:12 of `all` are the ten model outputs (after the Parameters and
# Group.1/month columns); summing across parameters per month gives the
# total variance for each output.
var.total = aggregate(all[3:12], list(all$Group.1), sum) #CHECK THIS
#now calculate percent variance
# Percent of total monthly variance attributable to each parameter.
perc.kplant = (var.kplant[,2:11]/var.total[,2:11])*100
perc.kplant = cbind(Parameter = rep("kplant", 12), Month=var.total$Group.1, perc.kplant)
perc.LitterRateC = (var.LitterRateC[,2:11]/var.total[,2:11])*100
perc.LitterRateC = cbind(Parameter = rep("LitterRateC", 12), Month=var.total$Group.1, perc.LitterRateC)
perc.LitterRateN = (var.LitterRateN[,2:11]/var.total[,2:11])*100
perc.LitterRateN = cbind(Parameter = rep("LitterRateN", 12), Month=var.total$Group.1, perc.LitterRateN)
perc.RespRate = (var.RespRate[,2:11]/var.total[,2:11])*100
perc.RespRate = cbind(Parameter = rep("RespRate", 12), Month=var.total$Group.1, perc.RespRate)
perc.UptakeRate = (var.UptakeRate[,2:11]/var.total[,2:11])*100
perc.UptakeRate = cbind(Parameter = rep("UptakeRate", 12), Month=var.total$Group.1, perc.UptakeRate)
perc.propN_fol = (var.propN_fol[,2:11]/var.total[,2:11])*100
perc.propN_fol = cbind(Parameter = rep("propN_fol", 12), Month=var.total$Group.1, perc.propN_fol)
perc.propN_roots = (var.propN_roots[,2:11]/var.total[,2:11])*100
perc.propN_roots = cbind(Parameter = rep("propN_roots", 12), Month=var.total$Group.1, perc.propN_roots)
perc.q10 = (var.q10[,2:11]/var.total[,2:11])*100
perc.q10 = cbind(Parameter = rep("q10", 12), Month=var.total$Group.1, perc.q10)
perc.netNrate = (var.netNrate[,2:11]/var.total[,2:11])*100
perc.netNrate = cbind(Parameter = rep("netNrate", 12), Month=var.total$Group.1, perc.netNrate)
perc.cue = (var.cue[,2:11]/var.total[,2:11])*100
perc.cue = cbind(Parameter = rep("cue", 12), Month=var.total$Group.1, perc.cue)
#create a table binding all together
perc.all = rbind(perc.kplant, perc.LitterRateC, perc.LitterRateN, perc.RespRate,
perc.UptakeRate, perc.propN_fol, perc.propN_roots, perc.q10, perc.netNrate,
perc.cue)
# NOTE(review): drops column 11 (one of the output series) before plotting --
# presumably intentional; confirm which output is being discarded.
perc.all = perc.all[,-11]
head(perc.all)
tail(perc.all)
####barplots####
# Stacked monthly barplots of percent variance for each model output
# (columns 3:11 of perc.all).
# FIX: the original filled the table() rows positionally (rbind order), but
# table() orders its rows by parameter level, which mislabelled the stacks;
# it also assigned an 11th row (sub1[11,]) that does not exist with the 10
# parameters used here, causing a subscript-out-of-bounds error. Rows are
# now filled by parameter name.
par(mfrow=c(3,3), mar=c(4,4,2,2))
for (n in 3:11) { #for each model output column
  sub = perc.all[,c(1,2,n)]
  sub1 = table(sub$Parameter, sub$Month)
  for (p in rownames(sub1)) {
    sub1[p, ] = sub[sub$Parameter == p, 3] # rows within a parameter are in month order 1..12
  }
  barplot(sub1, col=c("chartreuse", "cadetblue", "aquamarine", "darkblue", "darkseagreen",
                      "deepskyblue", "dodgerblue3", "forestgreen", "darkslategray1", "purple"),
          main=names(perc.all[n]), names.arg=seq(1:12), axisnames=TRUE, ylim=c(0,100)) #plot the data
} #end of for loop
par(mfrow=c(1,1), mar=c(4,4,2,2))
#NEE (column 8 of perc.all): single stacked barplot with legend
sub = perc.all[,c(1,2,8)]
sub1 = table(sub$Parameter, sub$Month)
# FIX: fill rows by parameter name; the original positional fill assumed the
# rbind order of perc.all while table() orders rows by parameter level, and
# it also indexed a non-existent 11th parameter row (runtime error with the
# 10 parameters used here).
for (p in rownames(sub1)) {
  sub1[p, ] = sub[sub$Parameter == p, 3]
}
barplot(sub1, col=c("chartreuse", "cadetblue", "aquamarine", "darkblue", "darkseagreen",
                    "deepskyblue", "dodgerblue3", "forestgreen", "darkslategray1", "purple"),
        main=names(perc.all[8]), names.arg=seq(1:12), axisnames=TRUE, ylim=c(0,100), legend=TRUE) #plot the data
####ANNUAL ANALYSIS####
# One table of annual sums per parameter, holding model outputs 7:9.
# Created in a loop instead of ten copy-pasted blocks.
AVar.names = c("AVar_kplant", "AVar_LitterRateC", "AVar_LitterRateN", "AVar_RespRate",
               "AVar_UptakeRate", "AVar_propN_fol", "AVar_propN_roots", "AVar_q10",
               "AVar_netNrate", "AVar_cue")
for (nm in AVar.names) {
  tmp = data.frame(matrix(1, 1, 3)) # dummy first row, dropped later
  colnames(tmp) = colnames(out[,7:9])
  assign(nm, tmp)
}
# Ensemble runs for the annual analysis: for parameter p (column p of
# param.keep) take each of the 1000 accepted values in turn, hold every other
# parameter at its mean, run the model, sum outputs 7:9 per year, average
# those annual sums across years, and append the result to the matching
# AVar_* table. Consolidates ten copy-pasted loops into one.
AVar.tabs = c("AVar_kplant", "AVar_LitterRateC", "AVar_LitterRateN", "AVar_RespRate",
              "AVar_UptakeRate", "AVar_propN_fol", "AVar_propN_roots", "AVar_q10",
              "AVar_netNrate", "AVar_cue")
for (p in seq_along(AVar.tabs)) {
  tab = get(AVar.tabs[p])
  for (i in 1:1000) {
    params.i = means #set parameters to mean values
    params.i[p] = unlist(c(param.keep[i,p])) #change the parameter value of interest
    out.i = data.frame(solvemodel(params.i, state)) #run model
    annual.sum = aggregate(out.i[,7:9], list(out.i$year), sum) #calculate annual sum
    annual.avg = apply(annual.sum[,-1], 2, mean) #average across all years
    names(annual.avg) = names(tab)
    tab = rbind(tab, annual.avg) #add row to table
  }
  assign(AVar.tabs[p], tab)
}
# Drop the dummy initialization row from each annual table.
AVar_kplant = AVar_kplant[-1,]
AVar_LitterRateC = AVar_LitterRateC[-1,]
AVar_LitterRateN = AVar_LitterRateN[-1,]
AVar_RespRate = AVar_RespRate[-1,]
AVar_UptakeRate = AVar_UptakeRate[-1,]
AVar_propN_fol = AVar_propN_fol[-1,]
AVar_propN_roots = AVar_propN_roots[-1,]
AVar_q10 = AVar_q10[-1,]
AVar_netNrate = AVar_netNrate[-1,]
AVar_cue = AVar_cue[-1,]
# Variance of the mean annual values across the ensemble, per model output.
var.kplant = apply(AVar_kplant, 2, var)
var.LitterRateC = apply(AVar_LitterRateC, 2, var)
var.LitterRateN = apply(AVar_LitterRateN, 2, var)
var.RespRate = apply(AVar_RespRate, 2, var)
var.UptakeRate = apply(AVar_UptakeRate, 2, var)
var.propN_fol = apply(AVar_propN_fol, 2, var)
var.propN_roots = apply(AVar_propN_roots, 2, var)
var.q10 = apply(AVar_q10, 2, var)
var.netNrate = apply(AVar_netNrate, 2, var)
var.cue = apply(AVar_cue, 2, var)
parameters = names(params) # assumes params holds the 10 parameters in param.keep column order -- TODO confirm
# Matrix of per-parameter variances (10 parameters x 3 outputs).
all_1 = rbind(var.kplant, var.LitterRateC, var.LitterRateN, var.RespRate, var.UptakeRate, var.propN_fol,
              var.propN_roots, var.q10, var.netNrate, var.cue)
# FIX: label the rows of the variance matrix; the original instead did
# cbind(Parameters = parameters, all), gluing the *seasonal* table `all`
# onto the annual results.
rownames(all_1) = parameters
#calculate total variance per output
# FIX: the original apply(all_1[2:4], sum) lacked the MARGIN argument (and
# operated on the mislabelled object above), which errors at runtime.
var.total = apply(all_1, 2, sum)
#now calculate percent variance
perc.kplant = (var.kplant/var.total)*100
perc.LitterRateC = (var.LitterRateC/var.total)*100
perc.LitterRateN = (var.LitterRateN/var.total)*100
perc.RespRate = (var.RespRate/var.total)*100
perc.UptakeRate = (var.UptakeRate/var.total)*100
perc.propN_fol = (var.propN_fol/var.total)*100
perc.propN_roots = (var.propN_roots/var.total)*100
perc.q10 = (var.q10/var.total)*100
perc.netNrate = (var.netNrate/var.total)*100
perc.cue = (var.cue/var.total)*100
#create a table binding all together
perc.all_1 = rbind(perc.kplant, perc.LitterRateC, perc.LitterRateN, perc.RespRate,
                   perc.UptakeRate, perc.propN_fol, perc.propN_roots, perc.q10, perc.netNrate,
                   perc.cue)
perc.all_1 = cbind(Parameters = parameters, perc.all_1) # FIX: closing ")" was missing (syntax error)
head(perc.all_1)
tail(perc.all_1)
####barplots####
#START EDITING HERE
# NOTE(review): this repeats the seasonal barplots of perc.all; the annual
# percentages (perc.all_1) are not plotted yet -- presumably still to be
# edited, per the marker above.
# FIX (both plots below): fill the table() rows by parameter name. The
# original positional fill assumed the rbind order of perc.all while table()
# orders rows by parameter level, and it indexed a non-existent 11th
# parameter row (subscript-out-of-bounds error with 10 parameters).
par(mfrow=c(3,3), mar=c(4,4,2,2))
for (n in 3:11) { #for each model output column
  sub = perc.all[,c(1,2,n)]
  sub1 = table(sub$Parameter, sub$Month)
  for (p in rownames(sub1)) {
    sub1[p, ] = sub[sub$Parameter == p, 3]
  }
  barplot(sub1, col=c("chartreuse", "cadetblue", "aquamarine", "darkblue", "darkseagreen",
                      "deepskyblue", "dodgerblue3", "forestgreen", "darkslategray1", "purple"),
          main=names(perc.all[n]), names.arg=seq(1:12), axisnames=TRUE, ylim=c(0,100)) #plot the data
} #end of for loop
par(mfrow=c(1,1), mar=c(4,4,2,2))
#NEE (column 8 of perc.all), with legend
sub = perc.all[,c(1,2,8)]
sub1 = table(sub$Parameter, sub$Month)
for (p in rownames(sub1)) {
  sub1[p, ] = sub[sub$Parameter == p, 3]
}
barplot(sub1, col=c("chartreuse", "cadetblue", "aquamarine", "darkblue", "darkseagreen",
                    "deepskyblue", "dodgerblue3", "forestgreen", "darkslategray1", "purple"),
        main=names(perc.all[8]), names.arg=seq(1:12), axisnames=TRUE, ylim=c(0,100), legend=TRUE) #plot the data
save.image(file="Variance_09252015.Rdata")
|
#' Set up control for aggregation into sentiment measures
#'
#' @author Samuel Borms, Keven Bluteau
#'
#' @description Sets up control object for aggregation of document-level textual sentiment into textual
#' sentiment measures (indices).
#'
#' @details For currently available options on how aggregation can occur (via the \code{howWithin},
#' \code{howDocs} and \code{howTime} arguments), call \code{\link{get_hows}}.
#'
#' @param howWithin a single \code{character} vector defining how aggregation within documents will be performed. Should
#' \code{length(howWithin) > 1}, the first element is used. For currently available options on how aggregation can occur, see
#' \code{\link{get_hows}()$words}.
#' @param howDocs a single \code{character} vector defining how aggregation across documents per date will be performed.
#' Should \code{length(howDocs) > 1}, the first element is used. For currently available options on how aggregation can occur,
#' see \code{\link{get_hows}()$docs}.
#' @param howTime a \code{character} vector defining how aggregation across dates will be performed. More than one choice
#' is possible. For currently available options on how aggregation can occur, see \code{\link{get_hows}()$time}.
#' @param do.ignoreZeros a \code{logical} indicating whether zero sentiment values have to be ignored in the determination of
#' the document weights while aggregating across documents. By default \code{do.ignoreZeros = TRUE}, such that documents with
#' an exact score of zero are considered irrelevant.
#' @param by a single \code{character} vector, either \code{"day", "week", "month"} or \code{"year"}, to indicate at what
#' level the dates should be aggregated. Dates are displayed as the first day of the period, if applicable (e.g.,
#' \code{"2017-03-01"} for March 2017).
#' @param lag a single \code{integer} vector, being the time lag to be specified for aggregation across time. By default
#' equal to \code{1L}, meaning no aggregation across time.
#' @param fill a single \code{character} vector, one of \code{c("zero", "latest", "none")}, to control how missing
#' sentiment values across the continuum of dates considered are added. This impacts the aggregation across time,
#' applying the \code{\link{fill_measures}} function before aggregating, except if \code{fill = "none"}. By default equal to
#' \code{"zero"}, which sets the scores (and thus also the weights) of the added dates to zero in the time aggregation.
#' @param alphasExp a \code{numeric} vector of all exponential smoothing factors to calculate weights for, used if
#' \code{"exponential" \%in\% howTime}. Values should be between 0 and 1 (both excluded).
#' @param ordersAlm a \code{numeric} vector of all Almon polynomial orders to calculate weights for, used if
#' \code{"almon" \%in\% howTime}.
#' @param do.inverseAlm a \code{logical} indicating if for every Almon polynomial its inverse has to be added, used
#' if \code{"almon" \%in\% howTime}.
#' @param do.normalizeAlm a \code{logical} indicating if every Almon polynomial weights column should sum to one, used if
#' \code{"almon" \%in\% howTime}.
#' @param weights an optional own weighting scheme, always used if provided as a \code{data.frame} with the number of rows
#' equal to the desired \code{lag}. The automatic Almon polynomials are created sequentially; if the user wants only specific
#' of such time weighting series it can use \code{\link{almons}}, select the columns it requires, combine it into a
#' \code{data.frame} and supply it under this argument (see examples).
#' @param dfm optional; see \code{\link{compute_sentiment}}.
#'
#' @return A \code{list} encapsulating the control parameters.
#'
#' @seealso \code{\link{fill_measures}}, \code{\link{almons}}, \code{\link{compute_sentiment}}
#'
#' @examples
#' # simple control function
#' ctr1 <- ctr_agg(howTime = "linear", by = "year", lag = 3)
#'
#' # more elaborate control function (particular attention to time weighting schemes)
#' ctr2 <- ctr_agg(howWithin = "tf-idf",
#' howDocs = "proportional",
#' howTime = c("equal_weight", "linear", "almon", "exponential", "own"),
#' do.ignoreZeros = TRUE,
#' by = "day",
#' lag = 20,
#' ordersAlm = 1:3,
#' do.inverseAlm = TRUE,
#' do.normalizeAlm = TRUE,
#' alphasExp = c(0.20, 0.50, 0.70, 0.95),
#' weights = data.frame(myWeights = runif(20)))
#'
#' # set up control function with one linear and two chosen Almon weighting schemes
#' a <- almons(n = 70, orders = 1:3, do.inverse = TRUE, do.normalize = TRUE)
#' ctr3 <- ctr_agg(howTime = c("linear", "own"), by = "year", lag = 70,
#' weights = data.frame(a1 = a[, 1], a2 = a[, 3]))
#'
#' @export
ctr_agg <- function(howWithin = "proportional", howDocs = "equal_weight", howTime = "equal_weight",
                    do.ignoreZeros = TRUE, by = "day", lag = 1L, fill = "zero", alphasExp = seq(0.1, 0.5, by = 0.1),
                    ordersAlm = 1:3, do.inverseAlm = TRUE, do.normalizeAlm = TRUE, weights = NULL, dfm = NULL) {
  # Takes the first element only when vectors are supplied for the
  # single-choice aggregation arguments.
  if (length(howWithin) > 1) howWithin <- howWithin[1]
  if (length(howDocs) > 1) howDocs <- howDocs[1]
  # check if provided aggregation specifications are supported
  hows <- get_hows() # get all supported options for each aggregation level
  warned <- 0 # counts failed checks; any failure triggers stop() at the end
  if (!(howWithin %in% hows[["words"]])) {
    warning(paste0(howWithin, " is no current option for aggregation across words."))
    warned <- warned + 1
  }
  if (!(howDocs %in% hows[["docs"]])) {
    warning(paste0(howDocs, " is no current option for aggregation across docs."))
    warned <- warned + 1
  }
  if (!all(howTime %in% hows[["time"]])) {
    warning(paste0(howTime[!(howTime %in% hows[["time"]])], " is no current option for aggregation across time. "))
    warned <- warned + 1
  }
  # Scalar conditions below use &&/|| (short-circuiting) instead of &/|.
  if ("own" %in% howTime && is.null(weights)) {
    warning(paste0("Provide a 'weights' data.frame if 'own' provided as an option in 'howTime'."))
    warned <- warned + 1
  }
  if (!("own" %in% howTime) && is.data.frame(weights)) {
    howTime <- c(howTime, "own")
    warning(paste0("The option 'own' is added to 'howTime' since a valid (not NULL) 'weights' data.frame was supplied."))
  }
  if ("own" %in% howTime) {
    # A user-supplied weighting scheme dictates the lag.
    if (lag != nrow(weights)) {
      lag <- nrow(weights)
      warning("Argument 'lag' is set equal to the number of rows in the 'weights' data.frame.")
    }
  }
  # FIX: flag alphas outside (0, 1) when EITHER bound is violated; the
  # original used `&`, requiring both bounds to be violated at once, so
  # e.g. alphasExp = 2 slipped through unchecked.
  if (max(alphasExp) >= 1 || min(alphasExp) <= 0) {
    warning("Values in 'alphasExp' should be between 0 and 1 (both excluded).")
    warned <- warned + 1
  }
  if (lag <= 0) {
    warning("Argument 'lag' should be greater than zero.")
    warned <- warned + 1
  }
  if (!(by %in% c("year", "month", "week", "day"))) {
    warning(paste0(by, " is no current 'by' option."))
    warned <- warned + 1
  }
  if (!(fill %in% c("zero", "latest", "none"))) {
    warning(paste0(fill, " is no current 'fill' option."))
    warned <- warned + 1
  }
  if (!is.null(dfm) && !quanteda::is.dfm(dfm)) {
    warning("The 'dfm' argument should pass quanteda::is.dfm(dfm) when it is not equal to NULL.")
    warned <- warned + 1
  }
  if (warned > 0) stop("Wrong inputs. See warning messages for specifics.")
  # Bundle the time-weighting specifics separately from the main choices.
  other <- list(alphasExp = alphasExp, ordersAlm = ordersAlm, do.inverseAlm = do.inverseAlm,
                do.normalizeAlm = do.normalizeAlm, weights = weights)
  ctr <- list(howWithin = howWithin,
              howDocs = howDocs,
              howTime = howTime,
              do.ignoreZeros = do.ignoreZeros,
              by = by,
              lag = lag,
              fill = fill,
              other = other)
  return(ctr)
}
#' One-way road towards a sentomeasures object
#'
#' @author Samuel Borms, Keven Bluteau
#'
#' @description Wrapper function which assembles calls to \code{\link{compute_sentiment}} and \code{\link{perform_agg}}, and
#' includes the input \code{sentocorpus} and computed sentiment scores in its output. Serves as the most direct way towards a
#' panel of textual sentiment measures as a \code{sentomeasures} object.
#'
#' @param sentocorpus a \code{sentocorpus} object created with \code{\link{sento_corpus}}.
#' @param lexicons output from a \code{\link{setup_lexicons}} call.
#' @param ctr output from a \code{\link{ctr_agg}} call.
#'
#' @return A \code{sentomeasures} object, which is a \code{list} containing:
#' \item{measures}{a \code{data.table} with a \code{"date"} column and all textual sentiment measures as remaining columns.}
#' \item{features}{a \code{character} vector of the different features.}
#' \item{lexicons}{a \code{character} vector of the different lexicons used.}
#' \item{time}{a \code{character} vector of the different time weighting schemes used.}
#' \item{by}{a single \code{character} vector specifying the time interval of aggregation used.}
#' \item{stats}{a \code{data.frame} with a series of elementary statistics (mean, standard deviation, maximum, minimum, and
#' average correlation with all other measures) for each individual sentiment measure.}
#' \item{sentiment}{the sentiment scores \code{data.table} with \code{"date"}, \code{"word_count"} and lexicon--feature sentiment
#' scores columns.
#' If \code{ctr$do.ignoreZeros = TRUE}, all zeros are replaced by \code{NA}.}
#' \item{howWithin}{a single \code{character} vector to remind how sentiment within documents was aggregated.}
#' \item{howDocs}{a single \code{character} vector to remind how sentiment across documents was aggregated.}
#' \item{fill}{a single \code{character} vector that specifies if and how missing dates have been added before
#' aggregation across time was carried out.}
#' \item{do.ignoreZeros}{a single \code{character} vector to remind if documents with zero sentiment have been ignored in the
#' within-document aggregation.}
#' \item{attribWeights}{a \code{list} of document and time weights used in the \code{\link{retrieve_attributions}} function.
#' Serves further no direct purpose.}
#'
#' @seealso \code{\link{compute_sentiment}}, \code{\link{perform_agg}}
#'
#' @examples
#' data("usnews")
#' data("lexicons")
#' data("valence")
#'
#' # construct a sentomeasures object to start with
#' corpus <- sento_corpus(corpusdf = usnews)
#' corpusSample <- quanteda::corpus_sample(corpus, size = 750)
#' l <- setup_lexicons(lexicons[c("LM_eng", "HENRY_eng")], valence[["valence_eng"]])
#' ctr <- ctr_agg(howWithin = "tf-idf",
#' howDocs = "proportional",
#' howTime = c("equal_weight", "linear", "almon"),
#' by = "month",
#' lag = 3,
#' ordersAlm = 1:3,
#' do.inverseAlm = TRUE,
#' do.normalizeAlm = TRUE)
#' sentomeasures <- sento_measures(corpusSample, l, ctr)
#' summary(sentomeasures)
#'
#' @import data.table
#' @export
sento_measures <- function(sentocorpus, lexicons, ctr) {
  # Validate input class, score sentiment per document, then aggregate the
  # document-level scores into a sentomeasures object.
  check_class(sentocorpus, "sentocorpus")
  sentimentScores <- compute_sentiment(sentocorpus, lexicons, how = ctr$howWithin)
  perform_agg(sentimentScores, ctr)
}
#' @export
summary.sentomeasures <- function(object, ...) {
  # Human-readable overview of a sentomeasures object: dimensions, frequency,
  # aggregation choices and aggregate statistics.
  sm <- object
  nSeries <- ncol(sm$measures) - 1 # first column holds the dates
  nObs <- nrow(sm$measures)
  freqLabels <- c(day = "daily", week = "weekly", month = "monthly", year = "yearly")
  freq <- unname(freqLabels[names(freqLabels) %in% sm$by])
  cat("This sentomeasures object contains", nSeries, "textual sentiment time series",
      "with", nObs, "observations each,", "at a", freq, "frequency.", "\n")
  cat("The corpus has following features:", sm$features, "\n")
  cat("\n")
  cat("Following lexicons were used to calculate sentiment:", sm$lexicons, "\n")
  cat("Following scheme was applied for aggregation within documents:", sm$howWithin, "\n")
  cat("Following scheme was applied for aggregation across documents:", sm$howDocs, "\n")
  cat("Following schemes were applied for aggregation across time:", sm$time, "\n")
  cat("\n")
  cat("Aggregate statistics:", "\n")
  print(round(rowMeans(sm$stats), 5))
}
#' @export
print.sentomeasures <- function(x, ...) {
  # One-line description: number of series (columns minus the date column)
  # and number of observations.
  dims <- dim(x$measures)
  cat("A sentomeasures object that carries with it", dims[2] - 1,
      "distinct textual sentiment time series of", dims[1], "observations each.")
}
#' Set up lexicons (and valence word list) for use in sentiment analysis
#'
#' @author Samuel Borms
#'
#' @description Structures provided lexicons and potentially integrates valence words. One can also provide (part of) the
#' built-in lexicons from \code{data("lexicons")} or a valence word list from \code{data("valence")} as an argument.
#' Makes use of the \code{\link[sentimentr]{as_key}} function from the \pkg{sentimentr} package to make the output coherent
#' and check for duplicates.
#'
#' @param lexiconsIn a named \code{list} of (raw) lexicons, each element being a \code{data.frame} or a \code{data.table} with
#' respectively a words column and a polarity score column. Alternatively, a subset of the already formatted built-in lexicons
#' accessible via \code{lexicons} can be declared too, as part of the same list input. If only (some of) the package built-in
#' lexicons want to be used (with \emph{no} valence shifters), one can simply supply \code{lexicons[c(...)]} as an argument to
#' either \code{\link{sento_measures}} or \code{\link{compute_sentiment}}. However, it is strongly recommended to pass all
#' lexicons (and a valence word list) to this function first, in any case.
#' @param valenceIn a single valence word list as a \code{data.frame} or a \code{data.table} with respectively a words column,
#' a type column (\code{1} for negators, \code{2} for amplifiers/intensifiers, and \code{3} for deamplifiers/downtoners) and a
#' score column. Suggested scores are -1, 2, and 0.5 respectively, and should be the same within each type. This argument can
#' also be one of the already formatted built-in valence word lists accessible via \code{valence}. If \code{NULL}, no valence
#' word list is part of this function's output, nor will it be applied in the sentiment analysis.
#' @param do.split a \code{logical} that if \code{TRUE} splits every lexicon into a separate positive polarity and negative
#' polarity lexicon.
#'
#' @return A \code{list} with each lexicon as a separate element according to its name, as a \code{data.table}, and optionally
#' an element named \code{valence} that comprises the valence words. Every \code{x} column contains the words, every \code{y}
#' column contains the polarity score, and for the valence word list, \code{t} contains the word type. If a valence word list
#' is provided, all lexicons are expanded by copying the respective lexicon, and changing the words and scores according to
#' the valence word type: "NOT_" is added for negators, "VERY_" is added for amplifiers and "HARDLY_" is added for
#' deamplifiers. Lexicon scores are multiplied by -1, 2 and 0.5 by default, respectively, or the first value of the scores
#' column of the valence word list.
#'
#' @seealso \code{\link[sentimentr]{as_key}}
#'
#' @examples
#' data("lexicons")
#' data("valence")
#'
#' # sets up output list straight from built-in word lists including valence words
#' l1 <- c(lexicons[c("LM_eng", "HENRY_eng")], valence[["eng"]])
#'
#' # including a self-made lexicon, with and without valence shifters
#' lexIn <- c(list(myLexicon = data.table(w = c("nice", "boring"), s = c(2, -1))),
#' lexicons[c("GI_eng")])
#' valIn <- valence[["valence_eng"]]
#' l2 <- setup_lexicons(lexIn)
#' l3 <- setup_lexicons(lexIn, valIn)
#' l4 <- setup_lexicons(lexIn, valIn, do.split = TRUE)
#'
#' \dontrun{
#' # include lexicons from lexicon package
#' library("lexicon")
#' lexIn2 <- list(hul = lexicon::hash_sentiment_huliu, joc = lexicon::hash_sentiment_jockers)
#' l5 <- setup_lexicons(c(lexIn, lexIn2), valIn)}
#'
#' @export
setup_lexicons <- function(lexiconsIn, valenceIn = NULL, do.split = FALSE) {
  # --- input validation ---
  if (!is.list(lexiconsIn))
    stop("The 'lexiconsIn' input should be a list.")
  # '&&' (scalar, short-circuiting) instead of '&' for an if() condition
  if (!is.data.frame(valenceIn) && !is.null(valenceIn))
    stop("The 'valenceIn' argument should be a data.table or data.frame if not NULL.")
  if (any(is.na(names(lexiconsIn))))
    stop("At least one lexicon's name is NA. Please provide proper list names.")
  # check for duplicated lexicon names
  if (anyDuplicated(names(lexiconsIn)) > 0) {
    duplics <- unique(names(lexiconsIn[duplicated(names(lexiconsIn))]))
    stop(paste0("Names of lexicons are not unique. Following names occur at least twice: ",
                paste0(duplics, collapse = ", ")))
  }
  lexNames <- names(lexiconsIn)
  # convert to sentimentr format while suppressing warnings on removal of duplicated values
  lexicons <- suppressWarnings(lapply(lexiconsIn, sentimentr::as_key, comparison = NULL))
  lexicons <- lapply(lexicons, function(x) {names(x) <- c("x", "y"); return(x)})
  names(lexicons) <- lexNames
  if (!is.null(valenceIn)) {
    names(valenceIn) <- c("x", "t", "y")
    valTypes <- unique(valenceIn$t)
    # first score per valence type (1 = negator, 2 = amplifier, 3 = deamplifier)
    scores <- c(valenceIn[valenceIn$t == 1, ]$y[1], valenceIn[valenceIn$t == 2, ]$y[1], valenceIn[valenceIn$t == 3, ]$y[1])
    lexicons <- expand_lexicons(lexicons, types = valTypes, scores = scores)
  }
  # split each lexicon into a positive and a negative polarity words only lexicon
  if (do.split == TRUE) {
    lexiconsPos <- lapply(lexicons, function(lex) return(lex[lex$y > 0]))
    names(lexiconsPos) <- paste0(names(lexicons), "_POS")
    lexiconsNeg <- lapply(lexicons, function(lex) return(lex[lex$y < 0]))
    names(lexiconsNeg) <- paste0(names(lexicons), "_NEG")
    lexicons <- c(lexiconsPos, lexiconsNeg)
  }
  if (!is.null(valenceIn)) {
    # append the deduplicated valence word list for later use in compute_sentiment()
    lexicons[["valence"]] <- valenceIn[!duplicated(valenceIn$x), ]
  }
  return(lexicons)
}
.compute_sentiment <- function(sentocorpus, lexicons, how = get_hows()$words, dfm = NULL) {
  # Computes per-document net sentiment (bag-of-words) for every lexicon,
  # weighted within documents according to 'how'. Returns a list with the
  # (possibly valence-modified) corpus, a by-date sentiment data.table, and
  # feature/lexicon/aggregation metadata.
  check_class(sentocorpus, "sentocorpus")
  if (length(how) > 1) how <- how[1] # keep only the first option if a vector slips through
  if ("valence" %in% names(lexicons)) {
    cat("Modify corpus to account for valence words... ")
    # rewrite the corpus texts so that valence-shifted words match the expanded
    # lexicon entries built in setup_lexicons() (e.g. "NOT_", "VERY_" prefixes)
    quanteda::texts(sentocorpus) <- include_valence(quanteda::texts(sentocorpus), lexicons[["valence"]])
    cat("Done.", "\n")
  }
  lexNames <- names(lexicons)[names(lexicons) != "valence"]
  features <- names(quanteda::docvars(sentocorpus))[-1] # drop date column
  cat("Compute sentiment... ")
  # frequency-based document-feature matrix (rows are corpus ids, columns are words)
  if (is.null(dfm)) {
    dfm <- quanteda::dfm(quanteda::tokens(sentocorpus, remove_punct = TRUE, remove_numbers = TRUE,
                                          remove_symbols = TRUE, remove_separators = TRUE), verbose = FALSE)
  } else if (!quanteda::is.dfm(dfm))
    stop("The 'dfm' argument should pass quanteda::is.dfm(dfm).")
  if (how == "counts" || how == "proportional") {
    fdm <- quanteda::t(dfm) # feature-document matrix
  } else if (how == "tf-idf") {
    # NOTE(review): quanteda::tfidf() is deprecated in newer quanteda releases
    # (replaced by dfm_tfidf()) -- confirm the package version pinned here
    weights <- quanteda::tfidf(dfm, scheme_tf = "prop")
    fdmWeighted <- quanteda::t(weights)
  } else stop("Please select an appropriate aggregation 'how'.")
  # one sentiment column per lexicon, one row per document
  s <- as.data.table(matrix(0, nrow = quanteda::ndoc(sentocorpus), ncol = length(lexNames)))
  names(s) <- lexNames
  allWords <- quanteda::featnames(dfm)
  wCounts <- quanteda::rowSums(dfm, na.rm = TRUE) # word count per document
  for (lexicon in lexNames) { # locate polarized words and set weights to their polarity or keep at zero
    lexWords <- lexicons[[lexicon]]$x
    lexScores <- lexicons[[lexicon]]$y
    names(lexScores) <- lexWords
    allScores <- rep(0, length(allWords))
    polInd <- allWords %in% lexWords
    allScores[polInd] <- lexScores[allWords[polInd]]
    names(allScores) <- allWords
    # document score = sum over words of (count or weight) * polarity
    if (how == "counts") {
      scores <- quanteda::rowSums(quanteda::t(fdm * allScores))
    } else if (how == "proportional") {
      scores <- quanteda::rowSums(quanteda::t(fdm * allScores)) / wCounts
    } else scores <- quanteda::rowSums(quanteda::t(fdmWeighted * allScores))
    scores[is.na(scores)] <- 0 # set NA/NaN sentiment to 0 (e.g., if document contains no words)
    s[, (lexicon) := scores]
  }
  # structure: date - feature1 - ... - word_count - lexicon1 (sentiment) - ...
  s <- as.data.table(cbind(id = quanteda::docnames(sentocorpus), quanteda::docvars(sentocorpus), word_count = wCounts, s))
  # compute feature-sentiment per document for all lexicons and order by date
  sent <- get_features_sentiment(s, features, lexNames)
  sent <- sent[order(date)]
  cat("Done.", "\n")
  sentOut <- list(corpus = sentocorpus, # not the same as input corpus if accounted for valence shifters
                  sentiment = sent,
                  features = features,
                  lexicons = lexNames,
                  howWithin = how)
  return(sentOut)
}
#' Compute document-level sentiment across features and lexicons
#'
#' @author Samuel Borms
#'
#' @description Given a corpus of texts, computes sentiment per document using the bag-of-words approach
#' based on the lexicons provided and a choice of aggregation across words per document. Relies partly on the
#' \pkg{quanteda} package. The scores computed are net sentiment (sum of positive minus sum of negative scores).
#'
#' @details
#' For a separate calculation of positive (resp. negative) sentiment, one has to provide distinct positive (resp. negative)
#' lexicons. This can be done using the \code{do.split} option in the \code{\link{setup_lexicons}} function, which splits out
#' the lexicons into a positive and a negative polarity counterpart. \code{NA}s are converted to 0, under the assumption that
#' this is equivalent to no sentiment.
#'
#' @param sentocorpus a \code{sentocorpus} object created with \code{\link{sento_corpus}}.
#' @param lexicons output from a \code{\link{setup_lexicons}} call.
#' @param how a single \code{character} vector defining how aggregation within documents should be performed. For currently
#' available options on how aggregation can occur, see \code{\link{get_hows}()$words}.
#' @param dfm optional; an output from a \pkg{quanteda} \code{\link[quanteda]{dfm}} call, such that users can specify their
#' own tokenization scheme (via \code{\link[quanteda]{tokens}}) as well as other parameters related to the construction of
#' a document-feature matrix (dfm). By default, a dfm is created based on a tokenization that removes punctuation, numbers,
#' symbols and separators. We suggest to stick to unigrams, as the remainder of the sentiment computation and built-in
#' lexicons assume the same.
#'
#' @return A \code{list} containing:
#' \item{corpus}{the supplied \code{sentocorpus} object; the texts are altered if valence shifters are part of the lexicons.}
#' \item{sentiment}{the sentiment scores \code{data.table} with a \code{"date"} and a \code{"word_count"} column and all
#' lexicon--feature sentiment scores columns.}
#' \item{features}{a \code{character} vector of the different features.}
#' \item{lexicons}{a \code{character} vector of the different lexicons used.}
#' \item{howWithin}{a \code{character} vector to remind how sentiment within documents was aggregated.}
#'
#' @seealso \code{\link[quanteda]{dfm}}, \code{\link[quanteda]{tokens}}
#'
#' @examples
#' data("usnews")
#' data("lexicons")
#' data("valence")
#'
#' # sentiment computation based on raw frequency counts
#' corpus <- sento_corpus(corpusdf = usnews)
#' corpusSample <- quanteda::corpus_sample(corpus, size = 1000)
#' l <- setup_lexicons(lexicons[c("LM_eng", "HENRY_eng")], valence[["valence_eng"]])
#' sent <- compute_sentiment(corpusSample, l, how = "counts")
#'
#' \dontrun{
#' # same sentiment computation based on a user-supplied dfm with default settings
#' dfm <- quanteda::dfm(quanteda::tokens(corpus), verbose = FALSE)
#' sent <- compute_sentiment(corpusSample, l, how = "counts", dfm = dfm)}
#'
#' @export
compute_sentiment <- compiler::cmpfun(.compute_sentiment) # byte-compiled exported entry point for .compute_sentiment
.get_features_sentiment <- function(sent, features, lexNames) {
  # Crosses every lexicon sentiment column with every feature column into
  # "lexicon--feature" columns (element-wise product), then drops the original
  # plain lexicon and feature columns. 'sent' is a data.table and is modified
  # by reference; the modified table is also returned.
  for (lexicon in lexNames) { # multiply lexicons with features to obtain feature-sentiment scores per lexicon
    nms <- paste0(lexicon, "--", features)
    sent[, nms] <- sent[[lexicon]] * sent[, features, with = FALSE]
  }
  sent[, eval(c(lexNames, features)) := NULL][] # remove since replaced by lexicon--feature columns
  return(sent)
}
# byte-compiled version used by .compute_sentiment()
get_features_sentiment <- compiler::cmpfun(.get_features_sentiment)
#' Aggregate textual sentiment across documents and time
#'
#' @author Samuel Borms, Keven Bluteau
#'
#' @description Condense document-level textual sentiment scores into a panel of textual sentiment
#' measures by aggregating across documents and time. This function is called within \code{\link{sento_measures}},
#' applied on the output of \code{\link{compute_sentiment}}.
#'
#' @param toAgg output from a \code{\link{compute_sentiment}} call.
#' @param ctr output from a \code{\link{ctr_agg}} call. The \code{"howWithin"} argument plays no more role.
#'
#' @return A \code{sentomeasures} object.
#'
#' @seealso \code{\link{compute_sentiment}}, \code{\link{ctr_agg}}, \code{\link{sento_measures}}
#'
#' @examples
#' data("usnews")
#' data("lexicons")
#' data("valence")
#'
#' # computation of sentiment and aggregation into sentiment measures
#' corpus <- sento_corpus(corpusdf = usnews)
#' corpusSample <- quanteda::corpus_sample(corpus, size = 1000)
#' l <- setup_lexicons(lexicons[c("LM_eng", "HENRY_eng")], valence[["valence_eng"]])
#' sent <- compute_sentiment(corpusSample, l, how = "counts")
#' ctr <- ctr_agg(howTime = c("linear"), by = "year", lag = 3)
#' sentomeasures <- perform_agg(sent, ctr)
#'
#' @export
perform_agg <- function(toAgg, ctr) {
  # Two-step aggregation: first condense document scores into by-date measures,
  # then smooth those measures across time with the requested weighting schemes.
  aggDocs <- agg_documents(toAgg, by = ctr$by, how = ctr$howDocs,
                           do.ignoreZeros = ctr$do.ignoreZeros)
  # ctr$other is a list of extra time-weighting arguments (possibly empty)
  agg_time(aggDocs, lag = ctr$lag, fill = ctr$fill, how = ctr$howTime, ctr$other)
}
agg_documents <- function(toAgg, by, how = get_hows()$docs, do.ignoreZeros = TRUE) {
  # Aggregates document-level feature-sentiment scores into one value per date
  # at the 'by' frequency (year/month/week/day), weighting across documents
  # according to 'how'. Returns a sentomeasures object whose time-related slots
  # (time, stats, fill) are placeholders until agg_time() fills them in.
  features <- toAgg$features
  lexNames <- toAgg$lexicons
  sent <- toAgg$sentiment
  attribWeights <- list(W = NA, B = NA) # list with weights useful in later attribution analysis
  # reformat dates so they can be aggregated at the specified 'by' level, and cast to Date format
  if (by == "year") {
    years <- sapply(stringi::stri_split(sent$date, regex = "-"), "[", 1)
    dates <- as.Date(paste0(years, "-01-01"), format = "%Y-%m-%d")
  } else if (by == "month") {
    months <- unlist(lapply(stringi::stri_split(sent$date, regex = "-"), function(d) return(paste0(d[1:2], collapse = "-"))))
    dates <- as.Date(paste0(months, "-01"), format = "%Y-%m-%d")
  } else if (by == "week") {
    weeks <- ISOweek::ISOweek(sent$date)
    dates <- ISOweek::ISOweek2date(paste(weeks, 1, sep = "-")) # get first day of week based on ISO standard
  } else {
    # default: daily frequency, dates used as-is
    dates <- as.Date(sent$date, format = "%Y-%m-%d")
  }
  sent$date <- dates
  # ignore documents with zero sentiment in aggregation (if do.ignoreZeros is TRUE)
  if (do.ignoreZeros == TRUE)
    sent[, names(sent)] <- sent[, names(sent), with = FALSE][, lapply(.SD, function(x) replace(x, which(x == 0), NA))]
  # aggregate feature-sentiment per document by date for all lexicon columns
  s <- sent[, -1] # drop the id column (first column, cf. .compute_sentiment)
  if (how == "equal_weight") {
    if (do.ignoreZeros == TRUE) {
      docsIn <- s[, lapply(.SD, function(x) (x * 1) / x), by = date] # indicator of 1 if document score not equal to NA
      weights <- docsIn[, lapply(.SD, function(x) x / sum(x, na.rm = TRUE)), by = date][, -1:-2]
    } else {
      # ':=' adds the helper column by reference; extracted then removed below
      weights <- s[, w := 1 / .N, by = date][["w"]]
      s[, w := NULL]
    }
  } else if (how == "proportional") { # proportional w.r.t. words in document vs. total words in all documents per date
    if (do.ignoreZeros == TRUE) {
      docsIn <- s[, lapply(.SD, function(x) (x * word_count) / x), by = date]
      weights <- docsIn[, lapply(.SD, function(x) x / sum(x, na.rm = TRUE)), by = date][, -1:-2]
    } else {
      weights <- s[, list(w = word_count / sum(word_count, na.rm = TRUE)), by = date][["w"]]
    }
  }
  # NOTE(review): 'weights' is a data.table in the ignoreZeros branches but a
  # plain vector otherwise -- the multiplication below relies on recycling
  # semantics being right in both cases; confirm with a regression test
  attribWeights[["W"]] <- data.table(id = sent$id, date = sent$date, weights)
  sw <- data.table(date = s$date, s[, -1:-2] * weights) # weighted document scores
  measures <- sw[, lapply(.SD, function(x) sum(x, na.rm = TRUE)), by = date]
  sentomeasures <- list(measures = measures,
                        features = features,
                        lexicons = lexNames,
                        time = NA, # set later in agg_time()
                        by = by,
                        stats = NA, # zeros replaced by NAs if do.ignoreZeros = TRUE
                        sentiment = sent,
                        howWithin = toAgg$howWithin,
                        howDocs = how,
                        fill = NA, # set later in agg_time()
                        do.ignoreZeros = do.ignoreZeros,
                        attribWeights = attribWeights)
  class(sentomeasures) <- c("sentomeasures")
  return(sentomeasures)
}
agg_time <- function(sentomeasures, lag, fill, how = get_hows()$time, ...) {
  # Smooths the by-date measures across time with a rolling weighted sum, once
  # per time weighting scheme in 'how'. '...' may carry a list of additional
  # arguments for the weighting schemes (passed on to setup_time_weights()).
  check_class(sentomeasures, "sentomeasures")
  dots <- tryCatch(list(...)[[1]], # extract list from list of list
                   error = function(x) list(...)) # if ... is empty
  # construct all weights and check for duplicated names
  weights <- setup_time_weights(lag, how, dots)
  if (sum(duplicated(colnames(weights))) > 0) {
    duplics <- unique(colnames(weights)[duplicated(colnames(weights))])
    stop(paste0("Names of weighting schemes are not unique. Following names occur at least twice: ",
                paste0(duplics, collapse = ", ")))
  }
  sentomeasures$attribWeights[["B"]] <- copy(weights) # copy() guards against later by-reference edits
  # apply rolling time window, if not too large, for every weights column and combine all new measures column-wise
  if (!(fill %in% "none")) sentomeasures <- fill_measures(sentomeasures, fill = fill)
  measures <- sentomeasures$measures
  toRoll <- measures[, -1] # all columns except date
  n <- nrow(weights) # window length (presumably equal to 'lag', cf. setup_time_weights -- confirm)
  m <- nrow(measures)
  if (n > m)
    stop("Rolling time aggregation window (= ", n, ") is too large for number of observations per measure (= ", m, ")")
  for (i in 1:ncol(weights)) {
    name <- colnames(weights)[i]
    add <- RcppRoll::roll_sum(as.matrix(toRoll), n = n, weights = as.vector(weights[, i]),
                              normalize = FALSE, align = "right", na.rm = TRUE)
    colnames(add) <- paste0(colnames(toRoll), "--", name) # e.g. "lex--feat" becomes "lex--feat--scheme"
    if (i == 1) measuresAggTime <- add
    else measuresAggTime <- cbind(measuresAggTime, add)
  }
  measuresAggTime <- as.data.table(measuresAggTime)
  if (n > 1) date <- measures$date[-1:-(n-1)] # first (n - 1) dates lack a full window
  else date <- measures$date
  measuresAggTime$date <- date
  # move the date column back to the front
  measuresAggTime <- setcolorder(measuresAggTime, c("date", colnames(measuresAggTime)[-ncol(measuresAggTime)]))
  sentomeasures$measures <- measuresAggTime
  sentomeasures$time <- colnames(weights)
  sentomeasures$stats <- compute_stats(sentomeasures)
  sentomeasures$fill <- fill
  return(sentomeasures)
}
#' Set up control for merging sentiment measures
#'
#' @author Samuel Borms
#'
#' @description Sets up control object for the optional merging (additional aggregation) of sentiment measures as
#' done by \code{\link{merge_measures}}.
#'
#' @param sentomeasures a \code{sentomeasures} object created using \code{\link{sento_measures}}. This is necessary to check
#' whether the other input arguments make sense.
#' @param lexicons a \code{list} with unique lexicons to merge at given name, e.g., \cr
#' \code{list(lex12 = c("lex1", "lex2"))}. See \code{sentomeasures$lexicons} for the exact names to use. Use \code{NA} to
#' apply no merging across this dimension.
#' @param features a \code{list} with unique features to merge at given name, e.g., \cr
#' \code{list(feat12 = c("feat1", "feat2"))}. See \code{sentomeasures$features} for the exact names to use. Use \code{NA} to
#' apply no merging across this dimension.
#' @param time a \code{list} with unique time weighting schemes to merge at given name, e.g., \cr
#' \code{list(tw12 = c("tw1", "tw2"))}. See \code{sentomeasures$time} for the exact names to use. Use \code{NA} to
#' apply no merging across this dimension.
#' @param do.keep a \code{logical} indicating if the original sentiment measures should be kept (i.e., the merged
#' sentiment measures will be added to the current sentiment measures as additional indices if \code{do.keep = TRUE}).
#'
#' @return A \code{list} encapsulating the control parameters.
#'
#' @seealso \code{\link{merge_measures}}
#'
#' @examples
#' data("usnews")
#' data("lexicons")
#' data("valence")
#'
#' # construct a sentomeasures object to start with
#' corpus <- sento_corpus(corpusdf = usnews)
#' corpusSample <- quanteda::corpus_sample(corpus, size = 750)
#' l <- setup_lexicons(lexicons[c("LM_eng", "HENRY_eng")], valence[["valence_eng"]])
#' ctr <- ctr_agg(howTime = c("equal_weight", "linear"), by = "year", lag = 3)
#' sentomeasures <- sento_measures(corpusSample, l, ctr)
#'
#' # set up a correct control function
#' ctrMerge <- ctr_merge(sentomeasures,
#' time = list(W = c("equal_weight", "linear")),
#' lexicons = list(LEX = c("LM_eng", "HENRY_eng")),
#' features = list(journals = c("wsj", "wapo")),
#' do.keep = TRUE)
#'
#' \dontrun{
#' # produces an informative error message
#' ctrMerge <- ctr_merge(sentomeasures,
#' time = list(W = c("equal_weight", "almon1")),
#' lexicons = list(LEX = c("LM_eng", "HENRY_eng")),
#' features = list(journals = c("notInHere", "wapo")))}
#'
#' @export
ctr_merge <- function(sentomeasures, features = NA, lexicons = NA, time = NA, do.keep = FALSE) {
  # Validates the requested merges against the available components and
  # assembles the control list consumed by merge_measures().
  check_class(sentomeasures, "sentomeasures")
  # Checks one merge list against the available component names; returns
  # list(missings = components not found, tooFew = merge names having fewer
  # than two columns to combine or duplicated columns).
  validate_merges <- function(merges, available) {
    missings <- unlist(merges)[!(unlist(merges) %in% available)]
    tooFew <- c()
    for (i in seq_along(merges)) {
      if (length(merges[[i]]) <= 1 || anyDuplicated(merges[[i]]) > 0)
        tooFew <- c(tooFew, names(merges)[i])
    }
    list(missings = missings, tooFew = tooFew)
  }
  missings <- c()
  tooFew <- c()
  if (all(!is.na(lexicons))) {
    checked <- validate_merges(lexicons, sentomeasures$lexicons)
    missings <- c(missings, checked$missings)
    tooFew <- c(tooFew, checked$tooFew)
  }
  if (all(!is.na(features))) {
    checked <- validate_merges(features, sentomeasures$features)
    missings <- c(missings, checked$missings)
    tooFew <- c(tooFew, checked$tooFew)
  }
  if (all(!is.na(time))) {
    checked <- validate_merges(time, sentomeasures$time)
    missings <- c(missings, checked$missings)
    tooFew <- c(tooFew, checked$tooFew)
  }
  # assemble warning messages if any
  msg1 <- c()
  msg2 <- c()
  if (length(missings) > 0) {
    msg1 <- paste0("Following columns to merge are not found: ",
                   paste0(missings, collapse = ", "), ".")
    warning(msg1)
  }
  if (length(tooFew) > 0) {
    msg2 <- paste0("Following merges have less than two or not all unique columns: ",
                   paste0(tooFew, collapse = ", "), ".")
    warning(msg2)
  }
  # bug fix: the original tested length((msg2) > 0), i.e. the length of a
  # comparison, instead of comparing length(msg2) to zero
  if (length(msg1) > 0 || length(msg2) > 0) stop("Wrong inputs. See warning messages for specifics.")
  ctr <- list(sentomeasures = sentomeasures,
              lexicons = lexicons,
              features = features,
              time = time,
              do.keep = do.keep)
  return(ctr)
}
#' Merge sentiment measures
#'
#' @author Samuel Borms
#'
#' @description Merge (further aggregate) measures by combining across provided lexicons, features, and time weighting schemes
#' dimensions. The combination occurs by taking the mean of the relevant measures.
#'
#' @param ctr output from a \code{\link{ctr_merge}} call.
#'
#' @return A modified \code{sentomeasures} object, with only the sentiment measures required, including updated information
#' and statistics, but the original sentiment scores \code{data.table} untouched.
#'
#' @seealso \code{\link{ctr_merge}}
#'
#' @examples
#' data("usnews")
#' data("lexicons")
#' data("valence")
#'
#' # construct a sentomeasures object to start with
#' corpus <- sento_corpus(corpusdf = usnews)
#' corpusSample <- quanteda::corpus_sample(corpus, size = 500)
#' l <- setup_lexicons(lexicons[c("LM_eng", "HENRY_eng")], valence[["valence_eng"]])
#' ctr <- ctr_agg(howTime = c("equal_weight", "linear"), by = "year", lag = 3)
#' sentomeasures <- sento_measures(corpusSample, l, ctr)
#'
#' # set up control function and perform the merging
#' ctrMerge <- ctr_merge(sentomeasures,
#' time = list(W = c("equal_weight", "linear")),
#' features = list(journals = c("wsj", "wapo")),
#' do.keep = TRUE)
#' sentomeasuresMerged <- merge_measures(ctrMerge)
#'
#' @export
merge_measures <- function(ctr) {
  # Further aggregates sentiment measures by averaging across the lexicons,
  # features and time dimensions as specified in the ctr_merge() control
  # object. Returns an updated sentomeasures object.
  sentomeasures <- ctr$sentomeasures
  measures <- sentomeasures$measures
  toMerge <- ctr[c("lexicons", "features", "time")]
  do.keep <- ctr$do.keep
  if (do.keep == TRUE) { # remember originals so they can be re-added afterwards
    measuresOld <- measures
    namesOld <- colnames(measures)
  }
  # loop over lex(icon), feat(ure) and time lists
  for (across in toMerge[!is.na(toMerge)]) {
    # loop over set of aggregation levels to merge (combine) into given name (e.g., lex12 = c("lex1", "lex2"))
    for (i in seq_along(across)) {
      name <- names(across)[i] # e.g. "lex12"
      cols <- across[[i]] # e.g. c("lex1", "lex2")
      # find all sentiment columns aggregated at one of the 'cols' aggregation levels and stack them into ls
      ls <- list()
      for (elem in cols) {
        sel <- colnames(measures)[stringi::stri_detect(colnames(measures), regex = paste0("\\b", elem, "\\b"))] # exact match
        ls[[elem]] <- measures[, sel, with = FALSE, drop = FALSE]
        measures <- measures[, !sel, with = FALSE, drop = FALSE]
      }
      # take element-wise average for every row/column combination across columns
      # to merge; ncol is identical across the elements of ls. Bug fix: the
      # original condition ncol(ls[[1]] >= 2) had a misplaced parenthesis and
      # was always truthy, so the (broken) rowSums else-branch was unreachable;
      # the 3-d mean below handles any number of columns correctly.
      all <- abind::abind(ls, along = 3)
      merged <- apply(all, c(1, 2), mean, na.rm = TRUE)
      # insert new name at name location of aggregation level (e.g. "lex1--top1" + "lex2--top1" = "lex12--top1")
      nms <- stringi::stri_split(colnames(merged), regex = "--") # list
      loc <- which(stringi::stri_detect(nms[[1]], regex = elem))[1] # 'elem' holds the last merged level here
      nmsNew <- lapply(nms, function(x) {
        x[loc] <- name
        return(paste0(x, collapse = "--"))
      })
      colnames(merged) <- unlist(nmsNew)
      measures <- cbind(measures, merged) # add back merged columns for further merging if needed
    }
  }
  # add old unmerged measures to merged measures (if do.keep is TRUE)
  if (do.keep == TRUE) measures <- cbind(measures, measuresOld[, !(namesOld %in% colnames(measures)), with = FALSE])
  sentomeasures <- update_info(sentomeasures, measures) # update information in sentomeasures object
  return(sentomeasures)
}
#' Merge sentiment measures into one global sentiment measure
#'
#' @author Samuel Borms
#'
#' @description Merges all sentiment measures into one global textual sentiment measure based on a set of weights to
#' indicate the importance of each component in the \code{lexicons}, \code{features}, and \code{time} vectors as specified
#' in the input \code{sentomeasures} object. Every measure receives a weight in the global measure equal to the multiplication
#' of the supplied weights of the components it is composed of. The global sentiment measure then corresponds to a
#' weighted average of these weights times the sentiment scores, per date.
#'
#' @details This function returns no \code{sentomeasures} object, however the global sentiment measure as outputted can
#' be added to regressions as an additional variable using the \code{x} argument in the \code{\link{sento_model}} function.
#'
#' @param sentomeasures a \code{sentomeasures} object created using \code{\link{sento_measures}}.
#' @param lexicons a \code{numeric} vector of weights, of size \code{length(sentomeasures$lexicons)}, in the same order
#' and summing to one. By default set to 1, which means equally weighted.
#' @param features a \code{numeric} vector of weights, of size \code{length(sentomeasures$features)}, in the same order
#' and summing to one. By default set to 1, which means equally weighted.
#' @param time a \code{numeric} vector of weights, of size \code{length(sentomeasures$time)}, in the same order and summing
#' to one. By default set to 1, which means equally weighted.
#'
#' @return A \code{data.frame} with the values for the global sentiment measure under the \code{global} column and dates as
#' row names.
#'
#' @seealso \code{\link{sento_model}}
#'
#' @examples
#' data("usnews")
#' data("lexicons")
#' data("valence")
#'
#' # construct a sentomeasures object to start with
#' corpus <- sento_corpus(corpusdf = usnews)
#' corpusSample <- quanteda::corpus_sample(corpus, size = 1250)
#' l <- setup_lexicons(lexicons[c("LM_eng", "HENRY_eng")], valence[["valence_eng"]])
#' ctr <- ctr_agg(howTime = c("equal_weight", "linear"), by = "year", lag = 3)
#' sentomeasures <- sento_measures(corpusSample, l, ctr)
#'
#' # merge into one global sentiment measure, with specified weighting for lexicons and features
#' global <- to_global(sentomeasures, lexicons = c(0.40, 0.60),
#' features = c(0.10, 0.20, 0.30, 0.40),
#' time = 1)
#'
#' @export
to_global <- function(sentomeasures, lexicons = 1, features = 1, time = 1) {
  # Collapses all sentiment measures into one global weighted-average series;
  # weights per dimension are supplied (or default to equal weighting).
  check_class(sentomeasures, "sentomeasures")
  dims <- list(sentomeasures$lexicons, sentomeasures$features, sentomeasures$time)
  n <- vapply(dims, length, integer(1))
  weightsInp <- list(lexicons, features, time)
  # lapply (not sapply): sapply would silently simplify three equal-length
  # weight lists into a list-matrix, corrupting the weights[[i]] lookups below
  weights <- lapply(1:3, function(i) {
    if (length(weightsInp[[i]]) == 1) w <- as.list(rep(1/n[i], n[i])) # modify weights if equal to default value of 1
    else {
      w <- as.list(weightsInp[[i]])
      # tolerant sum-to-one check: an exact '== 1' fails for weights such as
      # rep(1/3, 3) due to floating point rounding
      if (length(w) != n[i] || !isTRUE(all.equal(sum(unlist(w)), 1)))
        stop("All weights must be equal in length to the respective number of components and sum to one.")
    }
    names(w) <- dims[[i]] # named weight lists
    return(w)
  })
  measures <- sentomeasures$measures
  measuresLong <- to_long(measures) # long format
  # extract different weights based on how measuresLong is ordered and add a global weights (w) column
  wLex <- unlist(weights[[1]][measuresLong[["lexicons"]]])
  wFeat <- unlist(weights[[2]][measuresLong[["features"]]])
  wTime <- unlist(weights[[3]][measuresLong[["time"]]])
  # add a global weights column as the multiplication of the individual weights across the three dimensions per row
  measuresLong[, "w" := wLex * wFeat * wTime]
  global <- as.data.frame(measuresLong[, list(global = sum(value * w)), by = date])
  row.names(global) <- global$date
  global$date <- NULL
  return(global)
}
#' Select a subset of sentiment measures
#'
#' @author Samuel Borms
#'
#' @description Selects the subset of sentiment measures which include either all of the given selection components combined,
#' or those whose name consists of at least one of the selection components. One can also extract measures within a subset
#' of dates.
#'
#' @param sentomeasures a \code{sentomeasures} object created using \code{\link{sento_measures}}.
#' @param toSelect a \code{character} vector of the lexicon, feature and time weighting scheme names, to indicate which
#' measures need to be selected. By default equal to \code{"all"}, which means no selection of the sentiment measures is made;
#' this may be used if one only wants to extract a subset of dates via the \code{dates} argument.
#' @param do.combine a \code{logical} indicating if only measures for which all (\code{do.combine = TRUE}) or at least one
#' (\code{do.combine = FALSE}) of the selection components should occur in each sentiment measure's name in the subset. If
#' \code{do.combine = TRUE}, the \code{toSelect} argument can only consist of one lexicon, one feature, and one time weighting
#' scheme at maximum.
#' @param dates any expression, in the form of a \code{character} vector, that would correctly evaluate to a \code{logical}
#' vector, features the variable \code{date} and has dates specified as \code{"yyyy-mm-dd"}, e.g.
#' \code{dates = "date >= '2000-01-15'"}. This argument may also be a vector of class \code{Date} which extracts all dates
#' that show up in that vector. See the examples. By default equal to \code{NA}, meaning no subsetting based on dates is done.
#'
#' @return A modified \code{sentomeasures} object, with only the sentiment measures required, including updated information
#' and statistics, but the original sentiment scores \code{data.table} untouched.
#'
#' @examples
#' data("usnews")
#' data("lexicons")
#' data("valence")
#'
#' # construct a sentomeasures object to start with
#' corpus <- sento_corpus(corpusdf = usnews)
#' corpusSample <- quanteda::corpus_sample(corpus, size = 1000)
#' l <- setup_lexicons(lexicons[c("LM_eng", "HENRY_eng")], valence[["valence_eng"]])
#' ctr <- ctr_agg(howTime = c("equal_weight", "linear"), by = "year", lag = 3)
#' sentomeasures <- sento_measures(corpusSample, l, ctr)
#'
#' # different selections
#' sel1 <- select_measures(sentomeasures, c("equal_weight"))
#' sel2 <- select_measures(sentomeasures, c("equal_weight", "linear"), do.combine = FALSE)
#' sel3 <- select_measures(sentomeasures, c("linear", "LM_eng"))
#' sel4 <- select_measures(sentomeasures, c("linear", "LM_eng", "wsj", "economy"),
#' do.combine = FALSE)
#' sel5 <- select_measures(sentomeasures, c("linear", "LM_eng"),
#' dates = "date >= '1996-12-31' & date <= '2000-12-31'")
#' d <- seq(as.Date("2000-01-01"), as.Date("2013-12-01"), by = "month")
#' sel6 <- select_measures(sentomeasures, c("linear", "LM_eng"), dates = d)
#'
#' @export
select_measures <- function(sentomeasures, toSelect = "all", do.combine = TRUE, dates = NA) {
  # Subsets the sentiment measures by component names (toSelect) and/or dates,
  # and returns an updated sentomeasures object.
  check_class(sentomeasures, "sentomeasures")
  components <- c(sentomeasures$features, sentomeasures$lexicons, sentomeasures$time)
  if ("all" %in% toSelect) {
    # "all" keeps every measure; a match on any single component then suffices
    toSelect <- components
    do.combine <- FALSE
  }
  unknown <- !(toSelect %in% components)
  if (any(unknown)) {
    stop("Following components make up none of the sentiment measures: ", paste0(toSelect[unknown], collapse = ', '))
  }
  # subset rows on dates: Date vector, logical expression string, or no subsetting
  if (all(is.na(dates))) {
    measures <- sentomeasures$measures
  } else if (inherits(dates, "Date")) {
    measures <- sentomeasures$measures[date %in% dates, ]
  } else {
    measures <- sentomeasures$measures[eval(parse(text = dates)), ]
  }
  parts <- stringi::stri_split(colnames(measures), regex = "--")
  matcher <- if (do.combine == TRUE) all else any
  keep <- sapply(parts, function(p) matcher(toSelect %in% p))
  if (!any(keep)) {
    warning("No appropriate combination is found. Input sentomeasures object is returned.")
    return(sentomeasures)
  }
  keep[1] <- TRUE # always retain the date column
  measuresNew <- measures[, keep, with = FALSE]
  update_info(sentomeasures, measuresNew) # update information in sentomeasures object
}
#' Plot sentiment measures
#'
#' @author Samuel Borms
#'
#' @method plot sentomeasures
#'
#' @description Straightforward plotting method that shows all sentiment measures from the provided \code{sentomeasures}
#' object in one plot, or the average along one of the lexicons, features and time weighting dimensions. We suggest to make
#' use of the \code{\link{select_measures}} function when you desire to plot only a subset of the sentiment measures.
#'
#' @param x a \code{sentomeasures} object created using \code{\link{sento_measures}}.
#' @param group a value from \code{c("lexicons", "features", "time", "all")}. The first three choices display the average of
#' all measures from the same group, in a different color. The choice \code{"all"} displays every single sentiment measure
#' in a separate color, but this may look visually overwhelming very fast, and can be quite slow.
#' @param ... not used.
#'
#' @return Returns a simple \code{\link{ggplot}} object, which can be added onto (or to alter its default elements) by using
#' the \code{+} operator (see examples). By default, a legend is positioned at the top if there are at maximum twelve line
#' graphs plotted.
#'
#' @examples
#' data("usnews")
#' data("lexicons")
#' data("valence")
#'
#' # construct a sentomeasures object to start with
#' corpus <- sento_corpus(corpusdf = usnews)
#' corpusSample <- quanteda::corpus_sample(corpus, size = 500)
#' l <- setup_lexicons(lexicons[c("LM_eng")], valence[["valence_eng"]])
#' ctr <- ctr_agg(howTime = c("equal_weight", "linear"), by = "year", lag = 3)
#' sentomeasures <- sento_measures(corpusSample, l, ctr)
#'
#' # plot sentiment measures
#' plot(sentomeasures)
#' plot(sentomeasures, group = "features")
#'
#' # adjust appearance of plot
#' p <- plot(sentomeasures)
#' p <- p +
#' ggthemes::theme_base() +
#' scale_x_date(name = "month-year") +
#' scale_y_continuous(name = "newName")
#' p
#'
#' @import ggplot2
#' @export
plot.sentomeasures <- function(x, group = "all", ...) {
  # Plots every sentiment measure as a separate line ("all"), or the average
  # per lexicon/feature/time-scheme when 'group' names one of those dimensions.
  # Returns a ggplot object so callers can further customize it with '+'.
  if (!(group %in% c("lexicons", "features", "time", "all")))
    stop("The 'group' argument should be either 'lexicons', 'features', 'time' or 'all'.")
  # melt sentiment measures into long format (date, variable, value) for plotting
  sentomeasures <- x
  measures <- sentomeasures$measures
  if (group == "all") {
    # one line per individual sentiment measure
    measuresMelt <- melt(measures, id.vars = "date", variable.factor = FALSE)
  } else {
    # average the measures per date within the chosen dimension; eval(parse())
    # picks the grouping column dynamically by name -- NOTE(review): get(group)
    # would be a safer alternative to parse/eval here
    measuresMelt <- to_long(measures)[, c("date", group, "value"), with = FALSE]
    measuresMelt <- measuresMelt[, list(value = mean(value)), by = list(date, variable = eval(parse(text = group)))]
  }
  measuresMelt <- measuresMelt[order(rank(as.character(variable)))] # alphabetical legend order
  # hide the legend when it would hold more than twelve entries (cf. roxygen docs)
  legendPos <- ifelse(length(unique(measuresMelt[["variable"]])) <= 12, "top", "none")
  p <- ggplot(data = measuresMelt, aes(x = date, y = value, color = variable)) +
    geom_line() +
    geom_hline(yintercept = 0, size = 0.50, linetype = "dotted") +
    scale_x_date(name = "Date", date_labels = "%m-%Y") +
    scale_y_continuous(name = "Sentiment") +
    ggthemes::theme_tufte(base_size = 12) +
    theme(legend.title = element_blank(), legend.position = legendPos)
  return(p)
}
#' Add and fill missing dates
#'
#' @author Samuel Borms
#'
#' @description Adds missing dates between earliest and latest date of a \code{sentomeasures} object, such that time series
#' is continuous date-wise. Fills in these dates with either 0, the respective latest non-missing value or \code{NA}.
#'
#' @param sentomeasures a \code{sentomeasures} object created using \code{\link{sento_measures}}.
#' @param fill an element of \code{c("zero", "latest", NA)}; the first and last assume missing dates represent zero sentiment,
#' the second assumes missing dates represent constant sentiment.
#'
#' @return A modified \code{sentomeasures} object.
#'
#' @examples
#' data("usnews")
#' data("lexicons")
#' data("valence")
#'
#' # construct a sentomeasures object to start with
#' corpus <- sento_corpus(corpusdf = usnews)
#' corpusSample <- quanteda::corpus_sample(corpus, size = 500)
#' l <- setup_lexicons(lexicons[c("LM_eng", "HENRY_eng")], valence[["valence_eng"]])
#' ctr <- ctr_agg(howTime = c("equal_weight", "linear"), by = "year", lag = 3)
#' sentomeasures <- sento_measures(corpusSample, l, ctr)
#'
#' # fill measures
#' f1 <- fill_measures(sentomeasures)
#' f2 <- fill_measures(sentomeasures, fill = "latest")
#' f3 <- fill_measures(sentomeasures, fill = NA)
#'
#' @export
fill_measures <- function(sentomeasures, fill = "zero") {
  # Makes the measures time series continuous date-wise by inserting every
  # missing date between the first and last observation, filled with zero,
  # the latest preceding value (LOCF), or NA.
  check_class(sentomeasures, "sentomeasures")
  by <- sentomeasures$by # aggregation frequency: "day", "week", "month" or "year"
  measures <- sentomeasures$measures
  dates <- measures$date
  ts <- seq(dates[1], dates[length(dates)], by = by) # continuous date series
  dt <- data.table(date = ts)
  # outer join on date: dates absent from 'measures' get NA in every column
  measuresFill <- merge(dt, measures, by = "date", all = TRUE) # fills with NA
  if (is.na(fill)) {
    # keep the NAs as-is; no type re-coercion needed
    sentomeasures$measures <- measuresFill
    return(sentomeasures)
  } else if (fill == "zero") {
    measuresFill[is.na(measuresFill)] <- 0
  } else if (fill == "latest") {
    # last observation carried forward over the inserted dates
    measuresFill <- zoo::na.locf(measuresFill)
  } else stop("Input variable 'fill' should be either 'zero', 'latest' or NA.")
  # rebuild with all measure columns coerced back to numeric
  # (na.locf may have changed column types -- presumably via a matrix detour)
  measuresFill <- data.table(date = ts, measuresFill[, lapply(.SD, as.numeric), .SDcols = colnames(measures)[-1]])
  sentomeasures$measures <- measuresFill
  return(sentomeasures)
}
#' Scaling and centering of sentiment measures
#'
#' @author Samuel Borms
#'
#' @description Scales and centers the sentiment measures from a \code{sentomeasures} object, column-per-column. By default,
#' the measures are normalized. \code{NA}s are removed first.
#'
#' @param x a \code{sentomeasures} object created using \code{\link{sento_measures}}.
#' @param center a \code{logical}, see documentation for the generic \code{\link{scale}}.
#' @param scale a \code{logical}, see documentation for the generic \code{\link{scale}}.
#'
#' @return A modified \code{sentomeasures} object, with the measures replaced by the scaled measures as well as updated
#' statistics.
#'
#' @examples
#' data("usnews")
#' data("lexicons")
#' data("valence")
#'
#' # construct a sentomeasures object to start with
#' corpus <- sento_corpus(corpusdf = usnews)
#' corpusSample <- quanteda::corpus_sample(corpus, size = 500)
#' l <- setup_lexicons(lexicons[c("LM_eng", "HENRY_eng")], valence[["valence_eng"]])
#' ctr <- ctr_agg(howTime = c("equal_weight", "linear"), by = "year", lag = 3)
#' sentomeasures <- sento_measures(corpusSample, l, ctr)
#'
#' # scale sentiment measures
#' scaled <- scale(sentomeasures)
#'
#' @export
scale.sentomeasures <- function(x, center = TRUE, scale = TRUE) {
  # Centers and/or scales every sentiment measure column-wise via the base
  # scale() generic, then refreshes the summary statistics.
  sentomeasures <- x
  dateCol <- sentomeasures$measures[, 1]
  values <- sentomeasures$measures[, -1] # all measure columns, date dropped
  normalized <- scale(values, center, scale) # dispatches to base scale() on the matrix
  sentomeasures$measures <- data.table(dateCol, normalized)
  sentomeasures$stats <- compute_stats(sentomeasures)
  sentomeasures
}
#' Extract documents related to sentiment peaks
#'
#' @author Samuel Borms
#'
#' @description This function gives the dates and documents for which aggregated sentiment was
#' most extreme (lowest, highest or both in absolute terms).
#'
#' @param sentomeasures a \code{sentomeasures} object created using \code{\link{sento_measures}}.
#' @param sentocorpus the \code{sentocorpus} object created with \code{\link{sento_corpus}}, used for the construction
#' of the input \code{sentomeasures} object.
#' @param n a \code{numeric} value to indicate the number of documents to extract. The associated dates are not
#' necessarily unique, given that, for example, extreme sentiment may occur on only one date but for different sentiment
#' measures.
#' @param type a \code{character} value, either \code{"pos"}, \code{"neg"} or \code{"both"}; respectively to look
#' for the \code{n} most positive, most negative or most extreme (in absolute terms) sentiment occurrences.
#' @param do.average a \code{logical} to indicate whether peaks should be selected based on the average sentiment
#' value per date. If \code{do.average = TRUE}, \code{n} unique dates are guaranteed (cf. argument \code{n}).
#'
#' @return A \code{list} with as elements \code{"dates"}, \code{"ids"} and \code{"docs"}, corresponding to
#' the \code{n} sentiment peaks.
#'
#' @examples
#' data("usnews")
#' data("lexicons")
#' data("valence")
#'
#' # construct a sentomeasures object to start with
#' corpus <- sento_corpus(corpusdf = usnews)
#' corpusSample <- quanteda::corpus_sample(corpus, size = 500)
#' l <- setup_lexicons(lexicons[c("LM_eng", "HENRY_eng")], valence[["valence_eng"]])
#' ctr <- ctr_agg(howTime = c("equal_weight", "linear"), by = "month", lag = 3)
#' sentomeasures <- sento_measures(corpusSample, l, ctr)
#'
#' # extract the peaks
#' peaksAbs <- extract_peakdocs(sentomeasures, corpus, n = 5)
#' peaksPos <- extract_peakdocs(sentomeasures, corpus, n = 5, type = "pos")
#' peaksNeg <- extract_peakdocs(sentomeasures, corpus, n = 5, type = "neg")
#'
#' @export
extract_peakdocs <- function(sentomeasures, sentocorpus, n = 10, type = "both", do.average = FALSE) {
  # Returns the dates, document ids and texts associated with the n most
  # extreme (positive, negative, or absolute) sentiment values.
  check_class(sentomeasures, "sentomeasures")
  measures <- sentomeasures$measures[, -1] # drop the date column
  m <- dim(measures)[2] # number of sentiment measures
  if (n >= (dim(measures)[1] * m)) stop("The parameter 'n' exceeds the total number of sentiment values.")
  if (do.average == TRUE) {
    # collapse to one average sentiment value per date (guarantees n unique dates)
    measures <- rowMeans(measures, na.rm = TRUE)
    dates <- sentomeasures$measures$date
  } else dates <- rep(sentomeasures$measures$date, m) # one date per value, column-major order
  if (type == "both") measures <- abs(measures) # extremes in absolute terms
  # NOTE(review): when do.average = FALSE 'measures' is still a multi-column
  # table, so order() may rank by columns rather than over all flattened
  # values as the rep()-ed dates suggest -- confirm against package behavior.
  # Also, when do.average = TRUE only nrow values exist but m * n indices are
  # taken; the NA tail is presumably truncated by unique()[1:n] below.
  indx <- order(measures, decreasing = ifelse(type == "neg", FALSE, TRUE))[1:(m * n)]
  peakDates <- unique(dates[indx])[1:n]
  ids <- sentomeasures$sentiment[date %in% peakDates, ]$id # get document IDs
  peakDocs <- quanteda::texts(sentocorpus)[row.names(sentocorpus$documents) %in% ids]
  peaks <- list(dates = peakDates, ids = ids, docs = peakDocs)
  return(peaks)
}
#' Differencing of sentiment measures
#'
#' @author Samuel Borms
#'
#' @description Differences the sentiment measures from a \code{sentomeasures} object.
#'
#' @param x a \code{sentomeasures} object created using \code{\link{sento_measures}}.
#' @param lag a \code{numeric}, see documentation for the generic \code{\link{diff}}.
#' @param differences a \code{numeric}, see documentation for the generic \code{\link{diff}}.
#' @param ... not used.
#'
#' @return A modified \code{sentomeasures} object, with the measures replaced by the differenced measures as well as updated
#' statistics.
#'
#' @examples
#' data("usnews")
#' data("lexicons")
#' data("valence")
#'
#' # construct a sentomeasures object to start with
#' corpus <- sento_corpus(corpusdf = usnews)
#' corpusSample <- quanteda::corpus_sample(corpus, size = 500)
#' l <- setup_lexicons(lexicons[c("LM_eng", "HENRY_eng")], valence[["valence_eng"]])
#' ctr <- ctr_agg(howTime = c("equal_weight", "linear"), by = "year", lag = 3)
#' sentomeasures <- sento_measures(corpusSample, l, ctr)
#'
#' # first-order difference sentiment measures with a lag of two
#' diffed <- diff(sentomeasures, lag = 2, differences = 1)
#'
#' @export
diff.sentomeasures <- function(x, lag = 1, differences = 1, ...) {
  # Differences every sentiment measure and drops the first lag * differences
  # dates, for which no differenced value exists; statistics are recomputed.
  sm <- x
  nDropped <- lag * differences
  keptDates <- sm$measures[, 1][-1:-nDropped]
  values <- sm$measures[, -1] # all measure columns, date dropped
  diffed <- diff(as.matrix(values), lag = lag, differences = differences)
  sm$measures <- data.table(keptDates, diffed)
  sm$stats <- compute_stats(sm)
  sm
}
| /R/sentomeasures.R | no_license | trinker/sentometrics | R | false | false | 58,939 | r |
#' Set up control for aggregation into sentiment measures
#'
#' @author Samuel Borms, Keven Bluteau
#'
#' @description Sets up control object for aggregation of document-level textual sentiment into textual
#' sentiment measures (indices).
#'
#' @details For currently available options on how aggregation can occur (via the \code{howWithin},
#' \code{howDocs} and \code{howTime} arguments), call \code{\link{get_hows}}.
#'
#' @param howWithin a single \code{character} vector defining how aggregation within documents will be performed. Should
#' \code{length(howWithin) > 1}, the first element is used. For currently available options on how aggregation can occur, see
#' \code{\link{get_hows}()$words}.
#' @param howDocs a single \code{character} vector defining how aggregation across documents per date will be performed.
#' Should \code{length(howDocs) > 1}, the first element is used. For currently available options on how aggregation can occur,
#' see \code{\link{get_hows}()$docs}.
#' @param howTime a \code{character} vector defining how aggregation across dates will be performed. More than one choice
#' is possible. For currently available options on how aggregation can occur, see \code{\link{get_hows}()$time}.
#' @param do.ignoreZeros a \code{logical} indicating whether zero sentiment values have to be ignored in the determination of
#' the document weights while aggregating across documents. By default \code{do.ignoreZeros = TRUE}, such that documents with
#' an exact score of zero are considered irrelevant.
#' @param by a single \code{character} vector, either \code{"day", "week", "month"} or \code{"year"}, to indicate at what
#' level the dates should be aggregated. Dates are displayed as the first day of the period, if applicable (e.g.,
#' \code{"2017-03-01"} for March 2017).
#' @param lag a single \code{integer} vector, being the time lag to be specified for aggregation across time. By default
#' equal to \code{1L}, meaning no aggregation across time.
#' @param fill a single \code{character} vector, one of \code{c("zero", "latest", "none")}, to control how missing
#' sentiment values across the continuum of dates considered are added. This impacts the aggregation across time,
#' applying the \code{\link{fill_measures}} function before aggregating, except if \code{fill = "none"}. By default equal to
#' \code{"zero"}, which sets the scores (and thus also the weights) of the added dates to zero in the time aggregation.
#' @param alphasExp a \code{numeric} vector of all exponential smoothing factors to calculate weights for, used if
#' \code{"exponential" \%in\% howTime}. Values should be between 0 and 1 (both excluded).
#' @param ordersAlm a \code{numeric} vector of all Almon polynomial orders to calculate weights for, used if
#' \code{"almon" \%in\% howTime}.
#' @param do.inverseAlm a \code{logical} indicating if for every Almon polynomial its inverse has to be added, used
#' if \code{"almon" \%in\% howTime}.
#' @param do.normalizeAlm a \code{logical} indicating if every Almon polynomial weights column should sum to one, used if
#' \code{"almon" \%in\% howTime}.
#' @param weights an optional own weighting scheme, always used if provided as a \code{data.frame} with the number of rows
#' equal to the desired \code{lag}. The automatic Almon polynomials are created sequentially; if the user wants only specific
#' of such time weighting series it can use \code{\link{almons}}, select the columns it requires, combine it into a
#' \code{data.frame} and supply it under this argument (see examples).
#' @param dfm optional; see \code{\link{compute_sentiment}}.
#'
#' @return A \code{list} encapsulating the control parameters.
#'
#' @seealso \code{\link{fill_measures}}, \code{\link{almons}}, \code{\link{compute_sentiment}}
#'
#' @examples
#' # simple control function
#' ctr1 <- ctr_agg(howTime = "linear", by = "year", lag = 3)
#'
#' # more elaborate control function (particular attention to time weighting schemes)
#' ctr2 <- ctr_agg(howWithin = "tf-idf",
#' howDocs = "proportional",
#' howTime = c("equal_weight", "linear", "almon", "exponential", "own"),
#' do.ignoreZeros = TRUE,
#' by = "day",
#' lag = 20,
#' ordersAlm = 1:3,
#' do.inverseAlm = TRUE,
#' do.normalizeAlm = TRUE,
#' alphasExp = c(0.20, 0.50, 0.70, 0.95),
#' weights = data.frame(myWeights = runif(20)))
#'
#' # set up control function with one linear and two chosen Almon weighting schemes
#' a <- almons(n = 70, orders = 1:3, do.inverse = TRUE, do.normalize = TRUE)
#' ctr3 <- ctr_agg(howTime = c("linear", "own"), by = "year", lag = 70,
#' weights = data.frame(a1 = a[, 1], a2 = a[, 3]))
#'
#' @export
ctr_agg <- function(howWithin = "proportional", howDocs = "equal_weight", howTime = "equal_weight",
                    do.ignoreZeros = TRUE, by = "day", lag = 1L, fill = "zero", alphasExp = seq(0.1, 0.5, by = 0.1),
                    ordersAlm = 1:3, do.inverseAlm = TRUE, do.normalizeAlm = TRUE, weights = NULL, dfm = NULL) {
  # Validates all aggregation settings and bundles them into a control list for
  # sento_measures()/perform_agg(). All invalid inputs are first reported as
  # warnings; a single stop() is raised at the end if any check failed.
  if (length(howWithin) > 1) howWithin <- howWithin[1]
  if (length(howDocs) > 1) howDocs <- howDocs[1]
  # check if provided aggregation specifications are supported
  hows <- get_hows() # get all supported options for each aggregation level
  warned <- 0
  if (!(howWithin %in% hows[["words"]])) {
    warning(paste0(howWithin, " is no current option for aggregation across words."))
    warned <- warned + 1
  }
  if (!(howDocs %in% hows[["docs"]])) {
    warning(paste0(howDocs, " is no current option for aggregation across docs."))
    warned <- warned + 1
  }
  if (!all(howTime %in% hows[["time"]])) {
    warning(paste0(howTime[!(howTime %in% hows[["time"]])], " is no current option for aggregation across time. "))
    warned <- warned + 1
  }
  # the 'own' time weighting scheme requires a user-supplied weights data.frame
  if ("own" %in% howTime && is.null(weights)) {
    warning(paste0("Provide a 'weights' data.frame if 'own' provided as an option in 'howTime'."))
    warned <- warned + 1
  }
  if (!("own" %in% howTime) && is.data.frame(weights)) {
    howTime <- c(howTime, "own")
    warning(paste0("The option 'own' is added to 'howTime' since a valid (not NULL) 'weights' data.frame was supplied."))
  }
  if ("own" %in% howTime) {
    # the time lag must match the length of the supplied weighting scheme
    if (lag != nrow(weights)) {
      lag <- nrow(weights)
      warning("Argument 'lag' is set equal to the number of rows in the 'weights' data.frame.")
    }
  }
  # exponential smoothing factors must lie strictly between 0 and 1; a violation
  # on EITHER side is invalid (bug fix: the previous '&' required both bounds to
  # be violated at once, so out-of-range alphas were never flagged)
  if (max(alphasExp) >= 1 || min(alphasExp) <= 0) {
    warning("Values in 'alphasExp' should be between 0 and 1 (both excluded).")
    warned <- warned + 1
  }
  if (lag <= 0) {
    warning("Argument 'lag' should be greater than zero.")
    warned <- warned + 1
  }
  if (!(by %in% c("year", "month", "week", "day"))) {
    warning(paste0(by, " is no current 'by' option."))
    warned <- warned + 1
  }
  if (!(fill %in% c("zero", "latest", "none"))) {
    warning(paste0(fill, " is no current 'fill' option."))
    warned <- warned + 1
  }
  # short-circuits so is.dfm() is never evaluated on a NULL dfm
  if (!is.null(dfm) && !quanteda::is.dfm(dfm)) {
    warning("The 'dfm' argument should pass quanteda::is.dfm(dfm) when it is not equal to NULL.")
    warned <- warned + 1
  }
  if (warned > 0) stop("Wrong inputs. See warning messages for specifics.")
  other <- list(alphasExp = alphasExp, ordersAlm = ordersAlm, do.inverseAlm = do.inverseAlm,
                do.normalizeAlm = do.normalizeAlm, weights = weights)
  ctr <- list(howWithin = howWithin,
              howDocs = howDocs,
              howTime = howTime,
              do.ignoreZeros = do.ignoreZeros,
              by = by,
              lag = lag,
              fill = fill,
              other = other)
  return(ctr)
}
#' One-way road towards a sentomeasures object
#'
#' @author Samuel Borms, Keven Bluteau
#'
#' @description Wrapper function which assembles calls to \code{\link{compute_sentiment}} and \code{\link{perform_agg}}, and
#' includes the input \code{sentocorpus} and computed sentiment scores in its output. Serves as the most direct way towards a
#' panel of textual sentiment measures as a \code{sentomeasures} object.
#'
#' @param sentocorpus a \code{sentocorpus} object created with \code{\link{sento_corpus}}.
#' @param lexicons output from a \code{\link{setup_lexicons}} call.
#' @param ctr output from a \code{\link{ctr_agg}} call.
#'
#' @return A \code{sentomeasures} object, which is a \code{list} containing:
#' \item{measures}{a \code{data.table} with a \code{"date"} column and all textual sentiment measures as remaining columns.}
#' \item{features}{a \code{character} vector of the different features.}
#' \item{lexicons}{a \code{character} vector of the different lexicons used.}
#' \item{time}{a \code{character} vector of the different time weighting schemes used.}
#' \item{by}{a single \code{character} vector specifying the time interval of aggregation used.}
#' \item{stats}{a \code{data.frame} with a series of elementary statistics (mean, standard deviation, maximum, minimum, and
#' average correlation with all other measures) for each individual sentiment measure.}
#' \item{sentiment}{the sentiment scores \code{data.table} with \code{"date"}, \code{"word_count"} and lexicon--feature sentiment
#' scores columns.
#' If \code{ctr$do.ignoreZeros = TRUE}, all zeros are replaced by \code{NA}.}
#' \item{howWithin}{a single \code{character} vector to remind how sentiment within documents was aggregated.}
#' \item{howDocs}{a single \code{character} vector to remind how sentiment across documents was aggregated.}
#' \item{fill}{a single \code{character} vector that specifies if and how missing dates have been added before
#' aggregation across time was carried out.}
#' \item{do.ignoreZeros}{a single \code{character} vector to remind if documents with zero sentiment have been ignored in the
#' within-document aggregation.}
#' \item{attribWeights}{a \code{list} of document and time weights used in the \code{\link{retrieve_attributions}} function.
#' Serves further no direct purpose.}
#'
#' @seealso \code{\link{compute_sentiment}}, \code{\link{perform_agg}}
#'
#' @examples
#' data("usnews")
#' data("lexicons")
#' data("valence")
#'
#' # construct a sentomeasures object to start with
#' corpus <- sento_corpus(corpusdf = usnews)
#' corpusSample <- quanteda::corpus_sample(corpus, size = 750)
#' l <- setup_lexicons(lexicons[c("LM_eng", "HENRY_eng")], valence[["valence_eng"]])
#' ctr <- ctr_agg(howWithin = "tf-idf",
#' howDocs = "proportional",
#' howTime = c("equal_weight", "linear", "almon"),
#' by = "month",
#' lag = 3,
#' ordersAlm = 1:3,
#' do.inverseAlm = TRUE,
#' do.normalizeAlm = TRUE)
#' sentomeasures <- sento_measures(corpusSample, l, ctr)
#' summary(sentomeasures)
#'
#' @import data.table
#' @export
sento_measures <- function(sentocorpus, lexicons, ctr) {
  # Convenience wrapper: computes document-level sentiment for the corpus and
  # immediately aggregates it into a sentomeasures object.
  check_class(sentocorpus, "sentocorpus")
  sentimentScores <- compute_sentiment(sentocorpus, lexicons, how = ctr$howWithin)
  perform_agg(sentimentScores, ctr)
}
#' @export
summary.sentomeasures <- function(object, ...) {
  # Prints an overview of a sentomeasures object: dimensions and frequency of
  # the measures, corpus features, lexicons, aggregation schemes, and the
  # average of the elementary statistics across all measures.
  sm <- object
  frequencies <- c("daily", "weekly", "monthly", "yearly")
  freq <- frequencies[c("day", "week", "month", "year") %in% sm$by]
  nSeries <- dim(sm$measures)[2] - 1 # first column holds the dates
  nObs <- dim(sm$measures)[1]
  cat("This sentomeasures object contains", nSeries, "textual sentiment time series",
      "with", nObs, "observations each,", "at a", freq, "frequency.", "\n")
  cat("The corpus has following features:", sm$features, "\n")
  cat("\n")
  cat("Following lexicons were used to calculate sentiment:", sm$lexicons, "\n")
  cat("Following scheme was applied for aggregation within documents:", sm$howWithin, "\n")
  cat("Following scheme was applied for aggregation across documents:", sm$howDocs, "\n")
  cat("Following schemes were applied for aggregation across time:", sm$time, "\n")
  cat("\n")
  cat("Aggregate statistics:", "\n")
  print(round(rowMeans(sm$stats), 5))
}
#' @export
print.sentomeasures <- function(x, ...) {
  # One-line description of the object: number of measures and observations.
  sm <- x
  nSeries <- dim(sm$measures)[2] - 1 # first column holds the dates
  nObs <- dim(sm$measures)[1]
  cat("A sentomeasures object that carries with it", nSeries,
      "distinct textual sentiment time series of", nObs, "observations each.")
}
#' Set up lexicons (and valence word list) for use in sentiment analysis
#'
#' @author Samuel Borms
#'
#' @description Structures provided lexicons and potentially integrates valence words. One can also provide (part of) the
#' built-in lexicons from \code{data("lexicons")} or a valence word list from \code{data("valence")} as an argument.
#' Makes use of the \code{\link[sentimentr]{as_key}} function from the \pkg{sentimentr} package to make the output coherent
#' and check for duplicates.
#'
#' @param lexiconsIn a named \code{list} of (raw) lexicons, each element being a \code{data.frame} or a \code{data.table} with
#' respectively a words column and a polarity score column. Alternatively, a subset of the already formatted built-in lexicons
#' accessible via \code{lexicons} can be declared too, as part of the same list input. If only (some of) the package built-in
#' lexicons want to be used (with \emph{no} valence shifters), one can simply supply \code{lexicons[c(...)]} as an argument to
#' either \code{\link{sento_measures}} or \code{\link{compute_sentiment}}. However, it is strongly recommended to pass all
#' lexicons (and a valence word list) to this function first, in any case.
#' @param valenceIn a single valence word list as a \code{data.frame} or a \code{data.table} with respectively a words column,
#' a type column (\code{1} for negators, \code{2} for amplifiers/intensifiers, and \code{3} for deamplifiers/downtoners) and a
#' score column. Suggested scores are -1, 2, and 0.5 respectively, and should be the same within each type. This argument can
#' also be one of the already formatted built-in valence word lists accessible via \code{valence}. If \code{NULL}, no valence
#' word list is part of this function's output, nor will it applied in the sentiment analysis.
#' @param do.split a \code{logical} that if \code{TRUE} splits every lexicon into a separate positive polarity and negative
#' polarity lexicon.
#'
#' @return A \code{list} with each lexicon as a separate element according to its name, as a \code{data.table}, and optionally
#' an element named \code{valence} that comprises the valence words. Every \code{x} column contains the words, every \code{y}
#' column contains the polarity score, and for the valence word list, \code{t} contains the word type. If a valence word list
#' is provided, all lexicons are expanded by copying the respective lexicon, and changing the words and scores according to
#' the valence word type: "NOT_" is added for negators, "VERY_" is added for amplifiers and "HARDLY_" is added for
#' deamplifiers. Lexicon scores are multiplied by -1, 2 and 0.5 by default, respectively, or the first value of the scores
#' column of the valence word list.
#'
#' @seealso \code{\link[sentimentr]{as_key}}
#'
#' @examples
#' data("lexicons")
#' data("valence")
#'
#' # sets up output list straight from built-in word lists including valence words
#' l1 <- c(lexicons[c("LM_eng", "HENRY_eng")], valence[["valence_eng"]])
#'
#' # including a self-made lexicon, with and without valence shifters
#' lexIn <- c(list(myLexicon = data.table(w = c("nice", "boring"), s = c(2, -1))),
#' lexicons[c("GI_eng")])
#' valIn <- valence[["valence_eng"]]
#' l2 <- setup_lexicons(lexIn)
#' l3 <- setup_lexicons(lexIn, valIn)
#' l4 <- setup_lexicons(lexIn, valIn, do.split = TRUE)
#'
#' \dontrun{
#' # include lexicons from lexicon package
#' library("lexicon")
#' lexIn2 <- list(hul = lexicon::hash_sentiment_huliu, joc = lexicon::hash_sentiment_jockers)
#' l5 <- setup_lexicons(c(lexIn, lexIn2), valIn)}
#'
#' @export
setup_lexicons <- function(lexiconsIn, valenceIn = NULL, do.split = FALSE) {
  # Standardizes a named list of raw lexicons into sentimentr key format
  # (columns x = word, y = score), optionally expands them with valence-shifted
  # entries, and/or splits each lexicon into positive- and negative-only parts.
  if (!is.list(lexiconsIn))
    stop("The 'lexiconsIn' input should be a list.")
  if (!is.data.frame(valenceIn) & !is.null(valenceIn))
    stop("The 'valenceIn' argument should be a data.table or data.frame if not NULL.")
  if (any(is.na(names(lexiconsIn))))
    stop("At least one lexicon's name is NA. Please provide proper list names.")
  # check for duplicated lexicon names
  if (sum(duplicated(names(lexiconsIn))) > 0) {
    duplics <- unique(names(lexiconsIn[duplicated(names(lexiconsIn))]))
    stop(paste0("Names of lexicons are not unique. Following names occur at least twice: ",
                paste0(duplics, collapse = ", ")))
  }
  lexNames <- names(lexiconsIn)
  # convert to sentimentr format while supressing warnings on removal of duplicated values
  lexicons <- suppressWarnings(lapply(lexiconsIn, sentimentr::as_key, comparison = NULL))
  lexicons <- lapply(lexicons, function(x) {names(x) <- c("x", "y"); return(x)})
  names(lexicons) <- lexNames # as_key may have dropped the list names; restore them
  if (!is.null(valenceIn)) {
    # valence word list columns: x = word, t = type (1/2/3), y = score
    names(valenceIn) <- c("x", "t", "y")
    valTypes <- unique(valenceIn$t)
    # one representative score per type (negator, amplifier, deamplifier);
    # assumes scores are constant within each type (cf. roxygen docs)
    scores <- c(valenceIn[valenceIn$t == 1, ]$y[1], valenceIn[valenceIn$t == 2, ]$y[1], valenceIn[valenceIn$t == 3, ]$y[1])
    lexicons <- expand_lexicons(lexicons, types = valTypes, scores = scores)
  }
  # split each lexicon into a positive and a negative polarity words only lexicon
  if (do.split == TRUE) {
    lexiconsPos <- lapply(lexicons, function(lex) return(lex[lex$y > 0]))
    names(lexiconsPos) <- paste0(names(lexicons), "_POS")
    lexiconsNeg <- lapply(lexicons, function(lex) return(lex[lex$y < 0]))
    names(lexiconsNeg) <- paste0(names(lexicons), "_NEG")
    lexicons <- c(lexiconsPos, lexiconsNeg)
  }
  if (!is.null(valenceIn)) {
    # append the (deduplicated) valence word list so downstream sentiment
    # computation can modify the corpus texts accordingly
    lexicons[["valence"]] <- valenceIn[!duplicated(valenceIn$x), ]
  }
  return(lexicons)
}
.compute_sentiment <- function(sentocorpus, lexicons, how = get_hows()$words, dfm = NULL) {
  # Workhorse behind compute_sentiment(): scores each document against every
  # lexicon via a document-feature matrix, then aggregates the document scores
  # per corpus feature. Net sentiment = sum of positive minus sum of negative.
  check_class(sentocorpus, "sentocorpus")
  if (length(how) > 1) how <- how[1]
  if ("valence" %in% names(lexicons)) {
    # rewrite the corpus texts so valence-shifted tokens (e.g. "NOT_good")
    # match the expanded lexicon entries created by setup_lexicons()
    cat("Modify corpus to account for valence words... ")
    quanteda::texts(sentocorpus) <- include_valence(quanteda::texts(sentocorpus), lexicons[["valence"]])
    cat("Done.", "\n")
  }
  lexNames <- names(lexicons)[names(lexicons) != "valence"]
  features <- names(quanteda::docvars(sentocorpus))[-1] # drop date column
  cat("Compute sentiment... ")
  # frequency-based document-feature matrix (rows are corpus ids, columns are words)
  if (is.null(dfm)) {
    dfm <- quanteda::dfm(quanteda::tokens(sentocorpus, remove_punct = TRUE, remove_numbers = TRUE,
                                          remove_symbols = TRUE, remove_separators = TRUE), verbose = FALSE)
  } else if (!quanteda::is.dfm(dfm))
    stop("The 'dfm' argument should pass quanteda::is.dfm(dfm).")
  if (how == "counts" || how == "proportional") {
    fdm <- quanteda::t(dfm) # feature-document matrix
  } else if (how == "tf-idf") {
    # tf-idf with proportional term frequency, transposed like fdm above
    weights <- quanteda::tfidf(dfm, scheme_tf = "prop")
    fdmWeighted <- quanteda::t(weights)
  } else stop("Please select an appropriate aggregation 'how'.")
  # one sentiment column per lexicon, initialized at zero
  s <- as.data.table(matrix(0, nrow = quanteda::ndoc(sentocorpus), ncol = length(lexNames)))
  names(s) <- lexNames
  allWords <- quanteda::featnames(dfm)
  wCounts <- quanteda::rowSums(dfm, na.rm = TRUE) # word count per document
  for (lexicon in lexNames) { # locate polarized words and set weights to their polarity or keep at zero
    lexWords <- lexicons[[lexicon]]$x
    lexScores <- lexicons[[lexicon]]$y
    names(lexScores) <- lexWords
    # map every corpus word to its polarity score (0 if not in the lexicon)
    allScores <- rep(0, length(allWords))
    polInd <- allWords %in% lexWords
    allScores[polInd] <- lexScores[allWords[polInd]]
    names(allScores) <- allWords
    # document score = sum over words of (frequency or weight) * polarity
    if (how == "counts") {
      scores <- quanteda::rowSums(quanteda::t(fdm * allScores))
    } else if (how == "proportional") {
      scores <- quanteda::rowSums(quanteda::t(fdm * allScores)) / wCounts
    } else scores <- quanteda::rowSums(quanteda::t(fdmWeighted * allScores))
    scores[is.na(scores)] <- 0 # set NA/NaN sentiment to 0 (e.g., if document contains no words)
    s[, (lexicon) := scores]
  }
  # structure: date - feature1 - ... - word_count - lexicon1 (sentiment) - ...
  s <- as.data.table(cbind(id = quanteda::docnames(sentocorpus), quanteda::docvars(sentocorpus), word_count = wCounts, s))
  # compute feature-sentiment per document for all lexicons and order by date
  sent <- get_features_sentiment(s, features, lexNames)
  sent <- sent[order(date)]
  cat("Done.", "\n")
  sentOut <- list(corpus = sentocorpus, # not the same as input corpus if accounted for valence shifters
                  sentiment = sent,
                  features = features,
                  lexicons = lexNames,
                  howWithin = how)
  return(sentOut)
}
#' Compute document-level sentiment across features and lexicons
#'
#' @author Samuel Borms
#'
#' @description Given a corpus of texts, computes sentiment per document using the bag-of-words approach
#' based on the lexicons provided and a choice of aggregation across words per document. Relies partly on the
#' \pkg{quanteda} package. The scores computed are net sentiment (sum of positive minus sum of negative scores).
#'
#' @details
#' For a separate calculation of positive (resp. negative) sentiment, one has to provide distinct positive (resp. negative)
#' lexicons. This can be done using the \code{do.split} option in the \code{\link{setup_lexicons}} function, which splits out
#' the lexicons into a positive and a negative polarity counterpart. \code{NA}s are converted to 0, under the assumption that
#' this is equivalent to no sentiment.
#'
#' @param sentocorpus a \code{sentocorpus} object created with \code{\link{sento_corpus}}.
#' @param lexicons output from a \code{\link{setup_lexicons}} call.
#' @param how a single \code{character} vector defining how aggregation within documents should be performed. For currently
#' available options on how aggregation can occur, see \code{\link{get_hows}()$words}.
#' @param dfm optional; an output from a \pkg{quanteda} \code{\link[quanteda]{dfm}} call, such that users can specify their
#' own tokenization scheme (via \code{\link[quanteda]{tokens}}) as well as other parameters related to the construction of
#' a document-feature matrix (dfm). By default, a dfm is created based on a tokenization that removes punctuation, numbers,
#' symbols and separators. We suggest to stick to unigrams, as the remainder of the sentiment computation and built-in
#' lexicons assume the same.
#'
#' @return A \code{list} containing:
#' \item{corpus}{the supplied \code{sentocorpus} object; the texts are altered if valence shifters are part of the lexicons.}
#' \item{sentiment}{the sentiment scores \code{data.table} with a \code{"date"} and a \code{"word_count"} column and all
#' lexicon--feature sentiment scores columns.}
#' \item{features}{a \code{character} vector of the different features.}
#' \item{lexicons}{a \code{character} vector of the different lexicons used.}
#' \item{howWithin}{a \code{character} vector to remind how sentiment within documents was aggregated.}
#'
#' @seealso \code{\link[quanteda]{dfm}}, \code{\link[quanteda]{tokens}}
#'
#' @examples
#' data("usnews")
#' data("lexicons")
#' data("valence")
#'
#' # sentiment computation based on raw frequency counts
#' corpus <- sento_corpus(corpusdf = usnews)
#' corpusSample <- quanteda::corpus_sample(corpus, size = 1000)
#' l <- setup_lexicons(lexicons[c("LM_eng", "HENRY_eng")], valence[["valence_eng"]])
#' sent <- compute_sentiment(corpusSample, l, how = "counts")
#'
#' \dontrun{
#' # same sentiment computation based on a user-supplied dfm with default settings
#' dfm <- quanteda::dfm(quanteda::tokens(corpus), verbose = FALSE)
#' sent <- compute_sentiment(corpusSample, l, how = "counts", dfm = dfm)}
#'
#' @export
# Exported entry point: byte-compiled version of .compute_sentiment for speed.
compute_sentiment <- compiler::cmpfun(.compute_sentiment)
# Expands per-document sentiment into lexicon--feature sentiment columns.
#
# sent: data.table with one column per feature (0/1 indicators times weights)
#       and one column per lexicon (document sentiment scores).
# features: character vector of feature column names.
# lexNames: character vector of lexicon column names.
# Returns 'sent' with "lexicon--feature" columns replacing the originals.
.get_features_sentiment <- function(sent, features, lexNames) {
  for (lexicon in lexNames) { # multiply lexicons with features to obtain feature-sentiment scores per lexicon
    nms <- paste0(lexicon, "--", features)
    sent[, nms] <- sent[[lexicon]] * sent[, features, with = FALSE]
  }
  sent[, eval(c(lexNames, features)) := NULL][] # remove since replaced by lexicon--feature columns
  return(sent)
}
# Byte-compiled version of .get_features_sentiment used internally.
get_features_sentiment <- compiler::cmpfun(.get_features_sentiment)
#' Aggregate textual sentiment across documents and time
#'
#' @author Samuel Borms, Keven Bluteau
#'
#' @description Condense document-level textual sentiment scores into a panel of textual sentiment
#' measures by aggregating across documents and time. This function is called within \code{\link{sento_measures}},
#' applied on the output of \code{\link{compute_sentiment}}.
#'
#' @param toAgg output from a \code{\link{compute_sentiment}} call.
#' @param ctr output from a \code{\link{ctr_agg}} call. The \code{"howWithin"} argument plays no more role.
#'
#' @return A \code{sentomeasures} object.
#'
#' @seealso \code{\link{compute_sentiment}}, \code{\link{ctr_agg}}, \code{\link{sento_measures}}
#'
#' @examples
#' data("usnews")
#' data("lexicons")
#' data("valence")
#'
#' # computation of sentiment and aggregation into sentiment measures
#' corpus <- sento_corpus(corpusdf = usnews)
#' corpusSample <- quanteda::corpus_sample(corpus, size = 1000)
#' l <- setup_lexicons(lexicons[c("LM_eng", "HENRY_eng")], valence[["valence_eng"]])
#' sent <- compute_sentiment(corpusSample, l, how = "counts")
#' ctr <- ctr_agg(howTime = c("linear"), by = "year", lag = 3)
#' sentomeasures <- perform_agg(sent, ctr)
#'
#' @export
perform_agg <- function(toAgg, ctr) {
  # Runs the two aggregation steps in order: first across documents per date,
  # then across time via a rolling weighting window, as configured in 'ctr'.
  #
  # toAgg: output from a compute_sentiment() call.
  # ctr: output from a ctr_agg() call; its "howWithin" element plays no role here.
  # Returns a 'sentomeasures' object.
  aggDocs <- agg_documents(toAgg,
                           by = ctr$by,
                           how = ctr$howDocs,
                           do.ignoreZeros = ctr$do.ignoreZeros)
  # ctr$other is a list of extra weighting parameters (possibly empty),
  # forwarded positionally into agg_time()'s '...' argument
  agg_time(aggDocs, lag = ctr$lag, fill = ctr$fill, how = ctr$howTime, ctr$other)
}
# Aggregates document-level feature-sentiment scores into one value per date
# for every lexicon--feature combination.
#
# toAgg: output from compute_sentiment(); by: date granularity ("year",
# "month", "week"; anything else keeps daily dates); how: across-document
# weighting scheme (see get_hows()$docs); do.ignoreZeros: if TRUE, documents
# with zero sentiment are excluded from the aggregation.
# Returns a 'sentomeasures' object (time aggregation still to be applied).
agg_documents <- function(toAgg, by, how = get_hows()$docs, do.ignoreZeros = TRUE) {
  features <- toAgg$features
  lexNames <- toAgg$lexicons
  sent <- toAgg$sentiment
  attribWeights <- list(W = NA, B = NA) # list with weights useful in later attribution analysis
  # reformat dates so they can be aggregated at the specified 'by' level, and cast to Date format
  if (by == "year") {
    years <- sapply(stringi::stri_split(sent$date, regex = "-"), "[", 1)
    dates <- as.Date(paste0(years, "-01-01"), format = "%Y-%m-%d")
  } else if (by == "month") {
    months <- unlist(lapply(stringi::stri_split(sent$date, regex = "-"), function(d) return(paste0(d[1:2], collapse = "-"))))
    dates <- as.Date(paste0(months, "-01"), format = "%Y-%m-%d")
  } else if (by == "week") {
    weeks <- ISOweek::ISOweek(sent$date)
    dates <- ISOweek::ISOweek2date(paste(weeks, 1, sep = "-")) # get first day of week based on ISO standard
  } else {
    dates <- as.Date(sent$date, format = "%Y-%m-%d")
  }
  sent$date <- dates
  # ignore documents with zero sentiment in aggregation (if do.ignoreZeros is TRUE);
  # zeros become NA so they drop out of the na.rm sums below
  if (do.ignoreZeros == TRUE)
    sent[, names(sent)] <- sent[, names(sent), with = FALSE][, lapply(.SD, function(x) replace(x, which(x == 0), NA))]
  # aggregate feature-sentiment per document by date for all lexicon columns
  s <- sent[, -1] # drop the id column (ids are re-attached via sent$id below)
  if (how == "equal_weight") {
    if (do.ignoreZeros == TRUE) {
      docsIn <- s[, lapply(.SD, function(x) (x * 1) / x), by = date] # indicator of 1 if document score not equal to NA
      weights <- docsIn[, lapply(.SD, function(x) x / sum(x, na.rm = TRUE)), by = date][, -1:-2]
    } else {
      weights <- s[, w := 1 / .N, by = date][["w"]]
      s[, w := NULL] # ':=' modified s in place; drop the helper column again
    }
  } else if (how == "proportional") { # proportional w.r.t. words in document vs. total words in all documents per date
    if (do.ignoreZeros == TRUE) {
      docsIn <- s[, lapply(.SD, function(x) (x * word_count) / x), by = date] # word count where score is not NA, else NA
      weights <- docsIn[, lapply(.SD, function(x) x / sum(x, na.rm = TRUE)), by = date][, -1:-2]
    } else {
      weights <- s[, list(w = word_count / sum(word_count, na.rm = TRUE)), by = date][["w"]]
    }
  }
  # NOTE(review): 'weights' is undefined if 'how' matches neither branch above;
  # presumably get_hows()$docs only offers these two options -- confirm upstream.
  attribWeights[["W"]] <- data.table(id = sent$id, date = sent$date, weights)
  # drop the first two columns (assumed date and word_count -- TODO confirm
  # column order) before applying the weights, then sum per date
  sw <- data.table(date = s$date, s[, -1:-2] * weights)
  measures <- sw[, lapply(.SD, function(x) sum(x, na.rm = TRUE)), by = date]
  sentomeasures <- list(measures = measures,
                        features = features,
                        lexicons = lexNames,
                        time = NA, # set later by agg_time()
                        by = by,
                        stats = NA, # zeros replaced by NAs if do.ignoreZeros = TRUE
                        sentiment = sent,
                        howWithin = toAgg$howWithin,
                        howDocs = how,
                        fill = NA, # set later by agg_time()
                        do.ignoreZeros = do.ignoreZeros,
                        attribWeights = attribWeights)
  class(sentomeasures) <- c("sentomeasures")
  return(sentomeasures)
}
# Applies the time-weighting schemes to the by-date sentiment measures through
# a rolling window of length 'lag', creating one new measure per existing
# measure and weighting scheme (named "measure--scheme").
#
# sentomeasures: output of agg_documents(); lag: window length in periods;
# fill: "none", or a method passed to fill_measures() to first make the date
# series continuous; how: time weighting scheme name(s); ...: extra weighting
# parameters forwarded to setup_time_weights().
agg_time <- function(sentomeasures, lag, fill, how = get_hows()$time, ...) {
  check_class(sentomeasures, "sentomeasures")
  dots <- tryCatch(list(...)[[1]], # extract list from list of list
                   error = function(x) list(...)) # if ... is empty
  # construct all weights and check for duplicated names
  weights <- setup_time_weights(lag, how, dots)
  if (sum(duplicated(colnames(weights))) > 0) {
    duplics <- unique(colnames(weights)[duplicated(colnames(weights))])
    stop(paste0("Names of weighting schemes are not unique. Following names occur at least twice: ",
                paste0(duplics, collapse = ", ")))
  }
  sentomeasures$attribWeights[["B"]] <- copy(weights) # copy() so stored weights do not alias
  # apply rolling time window, if not too large, for every weights column and combine all new measures column-wise
  if (!(fill %in% "none")) sentomeasures <- fill_measures(sentomeasures, fill = fill)
  measures <- sentomeasures$measures
  toRoll <- measures[, -1] # all columns except the date column
  n <- nrow(weights) # rolling window length
  m <- nrow(measures)
  if (n > m)
    stop("Rolling time aggregation window (= ", n, ") is too large for number of observations per measure (= ", m, ")")
  for (i in 1:ncol(weights)) {
    name <- colnames(weights)[i]
    add <- RcppRoll::roll_sum(as.matrix(toRoll), n = n, weights = as.vector(weights[, i]),
                              normalize = FALSE, align = "right", na.rm = TRUE)
    colnames(add) <- paste0(colnames(toRoll), "--", name) # append weighting scheme name
    if (i == 1) measuresAggTime <- add
    else measuresAggTime <- cbind(measuresAggTime, add)
  }
  measuresAggTime <- as.data.table(measuresAggTime)
  # the first n - 1 dates are consumed by the right-aligned rolling window
  if (n > 1) date <- measures$date[-1:-(n-1)]
  else date <- measures$date
  measuresAggTime$date <- date
  measuresAggTime <- setcolorder(measuresAggTime, c("date", colnames(measuresAggTime)[-ncol(measuresAggTime)]))
  sentomeasures$measures <- measuresAggTime
  sentomeasures$time <- colnames(weights)
  sentomeasures$stats <- compute_stats(sentomeasures)
  sentomeasures$fill <- fill
  return(sentomeasures)
}
#' Set up control for merging sentiment measures
#'
#' @author Samuel Borms
#'
#' @description Sets up control object for the optional merging (additional aggregation) of sentiment measures as
#' done by \code{\link{merge_measures}}.
#'
#' @param sentomeasures a \code{sentomeasures} object created using \code{\link{sento_measures}}. This is necessary to check
#' whether the other input arguments make sense.
#' @param lexicons a \code{list} with unique lexicons to merge at given name, e.g., \cr
#' \code{list(lex12 = c("lex1", "lex2"))}. See \code{sentomeasures$lexicons} for the exact names to use. Use \code{NA} to
#' apply no merging across this dimension.
#' @param features a \code{list} with unique features to merge at given name, e.g., \cr
#' \code{list(feat12 = c("feat1", "feat2"))}. See \code{sentomeasures$features} for the exact names to use. Use \code{NA} to
#' apply no merging across this dimension.
#' @param time a \code{list} with unique time weighting schemes to merge at given name, e.g., \cr
#' \code{list(tw12 = c("tw1", "tw2"))}. See \code{sentomeasures$time} for the exact names to use. Use \code{NA} to
#' apply no merging across this dimension.
#' @param do.keep a \code{logical} indicating if the original sentiment measures should be kept (i.e., the merged
#' sentiment measures will be added to the current sentiment measures as additional indices if \code{do.keep = TRUE}).
#'
#' @return A \code{list} encapsulating the control parameters.
#'
#' @seealso \code{\link{merge_measures}}
#'
#' @examples
#' data("usnews")
#' data("lexicons")
#' data("valence")
#'
#' # construct a sentomeasures object to start with
#' corpus <- sento_corpus(corpusdf = usnews)
#' corpusSample <- quanteda::corpus_sample(corpus, size = 750)
#' l <- setup_lexicons(lexicons[c("LM_eng", "HENRY_eng")], valence[["valence_eng"]])
#' ctr <- ctr_agg(howTime = c("equal_weight", "linear"), by = "year", lag = 3)
#' sentomeasures <- sento_measures(corpusSample, l, ctr)
#'
#' # set up a correct control function
#' ctrMerge <- ctr_merge(sentomeasures,
#' time = list(W = c("equal_weight", "linear")),
#' lexicons = list(LEX = c("LM_eng", "HENRY_eng")),
#' features = list(journals = c("wsj", "wapo")),
#' do.keep = TRUE)
#'
#' \dontrun{
#' # produces an informative error message
#' ctrMerge <- ctr_merge(sentomeasures,
#' time = list(W = c("equal_weight", "almon1")),
#' lexicons = list(LEX = c("LM_eng", "HENRY_eng")),
#' features = list(journals = c("notInHere", "wapo")))}
#'
#' @export
ctr_merge <- function(sentomeasures, features = NA, lexicons = NA, time = NA, do.keep = FALSE) {
  # Validates the requested merges against the available components of the
  # sentomeasures object and returns the control list for merge_measures().
  check_class(sentomeasures, "sentomeasures")
  # Checks one dimension's merge list: returns the requested columns that do
  # not exist in 'available' (missings) and the names of merges with fewer
  # than two or non-unique columns (tooFew).
  check_dimension <- function(merges, available) {
    missings <- unlist(merges)[!(unlist(merges) %in% available)]
    tooFew <- c()
    for (i in seq_along(merges)) {
      if (length(merges[[i]]) <= 1 || length(unique(merges[[i]])) != length(merges[[i]]))
        tooFew <- c(tooFew, names(merges)[i])
    }
    list(missings = missings, tooFew = tooFew)
  }
  missings <- c()
  tooFew <- c()
  if (all(!is.na(lexicons))) {
    chk <- check_dimension(lexicons, sentomeasures$lexicons)
    missings <- c(missings, chk$missings)
    tooFew <- c(tooFew, chk$tooFew)
  }
  if (all(!is.na(features))) {
    chk <- check_dimension(features, sentomeasures$features)
    missings <- c(missings, chk$missings)
    tooFew <- c(tooFew, chk$tooFew)
  }
  if (all(!is.na(time))) {
    chk <- check_dimension(time, sentomeasures$time)
    missings <- c(missings, chk$missings)
    tooFew <- c(tooFew, chk$tooFew)
  }
  # assemble warning messages if any
  msg1 <- c()
  msg2 <- c()
  if (length(missings) > 0) {
    msg1 <- paste0("Following columns to merge are not found: ",
                   paste0(missings, collapse = ", "), ".")
    warning(msg1)
  }
  if (length(tooFew) > 0) {
    msg2 <- paste0("Following merges have less than two or not all unique columns: ",
                   paste0(tooFew, collapse = ", "), ".")
    warning(msg2)
  }
  # bug fix: original read 'length((msg2) > 0)' -- a misplaced parenthesis that
  # tested the length of a logical vector instead of comparing length to zero
  if (length(msg1) > 0 || length(msg2) > 0) stop("Wrong inputs. See warning messages for specifics.")
  ctr <- list(sentomeasures = sentomeasures,
              lexicons = lexicons,
              features = features,
              time = time,
              do.keep = do.keep)
  return(ctr)
}
#' Merge sentiment measures
#'
#' @author Samuel Borms
#'
#' @description Merge (further aggregate) measures by combining across provided lexicons, features, and time weighting schemes
#' dimensions. The combination occurs by taking the mean of the relevant measures.
#'
#' @param ctr output from a \code{\link{ctr_merge}} call.
#'
#' @return A modified \code{sentomeasures} object, with only the sentiment measures required, including updated information
#' and statistics, but the original sentiment scores \code{data.table} untouched.
#'
#' @seealso \code{\link{ctr_merge}}
#'
#' @examples
#' data("usnews")
#' data("lexicons")
#' data("valence")
#'
#' # construct a sentomeasures object to start with
#' corpus <- sento_corpus(corpusdf = usnews)
#' corpusSample <- quanteda::corpus_sample(corpus, size = 500)
#' l <- setup_lexicons(lexicons[c("LM_eng", "HENRY_eng")], valence[["valence_eng"]])
#' ctr <- ctr_agg(howTime = c("equal_weight", "linear"), by = "year", lag = 3)
#' sentomeasures <- sento_measures(corpusSample, l, ctr)
#'
#' # set up control function and perform the merging
#' ctrMerge <- ctr_merge(sentomeasures,
#' time = list(W = c("equal_weight", "linear")),
#' features = list(journals = c("wsj", "wapo")),
#' do.keep = TRUE)
#' sentomeasuresMerged <- merge_measures(ctrMerge)
#'
#' @export
merge_measures <- function(ctr) {
  # Merges (further aggregates) sentiment measures by averaging across the
  # lexicon, feature and time combinations specified in a ctr_merge() object.
  sentomeasures <- ctr$sentomeasures
  measures <- sentomeasures$measures
  toMerge <- ctr[c("lexicons", "features", "time")]
  do.keep <- ctr$do.keep
  if (do.keep == TRUE) { # remember originals to re-attach at the end
    measuresOld <- measures
    namesOld <- colnames(measures)
  }
  # loop over lex(icon), feat(ure) and time lists
  for (across in toMerge[!is.na(toMerge)]) {
    # loop over set of aggregation levels to merge (combine) into given name (e.g., lex12 = c("lex1", "lex2"))
    for (i in seq_along(across)) {
      name <- names(across)[i] # e.g. "lex12"
      cols <- across[[i]] # e.g. c("lex1", "lex2")
      # find all sentiment columns aggregated at one of the 'cols' aggregation levels and stack them into ls
      ls <- list()
      for (elem in cols) {
        sel <- colnames(measures)[stringi::stri_detect(colnames(measures), regex = paste0("\\b", elem, "\\b"))] # exact match
        ls[[elem]] <- measures[, sel, with = FALSE, drop = FALSE]
        measures <- measures[, !sel, with = FALSE, drop = FALSE]
      }
      # take the element-wise average across the stacked components; the 3-d
      # stacking works for any number of columns per component
      # (bug fix: original tested 'ncol(ls[[1]] >= 2)' -- parenthesis inside
      # ncol() -- which made the condition always true and left a broken
      # rowSums() fallback branch that would have summed instead of averaged)
      all <- abind::abind(ls, along = 3)
      merged <- apply(all, c(1, 2), mean, na.rm = TRUE)
      # insert new name at name location of aggregation level (e.g. "lex1--top1" + "lex2--top1" = "lex12--top1");
      # bug fix: locate the position by exact match against any of the merged
      # components (the original matched only 'elem', the last loop value,
      # which fails when the column names stem from the first component)
      nms <- stringi::stri_split(colnames(merged), regex = "--") # list
      loc <- which(nms[[1]] %in% cols)[1]
      nmsNew <- lapply(nms, function(x) {
        x[loc] <- name
        return(paste0(x, collapse = "--"))
      })
      colnames(merged) <- unlist(nmsNew)
      measures <- cbind(measures, merged) # add back merged columns for further merging if needed
    }
  }
  # add old unmerged measures to merged measures (if do.keep is TRUE)
  if (do.keep == TRUE) measures <- cbind(measures, measuresOld[, !(namesOld %in% colnames(measures)), with = FALSE])
  sentomeasures <- update_info(sentomeasures, measures) # update information in sentomeasures object
  return(sentomeasures)
}
#' Merge sentiment measures into one global sentiment measure
#'
#' @author Samuel Borms
#'
#' @description Merges all sentiment measures into one global textual sentiment measure based on a set of weights to
#' indicate the importance of each component in the \code{lexicons}, \code{features}, and \code{time} vectors as specified
#' in the input \code{sentomeasures} object. Every measure receives a weight in the global measure equal to the multiplication
#' of the supplied weights of the components it is composed of. The global sentiment measure then corresponds to a
#' weighted average of these weights times the sentiment scores, per date.
#'
#' @details This function returns no \code{sentomeasures} object, however the global sentiment measure as outputted can
#' be added to regressions as an additional variable using the \code{x} argument in the \code{\link{sento_model}} function.
#'
#' @param sentomeasures a \code{sentomeasures} object created using \code{\link{sento_measures}}.
#' @param lexicons a \code{numeric} vector of weights, of size \code{length(sentomeasures$lexicons)}, in the same order
#' and summing to one. By default set to 1, which means equally weighted.
#' @param features a \code{numeric} vector of weights, of size \code{length(sentomeasures$features)}, in the same order
#' and summing to one. By default set to 1, which means equally weighted.
#' @param time a \code{numeric} vector of weights, of size \code{length(sentomeasures$time)}, in the same order and summing
#' to one. By default set to 1, which means equally weighted.
#'
#' @return A \code{data.frame} with the values for the global sentiment measure under the \code{global} column and dates as
#' row names.
#'
#' @seealso \code{\link{sento_model}}
#'
#' @examples
#' data("usnews")
#' data("lexicons")
#' data("valence")
#'
#' # construct a sentomeasures object to start with
#' corpus <- sento_corpus(corpusdf = usnews)
#' corpusSample <- quanteda::corpus_sample(corpus, size = 1250)
#' l <- setup_lexicons(lexicons[c("LM_eng", "HENRY_eng")], valence[["valence_eng"]])
#' ctr <- ctr_agg(howTime = c("equal_weight", "linear"), by = "year", lag = 3)
#' sentomeasures <- sento_measures(corpusSample, l, ctr)
#'
#' # merge into one global sentiment measure, with specified weighting for lexicons and features
#' global <- to_global(sentomeasures, lexicons = c(0.40, 0.60),
#' features = c(0.10, 0.20, 0.30, 0.40),
#' time = 1)
#'
#' @export
to_global <- function(sentomeasures, lexicons = 1, features = 1, time = 1) {
  # Collapses all sentiment measures into one global measure per date, each
  # measure weighted by the product of its lexicon, feature and time weights.
  # A weight argument of 1 (the default) means equal weighting in that
  # dimension; otherwise it must match the dimension size and sum to one.
  check_class(sentomeasures, "sentomeasures")
  dims <- list(sentomeasures$lexicons, sentomeasures$features, sentomeasures$time)
  n <- sapply(dims, length)
  weightsInp <- list(lexicons, features, time)
  # bug fix: lapply() instead of sapply(); sapply() could simplify the result
  # to a matrix when all three dimensions have equal length, which would break
  # the weights[[i]] indexing below
  weights <- lapply(1:3, function(i) {
    if (length(weightsInp[[i]]) == 1) w <- as.list(rep(1/n[i], n[i])) # modify weights if equal to default value of 1
    else {
      w <- as.list(weightsInp[[i]])
      # bug fix: compare the weight sum to one with a numeric tolerance rather
      # than exact floating-point equality ('sum(...) != 1')
      if (length(w) != n[i] || !isTRUE(all.equal(sum(unlist(w)), 1)))
        stop("All weights must be equal in length to the respective number of components and sum to one.")
    }
    names(w) <- dims[[i]] # named weight lists
    return(w)
  })
  measures <- sentomeasures$measures
  measuresLong <- to_long(measures) # long format
  # extract different weights based on how measuresLong is ordered and add a global weights (w) column
  wLex <- unlist(weights[[1]][measuresLong[["lexicons"]]])
  wFeat <- unlist(weights[[2]][measuresLong[["features"]]])
  wTime <- unlist(weights[[3]][measuresLong[["time"]]])
  # add a global weights column as the multiplication of the individual weights across the three dimensions per row
  measuresLong[, "w" := wLex * wFeat * wTime]
  global <- as.data.frame(measuresLong[, list(global = sum(value * w)), by = date])
  row.names(global) <- global$date
  global$date <- NULL
  return(global)
}
#' Select a subset of sentiment measures
#'
#' @author Samuel Borms
#'
#' @description Selects the subset of sentiment measures which include either all of the given selection components combined,
#' or those whose name consists of at least one of the selection components. One can also extract measures within a subset
#' of dates.
#'
#' @param sentomeasures a \code{sentomeasures} object created using \code{\link{sento_measures}}.
#' @param toSelect a \code{character} vector of the lexicon, feature and time weighting scheme names, to indicate which
#' measures need to be selected. By default equal to \code{"all"}, which means no selection of the sentiment measures is made;
#' this may be used if one only wants to extract a subset of dates via the \code{dates} argument.
#' @param do.combine a \code{logical} indicating if only measures for which all (\code{do.combine = TRUE}) or at least one
#' (\code{do.combine = FALSE}) of the selection components should occur in each sentiment measure's name in the subset. If
#' \code{do.combine = TRUE}, the \code{toSelect} argument can only consist of one lexicon, one feature, and one time weighting
#' scheme at maximum.
#' @param dates any expression, in the form of a \code{character} vector, that would correctly evaluate to a \code{logical}
#' vector, features the variable \code{date} and has dates specified as \code{"yyyy-mm-dd"}, e.g.
#' \code{dates = "date >= '2000-01-15'"}. This argument may also be a vector of class \code{Date} which extracts all dates
#' that show up in that vector. See the examples. By default equal to \code{NA}, meaning no subsetting based on dates is done.
#'
#' @return A modified \code{sentomeasures} object, with only the sentiment measures required, including updated information
#' and statistics, but the original sentiment scores \code{data.table} untouched.
#'
#' @examples
#' data("usnews")
#' data("lexicons")
#' data("valence")
#'
#' # construct a sentomeasures object to start with
#' corpus <- sento_corpus(corpusdf = usnews)
#' corpusSample <- quanteda::corpus_sample(corpus, size = 1000)
#' l <- setup_lexicons(lexicons[c("LM_eng", "HENRY_eng")], valence[["valence_eng"]])
#' ctr <- ctr_agg(howTime = c("equal_weight", "linear"), by = "year", lag = 3)
#' sentomeasures <- sento_measures(corpusSample, l, ctr)
#'
#' # different selections
#' sel1 <- select_measures(sentomeasures, c("equal_weight"))
#' sel2 <- select_measures(sentomeasures, c("equal_weight", "linear"), do.combine = FALSE)
#' sel3 <- select_measures(sentomeasures, c("linear", "LM_eng"))
#' sel4 <- select_measures(sentomeasures, c("linear", "LM_eng", "wsj", "economy"),
#' do.combine = FALSE)
#' sel5 <- select_measures(sentomeasures, c("linear", "LM_eng"),
#' dates = "date >= '1996-12-31' & date <= '2000-12-31'")
#' d <- seq(as.Date("2000-01-01"), as.Date("2013-12-01"), by = "month")
#' sel6 <- select_measures(sentomeasures, c("linear", "LM_eng"), dates = d)
#'
#' @export
select_measures <- function(sentomeasures, toSelect = "all", do.combine = TRUE, dates = NA) {
  # Subsets the sentiment measures by component names and/or by dates, and
  # refreshes the object's information and statistics accordingly.
  check_class(sentomeasures, "sentomeasures")
  # every valid selection component across the three dimensions
  allOpts <- c(sentomeasures$features, sentomeasures$lexicons, sentomeasures$time)
  if ("all" %in% toSelect) { # keep everything; dates may still subset rows
    toSelect <- allOpts
    do.combine <- FALSE
  }
  invalid <- !(toSelect %in% allOpts)
  if (any(invalid)) {
    stop("Following components make up none of the sentiment measures: ", paste0(toSelect[invalid], collapse = ', '))
  }
  # optional row subsetting: a Date vector, or a character expression on 'date'
  if (all(is.na(dates))) {
    measures <- sentomeasures$measures
  } else if (inherits(dates, "Date")) {
    measures <- sentomeasures$measures[date %in% dates, ]
  } else {
    measures <- sentomeasures$measures[eval(parse(text = dates)), ]
  }
  # a measure is kept if all (do.combine) or any of the selection components
  # appear among its "--"-separated name parts
  splitNames <- stringi::stri_split(colnames(measures), regex = "--")
  matchFun <- if (do.combine == TRUE) all else any
  ind <- sapply(splitNames, function(parts) matchFun(toSelect %in% parts))
  if (!any(ind)) {
    warning("No appropriate combination is found. Input sentomeasures object is returned.")
    return(sentomeasures)
  }
  ind[1] <- TRUE # always retain the date column
  measuresNew <- measures[, ind, with = FALSE]
  update_info(sentomeasures, measuresNew) # update information in sentomeasures object
}
#' Plot sentiment measures
#'
#' @author Samuel Borms
#'
#' @method plot sentomeasures
#'
#' @description Straightforward plotting method that shows all sentiment measures from the provided \code{sentomeasures}
#' object in one plot, or the average along one of the lexicons, features and time weighting dimensions. We suggest to make
#' use of the \code{\link{select_measures}} function when you desire to plot only a subset of the sentiment measures.
#'
#' @param x a \code{sentomeasures} object created using \code{\link{sento_measures}}.
#' @param group a value from \code{c("lexicons", "features", "time", "all")}. The first three choices display the average of
#' all measures from the same group, in a different color. The choice \code{"all"} displays every single sentiment measure
#' in a separate color, but this may look visually overwhelming very fast, and can be quite slow.
#' @param ... not used.
#'
#' @return Returns a simple \code{\link{ggplot}} object, which can be added onto (or to alter its default elements) by using
#' the \code{+} operator (see examples). By default, a legend is positioned at the top if there are at maximum twelve line
#' graphs plotted.
#'
#' @examples
#' data("usnews")
#' data("lexicons")
#' data("valence")
#'
#' # construct a sentomeasures object to start with
#' corpus <- sento_corpus(corpusdf = usnews)
#' corpusSample <- quanteda::corpus_sample(corpus, size = 500)
#' l <- setup_lexicons(lexicons[c("LM_eng")], valence[["valence_eng"]])
#' ctr <- ctr_agg(howTime = c("equal_weight", "linear"), by = "year", lag = 3)
#' sentomeasures <- sento_measures(corpusSample, l, ctr)
#'
#' # plot sentiment measures
#' plot(sentomeasures)
#' plot(sentomeasures, group = "features")
#'
#' # adjust appearance of plot
#' p <- plot(sentomeasures)
#' p <- p +
#' ggthemes::theme_base() +
#' scale_x_date(name = "month-year") +
#' scale_y_continuous(name = "newName")
#' p
#'
#' @import ggplot2
#' @export
# Plots all sentiment measures in one chart, or the per-date average within
# each group of one dimension ('lexicons', 'features' or 'time').
# x: a 'sentomeasures' object; group: dimension to average over, or "all".
# Returns a ggplot object.
plot.sentomeasures <- function(x, group = "all", ...) {
  if (!(group %in% c("lexicons", "features", "time", "all")))
    stop("The 'group' argument should be either 'lexicons', 'features', 'time' or 'all'.")
  # melt sentiment measures for plotting
  sentomeasures <- x
  measures <- sentomeasures$measures
  if (group == "all") {
    measuresMelt <- melt(measures, id.vars = "date", variable.factor = FALSE)
  } else {
    # long format, then average the values per date within the chosen group;
    # eval(parse(...)) resolves the 'group' string to the matching column
    measuresMelt <- to_long(measures)[, c("date", group, "value"), with = FALSE]
    measuresMelt <- measuresMelt[, list(value = mean(value)), by = list(date, variable = eval(parse(text = group)))]
  }
  measuresMelt <- measuresMelt[order(rank(as.character(variable)))] # alphabetical series order
  # hide the legend when it would get too crowded (more than twelve series)
  legendPos <- ifelse(length(unique(measuresMelt[["variable"]])) <= 12, "top", "none")
  p <- ggplot(data = measuresMelt, aes(x = date, y = value, color = variable)) +
    geom_line() +
    geom_hline(yintercept = 0, size = 0.50, linetype = "dotted") + # zero-sentiment reference line
    scale_x_date(name = "Date", date_labels = "%m-%Y") +
    scale_y_continuous(name = "Sentiment") +
    ggthemes::theme_tufte(base_size = 12) +
    theme(legend.title = element_blank(), legend.position = legendPos)
  return(p)
}
#' Add and fill missing dates
#'
#' @author Samuel Borms
#'
#' @description Adds missing dates between earliest and latest date of a \code{sentomeasures} object, such that time series
#' is continuous date-wise. Fills in these dates with either 0, the respective latest non-missing value or \code{NA}.
#'
#' @param sentomeasures a \code{sentomeasures} object created using \code{\link{sento_measures}}.
#' @param fill an element of \code{c("zero", "latest", NA)}; \code{"zero"} assumes missing dates represent zero
#' sentiment, \code{"latest"} assumes they represent constant (last observed) sentiment, and \code{NA} leaves them missing.
#'
#' @return A modified \code{sentomeasures} object.
#'
#' @examples
#' data("usnews")
#' data("lexicons")
#' data("valence")
#'
#' # construct a sentomeasures object to start with
#' corpus <- sento_corpus(corpusdf = usnews)
#' corpusSample <- quanteda::corpus_sample(corpus, size = 500)
#' l <- setup_lexicons(lexicons[c("LM_eng", "HENRY_eng")], valence[["valence_eng"]])
#' ctr <- ctr_agg(howTime = c("equal_weight", "linear"), by = "year", lag = 3)
#' sentomeasures <- sento_measures(corpusSample, l, ctr)
#'
#' # fill measures
#' f1 <- fill_measures(sentomeasures)
#' f2 <- fill_measures(sentomeasures, fill = "latest")
#' f3 <- fill_measures(sentomeasures, fill = NA)
#'
#' @export
# Inserts the missing dates between the first and last observation at the
# object's 'by' frequency, filling the new rows with 0 ("zero"), the last
# observed value ("latest"), or NA.
# Returns the modified 'sentomeasures' object.
fill_measures <- function(sentomeasures, fill = "zero") {
  check_class(sentomeasures, "sentomeasures")
  by <- sentomeasures$by
  measures <- sentomeasures$measures
  dates <- measures$date
  ts <- seq(dates[1], dates[length(dates)], by = by) # continuous date series
  dt <- data.table(date = ts)
  # join and fill as provided to new measures
  measuresFill <- merge(dt, measures, by = "date", all = TRUE) # fills with NA
  if (is.na(fill)) { # NA fill: keep the merge result as is
    sentomeasures$measures <- measuresFill
    return(sentomeasures)
  } else if (fill == "zero") {
    measuresFill[is.na(measuresFill)] <- 0
  } else if (fill == "latest") {
    measuresFill <- zoo::na.locf(measuresFill) # last observation carried forward
  } else stop("Input variable 'fill' should be either 'zero', 'latest' or NA.")
  # re-cast measure columns to numeric -- presumably guards against type
  # coercion introduced by the fill step (TODO confirm)
  measuresFill <- data.table(date = ts, measuresFill[, lapply(.SD, as.numeric), .SDcols = colnames(measures)[-1]])
  sentomeasures$measures <- measuresFill
  return(sentomeasures)
}
#' Scaling and centering of sentiment measures
#'
#' @author Samuel Borms
#'
#' @description Scales and centers the sentiment measures from a \code{sentomeasures} object, column-per-column. By default,
#' the measures are normalized. \code{NA}s are removed first.
#'
#' @param x a \code{sentomeasures} object created using \code{\link{sento_measures}}.
#' @param center a \code{logical}, see documentation for the generic \code{\link{scale}}.
#' @param scale a \code{logical}, see documentation for the generic \code{\link{scale}}.
#'
#' @return A modified \code{sentomeasures} object, with the measures replaced by the scaled measures as well as updated
#' statistics.
#'
#' @examples
#' data("usnews")
#' data("lexicons")
#' data("valence")
#'
#' # construct a sentomeasures object to start with
#' corpus <- sento_corpus(corpusdf = usnews)
#' corpusSample <- quanteda::corpus_sample(corpus, size = 500)
#' l <- setup_lexicons(lexicons[c("LM_eng", "HENRY_eng")], valence[["valence_eng"]])
#' ctr <- ctr_agg(howTime = c("equal_weight", "linear"), by = "year", lag = 3)
#' sentomeasures <- sento_measures(corpusSample, l, ctr)
#'
#' # scale sentiment measures
#' scaled <- scale(sentomeasures)
#'
#' @export
scale.sentomeasures <- function(x, center = TRUE, scale = TRUE) {
  # Scales/centers every sentiment measure column (dates are left untouched)
  # and refreshes the object's statistics.
  sm <- x
  dateCol <- sm$measures[, 1]
  normalized <- scale(sm$measures[, -1], center, scale) # base scale() on the measure columns
  sm$measures <- data.table(dateCol, normalized)
  sm$stats <- compute_stats(sm)
  sm
}
#' Extract documents related to sentiment peaks
#'
#' @author Samuel Borms
#'
#' @description This function gives the dates and documents for which aggregated sentiment was
#' most extreme (lowest, highest or both in absolute terms).
#'
#' @param sentomeasures a \code{sentomeasures} object created using \code{\link{sento_measures}}.
#' @param sentocorpus the \code{sentocorpus} object created with \code{\link{sento_corpus}}, used for the construction
#' of the input \code{sentomeasures} object.
#' @param n a \code{numeric} value to indicate the number of documents to extract. The associated dates are not
#' necessarily unique, given that, for example, extreme sentiment may occur on only one date but for different sentiment
#' measures.
#' @param type a \code{character} value, either \code{"pos"}, \code{"neg"} or \code{"both"}; respectively to look
#' for the \code{n} most positive, most negative or most extreme (in absolute terms) sentiment occurrences.
#' @param do.average a \code{logical} to indicate whether peaks should be selected based on the average sentiment
#' value per date. If \code{do.average = TRUE}, \code{n} unique dates are guaranteed (cf. argument \code{n}).
#'
#' @return A \code{list} with as elements \code{"dates"}, \code{"ids"} and \code{"documents"}, corresponding to
#' the \code{n} sentiment peaks.
#'
#' @examples
#' data("usnews")
#' data("lexicons")
#' data("valence")
#'
#' # construct a sentomeasures object to start with
#' corpus <- sento_corpus(corpusdf = usnews)
#' corpusSample <- quanteda::corpus_sample(corpus, size = 500)
#' l <- setup_lexicons(lexicons[c("LM_eng", "HENRY_eng")], valence[["valence_eng"]])
#' ctr <- ctr_agg(howTime = c("equal_weight", "linear"), by = "month", lag = 3)
#' sentomeasures <- sento_measures(corpusSample, l, ctr)
#'
#' # extract the peaks
#' peaksAbs <- extract_peakdocs(sentomeasures, corpus, n = 5)
#' peaksPos <- extract_peakdocs(sentomeasures, corpus, n = 5, type = "pos")
#' peaksNeg <- extract_peakdocs(sentomeasures, corpus, n = 5, type = "neg")
#'
#' @export
extract_peakdocs <- function(sentomeasures, sentocorpus, n = 10, type = "both", do.average = FALSE) {
  # Returns the dates, document ids and texts of the n most extreme sentiment
  # occurrences (type = "pos", "neg", or "both" for largest absolute values).
  check_class(sentomeasures, "sentomeasures")
  measures <- sentomeasures$measures[, -1] # drop date column
  m <- dim(measures)[2]
  if (n >= (dim(measures)[1] * m)) stop("The parameter 'n' exceeds the total number of sentiment values.")
  if (do.average == TRUE) {
    measures <- rowMeans(measures, na.rm = TRUE)
    dates <- sentomeasures$measures$date
    # bug fix: with one averaged series there is one value per date, so index
    # it as a single column (the original kept m = ncol and over-indexed
    # below, producing NA indices)
    m <- 1
  } else {
    dates <- rep(sentomeasures$measures$date, m)
    # bug fix: flatten to a matrix so abs() and order() rank every individual
    # value (base order() is not defined on a list-like data table)
    measures <- as.matrix(measures)
  }
  if (type == "both") measures <- abs(measures) # most extreme in absolute value
  # top m * n values so that up to n distinct dates are guaranteed
  indx <- order(measures, decreasing = ifelse(type == "neg", FALSE, TRUE))[1:(m * n)]
  peakDates <- unique(dates[indx])[1:n]
  ids <- sentomeasures$sentiment[date %in% peakDates, ]$id # get document IDs
  peakDocs <- quanteda::texts(sentocorpus)[row.names(sentocorpus$documents) %in% ids]
  peaks <- list(dates = peakDates, ids = ids, docs = peakDocs)
  return(peaks)
}
#' Differencing of sentiment measures
#'
#' @author Samuel Borms
#'
#' @description Differences the sentiment measures from a \code{sentomeasures} object.
#'
#' @param x a \code{sentomeasures} object created using \code{\link{sento_measures}}.
#' @param lag a \code{numeric}, see documentation for the generic \code{\link{diff}}.
#' @param differences a \code{numeric}, see documentation for the generic \code{\link{diff}}.
#' @param ... not used.
#'
#' @return A modified \code{sentomeasures} object, with the measures replaced by the differenced measures as well as updated
#' statistics.
#'
#' @examples
#' data("usnews")
#' data("lexicons")
#' data("valence")
#'
#' # construct a sentomeasures object to start with
#' corpus <- sento_corpus(corpusdf = usnews)
#' corpusSample <- quanteda::corpus_sample(corpus, size = 500)
#' l <- setup_lexicons(lexicons[c("LM_eng", "HENRY_eng")], valence[["valence_eng"]])
#' ctr <- ctr_agg(howTime = c("equal_weight", "linear"), by = "year", lag = 3)
#' sentomeasures <- sento_measures(corpusSample, l, ctr)
#'
#' # first-order difference sentiment measures with a lag of two
#' diffed <- diff(sentomeasures, lag = 2, differences = 1)
#'
#' @export
diff.sentomeasures <- function(x, lag = 1, differences = 1, ...) {
  # Differences every sentiment measure column; the first lag * differences
  # dates drop out of the result. Statistics are recomputed afterwards.
  sm <- x
  nDrop <- lag * differences
  remainingDates <- sm$measures[, 1][-1:-nDrop]
  diffed <- diff(as.matrix(sm$measures[, -1]), lag = lag, differences = differences)
  sm$measures <- data.table(remainingDates, diffed)
  sm$stats <- compute_stats(sm)
  sm
}
|
# Prepare packages to be used ----
# BUG FIX: the original vector was missing the comma between "openxlsx" and
# "dplyr" (a syntax error) and listed "openxlsx" twice.
packages.paper <- c(
  "openxlsx", "dplyr", "tidyverse", "knitr", "tidyr",
  "fastDummies", "rstatix", "ggpubr", "caret", "flextable", "officer", "plotly",
  "gtable", "egg", "gridExtra", "grid", "lavaan", "robustbase", "robustlmm",
  "arsenal", "GGally", "mice", "WRS2"
)
# Attach every package; invisible() suppresses lapply's printed return values
invisible(lapply(packages.paper, library, character.only = TRUE))
# Load data frame ----
clinical.trial <- read.xlsx("RCT.bMBI.endo.pain.xlsx")
# BUG FIX: the original called names(clinical.trial) BEFORE reading the file,
# which errors (or silently inspects a stale object) on a fresh session.
names(clinical.trial)
View(clinical.trial)
# Data frame structure----
# Column-wise overview (name, type, first values) of the trial data (dplyr)
glimpse(clinical.trial)
# NOTE(review): headTail() is not provided by any package in packages.paper
# above (it looks like psych::headTail) — confirm it is attached elsewhere
headTail(clinical.trial)
#Socio demographics and baseline outcome variables table----
sociodemographic=read.xlsx("sociodemographic.sub.xlsx")
names(sociodemographic)
#collapse education categories
# Recode education into three bands (< high school / high school / university);
# values not matched by any case_when() branch become NA.
# NOTE(review): "higher educationl" looks like a typo for "higher education" —
# verify it matches the literal value stored in the spreadsheet, otherwise
# those rows silently fall through to NA.
sociodemographic = sociodemographic %>%
  mutate(education=case_when
         (education %in% c("incomplete elementary school","elementary school","incomplete high school") ~ "< high school",
          education %in% c("high school")~"high school", education %in% c("incomplete higher education","higher educationl","graduate")
          ~ "university")) %>%
  # keep only the columns reported in the sociodemographic table below
  select(id,group,age,race,marital.status,education,BMI,physical.exercise,sleep.hours.per.night,current.endometriosis.medication,n.surgeries,time.chronic.pain,
         analgesic,anxiety,depression)
# Table sociodemographic ----
# Shared arsenal::tableby() control objects.
# mycontrols: descriptive statistics only, no group tests.
mycontrols <- tableby.control(
  test = FALSE,
  numeric.stats = c("mean", "sd", "median", "q1q3"),
  cat.stats = c("countpct"),
  stats.labels = list(mean = "Mean", sd = "SD", median = "Median", q1q3 = "Q1,Q3")
)
# mycontrols2: adds group comparisons (Wilcoxon for numeric, chi-square for
# categorical) and drops the "Total" column.
mycontrols2 <- tableby.control(
  test = TRUE, total = FALSE,
  numeric.test = "wt", cat.test = "chisq",
  numeric.stats = c("mean", "sd", "median", "q1q3"),
  cat.stats = c("countpct"),
  stats.labels = list(mean = "Mean", sd = "SD", median = "Median", q1q3 = "Q1,Q3")
)
#table1
# Sociodemographics by group with tests (uses mycontrols2 defined above)
tab1=tableby(group~age+race+marital.status+education+BMI+physical.exercise+sleep.hours.per.night+current.endometriosis.medication+n.surgeries+
               time.chronic.pain+analgesic+anxiety+depression , data=sociodemographic, control=mycontrols2)
# Prettier labels for the units of age and BMI
mylabels = list(age="age (y)", BMI="BMI (kg/m2)")
tab1=summary(tab1,text=TRUE, labelTranslations = mylabels)
tab1=as.data.frame(tab1)
# The summary data frame's first column has an empty name; rename it so
# flextable can reference it via col_keys
tab1= tab1 %>%
  rename(variables="") %>%
  flextable(col_keys = c("variables","control (N=32)","intervention (N=31)","Total (N=63)","p value")) %>%
  colformat_num(digits=2,j=c("variables","control (N=32)","intervention (N=31)","Total (N=63)","p value")) %>%
  autofit()
# Test baseline (t1) group differences per SF-36 outcome variable:
# long format -> one Wilcoxon test per variable (rstatix), rounded to 3 dp,
# rendered as a flextable.
clinical.trial %>%
  filter(time == "t1") %>%
  select(id, group, sf.36.physical.sum, sf.36.mental.sum, sf_36_physical.functioning,
         sf_36_limitations.physical.functioning,
         sf_36_pain, sf_36_general.health, sf_36_vitality, sf_36_social.function,
         sf_36_emotional.role, sf_36_mental.health) %>%
  melt(id.vars = c("id", "group")) %>%
  group_by(variable) %>%
  wilcox_test(value ~ group, detailed = TRUE) %>%
  adjust_pvalue() %>%
  add_significance("p") %>%
  mutate(across(c(estimate, p, conf.low, conf.high, p.adj), ~ round(.x, 3))) %>%
  select(variable, estimate, statistic, p, conf.low, conf.high, p.adj) %>%
  flextable() %>%
  autofit()
# Same baseline comparison for the pain outcomes and perceived stress:
# one Wilcoxon test per variable at t1, rounded and tabulated.
clinical.trial %>%
  filter(time == "t1") %>%
  select(id, group, pelvic.pain, pain.unpleasantness, dysuria, dyspareunia,
         dyschezia, dysmenorrhea, PSS_10_total) %>%
  melt(id.vars = c("id", "group")) %>%
  group_by(variable) %>%
  wilcox_test(value ~ group, detailed = TRUE) %>%
  adjust_pvalue() %>%
  add_significance("p") %>%
  mutate(across(c(estimate, p, conf.low, conf.high, p.adj), ~ round(.x, 3))) %>%
  select(variable, estimate, statistic, p, conf.low, conf.high, p.adj) %>%
  flextable() %>%
  autofit()
# Baseline comparison for total mindfulness (FFMQ): t-test (rstatix) at t1,
# rounded to 3 dp and tabulated.
clinical.trial %>%
  filter(time == "t1") %>%
  select(id, group, FFMQ_total) %>%
  melt(id.vars = c("id", "group")) %>%
  group_by(variable) %>%
  t_test(value ~ group, detailed = TRUE) %>%
  adjust_pvalue() %>%
  add_significance("p") %>%
  mutate(across(c(estimate, p, conf.low, conf.high, p.adj), ~ round(.x, 3))) %>%
  select(variable, estimate, statistic, p, conf.low, conf.high, p.adj) %>%
  flextable() %>%
  autofit()
#outcome variables between time and group
# Reshape the trial data to one row per participant, with every outcome
# spread into <variable>_<time> columns (e.g. pelvic.pain_t1); feeds tab2 below.
outcome.table=clinical.trial %>%
  select(id,group,time, pelvic.pain,pain.unpleasantness,dysuria,dyspareunia,dyschezia,dysmenorrhea,PSS_10_total,
         sf.36.physical.sum,sf.36.mental.sum,sf_36_physical.functioning,sf_36_limitations.physical.functioning,
         sf_36_pain, sf_36_general.health, sf_36_vitality, sf_36_social.function, sf_36_emotional.role,
         sf_36_mental.health, FFMQ_total) %>%
  pivot_wider(names_from = time,values_from=c(pelvic.pain,pain.unpleasantness,dysuria,dyspareunia,dyschezia,dysmenorrhea,PSS_10_total,
                                              sf.36.physical.sum,sf.36.mental.sum,sf_36_physical.functioning,sf_36_limitations.physical.functioning,
                                              sf_36_pain, sf_36_general.health, sf_36_vitality, sf_36_social.function, sf_36_emotional.role,
                                              sf_36_mental.health, FFMQ_total))
# Table: all outcomes by group, wide by time ----
# FIX: the original formula repeated several terms (dysuria_t1;
# sf_36_physical.functioning_t1/_t2; sf_36_general.health_t1-_t3). Duplicates
# are removed here; each outcome now appears exactly once per wave.
tab2 <- tableby(
  group ~ pelvic.pain_t1 + pelvic.pain_t2 + pelvic.pain_t3 +
    pain.unpleasantness_t1 + pain.unpleasantness_t2 + pain.unpleasantness_t3 +
    dysuria_t1 + dysuria_t2 + dysuria_t3 +
    dyspareunia_t1 + dyspareunia_t2 + dyspareunia_t3 +
    dyschezia_t1 + dyschezia_t2 + dyschezia_t3 +
    dysmenorrhea_t1 + dysmenorrhea_t2 + dysmenorrhea_t3 +
    PSS_10_total_t1 + PSS_10_total_t2 + PSS_10_total_t3 +
    sf.36.physical.sum_t1 + sf.36.physical.sum_t2 + sf.36.physical.sum_t3 +
    sf.36.mental.sum_t1 + sf.36.mental.sum_t2 + sf.36.mental.sum_t3 +
    sf_36_physical.functioning_t1 + sf_36_physical.functioning_t2 + sf_36_physical.functioning_t3 +
    sf_36_limitations.physical.functioning_t1 + sf_36_limitations.physical.functioning_t2 +
    sf_36_limitations.physical.functioning_t3 +
    sf_36_pain_t1 + sf_36_pain_t2 + sf_36_pain_t3 +
    sf_36_general.health_t1 + sf_36_general.health_t2 + sf_36_general.health_t3 +
    sf_36_vitality_t1 + sf_36_vitality_t2 + sf_36_vitality_t3 +
    sf_36_social.function_t1 + sf_36_social.function_t2 + sf_36_social.function_t3 +
    sf_36_emotional.role_t1 + sf_36_emotional.role_t2 + sf_36_emotional.role_t3 +
    sf_36_mental.health_t1 + sf_36_mental.health_t2 + sf_36_mental.health_t3 +
    FFMQ_total_t1 + FFMQ_total_t2 + FFMQ_total_t3,
  data = outcome.table, control = mycontrols
)
tab2 <- summary(tab2, text = TRUE)
tab2 <- as.data.frame(tab2)
# First column of the summary has an empty name; rename it for flextable
tab2 <- tab2 %>%
  rename(variables = "") %>%
  flextable(col_keys = c("variables", "control (N=32)", "intervention (N=31)", "Total (N=63)", "p value")) %>%
  colformat_num(digits = 2, j = c("variables", "control (N=32)", "intervention (N=31)", "Total (N=63)", "p value")) %>%
  autofit()
# Meditation diary: mean meditation time by week and total ----
meditation.diary <- read.xlsx("meditation.diary.all.xlsx")
names(meditation.diary)
hist(meditation.diary$mean_total)
# Mean and SD per diary column, skipping missing entries
meditation.diary %>%
  pivot_longer(-id) %>%
  filter(!is.na(value)) %>%
  group_by(name) %>%
  summarise(mean = mean(value), sd = sd(value))
# Missing by Time: T1, T2, T3 per variable----
# For each wave: count NAs per variable, keep only variables with missing
# values, sorted by descending count.
missing.values.t1 <- clinical.trial %>%
  filter(time == "t1") %>%
  gather(key = "key", value = "val") %>%
  mutate(is.missing = is.na(val)) %>%
  group_by(key, is.missing) %>%
  summarise(num.missing = n()) %>%
  filter(is.missing == TRUE) %>%
  select(-is.missing) %>%
  arrange(desc(num.missing))
missing.values.t2 <- clinical.trial %>%
  filter(time == "t2") %>%
  gather(key = "key", value = "val") %>%
  mutate(is.missing = is.na(val)) %>%
  group_by(key, is.missing) %>%
  summarise(num.missing = n()) %>%
  filter(is.missing == TRUE) %>%
  select(-is.missing) %>%
  arrange(desc(num.missing))
# BUG FIX: the original filtered time == "t1" here, so missing.values.t3 was
# a duplicate of the t1 table instead of the t3 counts.
missing.values.t3 <- clinical.trial %>%
  filter(time == "t3") %>%
  gather(key = "key", value = "val") %>%
  mutate(is.missing = is.na(val)) %>%
  group_by(key, is.missing) %>%
  summarise(num.missing = n()) %>%
  filter(is.missing == TRUE) %>%
  select(-is.missing) %>%
  arrange(desc(num.missing))
# Bar charts of the per-variable NA counts computed above, one per wave
missing.values.t1 %>%
  ggplot() +
  geom_bar(aes(x = key, y = num.missing), stat = "identity") +
  labs(x = "variable", y = "number of missing t1", title = "Number of missing values") +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
missing.values.t2 %>%
  ggplot() +
  geom_bar(aes(x = key, y = num.missing), stat = "identity") +
  labs(x = "variable", y = "number of missing t2", title = "Number of missing values") +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
missing.values.t3 %>%
  ggplot() +
  geom_bar(aes(x = key, y = num.missing), stat = "identity") +
  labs(x = "variable", y = "number of missing t3", title = "Number of missing values") +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
# Percentage of missing per variable, total dataframe----
missing.values.percentage <- clinical.trial %>%
  gather(key = "key", value = "val") %>%
  mutate(isna = is.na(val)) %>%
  group_by(key) %>%
  mutate(total = n()) %>%
  group_by(key, total, isna) %>%
  summarise(num.isna = n()) %>%
  mutate(pct = num.isna / total * 100)
# Variables ordered by % missing; reused as axis limits here and in row.plot.
# NOTE: the name shadows base::levels() within this script.
levels <- (missing.values.percentage %>% filter(isna == TRUE) %>% arrange(desc(pct)))$key
# BUG FIX: the original called View(levels) BEFORE `levels` was defined;
# moved after its creation.
View(levels)
percentage.plot <- missing.values.percentage %>%
  ggplot() +
  geom_bar(aes(x = reorder(key, desc(pct)),
               y = pct, fill = isna),
           stat = "identity", alpha = 0.8) +
  scale_x_discrete(limits = levels) +
  scale_fill_manual(name = "",
                    values = c("steelblue", "tomato3"), labels = c("Present", "Missing")) +
  coord_flip() +
  labs(title = "Percentage of missing values", x =
         "Variable", y = "% of missing values")
# Total missing by id----
# Raster map of NA positions: one cell per (row, variable) pair, colored by
# presence/missingness; variables ordered by `levels` from the block above.
row.plot <- clinical.trial %>%
  mutate(id = row_number()) %>%
  gather(-id, key = "key", value = "val") %>%
  mutate(isna = is.na(val)) %>%
  ggplot(aes(x = key, y = id, fill = isna)) +
  geom_raster(alpha = 0.8) +
  scale_fill_manual(name = "", values = c("steelblue", "tomato3"),
                    labels = c("Present", "Missing")) +
  scale_x_discrete(limits = levels) +
  scale_y_continuous(breaks = seq(0, 174, by = 20)) +
  labs(x = "Variable", y = "Row Number", title = "Missing values in rows") +
  coord_flip()
# Sample size by time: t1, t2, t3----
# Participants per group at each assessment wave.
sample.by.time <- clinical.trial %>%
  select(id, time, group) %>%
  group_by(time, group) %>%
  summarise(id = n()) %>%
  rename(participants = id) %>%
  flextable()
# Participants dropout by time: t1, t2, t3----
# Counts and percentages of participants lost between t1-t2 and t1-t3.
dropout <- clinical.trial %>%
  select(id, time, group) %>%
  group_by(time, group) %>%
  summarise(id = n()) %>%
  pivot_wider(names_from = time, values_from = id) %>%
  mutate(dropout.t1.t2 = t1 - t2, dropout.t1.t3 = t1 - t3) %>%
  group_by(group) %>%
  mutate(drop.out.t1.t2.percentage = dropout.t1.t2 / t1 * 100,
         drop.out.t1.t3.percentage = dropout.t1.t3 / t1 * 100) %>%
  select(dropout.t1.t2, dropout.t1.t3, drop.out.t1.t2.percentage,
         drop.out.t1.t3.percentage) %>%
  mutate(drop.out.t1.t2.percentage = round(drop.out.t1.t2.percentage, 2),
         drop.out.t1.t3.percentage = round(drop.out.t1.t3.percentage, 2)) %>%
  flextable() %>%
  # BUG FIX: the original labeled the count columns as drop.out.t1.t2/.t3,
  # names that do not exist — the columns are dropout.t1.t2/.t3, so those
  # header labels never applied.
  set_header_labels(dropout.t1.t2 = "n t1-t2", dropout.t1.t3 = "n t1-t3",
                    drop.out.t1.t2.percentage = "% t1-t2",
                    drop.out.t1.t3.percentage = "% t1-t3") %>%
  set_caption(caption = "Dropout")
# Endometriosis-related pain and perceived stress Distribution between group----
# raw scores
# Per-wave boxplots + jittered points of the raw pain/stress outcomes by group,
# one facet per variable; the black pointrange marks the median.
#t1
clinical.trial %>%
  filter(time=="t1") %>%
  select(group,pelvic.pain,dysuria,dyspareunia,dyschezia,dysmenorrhea,pain.unpleasantness,PSS_10_total) %>%
  pivot_longer(-group) %>%
  ggplot(aes(x = group, y = value, color = group)) + facet_wrap(~ name, scales = "free", nrow = 4) +
  coord_flip ()+
  geom_boxplot() +
  geom_jitter(shape = 10, position = position_jitter(0.1)) +
  stat_summary(fun = median, geom ="pointrange",color = "black") +
  ylab("Value") +
  theme( axis.text.y = element_blank())
#t2
clinical.trial %>%
  filter(time=="t2") %>%
  select(group,pelvic.pain,dysuria,dyspareunia,dyschezia,dysmenorrhea,pain.unpleasantness,PSS_10_total) %>%
  pivot_longer(-group) %>%
  ggplot(aes(x = group, y = value, color = group)) + facet_wrap(~ name, scales = "free", nrow = 4) +
  coord_flip ()+
  geom_boxplot() +
  geom_jitter(shape = 10, position = position_jitter(0.1)) +
  stat_summary(fun = median, geom ="pointrange",color = "black") +
  ylab("Value") +
  theme( axis.text.y = element_blank())
#t3
clinical.trial %>%
  filter(time=="t3") %>%
  select(group,pelvic.pain,dysuria,dyspareunia,dyschezia,dysmenorrhea,pain.unpleasantness,PSS_10_total) %>%
  pivot_longer(-group) %>%
  ggplot(aes(x = group, y = value, color = group)) + facet_wrap(~ name, scales = "free", nrow = 4) +
  coord_flip ()+
  geom_boxplot() +
  geom_jitter(shape = 10, position = position_jitter(0.1)) +
  stat_summary(fun = median, geom ="pointrange",color = "black") +
  ylab("Value") +
  theme( axis.text.y = element_blank())
# Endometriosis-related pain and perceived stress distribution between group on gain scores----
#compute changes in variables of interest from t1 to t2 and t1 to t3
# One row per participant: outcomes spread wide by time, then change scores
# (_t2 - _t1 and _t3 - _t1) computed for each pain outcome and PSS.
pain.PSS.gain=clinical.trial %>%
  select(id, group, time,pelvic.pain,dysuria,dyspareunia,dyschezia,dysmenorrhea,pain.unpleasantness,
         PSS_10_total) %>%
  pivot_wider(names_from = time,values_from = c(pelvic.pain,dysuria,dyspareunia,
                                                dyschezia,dysmenorrhea,pain.unpleasantness,PSS_10_total)) %>%
  mutate (pelvic.pain.change.t1_t2 = pelvic.pain_t2 - pelvic.pain_t1,
          dysuria.change.t1_t2 = dysuria_t2 - dysuria_t1,
          dyspareunia.change.t1_t2 =dyspareunia_t2- dyspareunia_t1,
          dyschezia.change.t1_t2 =dyschezia_t2- dyschezia_t1,
          dysmenorrhea.change.t1_t2 =dysmenorrhea_t2 - dysmenorrhea_t1,
          pain.unpleasantness.change.t1_t2 =pain.unpleasantness_t2 - pain.unpleasantness_t1,
          PSS.change.t1_t2 =PSS_10_total_t2 - PSS_10_total_t1,
          pelvic.pain.change.t1_t3 = pelvic.pain_t3 - pelvic.pain_t1,
          dysuria.change.t1_t3 =dysuria_t3 - dysuria_t1,dyspareunia.change.t1_t3 = dyspareunia_t3 - dyspareunia_t1,
          dyschezia.change.t1_t3 = dyschezia_t3 - dyschezia_t1,
          dysmenorrhea.change.t1_t3 = dysmenorrhea_t3 - dysmenorrhea_t1,
          pain.unpleasantness.change.t1_t3 =pain.unpleasantness_t3 - pain.unpleasantness_t1,
          PSS.change.t1_t3 = PSS_10_total_t3 - PSS_10_total_t1)
#t1-t2
# Gain-score (change) distributions by group, one facet per outcome.
# FIX: the original select() listed dysmenorrhea.change.t1_t2 twice.
pain.PSS.gain %>%
  select(group, pelvic.pain.change.t1_t2, dysuria.change.t1_t2, dyspareunia.change.t1_t2,
         dyschezia.change.t1_t2, dysmenorrhea.change.t1_t2,
         pain.unpleasantness.change.t1_t2, PSS.change.t1_t2) %>%
  pivot_longer(-group) %>%
  ggplot(aes(x = group, y = value, color = group)) + facet_wrap(~ name, scales = "free", nrow = 4) +
  coord_flip() +
  geom_boxplot() +
  geom_jitter(shape = 10, position = position_jitter(0.1)) +
  stat_summary(fun = median, geom = "pointrange", color = "black") +
  ylab("Value") +
  theme(axis.text.y = element_blank())
#t1-t3
# FIX: dysuria.change.t1_t3 is computed above and shown in the parallel t1-t2
# plot, but the original omitted it here — added for consistency.
pain.PSS.gain %>%
  select(group, pelvic.pain.change.t1_t3, dysuria.change.t1_t3,
         dyspareunia.change.t1_t3, dyschezia.change.t1_t3, dysmenorrhea.change.t1_t3,
         pain.unpleasantness.change.t1_t3, PSS.change.t1_t3) %>%
  pivot_longer(-group) %>%
  ggplot(aes(x = group, y = value, color = group)) + facet_wrap(~ name, scales = "free", nrow = 4) +
  coord_flip() +
  geom_boxplot() +
  geom_jitter(shape = 10, position = position_jitter(0.1)) +
  stat_summary(fun = median, geom = "pointrange", color = "black") +
  ylab("Value") +
  theme(axis.text.y = element_blank())
# Multidimensional quality of life (SF_36) distribution----
# Boxplots + jittered points of the SF-36 summary scores and subscales by
# group, one plot object per wave (t1/t2/t3); black pointrange = median.
#distribution between group t1
# Summary sf-36 physical and mental health
dist.sum.sf_36.t1=clinical.trial %>%
  filter(time=="t1") %>%
  select(group,sf.36.physical.sum,sf.36.mental.sum) %>%
  pivot_longer(-group) %>%
  ggplot(aes(x = group, y = value, color = group)) + facet_wrap(~ name, scales = "free", nrow = 4) +
  coord_flip ()+
  geom_boxplot() +
  geom_jitter(shape = 10, position = position_jitter(0.1)) +
  stat_summary(fun = median, geom ="pointrange",color = "black") +
  ylab("Value") +
  theme( axis.text.y = element_blank())
#subscales
dist.sub.sf_36.t1=clinical.trial %>%
  filter(time=="t1") %>%
  select(group,sf_36_physical.functioning,sf_36_limitations.physical.functioning,
         sf_36_pain,sf_36_general.health,sf_36_vitality, sf_36_social.function,
         sf_36_emotional.role, sf_36_mental.health) %>%
  pivot_longer(-group) %>%
  ggplot(aes(x = group, y = value, color = group)) + facet_wrap(~ name, scales = "free", nrow = 4) +
  coord_flip ()+
  geom_boxplot() +
  geom_jitter(shape = 10, position = position_jitter(0.1)) +
  stat_summary(fun = median, geom ="pointrange",color = "black") +
  ylab("Value") +
  theme( axis.text.y = element_blank())# sf_36_emotional.role, sf_36_limitation.physical.functioning
# with extremely low variance in control group
#distribution between group t2
# Summary sf-36 physical and mental health
dist.sum.sf_36.t2=clinical.trial %>%
  filter(time=="t2") %>%
  select(group,sf.36.physical.sum,sf.36.mental.sum) %>%
  pivot_longer(-group) %>%
  ggplot(aes(x = group, y = value, color = group)) + facet_wrap(~ name, scales = "free", nrow = 4) +
  coord_flip ()+
  geom_boxplot() +
  geom_jitter(shape = 10, position = position_jitter(0.1)) +
  stat_summary(fun = median, geom ="pointrange",color = "black") +
  ylab("Value") +
  theme( axis.text.y = element_blank())
#subscales
dist.sub.sf_36.t2=clinical.trial %>%
  filter(time=="t2") %>%
  select(group,sf_36_physical.functioning,sf_36_limitations.physical.functioning,
         sf_36_pain,sf_36_general.health,sf_36_vitality, sf_36_social.function,
         sf_36_emotional.role, sf_36_mental.health) %>%
  pivot_longer(-group) %>%
  ggplot(aes(x = group, y = value, color = group)) + facet_wrap(~ name, scales = "free", nrow = 4) +
  coord_flip ()+
  geom_boxplot() +
  geom_jitter(shape = 10, position = position_jitter(0.1)) +
  stat_summary(fun = median, geom ="pointrange",color = "black") +
  ylab("Value") +
  theme( axis.text.y = element_blank())
#distribution between group t3
# Summary sf-36 physical and mental health
dist.sum.sf_36.t3=clinical.trial %>%
  filter(time=="t3") %>%
  select(group,sf.36.physical.sum,sf.36.mental.sum) %>%
  pivot_longer(-group) %>%
  ggplot(aes(x = group, y = value, color = group)) + facet_wrap(~ name, scales = "free", nrow = 4) +
  coord_flip ()+
  geom_boxplot() +
  geom_jitter(shape = 10, position = position_jitter(0.1)) +
  stat_summary(fun = median, geom ="pointrange",color = "black") +
  ylab("Value") +
  theme( axis.text.y = element_blank())
#subscales
dist.sub.sf_36.t3=clinical.trial %>%
  filter(time=="t3") %>%
  select(group,sf_36_physical.functioning,sf_36_limitations.physical.functioning,
         sf_36_pain,sf_36_general.health,sf_36_vitality, sf_36_social.function,
         sf_36_emotional.role, sf_36_mental.health) %>%
  pivot_longer(-group) %>%
  ggplot(aes(x = group, y = value, color = group)) + facet_wrap(~ name, scales = "free", nrow = 4) +
  coord_flip ()+
  geom_boxplot() +
  geom_jitter(shape = 10, position = position_jitter(0.1)) +
  stat_summary(fun = median, geom ="pointrange",color = "black") +
  ylab("Value") +
  theme( axis.text.y = element_blank())
#compute changes in variables of interest from t1 to t2 and t1 to t3----
# SF-36 gain scores: spread the summary scores and subscales wide by time,
# then compute _t2 - _t1 and _t3 - _t1 differences for each.
# NOTE(review): the new column names mix two conventions
# (sf_36.physical.t1.t2 vs sf_36_pain.t1_t2) — consider unifying if reused.
sf.36.gain=clinical.trial %>%
  select(id, group, time,sf.36.physical.sum, sf.36.mental.sum, sf_36_physical.functioning,sf_36_limitations.physical.functioning,
         sf_36_pain,sf_36_general.health,sf_36_vitality, sf_36_social.function,
         sf_36_emotional.role, sf_36_mental.health) %>%
  pivot_wider(names_from = time,values_from = c( sf.36.physical.sum, sf.36.mental.sum,sf_36_physical.functioning,
                                                 sf_36_limitations.physical.functioning,
                                                 sf_36_pain,sf_36_general.health,sf_36_vitality, sf_36_social.function,
                                                 sf_36_emotional.role, sf_36_mental.health)) %>%
  mutate (sf_36.physical.t1.t2=sf.36.physical.sum_t2-sf.36.physical.sum_t1,
          sf.36.mental.sum.t1.t2=sf.36.mental.sum_t2-sf.36.mental.sum_t1,
          sf_36_physical.functioning.t1_t2 = sf_36_physical.functioning_t2 - sf_36_physical.functioning_t1,
          sf_36_limitations.physical.t1_t2 = sf_36_limitations.physical.functioning_t2 - sf_36_limitations.physical.functioning_t1,
          sf_36_pain.t1_t2 =sf_36_pain_t2- sf_36_pain_t1,
          sf_36_general.health.t1_t2 =sf_36_general.health_t2- sf_36_general.health_t1,
          sf_36_vitality.t1_t2 =sf_36_vitality_t2 - sf_36_vitality_t1,
          sf_36_social.function.t1_t2 =sf_36_social.function_t2 - sf_36_social.function_t1,
          sf_36_emotional.role.t1_t2 =sf_36_emotional.role_t2 - sf_36_emotional.role_t1,
          sf_36_mental.health.t1_t2 =sf_36_mental.health_t2 - sf_36_mental.health_t1,
          sf_36.physical.t1.t3=sf.36.physical.sum_t3-sf.36.physical.sum_t1,
          sf.36.mental.sum.t1.t3=sf.36.mental.sum_t3-sf.36.mental.sum_t1,
          sf_36_physical.functioning.t1_t3 = sf_36_physical.functioning_t3 - sf_36_physical.functioning_t1,
          sf_36_limitations.physical.t1_t3 = sf_36_limitations.physical.functioning_t3 - sf_36_limitations.physical.functioning_t1,
          sf_36_pain.t1_t3 =sf_36_pain_t3- sf_36_pain_t1,
          sf_36_general.health.t1_t3 =sf_36_general.health_t3- sf_36_general.health_t1,
          sf_36_vitality.t1_t3 =sf_36_vitality_t3 - sf_36_vitality_t1,
          sf_36_social.function.t1_t3 =sf_36_social.function_t3 - sf_36_social.function_t1,
          sf_36_emotional.role.t1_t3 =sf_36_emotional.role_t3 - sf_36_emotional.role_t1,
          sf_36_mental.health.t1_t3 =sf_36_mental.health_t3 - sf_36_mental.health_t1)
# Mindfulness (FFMQ) distribution----
#distribution between group and time
# Boxplots of total FFMQ by group, faceted by wave
dist.FFMQ=clinical.trial %>%
  select(group,time, FFMQ_total) %>%
  pivot_longer(-group & -time) %>%
  ggplot(aes(x = group, y = value, color = group)) + facet_wrap(~ time, scales = "free", nrow = 4) +
  coord_flip ()+
  geom_boxplot() +
  geom_jitter(shape = 10, position = position_jitter(0.1)) +
  stat_summary(fun = median, geom ="pointrange",color = "black") +
  ylab("Value") +
  theme( axis.text.y = element_blank())
#compute changes in variables of interest from t1 to t2 and t1 to t3
# With a single value column, pivot_wider names the new columns t1/t2/t3
FFMQ.gain=clinical.trial %>%
  select(id, group, time, FFMQ_total) %>%
  pivot_wider(names_from = time,values_from = c( FFMQ_total)) %>%
  mutate (FFMQ_total.t1_t2 =t2 - t1,
          FFMQ_total.t1_t3 =t3 - t1)
#distribution between group on gain score t1-t2
# NOTE(review): despite the name, this plot shows BOTH the t1-t2 and t1-t3
# gain scores (both are selected below)
gain.FFMQ.t1.t2=FFMQ.gain %>%
  select(group,FFMQ_total.t1_t2, FFMQ_total.t1_t3 ) %>%
  pivot_longer(-group) %>%
  ggplot(aes(x = group, y = value, color = group)) + facet_wrap(~ name, scales = "free", nrow = 4) +
  coord_flip ()+
  geom_boxplot() +
  geom_jitter(shape = 10, position = position_jitter(0.1)) +
  stat_summary(fun = median, geom ="pointrange",color = "black") +
  ylab("Value") +
  theme( axis.text.y = element_blank())
# Dropout analysis----
# Dropout analysis t2
# Wide data frame (t1 and t2 columns per outcome) plus a drop.out factor:
# a participant with no t2 pelvic.pain value is classified as a dropout.
drop.out.analysis <- clinical.trial %>%
  filter(time == "t1" | time == "t2") %>%
  select(id, time, group, pelvic.pain, pain.unpleasantness, dysuria, dyspareunia, dyschezia, dysmenorrhea,
         PSS_10_total, sf_36_physical.functioning, sf_36_limitations.physical.functioning, sf_36_pain,
         sf_36_general.health, sf_36_vitality, sf_36_social.function, sf_36_emotional.role, sf_36_mental.health) %>%
  pivot_wider(names_from = time, values_from = c(pelvic.pain, pain.unpleasantness, dysuria, dyspareunia, dyschezia, dysmenorrhea,
                                                 PSS_10_total, sf_36_physical.functioning, sf_36_limitations.physical.functioning, sf_36_pain,
                                                 sf_36_general.health, sf_36_vitality, sf_36_social.function, sf_36_emotional.role, sf_36_mental.health)) %>%
  mutate(drop.out = is.na(pelvic.pain_t2)) %>%
  mutate(drop.out = factor(drop.out, levels = c("FALSE", "TRUE"),
                           labels = c("no", "yes")))
# BUG FIX: View() was originally called before the object existed
View(drop.out.analysis)
# Dropout analysis t3
# Same construction, with missing t3 pelvic.pain marking dropout by t3.
drop.out.analysis.t3 <- clinical.trial %>%
  filter(time == "t1" | time == "t3") %>%
  select(id, time, group, pelvic.pain, pain.unpleasantness, dysuria, dyspareunia, dyschezia, dysmenorrhea,
         PSS_10_total, sf_36_physical.functioning, sf_36_limitations.physical.functioning, sf_36_pain,
         sf_36_general.health, sf_36_vitality, sf_36_social.function, sf_36_emotional.role, sf_36_mental.health) %>%
  pivot_wider(names_from = time, values_from = c(pelvic.pain, pain.unpleasantness, dysuria, dyspareunia, dyschezia, dysmenorrhea,
                                                 PSS_10_total, sf_36_physical.functioning, sf_36_limitations.physical.functioning, sf_36_pain,
                                                 sf_36_general.health, sf_36_vitality, sf_36_social.function, sf_36_emotional.role, sf_36_mental.health)) %>%
  mutate(drop.out = is.na(pelvic.pain_t3)) %>%
  mutate(drop.out = factor(drop.out, levels = c("FALSE", "TRUE"),
                           labels = c("no", "yes")))
# BUG FIX: View() was originally called before the object existed
View(drop.out.analysis.t3)
#Dropout analyses by variable using wilcoxon_t2
# Compare t1 pain scores between completers and t2 dropouts, one Wilcoxon
# test per variable.
# NOTE(review): only the six pain variables are tested here even though
# PSS and the SF-36 scales exist in drop.out.analysis — confirm intentional.
drop.out.test=drop.out.analysis %>%
  select(id,group,drop.out,pelvic.pain_t1,dysuria_t1,dyspareunia_t1,
         dyschezia_t1,dysmenorrhea_t1,pain.unpleasantness_t1) %>%
  melt(id.vars=c("id", "drop.out","group")) %>%
  group_by(variable) %>%
  wilcox_test(value~drop.out, detailed = TRUE) %>%
  adjust_pvalue() %>%
  add_significance("p") %>%
  mutate(estimate=round(estimate,3),p=round(p,3),conf.low=round(conf.low,3),
         conf.high=round(conf.high,3),p.adj=round(p.adj,3)) %>%
  select(variable,estimate,statistic,p,conf.low,conf.high,p.adj) %>%
  flextable() %>%
  autofit()
#Dropout analyses by condition using wilcoxon_t2
# Same tests stratified by treatment group (grouped by variable AND group)
drop.out.test.condition=drop.out.analysis %>%
  select(id,group,drop.out,pelvic.pain_t1,dysuria_t1,dyspareunia_t1,
         dyschezia_t1,dysmenorrhea_t1,pain.unpleasantness_t1) %>%
  melt(id.vars=c("id", "drop.out","group")) %>%
  group_by(variable,group) %>%
  wilcox_test(value~drop.out, detailed = TRUE) %>%
  adjust_pvalue() %>%
  add_significance("p") %>%
  mutate(estimate=round(estimate,3),p=round(p,3),conf.low=round(conf.low,3),
         conf.high=round(conf.high,3),p.adj=round(p.adj,3)) %>%
  select(variable,estimate,statistic,p,conf.low,conf.high,p.adj) %>%
  flextable() %>%
  autofit()
#logistic regression enter outcomes and condition as predictors of dropout----
# One logistic model per outcome: dropout-by-t2 ~ baseline outcome + group.
# Each model is printed via summary() and then overwritten with its
# tbl_regression() table (odds ratios via exponentiate = TRUE).
# NOTE(review): tbl_regression() is from gtsummary, which is only attached
# further down (library(gtsummary) in the table section) — confirm it is
# loaded before this section runs.
#t2----
pelvic.pain.drop.out.log=glm(drop.out~pelvic.pain_t1+group, data = drop.out.analysis, family = binomial)
summary(pelvic.pain.drop.out.log)
pelvic.pain.drop.out.log=tbl_regression(pelvic.pain.drop.out.log, exponentiate = TRUE)
unpleasantness.drop.out.log=glm(drop.out~pain.unpleasantness_t1+group, data = drop.out.analysis, family = binomial)
summary(unpleasantness.drop.out.log)
unpleasantness.drop.out.log=tbl_regression(unpleasantness.drop.out.log, exponentiate = TRUE)
dysuria.drop.out.log=glm(drop.out~dysuria_t1+group, data = drop.out.analysis, family = binomial)
summary(dysuria.drop.out.log)
dysuria.drop.out.log=tbl_regression(dysuria.drop.out.log, exponentiate = TRUE)
dyschezia.drop.out.log=glm(drop.out~dyschezia_t1+group, data = drop.out.analysis, family = binomial)
summary(dyschezia.drop.out.log)
dyschezia.drop.out.log=tbl_regression(dyschezia.drop.out.log, exponentiate = TRUE)
dyspareunia.drop.out.log=glm(drop.out~dyspareunia_t1+group, data = drop.out.analysis, family = binomial)
summary(dyspareunia.drop.out.log)
dyspareunia.drop.out.log=tbl_regression(dyspareunia.drop.out.log, exponentiate = TRUE)
dysmenorrhea.drop.out.log=glm(drop.out~dysmenorrhea_t1+group, data = drop.out.analysis, family = binomial)
summary(dysmenorrhea.drop.out.log)
dysmenorrhea.drop.out.log=tbl_regression(dysmenorrhea.drop.out.log, exponentiate = TRUE)
ps.drop.out.log=glm(drop.out~PSS_10_total_t1+group, data = drop.out.analysis, family = binomial)
summary(ps.drop.out.log)
ps.drop.out.log=tbl_regression(ps.drop.out.log, exponentiate = TRUE)
physical.drop.out.log=glm(drop.out~sf_36_physical.functioning_t1+group, data = drop.out.analysis, family = binomial)
summary(physical.drop.out.log)
physical.drop.out.log=tbl_regression(physical.drop.out.log, exponentiate = TRUE)
lim.physical.drop.out.log=glm(drop.out~sf_36_limitations.physical.functioning_t1+group, data = drop.out.analysis, family = binomial)
summary(lim.physical.drop.out.log)
lim.physical.drop.out.log=tbl_regression(lim.physical.drop.out.log, exponentiate = TRUE)
pain.drop.out.log=glm(drop.out~sf_36_pain_t1+group, data = drop.out.analysis, family = binomial)
summary(pain.drop.out.log)
pain.drop.out.log=tbl_regression(pain.drop.out.log, exponentiate = TRUE)
health.drop.out.log=glm(drop.out~sf_36_general.health_t1+group, data = drop.out.analysis, family = binomial)
summary(health.drop.out.log)
health.drop.out.log=tbl_regression(health.drop.out.log, exponentiate = TRUE)
vitality.drop.out.log=glm(drop.out~sf_36_vitality_t1+group, data = drop.out.analysis, family = binomial)
summary(vitality.drop.out.log)
vitality.drop.out.log=tbl_regression(vitality.drop.out.log, exponentiate = TRUE)
social.drop.out.log=glm(drop.out~sf_36_social.function_t1+group, data = drop.out.analysis, family = binomial)
summary(social.drop.out.log)
social.drop.out.log=tbl_regression(social.drop.out.log, exponentiate = TRUE)
emotional.drop.out.log=glm(drop.out~sf_36_emotional.role_t1+group, data = drop.out.analysis, family = binomial)
summary(emotional.drop.out.log)
emotional.drop.out.log=tbl_regression(emotional.drop.out.log, exponentiate = TRUE)
mental.drop.out.log=glm(drop.out~sf_36_mental.health_t1+group, data = drop.out.analysis, family = binomial)
summary(mental.drop.out.log)
mental.drop.out.log=tbl_regression(mental.drop.out.log, exponentiate = TRUE)
# t3----
# Parallel models for dropout by t3 (drop.out.analysis.t3); same pattern of
# glm -> summary -> tbl_regression per outcome.
pelvic.pain.drop.out.log3=glm(drop.out~pelvic.pain_t1+group, data = drop.out.analysis.t3, family = binomial)#pelvic pain predicted drop out
summary(pelvic.pain.drop.out.log3)
pelvic.pain.drop.out.log3=tbl_regression(pelvic.pain.drop.out.log3, exponentiate = TRUE)
unpleasantness.drop.out.log3=glm(drop.out~pain.unpleasantness_t1+group, data = drop.out.analysis.t3, family = binomial)
summary(unpleasantness.drop.out.log3)
unpleasantness.drop.out.log3=tbl_regression(unpleasantness.drop.out.log3, exponentiate = TRUE)
dysuria.drop.out.log3=glm(drop.out~dysuria_t1+group, data = drop.out.analysis.t3, family = binomial)
summary(dysuria.drop.out.log3)
dysuria.drop.out.log3=tbl_regression(dysuria.drop.out.log3, exponentiate = TRUE)
dyschezia.drop.out.log3=glm(drop.out~dyschezia_t1+group, data = drop.out.analysis.t3, family = binomial)
summary(dyschezia.drop.out.log3)
dyschezia.drop.out.log3=tbl_regression(dyschezia.drop.out.log3, exponentiate = TRUE)
dyspareunia.drop.out.log3=glm(drop.out~dyspareunia_t1+group, data = drop.out.analysis.t3, family = binomial)
summary(dyspareunia.drop.out.log3)
dyspareunia.drop.out.log3=tbl_regression(dyspareunia.drop.out.log3, exponentiate = TRUE)
dysmenorrhea.drop.out.log3=glm(drop.out~dysmenorrhea_t1+group, data = drop.out.analysis.t3, family = binomial)#dysmenorrhea predicted drop out
summary(dysmenorrhea.drop.out.log3)
dysmenorrhea.drop.out.log3=tbl_regression(dysmenorrhea.drop.out.log3, exponentiate = TRUE)
ps.drop.out.log3=glm(drop.out~PSS_10_total_t1+group, data = drop.out.analysis.t3, family = binomial)
summary(ps.drop.out.log3)
ps.drop.out.log3=tbl_regression(ps.drop.out.log3, exponentiate = TRUE)
physical.drop.out.log3=glm(drop.out~sf_36_physical.functioning_t1+group, data = drop.out.analysis.t3, family = binomial)
summary(physical.drop.out.log3)
physical.drop.out.log3=tbl_regression(physical.drop.out.log3, exponentiate = TRUE)
lim.physical.drop.out.log3=glm(drop.out~sf_36_limitations.physical.functioning_t1+group, data = drop.out.analysis.t3, family = binomial)
summary(lim.physical.drop.out.log3)
lim.physical.drop.out.log3=tbl_regression(lim.physical.drop.out.log3, exponentiate = TRUE)
pain.drop.out.log3=glm(drop.out~sf_36_pain_t1+group, data = drop.out.analysis.t3, family = binomial)
summary(pain.drop.out.log3)
pain.drop.out.log3=tbl_regression(pain.drop.out.log3, exponentiate = TRUE)
health.drop.out.log3=glm(drop.out~sf_36_general.health_t1+group, data = drop.out.analysis.t3, family = binomial)
summary(health.drop.out.log3)
health.drop.out.log3=tbl_regression(health.drop.out.log3, exponentiate = TRUE)
vitality.drop.out.log3=glm(drop.out~sf_36_vitality_t1+group, data = drop.out.analysis.t3, family = binomial)
summary(vitality.drop.out.log3)
vitality.drop.out.log3=tbl_regression(vitality.drop.out.log3, exponentiate = TRUE)
social.drop.out.log3=glm(drop.out~sf_36_social.function_t1+group, data = drop.out.analysis.t3, family = binomial)
summary(social.drop.out.log3)
social.drop.out.log3=tbl_regression(social.drop.out.log3, exponentiate = TRUE)
emotional.drop.out.log3=glm(drop.out~sf_36_emotional.role_t1+group, data = drop.out.analysis.t3, family = binomial)
summary(emotional.drop.out.log3)
emotional.drop.out.log3=tbl_regression(emotional.drop.out.log3, exponentiate = TRUE)
mental.drop.out.log3=glm(drop.out~sf_36_mental.health_t1+group, data = drop.out.analysis.t3, family = binomial)
summary(mental.drop.out.log3)
mental.drop.out.log3=tbl_regression(mental.drop.out.log3, exponentiate = TRUE)
#table----
library(gtsummary)
# Merge the t2 and t3 drop-out tables for one outcome side by side, under
# "Time 1" / "Time 2" column spanners.
merge.time.tbls <- function(tbl.t2, tbl.t3) {
  tbl_merge(tbls = list(tbl.t2, tbl.t3),
            tab_spanner = c("**Time 1**", "**Time 2**"))
}
tbl_p.pain <- merge.time.tbls(pelvic.pain.drop.out.log, pelvic.pain.drop.out.log3)
tbl_unpleasantness <- merge.time.tbls(unpleasantness.drop.out.log, unpleasantness.drop.out.log3)
tbl_dysuria <- merge.time.tbls(dysuria.drop.out.log, dysuria.drop.out.log3)
tbl_dyschezia <- merge.time.tbls(dyschezia.drop.out.log, dyschezia.drop.out.log3)
tbl_dyspareunia <- merge.time.tbls(dyspareunia.drop.out.log, dyspareunia.drop.out.log3)
tbl_dysmenorrhea <- merge.time.tbls(dysmenorrhea.drop.out.log, dysmenorrhea.drop.out.log3)
tbl_ps <- merge.time.tbls(ps.drop.out.log, ps.drop.out.log3)
tbl_physical <- merge.time.tbls(physical.drop.out.log, physical.drop.out.log3)
tbl_lim.physical <- merge.time.tbls(lim.physical.drop.out.log, lim.physical.drop.out.log3)
tbl_pain <- merge.time.tbls(pain.drop.out.log, pain.drop.out.log3)
tbl_health <- merge.time.tbls(health.drop.out.log, health.drop.out.log3)
tbl_vitality <- merge.time.tbls(vitality.drop.out.log, vitality.drop.out.log3)
tbl_social <- merge.time.tbls(social.drop.out.log, social.drop.out.log3)
tbl_emotional <- merge.time.tbls(emotional.drop.out.log, emotional.drop.out.log3)
tbl_mental <- merge.time.tbls(mental.drop.out.log, mental.drop.out.log3)
#Join tables----
# Stack all outcome tables into a single drop-out summary table.
tbl_stack(list(tbl_p.pain, tbl_unpleasantness, tbl_dysuria, tbl_dyschezia,
               tbl_dyspareunia, tbl_dysmenorrhea, tbl_ps, tbl_physical,
               tbl_lim.physical, tbl_pain, tbl_health, tbl_vitality,
               tbl_social, tbl_emotional, tbl_mental))
#Sociodemographic dropout----
# Attach the drop-out indicator from each follow-up to the sociodemographic
# data, column-wise (assumes the row order of the two frames matches).
#t2
drop.out.analysis.socio <- drop.out.analysis %>%
  select(drop.out)
sociodemographic.drop.out <- sociodemographic %>%
  bind_cols(drop.out.analysis.socio)
#t3
drop.out.analysis.t3.socio <- drop.out.analysis.t3 %>%
  select(drop.out)
sociodemographic.drop.out3 <- sociodemographic %>%
  bind_cols(drop.out.analysis.t3.socio)
#logistic regression
# Drop-out ~ one sociodemographic predictor + group, fitted separately for
# each predictor at each follow-up. Each glm object is overwritten by its
# gtsummary table (odds-ratio scale via exponentiate = TRUE), so the final
# objects are tables, not models.
#t2
age.drop.out.log=glm(drop.out~age+group, data = sociodemographic.drop.out, family = binomial)
summary(age.drop.out.log)
age.drop.out.log=tbl_regression(age.drop.out.log, exponentiate = TRUE)
education.drop.out.log=glm(drop.out~education+group, data = sociodemographic.drop.out, family = binomial)
summary(education.drop.out.log)
education.drop.out.log=tbl_regression(education.drop.out.log, exponentiate = TRUE)
marital.status.drop.out.log=glm(drop.out~marital.status+group, data = sociodemographic.drop.out, family = binomial)
summary(marital.status.drop.out.log)
marital.status.drop.out.log=tbl_regression(marital.status.drop.out.log, exponentiate = TRUE)
BMI.drop.out.log=glm(drop.out~BMI+group, data = sociodemographic.drop.out, family = binomial)
summary(BMI.drop.out.log)
BMI.drop.out.log=tbl_regression(BMI.drop.out.log, exponentiate = TRUE)
physical.exercise.drop.out.log=glm(drop.out~physical.exercise+group, data = sociodemographic.drop.out, family = binomial)
summary(physical.exercise.drop.out.log)
physical.exercise.drop.out.log=tbl_regression(physical.exercise.drop.out.log, exponentiate = TRUE)
current.endometriosis.medication.drop.out.log=glm(drop.out~current.endometriosis.medication+group, data = sociodemographic.drop.out, family = binomial)
summary(current.endometriosis.medication.drop.out.log)
current.endometriosis.medication.drop.out.log=tbl_regression(current.endometriosis.medication.drop.out.log, exponentiate = TRUE)
sleep.hours.per.night.drop.out.log=glm(drop.out~sleep.hours.per.night+group, data = sociodemographic.drop.out, family = binomial)
summary(sleep.hours.per.night.drop.out.log)
sleep.hours.per.night.drop.out.log=tbl_regression(sleep.hours.per.night.drop.out.log, exponentiate = TRUE)
time.chronic.pain.drop.out.log=glm(drop.out~time.chronic.pain+group, data = sociodemographic.drop.out, family = binomial)
summary(time.chronic.pain.drop.out.log)
time.chronic.pain.drop.out.log=tbl_regression(time.chronic.pain.drop.out.log, exponentiate = TRUE)
anxiety.drop.out.log=glm(drop.out~anxiety+group, data = sociodemographic.drop.out, family = binomial)
summary(anxiety.drop.out.log)
anxiety.drop.out.log=tbl_regression(anxiety.drop.out.log, exponentiate = TRUE)
depression.drop.out.log=glm(drop.out~depression+group, data = sociodemographic.drop.out, family = binomial)
summary(depression.drop.out.log)
depression.drop.out.log=tbl_regression(depression.drop.out.log, exponentiate = TRUE)
#t3
# Same predictors against the t3 drop-out indicator.
age.drop.out.log3=glm(drop.out~age+group, data = sociodemographic.drop.out3, family = binomial)#marginally significant
summary(age.drop.out.log3)
age.drop.out.log3=tbl_regression(age.drop.out.log3, exponentiate = TRUE)
education.drop.out.log3=glm(drop.out~education+group, data = sociodemographic.drop.out3, family = binomial)
summary(education.drop.out.log3)
education.drop.out.log3=tbl_regression(education.drop.out.log3, exponentiate = TRUE)
marital.status.drop.out.log3=glm(drop.out~marital.status+group, data = sociodemographic.drop.out3, family = binomial)
summary(marital.status.drop.out.log3)
marital.status.drop.out.log3=tbl_regression(marital.status.drop.out.log3, exponentiate = TRUE)
BMI.drop.out.log3=glm(drop.out~BMI+group, data = sociodemographic.drop.out3, family = binomial)
summary(BMI.drop.out.log3)
BMI.drop.out.log3=tbl_regression(BMI.drop.out.log3, exponentiate = TRUE)
physical.exercise.drop.out.log3=glm(drop.out~physical.exercise+group, data = sociodemographic.drop.out3, family = binomial)
summary(physical.exercise.drop.out.log3)
physical.exercise.drop.out.log3=tbl_regression(physical.exercise.drop.out.log3, exponentiate = TRUE)
current.endometriosis.medication.drop.out.log3=glm(drop.out~current.endometriosis.medication+group, data = sociodemographic.drop.out3, family = binomial)
summary(current.endometriosis.medication.drop.out.log3)
current.endometriosis.medication.drop.out.log3=tbl_regression(current.endometriosis.medication.drop.out.log3, exponentiate = TRUE)
sleep.hours.per.night.drop.out.log3=glm(drop.out~sleep.hours.per.night+group, data = sociodemographic.drop.out3, family = binomial)
summary(sleep.hours.per.night.drop.out.log3)
sleep.hours.per.night.drop.out.log3=tbl_regression(sleep.hours.per.night.drop.out.log3, exponentiate = TRUE)
time.chronic.pain.drop.out.log3=glm(drop.out~time.chronic.pain+group, data = sociodemographic.drop.out3, family = binomial)
summary(time.chronic.pain.drop.out.log3)
time.chronic.pain.drop.out.log3=tbl_regression(time.chronic.pain.drop.out.log3, exponentiate = TRUE)
anxiety.drop.out.log3=glm(drop.out~anxiety+group, data = sociodemographic.drop.out3, family = binomial)
summary(anxiety.drop.out.log3)
anxiety.drop.out.log3=tbl_regression(anxiety.drop.out.log3, exponentiate = TRUE)
depression.drop.out.log3=glm(drop.out~depression+group, data = sociodemographic.drop.out3, family = binomial)
summary(depression.drop.out.log3)
depression.drop.out.log3=tbl_regression(depression.drop.out.log3, exponentiate = TRUE)
#table----
# Merge the t2 and t3 sociodemographic drop-out tables per predictor.
# NOTE(review): current.endometriosis.medication models are fitted above but
# never merged/stacked here — confirm whether that omission is intentional.
merge.socio.tbls <- function(tbl.t2, tbl.t3) {
  tbl_merge(tbls = list(tbl.t2, tbl.t3),
            tab_spanner = c("**Time 1**", "**Time 2**"))
}
tbl_age <- merge.socio.tbls(age.drop.out.log, age.drop.out.log3)
tbl_education <- merge.socio.tbls(education.drop.out.log, education.drop.out.log3)
tbl_marital.status <- merge.socio.tbls(marital.status.drop.out.log, marital.status.drop.out.log3)
tbl_BMI <- merge.socio.tbls(BMI.drop.out.log, BMI.drop.out.log3)
tbl_physical.exercise <- merge.socio.tbls(physical.exercise.drop.out.log, physical.exercise.drop.out.log3)
tbl_sleep.hours.per.night <- merge.socio.tbls(sleep.hours.per.night.drop.out.log, sleep.hours.per.night.drop.out.log3)
tbl_time.chronic.pain <- merge.socio.tbls(time.chronic.pain.drop.out.log, time.chronic.pain.drop.out.log3)
tbl_anxiety <- merge.socio.tbls(anxiety.drop.out.log, anxiety.drop.out.log3)
tbl_depression <- merge.socio.tbls(depression.drop.out.log, depression.drop.out.log3)
#join tables----
tbl_stack(list(tbl_age, tbl_education, tbl_marital.status, tbl_BMI,
               tbl_physical.exercise, tbl_sleep.hours.per.night,
               tbl_time.chronic.pain, tbl_anxiety, tbl_depression))
#multiple imputation endometriosis-related pain variables----
# Wide layout (one column per pain variable x time point); the control dummy
# and the original group factor are dropped, so group_intervention is the
# single group indicator left in the frame.
clinical.trial.pain2 <- clinical.trial %>%
  select(id, group, time, pelvic.pain, dysuria, dyspareunia, dyschezia,
         dysmenorrhea, pain.unpleasantness) %>%
  pivot_wider(names_from = time,
              values_from = c(pelvic.pain, dysuria, dyspareunia, dyschezia,
                              dysmenorrhea, pain.unpleasantness)) %>%
  dummy_cols(select_columns = "group") %>%
  select(-group_control, -group)
#correlation between variables
# BUG FIX: the group column was already removed when clinical.trial.pain2 was
# built, so select(-group) raised a "column doesn't exist" error; drop the
# remaining dummy (group_intervention) instead, matching the SF-36 block.
triangular.cor.pain <- clinical.trial.pain2 %>%
  select(-id, -group_intervention) %>%
  lowerCor(use = "complete.obs", method = "spearman")
#plot missing pattern
md.pattern(clinical.trial.pain2)
md.pairs(clinical.trial.pain2)
pain_plot.missing.pain <- aggr(clinical.trial.pain2, col = c('navyblue', 'yellow'),
                               numbers = TRUE, sortVars = TRUE,
                               labels = names(clinical.trial.pain2), cex.axis = .7,
                               gap = 3, ylab = c("Missing data", "Pattern"))
#construct the predictor matrix setting: -2 to indicate the cluster variable, 1 imputation model with a fixed effect and a random intercept(default)
pain.p.matrix <- make.predictorMatrix(clinical.trial.pain2)
pain.p.matrix[, "group_intervention"] <- -2
imputed_pain2 <- mice(clinical.trial.pain2, m = 5, predictorMatrix = pain.p.matrix, seed = 125)
summary(imputed_pain2)
#multiple imputation perceived stress variable----
# Wide layout for pelvic pain, unpleasantness and PSS-10 by time point;
# control dummy and original group factor removed as in the pain block.
clinical.trial.stress <- clinical.trial %>%
  select(id, group, time, pelvic.pain, pain.unpleasantness, PSS_10_total) %>%
  pivot_wider(names_from = time,
              values_from = c(pelvic.pain, pain.unpleasantness, PSS_10_total)) %>%
  dummy_cols(select_columns = "group") %>%
  select(-group_control, -group)
names(clinical.trial.stress)
#correlation between variables
# BUG FIX: group no longer exists in clinical.trial.stress (dropped above),
# so select(-group) errored; exclude the group_intervention dummy instead,
# consistent with the SF-36 correlation block.
triangular.cor.pain.stress <- clinical.trial.stress %>%
  select(-id, -group_intervention) %>%
  lowerCor(use = "complete.obs", method = "spearman")
#plot missing pattern
md.pattern(clinical.trial.stress)
md.pairs(clinical.trial.stress)
stress_plot.missing.stress <- aggr(clinical.trial.stress, col = c('navyblue', 'yellow'),
                                   numbers = TRUE, sortVars = TRUE,
                                   labels = names(clinical.trial.stress), cex.axis = .7,
                                   gap = 3, ylab = c("Missing data", "Pattern"))
#construct the predictor matrix setting: -2 to indicate the cluster variable, 1 imputation model with a fixed effect and a random intercept(default)
stress.p.matrix <- make.predictorMatrix(clinical.trial.stress)
stress.p.matrix[, "group_intervention"] <- -2
imputed_stress <- mice(clinical.trial.stress, m = 5, predictorMatrix = stress.p.matrix, seed = 124)
summary(imputed_stress)
#multiple imputation sf_36----
# Wide layout (one column per SF-36 scale x time point); control dummy and
# original group factor dropped, group_intervention kept as the indicator.
clinical.trial.impute.sf.36 <- clinical.trial %>%
  select(id, group, time, sf_36_physical.functioning, sf_36_limitations.physical.functioning,
         sf_36_pain, sf_36_general.health, sf_36_vitality, sf_36_social.function,
         sf_36_emotional.role, sf_36_mental.health) %>%
  pivot_wider(names_from = time,
              values_from = c(sf_36_physical.functioning, sf_36_limitations.physical.functioning,
                              sf_36_pain, sf_36_general.health, sf_36_vitality, sf_36_social.function,
                              sf_36_emotional.role, sf_36_mental.health)) %>%
  dummy_cols(select_columns = "group") %>%
  select(-group_control, -group)
# Same wide layout but keeping the original group column (used for the
# Wilcoxon-test imputation at the end of this section).
clinical.trial.impute.sf.36.wcx <- clinical.trial %>%
  select(id, group, time, sf_36_physical.functioning, sf_36_limitations.physical.functioning,
         sf_36_pain, sf_36_general.health, sf_36_vitality, sf_36_social.function,
         sf_36_emotional.role, sf_36_mental.health) %>%
  pivot_wider(names_from = time,
              values_from = c(sf_36_physical.functioning, sf_36_limitations.physical.functioning,
                              sf_36_pain, sf_36_general.health, sf_36_vitality, sf_36_social.function,
                              sf_36_emotional.role, sf_36_mental.health))
#summary physical and mental health
clinical.trial.impute.sf.36.sum <- clinical.trial %>%
  select(id, group, time, sf.36.physical.sum, sf.36.mental.sum) %>%
  pivot_wider(names_from = time, values_from = c(sf.36.physical.sum, sf.36.mental.sum)) %>%
  dummy_cols(select_columns = "group") %>%
  select(-group_control, -group)
#correlation between variables
triangular.cor.sf.36 <- clinical.trial.impute.sf.36 %>%
  select(-id, -group_intervention) %>%
  lowerCor(use = "complete.obs", method = "spearman")
triangular.cor.sf.36.sum <- clinical.trial.impute.sf.36.sum %>%
  select(-id, -group_intervention) %>%
  lowerCor(use = "complete.obs", method = "spearman")
#plot missing pattern
md.pattern(clinical.trial.impute.sf.36)
md.pairs(clinical.trial.impute.sf.36)
md.pattern(clinical.trial.impute.sf.36.sum)
md.pairs(clinical.trial.impute.sf.36.sum)
plot.missing.sf.36 <- aggr(clinical.trial.impute.sf.36, col = c('navyblue', 'yellow'),
                           numbers = TRUE, sortVars = TRUE,
                           labels = names(clinical.trial.impute.sf.36), cex.axis = .7,
                           gap = 3, ylab = c("Missing data", "Pattern"))
plot.missing.sf.36.sum <- aggr(clinical.trial.impute.sf.36.sum, col = c('navyblue', 'yellow'),
                               numbers = TRUE, sortVars = TRUE,
                               labels = names(clinical.trial.impute.sf.36.sum), cex.axis = .7,
                               gap = 3, ylab = c("Missing data", "Pattern"))
#construct the predictor matrix setting: -2 to indicate the cluster variable, 1 imputation model with a fixed effect and a random intercept(default)
sf.36.p.matrix <- make.predictorMatrix(clinical.trial.impute.sf.36)
sf.36.p.matrix[, "group_intervention"] <- -2
imputed_sf.36 <- mice(clinical.trial.impute.sf.36, m = 5, predictorMatrix = sf.36.p.matrix, seed = 123)
summary(imputed_sf.36)
sf.36.p.matrix.sum <- make.predictorMatrix(clinical.trial.impute.sf.36.sum)
sf.36.p.matrix.sum[, "group_intervention"] <- -2
imputed_sf.36.sum <- mice(clinical.trial.impute.sf.36.sum, m = 5, predictorMatrix = sf.36.p.matrix.sum, seed = 128)
# BUG FIX: previously re-summarised imputed_sf.36; summarise the sum-score
# imputation that was just created.
summary(imputed_sf.36.sum)
#imputed to wilcox test
impute.sf.36.wcx <- mice(clinical.trial.impute.sf.36.wcx, seed = 167)
summary(impute.sf.36.wcx)
#Robust linear (MM-type estimators) raw scores, imputed data and Ordinary Least Squares without outliers models on endometriosis-related pain ----
# Evaluate the presence of near zero variance endometriosis related pain---
#convert data from long to wide by time and group
# One column per variable x time x group combination.
var.test.pain <- clinical.trial %>%
  select(id, group, time, pelvic.pain, dysuria, dyschezia, dyspareunia,
         dysmenorrhea, pain.unpleasantness, PSS_10_total) %>%
  pivot_wider(names_from = c(time, group),
              values_from = c(pelvic.pain, dysuria, dyschezia, dyspareunia,
                              dysmenorrhea, pain.unpleasantness, PSS_10_total))
nearzero.pain <- var.test.pain %>%
  nearZeroVar(saveMetrics = TRUE)
# Indices of zero-variance / near-zero-variance columns (expected: none).
which(nearzero.pain$zeroVar == 'TRUE')
which(nearzero.pain$nzv == 'TRUE')
#no near zero variable
#pelvic pain----
# Wide (one column per time point) pelvic pain scores per participant.
cor_pelvic.pain <- clinical.trial %>%
  select(id, group, time, pelvic.pain) %>%
  pivot_wider(names_from = time, values_from = pelvic.pain)
plot.pelvic.pain <- cor_pelvic.pain %>%
  select(group, t1, t2, t3)
library("GGally")
# FIX: install effectsize only when it is missing, instead of
# unconditionally re-installing on every run of the script.
if (!requireNamespace("effectsize", quietly = TRUE)) {
  install.packages("effectsize")
}
library(effectsize)
ggpairs(plot.pelvic.pain, ggplot2::aes(colour = group)) #evaluate correlation between group and time
#t2
#robust ANCOVA-style model: follow-up score ~ baseline + group
p.pain.lm.adj2 <- lmrob(t2 ~ t1 + group, data = cor_pelvic.pain)
summary(p.pain.lm.adj2)
effectsize(p.pain.lm.adj2)
par(mfrow = c(2, 2))
plot(p.pain.lm.adj2)
tidy(p.pain.lm.adj2, conf.int = TRUE)
#lm without outlier
#consider as outlier any observation that exceeds 3 standard deviations in the residuals vs leverage plot
#High influential points is considered 4/(n - p - 1) (Bruce, Peter, and Andrew Bruce. 2017. Practical Statistics for Data Scientists. O'Reilly Media.)
#p is the number of predictors. 4/(52-2-1)=0.08
# NOTE(review): row indices are hard-coded — re-check if the data changes.
cor_pelvic.pain2.out <- cor_pelvic.pain[-c(48, 49), ] #without outlier
p.pain.lm.adj2.out <- lm(t2 ~ t1 + group, data = cor_pelvic.pain2.out)
summary(p.pain.lm.adj2.out)
par(mfrow = c(2, 2))
plot(p.pain.lm.adj2.out)
plot(p.pain.lm.adj2.out, 4)
#t3
p.pain.lm.adj3 <- lmrob(t3 ~ t1 + group, data = cor_pelvic.pain)
summary(p.pain.lm.adj3)
par(mfrow = c(2, 2))
plot(p.pain.lm.adj3)
#cook's distance 4/(45-2-1)=0.09
cor_pelvic.pain3.out <- cor_pelvic.pain[-c(58, 54), ] #without outlier
p.pain.lm.adj3.out <- lm(t3 ~ t1 + group, data = cor_pelvic.pain3.out)
summary(p.pain.lm.adj3.out)
par(mfrow = c(2, 2))
plot(p.pain.lm.adj3.out, id.n = 5)
plot(p.pain.lm.adj3.out, 4, id.n = 5)
#pelvic pain imputed dataframe----
names(clinical.trial.imputed.pain)
#t2
# Robust model fitted in each of the 5 imputed datasets, then pooled.
p.pain.lm.imputed <- with(imputed_pain2, lmrob(pelvic.pain_t2 ~ pelvic.pain_t1 + group_intervention))
summary(p.pain.lm.imputed)
p.pain.lm.imputed.pool <- summary(pool(p.pain.lm.imputed), conf.int = TRUE)
tibble(p.pain.lm.imputed.pool)
#t3
p.pain.lm.imputed.t3 <- with(imputed_pain2, lmrob(pelvic.pain_t3 ~ pelvic.pain_t1 + group_intervention))
summary(p.pain.lm.imputed.t3)
p.pain.lm.imputed.pool.t3 <- summary(pool(p.pain.lm.imputed.t3), conf.int = TRUE)
tibble(p.pain.lm.imputed.pool.t3)
#dysuria----
# Dysuria: ANCOVA-style models (follow-up ~ baseline + group), robust
# (lmrob) and OLS-without-outliers versions, plus pooled models on the
# multiply imputed data.
cor_dysuria=clinical.trial %>%
select(id,group,time,dysuria) %>%
pivot_wider(names_from = time, values_from = dysuria)
plot.dysuria=cor_dysuria %>%
select(group,t1,t2,t3)
GGally::ggpairs(plot.dysuria, ggplot2::aes(colour=group)) #evaluate correlation between group and times
#robust
#t2
dysuria.lm.adj2=lmrob(t2~t1+group, data = cor_dysuria)
summary(dysuria.lm.adj2)
plot(dysuria.lm.adj2)
#lm without outlier
#consider as outlier any observation that exceeds 3 standard deviations in the residuals vs leverage plot
#High influential points is considered 4/(n - p - 1) (Bruce, Peter, and Andrew Bruce. 2017. Practical Statistics for Data Scientists. O'Reilly Media.)
#p is the number of predictors. 4/(53-2-1)=0.08
# NOTE(review): row indices are hard-coded — re-check if the data changes.
cor_dysuria2.out=cor_dysuria[-c(6,15,62,40),]#without outlier
dysuria.lm.adj2.out=lm(t2~t1+group, data = cor_dysuria2.out)
summary(dysuria.lm.adj2.out)
par(mfrow=c(2,2))
plot(dysuria.lm.adj2.out, id.n=5)
plot(dysuria.lm.adj2.out,4, id.n=5)
#t3
#robust
dysuria.lm.adj3=lmrob(t3~t1+group, data = cor_dysuria)
summary(dysuria.lm.adj3)
plot(dysuria.lm.adj3)
#lm
#cook's distance 4/(46-2-1)=0.09
cor_dysuria3.out=cor_dysuria[-c(15,40, 58,42),]#without outlier
dysuria.lm.adj3.out=lm(t3~t1+group, data = cor_dysuria3.out)
summary(dysuria.lm.adj3.out)
par(mfrow=c(2,2))
plot(dysuria.lm.adj3.out, id.n=5)
plot(dysuria.lm.adj3.out,4, id.n=6)
#dysuria imputed dataframe----
#t2
dysuria.lm.imputed=with(imputed_pain2,lmrob(dysuria_t2~dysuria_t1+group_intervention))
summary(dysuria.lm.imputed)
dysuria.lm.imputed.pool=summary(pool(dysuria.lm.imputed), conf.int = TRUE)
tibble(dysuria.lm.imputed.pool)
#t3
dysuria.lm.imputed.t3=with(imputed_pain2,lmrob(dysuria_t3~dysuria_t1+group_intervention))
summary(dysuria.lm.imputed.t3)
dysuria.lm.imputed.pool.t3=summary(pool(dysuria.lm.imputed.t3), conf.int = TRUE)# marginally significant
#dyspareunia----
# Dyspareunia: same modelling pattern as the other pain outcomes (robust,
# OLS-without-outliers, and pooled imputed-data models).
cor_dyspareunia=clinical.trial %>%
select(id,group,time,dyspareunia) %>%
pivot_wider(names_from = time, values_from = dyspareunia)
plot.dyspareunia=cor_dyspareunia %>%
select(group,t1,t2,t3)
GGally::ggpairs(plot.dyspareunia, ggplot2::aes(colour=group)) #evaluate correlation between group and times
#model adjusted for baseline
#t2
#robust
dyspareunia.lm.adj2=lmrob(t2~t1+group, data = cor_dyspareunia)
summary(dyspareunia.lm.adj2)
plot(dyspareunia.lm.adj2)
#lm without outlier
#consider as outlier any observation that exceeds 3 standard deviations in the residuals vs leverage plot
#High influential points is considered 4/(n - p - 1) (Bruce, Peter, and Andrew Bruce. 2017. Practical Statistics for Data Scientists. O'Reilly Media.)
#p is the number of predictors. 4/(54-2-1)=0.07
# NOTE(review): row indices are hard-coded — re-check if the data changes.
cor_dyspareunia2.out=cor_dyspareunia[-c(10,45),]#without outlier
dyspareunia.lm.adj2.out=lm(t2~t1+group, data = cor_dyspareunia2.out)
summary(dyspareunia.lm.adj2.out)
par(mfrow=c(2,2))
plot(dyspareunia.lm.adj2.out, id.n=5)
plot(dyspareunia.lm.adj2.out, 4)
#t3
#robust
dyspareunia.lm.adj3=lmrob(t3~t1+group, data = cor_dyspareunia)
summary(dyspareunia.lm.adj3)
plot(dyspareunia.lm.adj3)
#lm without outlier
#4/(46-2-1)=0.09
cor_dyspareunia3=cor_dyspareunia[-c(54,32,4),]#without outlier
dyspareunia.lm.adj3.out=lm(t3~t1+group, data = cor_dyspareunia3)
summary(dyspareunia.lm.adj3.out)
par(mfrow=c(2,2))
plot(dyspareunia.lm.adj3.out)
plot(dyspareunia.lm.adj3.out,4)
#dyspareunia imputed dataframe----
#t2
dyspareunia.lm.imputed=with(imputed_pain2,lmrob(dyspareunia_t2~dyspareunia_t1+group_intervention))
summary(dyspareunia.lm.imputed)
dyspareunia.lm.imputed.pool=summary(pool(dyspareunia.lm.imputed), conf.int = TRUE)
tibble(dyspareunia.lm.imputed.pool)
#t3
# NOTE(review): this t3 imputed model uses lm() while the other imputed
# models use lmrob() — confirm whether this is intentional (e.g. lmrob
# convergence failure) or a copy-paste slip.
dyspareunia.lm.imputed.t3=with(imputed_pain2,lm(dyspareunia_t3~dyspareunia_t1+group_intervention))
summary(dyspareunia.lm.imputed.t3)
dyspareunia.lm.imputed.pool.t3=summary(pool(dyspareunia.lm.imputed.t3), conf.int = TRUE)
#dyschezia----
# Dyschezia: same modelling pattern as the other pain outcomes.
cor_dyschezia=clinical.trial %>%
select(id,group,time,dyschezia) %>%
pivot_wider(names_from = time, values_from = dyschezia)
plot.dyschezia=cor_dyschezia %>%
select(group,t1,t2,t3)
GGally::ggpairs(plot.dyschezia, ggplot2::aes(colour=group)) #evaluate correlation between group and times
#model adjusted for baseline
#t2
#robust
dyschezia.lm.adj2=lmrob(t2~t1+group, data = cor_dyschezia)
summary(dyschezia.lm.adj2)
par(mfrow=c(2,2))
plot(dyschezia.lm.adj2)
#lm without outlier
#consider as outlier any observation that exceeds 3 standard deviations in the residuals vs leverage plot
#High influential points is considered 4/(n - p - 1) (Bruce, Peter, and Andrew Bruce. 2017. Practical Statistics for Data Scientists. O'Reilly Media.)
#p is the number of predictors. 4/(56-2-1)=0.07
# NOTE(review): row indices are hard-coded — re-check if the data changes.
cor_dyschezia2=cor_dyschezia[-c(40,32,47),]#without outlier
dyschezia.lm.adj.out=lm(t2~t1+group, data = cor_dyschezia2)
summary(dyschezia.lm.adj.out)
par(mfrow=c(2,2))
plot(dyschezia.lm.adj.out, id.n=5)
plot(dyschezia.lm.adj.out, 4)
#t3
#robust
dyschezia.lm.adj3=lmrob(t3~t1+group, data = cor_dyschezia)
summary(dyschezia.lm.adj3)
plot(dyschezia.lm.adj3)
#lm without outlier 4/(56-2-1)=0.07
cor_dyschezia3=cor_dyschezia[-c(8,13,32,40,44),]#without outlier
dyschezia.lm.adj3.out=lm(t3~t1+group, data = cor_dyschezia3)
summary(dyschezia.lm.adj3.out)
par(mfrow=c(2,2))
plot(dyschezia.lm.adj3.out, id.n=5)
plot(dyschezia.lm.adj3.out,4)
#dyschezia imputed dataframe----
#t2
dyschezia.lm.imputed=with(imputed_pain2,lmrob(dyschezia_t2~dyschezia_t1+group_intervention))
summary(dyschezia.lm.imputed)
dyschezia.lm.imputed.pool=summary(pool(dyschezia.lm.imputed), conf.int = TRUE)
tibble(dyschezia.lm.imputed.pool)
#t3
# NOTE(review): lm() here while the other imputed models use lmrob() —
# confirm whether intentional.
dyschezia.lm.imputed.t3=with(imputed_pain2,lm(dyschezia_t3~dyschezia_t1+group_intervention))
summary(dyschezia.lm.imputed.t3)
dyschezia.lm.imputed.pool.t3=summary(pool(dyschezia.lm.imputed.t3), conf.int = TRUE)
#dysmenorrhea----
# Dysmenorrhea: same modelling pattern as the other pain outcomes.
cor_dysmenorrhea=clinical.trial %>%
select(id,group,time,dysmenorrhea) %>%
pivot_wider(names_from = time, values_from = dysmenorrhea)
plot.dysmenorrhea=cor_dysmenorrhea %>%
select(group,t1,t2,t3)
GGally::ggpairs(plot.dysmenorrhea, ggplot2::aes(colour=group)) #evaluate correlation between group and times
#model adjusted for baseline
#t2
#robust
dysmenorrhea.lm.adj2=lmrob(t2~t1+group, data = cor_dysmenorrhea)
summary(dysmenorrhea.lm.adj2)
plot(dysmenorrhea.lm.adj2)
#lm without outlier
#consider as outlier any observation that exceeds 3 standard deviations in the residuals vs leverage plot
#High influential points is considered 4/(n - p - 1) (Bruce, Peter, and Andrew Bruce. 2017. Practical Statistics for Data Scientists. O'Reilly Media.)
#p is the number of predictors. 4/(56-2-1)=0.07
# NOTE(review): row indices are hard-coded — re-check if the data changes.
cor_dysmenorrhea2=cor_dysmenorrhea[-c(52,62,36),]#without outlier
dysmenorrhea.lm.adj.out=lm(t2~t1+group, data = cor_dysmenorrhea2)
summary(dysmenorrhea.lm.adj.out)
par(mfrow=c(2,2))
plot(dysmenorrhea.lm.adj.out, id.n=5)
plot(dysmenorrhea.lm.adj.out,4)
#t3
#robust
dysmenorrhea.lm.adj3=lmrob(t3~t1+group, data = cor_dysmenorrhea)
summary(dysmenorrhea.lm.adj3)
plot(dysmenorrhea.lm.adj3)
#lm without outlier 4/(46-2-1)=0.09
cor_dysmenorrhea3=cor_dysmenorrhea[-c(36,50,63,15),]#without outlier
dysmenorrhea.lm.adj3.out=lm(t3~t1+group, data = cor_dysmenorrhea3)
summary(dysmenorrhea.lm.adj3.out)
par(mfrow=c(2,2))
plot(dysmenorrhea.lm.adj3.out, id.n=5)
plot(dysmenorrhea.lm.adj3.out, 4)
#dysmenorrhea imputed dataframe----
#t2
dysmenorrhea.lm.imputed=with(imputed_pain2,lmrob(dysmenorrhea_t2~dysmenorrhea_t1+group_intervention))
summary(dysmenorrhea.lm.imputed)
dysmenorrhea.lm.imputed.pool=summary(pool(dysmenorrhea.lm.imputed), conf.int = TRUE)
tibble(dysmenorrhea.lm.imputed.pool)
#t3
dysmenorrhea.lm.imputed.t3=with(imputed_pain2,lmrob(dysmenorrhea_t3~dysmenorrhea_t1+group_intervention))
summary(dysmenorrhea.lm.imputed.t3)
dysmenorrhea.lm.imputed.pool.t3=summary(pool(dysmenorrhea.lm.imputed.t3), conf.int = TRUE)
#pain.unpleasantness----
# Pain unpleasantness: same modelling pattern as the other pain outcomes.
cor_pain.unpleasantness=clinical.trial %>%
select(id,group,time,pain.unpleasantness) %>%
pivot_wider(names_from = time, values_from = pain.unpleasantness)
plot.pain.unpleasantness=cor_pain.unpleasantness %>%
select(group,t1,t2,t3)
GGally::ggpairs(plot.pain.unpleasantness, ggplot2::aes(colour=group)) #evaluate correlation between group and times
#model adjusted for baseline
#t2
pain.unpleasantness.lm.adj2=lmrob(t2~t1+group, data = cor_pain.unpleasantness)
summary(pain.unpleasantness.lm.adj2)
par(mfrow=c(2,2))
plot(pain.unpleasantness.lm.adj2)
#lm without outlier
# cook's distance > 0.07
# NOTE(review): row indices are hard-coded — re-check if the data changes.
cor_pain.unpleasantness2=cor_pain.unpleasantness[-c(8,39,43,50),]#without outlier
pain.unpleasantness.lm.adj.out=lm(t2~t1+group, data = cor_pain.unpleasantness2)
summary(pain.unpleasantness.lm.adj.out)
par(mfrow=c(2,2))
plot(pain.unpleasantness.lm.adj.out, id.n=5)
plot(pain.unpleasantness.lm.adj.out,4)
#t3
#robust
pain.unpleasantness.lm.adj3=lmrob(t3~t1+group, data = cor_pain.unpleasantness)
summary(pain.unpleasantness.lm.adj3)
plot(pain.unpleasantness.lm.adj3)
#lm without outlier
# cook's distance > 0.09
cor_pain.unpleasantness3=cor_pain.unpleasantness[-c(50,32),]#without outlier
pain.unpleasantness.lm.adj3.out=lm(t3~t1+group, data = cor_pain.unpleasantness3)
summary(pain.unpleasantness.lm.adj3.out)
par(mfrow=c(2,2))
plot(pain.unpleasantness.lm.adj3.out,id.n=5)
plot(pain.unpleasantness.lm.adj3.out,4)
#pain.unpleasantness imputed dataframe----
#t2
pain.unpleasantness.lm.imputed=with(imputed_pain2,lmrob(pain.unpleasantness_t2~pain.unpleasantness_t1+group_intervention))
summary(pain.unpleasantness.lm.imputed)
pain.unpleasantness.lm.imputed.pool=summary(pool(pain.unpleasantness.lm.imputed), conf.int = TRUE)
tibble(pain.unpleasantness.lm.imputed.pool)
#t3
pain.unpleasantness.lm.imputed.t3=with(imputed_pain2,lmrob(pain.unpleasantness_t3~pain.unpleasantness_t1+group_intervention))
summary(pain.unpleasantness.lm.imputed.t3)
pain.unpleasantness.lm.imputed.pool.t3=summary(pool(pain.unpleasantness.lm.imputed.t3), conf.int = TRUE)
#perceived stress----
# Perceived stress (PSS-10): same modelling pattern as the pain outcomes,
# but pooled models use the stress-specific imputation (imputed_stress).
cor_stress=clinical.trial %>%
select(id,group,time,PSS_10_total) %>%
pivot_wider(names_from = time, values_from = PSS_10_total)
plot.stress=cor_stress %>%
select(group,t1,t2,t3)
GGally::ggpairs(plot.stress, ggplot2::aes(colour=group)) #evaluate correlation between group and times
#model adjusted for baseline
#t2
stress.lm.adj2=lmrob(t2~t1+group, data = cor_stress)
summary(stress.lm.adj2)
plot(stress.lm.adj2)
#lm without outlier
# cook's distance > 0.07
# NOTE(review): row indices are hard-coded — re-check if the data changes.
cor_stress2=cor_stress[-c(39),]#without outlier
stress.lm.adj.out=lm(t2~t1+group, data = cor_stress2)
summary(stress.lm.adj.out)
par(mfrow=c(2,2))
plot(stress.lm.adj.out, id.n=5)
plot(stress.lm.adj.out, 4)
#t3
#robust
stress.lm.adj3=lmrob(t3~t1+group, data = cor_stress)
summary(stress.lm.adj3)
plot(stress.lm.adj3)
#lm without outlier
# cook's distance > 0.09
cor_stress3=cor_stress[-c(6,26),]#without outlier
stress.lm.adj3.out=lm(t3~t1+group, data = cor_stress3)
summary(stress.lm.adj3.out)
par(mfrow=c(2,2))
plot(stress.lm.adj3.out, id.n=5)
plot(stress.lm.adj3.out,4)
#stress imputed dataframe----
#t2
stress.lm.imputed=with(imputed_stress,lmrob(PSS_10_total_t2~PSS_10_total_t1+group_intervention))
summary(stress.lm.imputed)
stress.lm.imputed.pool=summary(pool(stress.lm.imputed), conf.int = TRUE)
tibble(stress.lm.imputed.pool)
#t3
stress.lm.imputed3=with(imputed_stress,lmrob(PSS_10_total_t3~PSS_10_total_t1+group_intervention))
summary(stress.lm.imputed3)
stress.lm.imputed.pool3=summary(pool(stress.lm.imputed3), conf.int = TRUE)
tibble(stress.lm.imputed.pool3)
#Table endometriosis-related pain models----
#lm robust mm-estimator method----
# Each robust model object is replaced by its tidied group-effect row only,
# with the adjusted R^2 attached as a column.
# NOTE(review): the r2.adj constants below are manual transcriptions of the
# summary() outputs above — they will silently go stale if the data or
# models change; re-check them after any re-run.
#t2
p.pain.lm.adj2=p.pain.lm.adj2 %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj=0.276)
dysuria.lm.adj2=dysuria.lm.adj2 %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj=0.707)
dyspareunia.lm.adj2=dyspareunia.lm.adj2 %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj=0.451)
dyschezia.lm.adj2=dyschezia.lm.adj2 %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj=0.618)
dysmenorrhea.lm.adj2=dysmenorrhea.lm.adj2 %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj=0.633)
pain.unpleasantness.lm.adj2=pain.unpleasantness.lm.adj2 %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj=0.474)
#t3
p.pain.lm.adj3=p.pain.lm.adj3 %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj=0.208)
dysuria.lm.adj3=dysuria.lm.adj3 %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj=0.493)
dyspareunia.lm.adj3=dyspareunia.lm.adj3 %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj=0.422)
dyschezia.lm.adj3=dyschezia.lm.adj3 %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj=0.512)
dysmenorrhea.lm.adj3=dysmenorrhea.lm.adj3 %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj=0.364)
pain.unpleasantness.lm.adj3=pain.unpleasantness.lm.adj3 %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj=0.278)
#lm OLS method without outliers----
# Same tidying as the robust models: keep the group-effect row and attach
# the hand-transcribed adjusted R^2 of each outlier-free OLS model.
# NOTE(review): r2.adj constants are manual transcriptions — re-check them
# after any re-run of the models above.
#t2
p.pain.lm.adj2.out=p.pain.lm.adj2.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj=0.341)
dysuria.lm.adj2.out=dysuria.lm.adj2.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj=0.629)
dyspareunia.lm.adj2.out=dyspareunia.lm.adj2.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj=0.364)
dyschezia.lm.adj.out=dyschezia.lm.adj.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj=0.657)
dysmenorrhea.lm.adj.out=dysmenorrhea.lm.adj.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj=0.328)
pain.unpleasantness.lm.adj.out=pain.unpleasantness.lm.adj.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj=0.538)
#t3
p.pain.lm.adj3.out=p.pain.lm.adj3.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj=0.236 )
dysuria.lm.adj3.out=dysuria.lm.adj3.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj=0.492 )
dyspareunia.lm.adj3.out=dyspareunia.lm.adj3.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj=0.461)
dyschezia.lm.adj3.out=dyschezia.lm.adj3.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj=0.651)
dysmenorrhea.lm.adj3.out=dysmenorrhea.lm.adj3.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj=0.460)
pain.unpleasantness.lm.adj3.out=pain.unpleasantness.lm.adj3.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj=0.403)
#lm using multiple imputed dataset----
# Keep only the intervention-effect row of each pooled model summary.
#t2
p.pain.lm.imputed.pool <- filter(p.pain.lm.imputed.pool, term == "group_intervention")
dysuria.lm.imputed.pool <- filter(dysuria.lm.imputed.pool, term == "group_intervention")
dyspareunia.lm.imputed.pool <- filter(dyspareunia.lm.imputed.pool, term == "group_intervention")
dyschezia.lm.imputed.pool <- filter(dyschezia.lm.imputed.pool, term == "group_intervention")
dysmenorrhea.lm.imputed.pool <- filter(dysmenorrhea.lm.imputed.pool, term == "group_intervention")
pain.unpleasantness.lm.imputed.pool <- filter(pain.unpleasantness.lm.imputed.pool, term == "group_intervention")
#t3
p.pain.lm.imputed.pool.t3 <- filter(p.pain.lm.imputed.pool.t3, term == "group_intervention")
dysuria.lm.imputed.pool.t3 <- filter(dysuria.lm.imputed.pool.t3, term == "group_intervention")
dyspareunia.lm.imputed.pool.t3 <- filter(dyspareunia.lm.imputed.pool.t3, term == "group_intervention")
dyschezia.lm.imputed.pool.t3 <- filter(dyschezia.lm.imputed.pool.t3, term == "group_intervention")
dysmenorrhea.lm.imputed.pool.t3 <- filter(dysmenorrhea.lm.imputed.pool.t3, term == "group_intervention")
pain.unpleasantness.lm.imputed.pool.t3 <- filter(pain.unpleasantness.lm.imputed.pool.t3, term == "group_intervention")
#table lm.rob----
# NOTE(review): this histogram of FFMQ_total looks unrelated to the table
# built below — confirm it belongs in this section.
hist(clinical.trial$FFMQ_total)
# Flextable of the robust MM-estimator models for every endometriosis-
# related pain outcome (t2 rows first, then t3): values rounded to 3
# decimals, first column relabelled with readable outcome names.
endo.related.pain.lm.rob <- p.pain.lm.adj2 %>%
  bind_rows(dysuria.lm.adj2, dyspareunia.lm.adj2,
            dyschezia.lm.adj2, dysmenorrhea.lm.adj2,
            pain.unpleasantness.lm.adj2, p.pain.lm.adj3,
            dysuria.lm.adj3, dyspareunia.lm.adj3, dyschezia.lm.adj3,
            dysmenorrhea.lm.adj3, pain.unpleasantness.lm.adj3) %>%
  mutate(estimate = round(estimate, 3), std.error = round(std.error, 3),
         statistic = round(statistic, 3), p.value = round(p.value, 3),
         conf.low = round(conf.low, 3), conf.high = round(conf.high, 3)) %>%
  # FIX: the adjusted-R^2 column created upstream is named "r2.adj"; the
  # previous "R2.adj." entry was silently dropped by any_of(), so the
  # column never moved in front of p.value as intended.
  relocate(any_of(c("term", "estimate", "std.error", "statistic",
                    "conf.low", "conf.high", "r2.adj", "p.value"))) %>%
  rename(variable = term) %>%
  flextable() %>%
  compose(i = c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), j = 1,
          value = as_paragraph(c("Pelvic pain t2", "Dysuria t2",
                                 "Dyspareunia t2", "Dyschezia t2",
                                 "Dysmenorrhea t2", "Pain unpleasantness t2",
                                 "Pelvic pain t3", "Dysuria t3",
                                 "Dyspareunia t3", "Dyschezia t3",
                                 "Dysmenorrhea t3", "Pain unpleasantness t3"))) %>%
  autofit()
#table lm.ols----
# Flextable of the OLS-without-outliers models for every endometriosis-
# related pain outcome (t2 rows first, then t3).
endo.related.pain.lm.ols <- p.pain.lm.adj2.out %>%
  bind_rows(dysuria.lm.adj2.out, dyspareunia.lm.adj2.out,
            dyschezia.lm.adj.out, dysmenorrhea.lm.adj.out, pain.unpleasantness.lm.adj.out,
            p.pain.lm.adj3.out, dysuria.lm.adj3.out, dyspareunia.lm.adj3.out,
            dyschezia.lm.adj3.out, dysmenorrhea.lm.adj3.out, pain.unpleasantness.lm.adj3.out) %>%
  mutate(estimate = round(estimate, 3), std.error = round(std.error, 3),
         statistic = round(statistic, 3), p.value = round(p.value, 3),
         conf.low = round(conf.low, 3), conf.high = round(conf.high, 3)) %>%
  # FIX: column is named "r2.adj"; the old "R2.adj." entry was silently
  # ignored by any_of(), so the adjusted R^2 was never repositioned.
  relocate(any_of(c("term", "estimate", "std.error", "statistic",
                    "conf.low", "conf.high", "r2.adj", "p.value"))) %>%
  rename(variable = term) %>%
  flextable() %>%
  compose(i = c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), j = 1,
          value = as_paragraph(c("Pelvic pain t2", "Dysuria t2",
                                 "Dyspareunia t2", "Dyschezia t2",
                                 "Dysmenorrhea t2", "Pain unpleasantness t2",
                                 "Pelvic pain t3", "Dysuria t3",
                                 "Dyspareunia t3", "Dyschezia t3",
                                 "Dysmenorrhea t3", "Pain unpleasantness t3"))) %>%
  autofit()
#table lm.imputaded.rob----
# Flextable of the pooled multiply-imputed models (t2 rows first, then
# t3). The pooled summaries expose the CI as `2.5 %` / `97.5 %`; those are
# copied to conf.low / conf.high and dropped, together with df, before
# display.
endo.related.pain.lm.imputed.rob <- p.pain.lm.imputed.pool %>%
  bind_rows(dysuria.lm.imputed.pool,
            dyspareunia.lm.imputed.pool, dyschezia.lm.imputed.pool,
            dysmenorrhea.lm.imputed.pool, pain.unpleasantness.lm.imputed.pool,
            p.pain.lm.imputed.pool.t3, dysuria.lm.imputed.pool.t3,
            dyspareunia.lm.imputed.pool.t3, dyschezia.lm.imputed.pool.t3,
            dysmenorrhea.lm.imputed.pool.t3, pain.unpleasantness.lm.imputed.pool.t3) %>%
  mutate(estimate = round(estimate, 3), std.error = round(std.error, 3),
         statistic = round(statistic, 3), conf.low = round(`2.5 %`, 3),
         conf.high = round(`97.5 %`, 3),
         df = round(df, 3), p.value = round(p.value, 3)) %>%
  select(-df, -`2.5 %`, -`97.5 %`) %>%
  # NOTE(review): the pooled data carry no adjusted-R^2 column, so the
  # "R2.adj." entry below is a no-op under any_of().
  relocate(any_of(c("term", "estimate", "std.error", "statistic",
                    "conf.low", "conf.high", "p.value", "R2.adj."))) %>%
  rename(variable = term) %>%
  flextable() %>%
  compose(i = c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), j = 1,
          value = as_paragraph(c("Pelvic pain t2", "Dysuria t2",
                                 "Dyspareunia t2", "Dyschezia t2",
                                 "Dysmenorrhea t2", "Pain unpleasantness t2",
                                 "Pelvic pain t3", "Dysuria t3",
                                 "Dyspareunia t3", "Dyschezia t3",
                                 "Dysmenorrhea t3", "Pain unpleasantness t3"))) %>%
  autofit()
# Print the effect-size object for inspection.
effect.size
#Table perceived stress models----
#lm robust mm-estimator method----
# Tidy each perceived-stress model, keep the intervention-group term, and
# attach the model's adjusted R^2.
#t2
stress.lm.adj2 <- stress.lm.adj2 %>%
  tidy(conf.int = TRUE) %>%
  filter(term == "groupintervention") %>%
  mutate(r2.adj = 0.271)
#t3
stress.lm.adj3 <- stress.lm.adj3 %>%
  tidy(conf.int = TRUE) %>%
  filter(term == "groupintervention") %>%
  mutate(r2.adj = 0.338)
#lm OLS method without outliers----
#t2
stress.lm.adj.out <- stress.lm.adj.out %>%
  tidy(conf.int = TRUE) %>%
  filter(term == "groupintervention") %>%
  mutate(r2.adj = 0.243)
#t3
stress.lm.adj3.out <- stress.lm.adj3.out %>%
  tidy(conf.int = TRUE) %>%
  filter(term == "groupintervention") %>%
  mutate(r2.adj = 0.306)
#lm using multiple imputed dataset----
# Pooled summaries: keep only the intervention-group row.
#t2
stress.lm.imputed.pool <- stress.lm.imputed.pool %>%
  filter(term == "group_intervention")
#t3
stress.lm.imputed.pool3 <- stress.lm.imputed.pool3 %>%
  filter(term == "group_intervention")
#table lm.rob----
# Flextable of the robust MM-estimator models for perceived stress (t2, t3).
stress.lm.rob <- stress.lm.adj2 %>%
  bind_rows(stress.lm.adj3) %>%
  mutate(estimate = round(estimate, 3), std.error = round(std.error, 3),
         statistic = round(statistic, 3), p.value = round(p.value, 3),
         conf.low = round(conf.low, 3), conf.high = round(conf.high, 3)) %>%
  # FIX: column is named "r2.adj"; the old "R2.adj." entry was silently
  # ignored by any_of(), so the adjusted R^2 was never repositioned.
  relocate(any_of(c("term", "estimate", "std.error", "statistic",
                    "conf.low", "conf.high", "r2.adj", "p.value"))) %>%
  rename(variable = term) %>%
  flextable() %>%
  # FIX: corrected the displayed label typo "Perceived strress t2".
  compose(i = c(1, 2), j = 1,
          value = as_paragraph(c("Perceived stress t2", "Perceived stress t3"))) %>%
  autofit()
#table lm.OLS----
# Flextable of the OLS-without-outliers models for perceived stress (t2, t3).
stress.lm.ols <- stress.lm.adj.out %>%
  bind_rows(stress.lm.adj3.out) %>%
  mutate(estimate = round(estimate, 3), std.error = round(std.error, 3),
         statistic = round(statistic, 3), p.value = round(p.value, 3),
         conf.low = round(conf.low, 3), conf.high = round(conf.high, 3)) %>%
  # FIX: column is named "r2.adj"; the old "R2.adj." entry was silently
  # ignored by any_of(), so the adjusted R^2 was never repositioned.
  relocate(any_of(c("term", "estimate", "std.error", "statistic",
                    "conf.low", "conf.high", "r2.adj", "p.value"))) %>%
  rename(variable = term) %>%
  flextable() %>%
  # FIX: corrected the displayed label typo "Perceived strress t2".
  compose(i = c(1, 2), j = 1,
          value = as_paragraph(c("Perceived stress t2", "Perceived stress t3"))) %>%
  autofit()
#table lm.imputaded.rob----
# Flextable of the pooled multiply-imputed perceived-stress models (t2, t3).
# The pooled summaries expose the CI as `2.5 %` / `97.5 %`.
stress.lm.imputed.rob <- stress.lm.imputed.pool %>%
  bind_rows(stress.lm.imputed.pool3) %>%
  mutate(estimate = round(estimate, 3), std.error = round(std.error, 3),
         statistic = round(statistic, 3), conf.low = round(`2.5 %`, 3),
         conf.high = round(`97.5 %`, 3),
         df = round(df, 3), p.value = round(p.value, 3)) %>%
  select(-df, -`2.5 %`, -`97.5 %`) %>%
  # NOTE(review): pooled data carry no adjusted-R^2 column, so the
  # "R2.adj." entry is a no-op under any_of().
  relocate(any_of(c("term", "estimate", "std.error", "statistic",
                    "conf.low", "conf.high", "p.value", "R2.adj."))) %>%
  rename(variable = term) %>%
  flextable() %>%
  # FIX: corrected the displayed label typo "Perceived strress t2".
  compose(i = c(1, 2), j = 1,
          value = as_paragraph(c("Perceived stress t2", "Perceived stress t3"))) %>%
  autofit()
#sf_36 subscales with low variance Wilcox test----
# Between-group Wilcoxon rank-sum tests on the change scores of the four
# SF-36 subscales with (near-)zero variance, with p-value adjustment.
#subscales
sf.36.wcx.t2 <- sf.36.gain %>%
  select(id, group, sf_36_limitations.physical.t1_t2,
         sf_36_pain.t1_t2, sf_36_general.health.t1_t2,
         sf_36_emotional.role.t1_t2) %>%
  pivot_longer(-c("id", "group")) %>%
  group_by(name) %>%
  wilcox_test(value ~ group, detailed = TRUE) %>%
  adjust_pvalue()
sf.36.wcx.t3 <- sf.36.gain %>%
  select(id, group,
         sf_36_limitations.physical.t1_t3, sf_36_pain.t1_t3,
         sf_36_general.health.t1_t3,
         sf_36_emotional.role.t1_t3) %>%
  pivot_longer(-c("id", "group")) %>%
  group_by(name) %>%
  wilcox_test(value ~ group, detailed = TRUE) %>%
  adjust_pvalue()
# table wilcox sf-36-----
# Combine t2 and t3 results into a single rounded flextable.
sf.36.wcx <- sf.36.wcx.t2 %>%
  bind_rows(sf.36.wcx.t3) %>%
  select(name, estimate, statistic, p, conf.low, conf.high, p.adj) %>%
  mutate(estimate = round(estimate, 3), statistic = round(statistic, 3),
         p = round(p, 3), p.adj = round(p.adj, 3),
         conf.low = round(conf.low, 3), conf.high = round(conf.high, 3)) %>%
  relocate(any_of(c("name", "estimate", "statistic", "conf.low",
                    "conf.high", "p", "p.adj"))) %>%
  flextable() %>%
  autofit()
View(sf.36.wcx)
#imputed
# Same Wilcoxon analysis repeated on the multiply-imputed change scores.
sf.36.wcx.t2.imputed <- sf.36.gain.imputed %>%
  select(id, group, sf_36_limitations.physical.t1_t2,
         sf_36_pain.t1_t2, sf_36_general.health.t1_t2,
         sf_36_emotional.role.t1_t2) %>%
  pivot_longer(-c("id", "group")) %>%
  group_by(name) %>%
  wilcox_test(value ~ group, detailed = TRUE) %>%
  adjust_pvalue()
sf.36.wcx.t3.imputed <- sf.36.gain.imputed %>%
  select(id, group,
         sf_36_limitations.physical.t1_t3, sf_36_pain.t1_t3,
         sf_36_general.health.t1_t3,
         sf_36_emotional.role.t1_t3) %>%
  pivot_longer(-c("id", "group")) %>%
  group_by(name) %>%
  wilcox_test(value ~ group, detailed = TRUE) %>%
  adjust_pvalue()
# table wilcox sf-36 imputed-----
sf.36.wcx.imputed <- sf.36.wcx.t2.imputed %>%
  bind_rows(sf.36.wcx.t3.imputed) %>%
  select(name, estimate, statistic, p, conf.low, conf.high, p.adj) %>%
  mutate(estimate = round(estimate, 3), statistic = round(statistic, 3),
         p = round(p, 3), p.adj = round(p.adj, 3),
         conf.low = round(conf.low, 3), conf.high = round(conf.high, 3)) %>%
  relocate(any_of(c("name", "estimate", "statistic", "conf.low",
                    "conf.high", "p", "p.adj"))) %>%
  flextable() %>%
  autofit()
# compare trimmed means of the sf_36 subscales with low variance using two sample Yuen's test between group pain change score----
library("WRS2")
# For each low-variance subscale change score: run a bootstrapped Yuen
# trimmed-means test, flatten the result list into a one-row data frame,
# and keep the statistic, CI, p-value and trimmed-mean difference.
#sf_36_limitations.physical.t1_t2
robust.lim.phys <- yuenbt(sf_36_limitations.physical.t1_t2 ~ group, data = sf.36.gain)
robust.lim.phys <- data.frame(t(sapply(robust.lim.phys, c)))
robust.lim.phys <- robust.lim.phys %>%
  select(test, conf.int, p.value, diff)
#sf_36_pain.t1_t2
robust.pain <- yuenbt(sf_36_pain.t1_t2 ~ group, data = sf.36.gain)
robust.pain <- data.frame(t(sapply(robust.pain, c)))
robust.pain <- robust.pain %>%
  select(test, conf.int, p.value, diff)
#sf_36_general.health.t1_t2
robust.general.health <- yuenbt(sf_36_general.health.t1_t2 ~ group, data = sf.36.gain)
robust.general.health <- data.frame(t(sapply(robust.general.health, c)))
robust.general.health <- robust.general.health %>%
  select(test, conf.int, p.value, diff)
#sf_36_emotional.role.t1_t2
robust.emotional.role <- yuenbt(sf_36_emotional.role.t1_t2 ~ group, data = sf.36.gain)
robust.emotional.role <- data.frame(t(sapply(robust.emotional.role, c)))
robust.emotional.role <- robust.emotional.role %>%
  select(test, conf.int, p.value, diff)
#sf_36_limitations.physical.t1_t3
robust.lim.phys3 <- yuenbt(sf_36_limitations.physical.t1_t3 ~ group, data = sf.36.gain)
robust.lim.phys3 <- data.frame(t(sapply(robust.lim.phys3, c)))
robust.lim.phys3 <- robust.lim.phys3 %>%
  select(test, conf.int, p.value, diff)
#sf_36_pain.t1_t3
robust.pain3 <- yuenbt(sf_36_pain.t1_t3 ~ group, data = sf.36.gain)
robust.pain3 <- data.frame(t(sapply(robust.pain3, c)))
robust.pain3 <- robust.pain3 %>%
  select(test, conf.int, p.value, diff)
#sf_36_general.health.t1_t3
robust.general.health3 <- yuenbt(sf_36_general.health.t1_t3 ~ group, data = sf.36.gain)
robust.general.health3 <- data.frame(t(sapply(robust.general.health3, c)))
robust.general.health3 <- robust.general.health3 %>%
  select(test, conf.int, p.value, diff)
#sf_36_emotional.role.t1_t3
robust.emotional.role3 <- yuenbt(sf_36_emotional.role.t1_t3 ~ group, data = sf.36.gain)
robust.emotional.role3 <- data.frame(t(sapply(robust.emotional.role3, c)))
robust.emotional.role3 <- robust.emotional.role3 %>%
  select(test, conf.int, p.value, diff)
#table Yuen's test----
#t2
sf.36.Yuen.test <- robust.lim.phys %>%
  bind_rows(robust.pain, robust.general.health, robust.emotional.role) %>%
  add_column(variable = c("SF-36.limitation.physical.functioning", "SF-36.pain", "SF-36.general.health", "SF-36.emotional.role")) %>%
  relocate(any_of(c("variable", "diff", "test", "conf.int", "p.value"))) %>%
  flextable() %>%
  autofit()
#t3
sf.36.Yuen.test3 <- robust.lim.phys3 %>%
  bind_rows(robust.pain3, robust.general.health3, robust.emotional.role3) %>%
  add_column(variable = c("SF-36.limitation.physical.functioning", "SF-36.pain", "SF-36.general.health", "SF-36.emotional.role")) %>%
  relocate(any_of(c("variable", "diff", "test", "conf.int", "p.value"))) %>%
  flextable() %>%
  autofit()
# Robust linear (MM-type estimators) raw scores, imputed data and Ordinary Least Squares without outliers models on sf 36 raw score----
# Evaluate the presence of sf 36 near zero variance----
# Spread each subscale across time-by-group combinations, then screen
# every resulting column for zero / near-zero variance (caret's
# nearZeroVar with saveMetrics = TRUE returns one row per column).
var.test.sf36 <- clinical.trial %>%
  select(id, group, time, sf.36.physical.sum, sf.36.mental.sum,
         sf_36_physical.functioning, sf_36_limitations.physical.functioning,
         sf_36_pain, sf_36_general.health, sf_36_vitality, sf_36_social.function,
         sf_36_emotional.role, sf_36_mental.health) %>%
  pivot_wider(names_from = time|group,
              values_from = c(sf_36_physical.functioning, sf_36_limitations.physical.functioning,
                              sf_36_pain, sf_36_general.health, sf_36_vitality, sf_36_social.function,
                              sf_36_emotional.role, sf_36_mental.health))
nearzero.sf36 <- var.test.sf36 %>%
  nearZeroVar(saveMetrics = TRUE)
which(nearzero.sf36$zeroVar == 'TRUE') # columns with zero variance
which(nearzero.sf36$nzv == 'TRUE') # columns with near-zero variance
# sf.36.physical.sum----
# Reshape the physical-component summary score to one column per time point.
cor_physical.sum <- clinical.trial %>%
  select(id, group, time, sf.36.physical.sum) %>%
  pivot_wider(names_from = time, values_from = sf.36.physical.sum)
plot.physical.sum <- cor_physical.sum %>%
  select(group, t1, t2, t3)
ggpairs(plot.physical.sum, ggplot2::aes(colour = group)) # evaluate correlation between group and time
#model adjusted for baseline
#t2
#robust
physical.sum.lm.rob2 <- lmrob(t2 ~ t1 + group, data = cor_physical.sum)
summary(physical.sum.lm.rob2)
par(mfrow = c(2, 2))
plot(physical.sum.lm.rob2)
#lm without outlier
# Outliers: observations beyond 3 SD in the residual-vs-leverage plot.
# High-influence points: Cook's distance > 4/(n - p - 1) (Bruce & Bruce
# 2017, Practical Statistics for Data Scientists); p = 2 predictors,
# 4/(52 - 2 - 1) = 0.08.
cor_physical.sum.out <- cor_physical.sum[-c(55, 24), ]
physical.sum.lm2 <- lm(t2 ~ t1 + group, data = cor_physical.sum.out)
summary(physical.sum.lm2)
par(mfrow = c(2, 2))
plot(physical.sum.lm2, id.n = 5)
plot(physical.sum.lm2, 4)
#imputed data
physical.sum.lm2.imputed <- with(imputed_sf.36.sum, lmrob(sf.36.physical.sum_t2 ~ sf.36.physical.sum_t1 + group_intervention))
summary(physical.sum.lm2.imputed)
physical.sum.lm2.imputed.pool <- summary(pool(physical.sum.lm2.imputed), conf.int = TRUE)
tibble(physical.sum.lm2.imputed.pool)
#t3
#robust
physical.sum.lm.adj3 <- lmrob(t3 ~ t1 + group, data = cor_physical.sum)
summary(physical.sum.lm.adj3)
par(mfrow = c(2, 2))
plot(physical.sum.lm.adj3)
#lm without outlier
# Same outlier / Cook's-distance rule; p = 2, 4/(45 - 2 - 1) = 0.09.
cor_physical.sum.out3 <- cor_physical.sum[-c(55, 24), ]
physical.sum.lm3 <- lm(t3 ~ t1 + group, data = cor_physical.sum.out3)
summary(physical.sum.lm3)
par(mfrow = c(2, 2))
plot(physical.sum.lm3, id.n = 5)
plot(physical.sum.lm3, 4)
#imputed data
physical.sum.lm3.imputed <- with(imputed_sf.36.sum, lmrob(sf.36.physical.sum_t3 ~ sf.36.physical.sum_t1 + group_intervention))
summary(physical.sum.lm3.imputed)
physical.sum.lm3.imputed.pool <- summary(pool(physical.sum.lm3.imputed), conf.int = TRUE)
tibble(physical.sum.lm3.imputed.pool)
# sf.36.mental.sum----
# Reshape the mental-component summary score to one column per time point.
cor_mental.sum <- clinical.trial %>%
  select(id, group, time, sf.36.mental.sum) %>%
  pivot_wider(names_from = time, values_from = sf.36.mental.sum)
plot.mental.sum <- cor_mental.sum %>%
  select(group, t1, t2, t3)
ggpairs(plot.mental.sum, ggplot2::aes(colour = group)) # evaluate correlation between group and time
#model adjusted for baseline
#t2
#robust
# Rows 60, 33, 47, 59 and 26 removed (Cook's-distance rule below); the
# same cleaned data set feeds both the robust and the OLS fit.
cor_mental.sum.out2 <- cor_mental.sum[-c(60, 33, 47, 59, 26), ]
mental.sum.lm.rob2 <- lmrob(t2 ~ t1 + group, data = cor_mental.sum.out2) # poor lm model fit, robust model fit affected by outliers
summary(mental.sum.lm.rob2)
par(mfrow = c(2, 2))
plot(mental.sum.lm.rob2, id.n = 5)
plot(mental.sum.lm.rob2, 3)
#lm without outlier
# Outliers: observations beyond 3 SD in the residual-vs-leverage plot.
# High-influence points: Cook's distance > 4/(n - p - 1) (Bruce & Bruce
# 2017); p = 2 predictors, 4/(52 - 2 - 1) = 0.08.
# FIX: removed a verbatim duplicate reassignment of cor_mental.sum.out2
# that repeated the identical subsetting already performed above.
mental.sum.lm2 <- lm(t2 ~ t1 + group, data = cor_mental.sum.out2) # OLS without outliers better fitted than robust lm
summary(mental.sum.lm2)
par(mfrow = c(2, 2))
plot(mental.sum.lm2, id.n = 5)
plot(mental.sum.lm2, 4)
#imputed
mental.sum.lm2.imputed <- with(imputed_sf.36.sum, lm(sf.36.mental.sum_t2 ~ sf.36.mental.sum_t1 + group_intervention))
summary(mental.sum.lm2.imputed)
mental.sum.lm2.imputed.pool <- summary(pool(mental.sum.lm2.imputed), conf.int = TRUE)
tibble(mental.sum.lm2.imputed.pool)
#t3
# Robust baseline-adjusted model on the full data.
mental.sum.lm.adj3 <- lmrob(t3 ~ t1 + group, data = cor_mental.sum)
summary(mental.sum.lm.adj3)
par(mfrow = c(2, 2))
plot(mental.sum.lm.adj3, id.n = 5)
plot(mental.sum.lm.adj3, 4)
#lm without outlier
# Outliers: observations beyond 3 SD in the residual-vs-leverage plot.
# High-influence points: Cook's distance > 4/(n - p - 1) (Bruce & Bruce
# 2017); p = 2 predictors, 4/(45 - 2 - 1) = 0.09.
cor_mental.sum.out3 <- cor_mental.sum[-c(26), ]
mental.sum.lm3 <- lm(t3 ~ t1 + group, data = cor_mental.sum.out3)
summary(mental.sum.lm3)
par(mfrow = c(2, 2))
plot(mental.sum.lm3, id.n = 5)
plot(mental.sum.lm3, 4)
#imputed
mental.sum.lm3.imputed <- with(imputed_sf.36.sum, lmrob(sf.36.mental.sum_t3 ~ sf.36.mental.sum_t1 + group_intervention))
summary(mental.sum.lm3.imputed)
mental.sum.lm3.imputed.pool <- summary(pool(mental.sum.lm3.imputed), conf.int = TRUE)
tibble(mental.sum.lm3.imputed.pool)
# sf_36_physical functioning----
cor_phys.functioning <- clinical.trial %>%
  select(id, group, time, sf_36_physical.functioning) %>%
  pivot_wider(names_from = time, values_from = sf_36_physical.functioning)
plot.phys.functioning <- cor_phys.functioning %>%
  select(group, t1, t2, t3)
ggpairs(plot.phys.functioning, ggplot2::aes(colour = group)) # evaluate correlation between group and time
#model adjusted for baseline
#t2
#robust
phys.functioning.lm.adj2 <- lmrob(t2 ~ t1 + group, data = cor_phys.functioning)
summary(phys.functioning.lm.adj2)
par(mfrow = c(2, 2))
plot(phys.functioning.lm.adj2)
#lm without outlier
# Outliers: observations beyond 3 SD in the residual-vs-leverage plot.
# High-influence points: Cook's distance > 4/(n - p - 1) (Bruce & Bruce
# 2017); p = 2 predictors, 4/(52 - 2 - 1) = 0.08.
cor_phys.functioning.out <- cor_phys.functioning[-c(10), ]
phys.functioning.lm.adj2.out <- lm(t2 ~ t1 + group, data = cor_phys.functioning.out)
summary(phys.functioning.lm.adj2.out)
par(mfrow = c(2, 2))
plot(phys.functioning.lm.adj2.out, id.n = 5)
plot(phys.functioning.lm.adj2.out, 4)
#imputed data
phys.functioning.lm2.imputed <- with(imputed_sf.36, lmrob(sf_36_physical.functioning_t2 ~ sf_36_physical.functioning_t1 + group_intervention))
summary(phys.functioning.lm2.imputed)
phys.functioning.lm2.imputed.pool <- summary(pool(phys.functioning.lm2.imputed), conf.int = TRUE)
tibble(phys.functioning.lm2.imputed.pool)
#t3
#robust
phys.functioning.lm.adj3 <- lmrob(t3 ~ t1 + group, data = cor_phys.functioning)
summary(phys.functioning.lm.adj3)
par(mfrow = c(2, 2))
plot(phys.functioning.lm.adj3)
#lm without outlier
# Outliers: observations beyond 3 SD in the residual-vs-leverage plot.
# High-influence points: Cook's distance > 4/(n - p - 1) (Bruce & Bruce
# 2017); p = 2 predictors, 4/(45 - 2 - 1) = 0.09.
phys.functioning.out3 <- cor_phys.functioning[-c(10, 31, 30), ]
phys.functioning.lm.adj3.out <- lm(t3 ~ t1 + group, data = phys.functioning.out3)
summary(phys.functioning.lm.adj3.out)
par(mfrow = c(2, 2))
plot(phys.functioning.lm.adj3.out, id.n = 5)
plot(phys.functioning.lm.adj3.out, 4)
#imputed data
phys.functioning.lm3.imputed <- with(imputed_sf.36, lmrob(sf_36_physical.functioning_t3 ~ sf_36_physical.functioning_t1 + group_intervention))
summary(phys.functioning.lm3.imputed)
phys.functioning.lm3.imputed.pool <- summary(pool(phys.functioning.lm3.imputed), conf.int = TRUE)
# FIX: previously printed "phys.functioning.lm.imputed.pool", an object
# that is never created; the t3 pooled summary is named ...lm3.imputed.pool.
tibble(phys.functioning.lm3.imputed.pool)
# sf_36_limitations physical functioning----
cor_lim.phys.functioning <- clinical.trial %>%
  select(id, group, time, sf_36_limitations.physical.functioning) %>%
  pivot_wider(names_from = time, values_from = sf_36_limitations.physical.functioning)
plot.lim.phys.functioning <- cor_lim.phys.functioning %>%
  select(group, t1, t2, t3)
ggpairs(plot.lim.phys.functioning, ggplot2::aes(colour = group)) # evaluate correlation between group and time
#model adjusted for baseline
#t2
#robust
lim.phys.functioning.lm.rob2 <- lmrob(t2 ~ t1 + group, data = cor_lim.phys.functioning) # Very poor fit because of little variance in
#intervention group t1 and no linear relation
summary(lim.phys.functioning.lm.rob2)
par(mfrow = c(2, 2))
plot(lim.phys.functioning.lm.rob2)
#lm without outlier
# Outliers: observations beyond 3 SD in the residual-vs-leverage plot.
# High-influence points: Cook's distance > 4/(n - p - 1) (Bruce & Bruce
# 2017); p = 2 predictors, 4/(52 - 2 - 1) = 0.08.
lim.phys.functioning.lm.adj2 <- lm(t2 ~ t1 + group, data = cor_lim.phys.functioning)
summary(lim.phys.functioning.lm.adj2)
par(mfrow = c(2, 2))
# FIX: the diagnostics previously plotted phys.functioning.lm.adj2 (the
# model from the previous section) instead of the model fitted just above.
plot(lim.phys.functioning.lm.adj2, id.n = 5)
plot(lim.phys.functioning.lm.adj2, 4)
#t3
lim.phys.functioning.lm.adj3 <- lmrob(t3 ~ t1 + group, data = cor_lim.phys.functioning) # Very poor fit because of little variance in
#intervention group t1
summary(lim.phys.functioning.lm.adj3)
par(mfrow = c(2, 2))
plot(lim.phys.functioning.lm.adj3)
# sf_36_pain----
cor_sf36_pain <- clinical.trial %>%
  select(id, group, time, sf_36_pain) %>%
  pivot_wider(names_from = time, values_from = sf_36_pain)
plot.sf36_pain <- cor_sf36_pain %>%
  select(group, t1, t2, t3)
ggpairs(plot.sf36_pain, ggplot2::aes(colour = group)) # evaluate correlation between group and time
#model adjusted for baseline
#t2
#robust
sf36_pain.lm.rob2 <- lm(t2 ~ t1 + group, data = cor_sf36_pain)
summary(sf36_pain.lm.rob2)
#lm without outlier
# Outliers: observations beyond 3 SD in the residual-vs-leverage plot.
# High-influence points: Cook's distance > 4/(n - p - 1) (Bruce & Bruce
# 2017); p = 2 predictors, 4/(52 - 2 - 1) = 0.08.
cor_sf36_pain.out <- cor_sf36_pain[-55, ]
sf36_pain.lm.adj2 <- lm(t2 ~ t1 + group, data = cor_sf36_pain.out)
summary(sf36_pain.lm.adj2)
par(mfrow = c(2, 2))
plot(sf36_pain.lm.adj2, id.n = 5)
plot(sf36_pain.lm.adj2, 4)
#imputed
pain.lm2.imputed <- with(imputed_sf.36, lmrob(sf_36_pain_t2 ~ sf_36_pain_t1 + group_intervention))
# FIX: previously summarised phys.functioning.lm2.imputed (copy-paste from
# the physical-functioning section) instead of the pain model fitted above.
summary(pain.lm2.imputed)
pain.lm2.imputed.pool <- summary(pool(pain.lm2.imputed))
tibble(pain.lm2.imputed.pool)
#t3
#robust
sf36_pain.lm.rob3.out <- lm(t3 ~ t1 + group, data = cor_sf36_pain) #relation does not fitted by robust
summary(sf36_pain.lm.rob3.out)
#lm without outlier
# Outliers: observations beyond 3 SD in the residual-vs-leverage plot.
# High-influence points: Cook's distance > 4/(n - p - 1) (Bruce & Bruce
# 2017); p = 2 predictors, 4/(45 - 2 - 1) = 0.09.
cor_sf36_pain.out <- cor_sf36_pain[-55, ]
sf36_pain.lm.adj3.out <- lm(t3 ~ t1 + group, data = cor_sf36_pain.out) #relation does not fitted by OLS lm
summary(sf36_pain.lm.adj3.out)
par(mfrow = c(2, 2))
plot(sf36_pain.lm.adj3.out, id.n = 5)
# FIX: previously plotted undefined object "sf36_pain.lm.adj3"; the model
# fitted above is sf36_pain.lm.adj3.out.
plot(sf36_pain.lm.adj3.out, 4)
#imputed
pain.lm3.imputed <- with(imputed_sf.36, lm(sf_36_pain_t3 ~ sf_36_pain_t1 + group_intervention))
# FIX: previously summarised phys.functioning.lm3.imputed instead of the
# pain model fitted above.
summary(pain.lm3.imputed)
pain.lm3.imputed.pool <- summary(pool(pain.lm3.imputed))
tibble(pain.lm3.imputed.pool)
# sf_36_general.health----
cor_general.health <- clinical.trial %>%
  select(id, group, time, sf_36_general.health) %>%
  pivot_wider(names_from = time, values_from = sf_36_general.health)
plot.general.health <- cor_general.health %>%
  select(group, t1, t2, t3)
ggpairs(plot.general.health, ggplot2::aes(colour = group)) # evaluate correlation between group and time
#model adjusted for baseline
#t2
#robust
general.health.lm.rob2 <- lmrob(t2 ~ t1 + group, data = cor_general.health) # very poor lm fit
summary(general.health.lm.rob2)
par(mfrow = c(2, 2))
plot(general.health.lm.rob2)
#lm without outlier
# Outliers: observations beyond 3 SD in the residual-vs-leverage plot.
# High-influence points: Cook's distance > 4/(n - p - 1) (Bruce & Bruce
# 2017); p = 2 predictors, 4/(52 - 2 - 1) = 0.08.
cor_general.health.out <- cor_general.health[-c(4, 40, 48, 54), ]
general.health.lm.adj2 <- lm(t2 ~ t1 + group, data = cor_general.health.out) # very poor lm fit
summary(general.health.lm.adj2)
par(mfrow = c(2, 2))
plot(general.health.lm.adj2, id.n = 5)
plot(general.health.lm.adj2, 4)
#imputed
general.health.lm2.imputed <- with(imputed_sf.36, lmrob(sf_36_general.health_t2 ~ sf_36_general.health_t1 + group_intervention))
summary(general.health.lm2.imputed)
general.health.lm2.imputed.pool <- summary(pool(general.health.lm2.imputed))
tibble(general.health.lm2.imputed.pool)
#t3
#Robust
general.health.lm.adj3 <- lmrob(t3 ~ t1 + group, data = cor_general.health)
summary(general.health.lm.adj3)
par(mfrow = c(2, 2))
plot(general.health.lm.adj3)
#lm without outlier
# Same outlier / Cook's-distance rule; p = 2, 4/(45 - 2 - 1) = 0.09.
cor_general.health.out3 <- cor_general.health[-c(26, 22), ]
general.health.out3 <- lm(t3 ~ t1 + group, data = cor_general.health.out3)
summary(general.health.out3)
par(mfrow = c(2, 2))
plot(general.health.out3, id.n = 5)
plot(general.health.out3, 4)
#imputed
general.health.lm3.imputed <- with(imputed_sf.36, lmrob(sf_36_general.health_t3 ~ sf_36_general.health_t1 + group_intervention))
summary(general.health.lm3.imputed)
general.health.lm3.imputed.pool <- summary(pool(general.health.lm3.imputed))
tibble(general.health.lm3.imputed.pool)
# sf_36_vitality----
cor_vitality <- clinical.trial %>%
  select(id, group, time, sf_36_vitality) %>%
  pivot_wider(names_from = time, values_from = sf_36_vitality)
plot.vitality <- cor_vitality %>%
  select(group, t1, t2, t3)
ggpairs(plot.vitality, ggplot2::aes(colour = group)) # evaluate correlation between group and time
#model adjusted for baseline
#t2
#robust
# Rows 59 and 21 removed before the robust fit (same rows as the OLS fit
# below, listed in a different order).
cor_vitality.rob.out <- cor_vitality[-c(59, 21), ]
vitality.lm.rob2 <- lmrob(t2 ~ t1 + group, data = cor_vitality.rob.out)
summary(vitality.lm.rob2)
par(mfrow = c(2, 2))
plot(vitality.lm.rob2)
tidy(vitality.lm.rob2, conf.int = TRUE)
#lm without outlier
# Outliers: observations beyond 3 SD in the residual-vs-leverage plot.
# High-influence points: Cook's distance > 4/(n - p - 1) (Bruce & Bruce
# 2017); p = 2 predictors, 4/(52 - 2 - 1) = 0.08.
vitality.out <- cor_vitality[-c(21, 59), ]
vitality.lm.adj2.out <- lm(t2 ~ t1 + group, data = vitality.out)
summary(vitality.lm.adj2.out)
par(mfrow = c(2, 2))
plot(vitality.lm.adj2.out, id.n = 5)
plot(vitality.lm.adj2.out, 4)
#imputed
vitality.lm2.imputed <- with(imputed_sf.36, lmrob(sf_36_vitality_t2 ~ sf_36_vitality_t1 + group_intervention))
summary(vitality.lm2.imputed)
vitality.lm2.imputed.pool <- summary(pool(vitality.lm2.imputed), conf.int = TRUE)
tibble(vitality.lm2.imputed.pool)
#t3
vitality.lm.rob3 <- lmrob(t3 ~ t1 + group, data = cor_vitality)
summary(vitality.lm.rob3)
par(mfrow = c(2, 2))
plot(vitality.lm.rob3)
#lm without outlier
# Same outlier / Cook's-distance rule; p = 2, 4/(45 - 2 - 1) = 0.09.
cor_vitality.out <- cor_vitality[-c(59), ]
vitality.lm.adj3.out <- lm(t3 ~ t1 + group, data = cor_vitality.out)
summary(vitality.lm.adj3.out)
par(mfrow = c(2, 2))
plot(vitality.lm.adj3.out, id.n = 5)
plot(vitality.lm.adj3.out, 4)
#imputed
vitality.lm3.imputed <- with(imputed_sf.36, lmrob(sf_36_vitality_t3 ~ sf_36_vitality_t1 + group_intervention))
summary(vitality.lm3.imputed)
vitality.lm3.imputed.pool <- summary(pool(vitality.lm3.imputed), conf.int = TRUE)
tibble(vitality.lm3.imputed.pool)
# sf_36_social.function----
cor_social.function <- clinical.trial %>%
  select(id, group, time, sf_36_social.function) %>%
  pivot_wider(names_from = time, values_from = sf_36_social.function)
plot.social.function <- cor_social.function %>%
  select(group, t1, t2, t3)
ggpairs(plot.social.function, ggplot2::aes(colour = group)) # evaluate correlation between group and time
#model adjusted for baseline
#t2
#robust
social.function.lm.rob2 <- lmrob(t2 ~ t1 + group, data = cor_social.function)
summary(social.function.lm.rob2)
par(mfrow = c(2, 2))
plot(social.function.lm.rob2)
#lm without outlier
# Outliers: observations beyond 3 SD in the residual-vs-leverage plot.
# High-influence points: Cook's distance > 4/(n - p - 1) (Bruce & Bruce
# 2017); p = 2 predictors, 4/(52 - 2 - 1) = 0.08.
cor_social.function.out <- cor_social.function[-c(26, 15, 34), ]
social.function.lm.adj2.out <- lm(t2 ~ t1 + group, data = cor_social.function.out)
summary(social.function.lm.adj2.out)
par(mfrow = c(2, 2))
plot(social.function.lm.adj2.out, id.n = 5)
plot(social.function.lm.adj2.out, 4)
#imputed
social.function.lm2.imputed <- with(imputed_sf.36, lmrob(sf_36_social.function_t2 ~ sf_36_social.function_t1 + group_intervention))
summary(social.function.lm2.imputed)
social.function.lm2.imputed.pool <- summary(pool(social.function.lm2.imputed), conf.int = TRUE)
tibble(social.function.lm2.imputed.pool)
#t3
#robust
social.function.lm.rob3 <- lmrob(t3 ~ t1 + group, data = cor_social.function)
summary(social.function.lm.rob3)
par(mfrow = c(2, 2))
plot(social.function.lm.rob3)
#lm without outlier
# Same outlier / Cook's-distance rule; p = 2, 4/(45 - 2 - 1) = 0.09.
cor_social.function.out3 <- cor_social.function[-c(26, 34, 55), ]
social.function.lm.rob3.out <- lm(t3 ~ t1 + group, data = cor_social.function.out3)
summary(social.function.lm.rob3.out)
par(mfrow = c(2, 2))
plot(social.function.lm.rob3.out, id.n = 5)
plot(social.function.lm.rob3.out, 4)
#imputed
social.function.lm3.imputed <- with(imputed_sf.36, lmrob(sf_36_social.function_t3 ~ sf_36_social.function_t1 + group_intervention))
summary(social.function.lm3.imputed)
social.function.lm3.imputed.pool <- summary(pool(social.function.lm3.imputed), conf.int = TRUE)
tibble(social.function.lm3.imputed.pool)
#sf_36_emotional.role----
cor_emotional.role <- clinical.trial %>%
  select(id, group, time, sf_36_emotional.role) %>%
  pivot_wider(names_from = time, values_from = sf_36_emotional.role)
plot.emotional.role <- cor_emotional.role %>%
  select(group, t1, t2, t3)
ggpairs(plot.emotional.role, ggplot2::aes(colour = group)) # evaluate correlation between group and time
# Baseline in the intervention arm has very low variance and no linear
# relation, so a baseline-adjusted lm is inappropriate for this subscale;
# the Wilcoxon test is used instead. Group-only models kept for reference.
#t2
#robust
emotional.role.rob2 <- lmrob(t2 ~ group, data = cor_emotional.role)
summary(emotional.role.rob2)
#OLS
emotional.role.lm2 <- lm(t2 ~ group, data = cor_emotional.role)
summary(emotional.role.lm2)
# sf_36_mental.health----
cor_mental.health <- clinical.trial %>%
  select(id, group, time, sf_36_mental.health) %>%
  pivot_wider(names_from = time, values_from = sf_36_mental.health)
plot.mental.health <- cor_mental.health %>%
  select(group, t1, t2, t3)
ggpairs(plot.mental.health, ggplot2::aes(colour = group)) # evaluate correlation between group and time
#model adjusted for baseline
#t2
#robust
mental.health.lm.rob2 <- lmrob(t2 ~ t1 + group, data = cor_mental.health)
summary(mental.health.lm.rob2)
par(mfrow = c(2, 2))
plot(mental.health.lm.rob2)
#lm without outlier
# Outliers: observations beyond 3 SD in the residual-vs-leverage plot.
# High-influence points: Cook's distance > 4/(n - p - 1) (Bruce & Bruce
# 2017); p = 2 predictors, 4/(52 - 2 - 1) = 0.08.
cor_mental.health.out <- cor_mental.health[-c(60, 59, 22), ]
mental.health.lm.adj2.out <- lm(t2 ~ t1 + group, data = cor_mental.health.out)
summary(mental.health.lm.adj2.out)
par(mfrow = c(2, 2))
plot(mental.health.lm.adj2.out, id.n = 5)
plot(mental.health.lm.adj2.out, 4)
#imputed
mental.health.lm2.imputed <- with(imputed_sf.36, lmrob(sf_36_mental.health_t2 ~ sf_36_mental.health_t1 + group_intervention))
summary(mental.health.lm2.imputed)
mental.health.lm2.imputed.pool <- summary(pool(mental.health.lm2.imputed), conf.int = TRUE)
tibble(mental.health.lm2.imputed.pool)
#t3
# Primary analysis: robust (MM-estimator) regression of mental health at follow-up
# (t3) on baseline (t1) and treatment group.
mental.health.lm.rob3=lmrob(t3~t1+group, data = cor_mental.health)
# FIX: the original called summary()/plot() on `mental.health.lm.adj3`, which is not
# the model fitted on the line above; inspect the robust t3 fit instead.
summary(mental.health.lm.rob3)
par(mfrow=c(2,2))
plot(mental.health.lm.rob3)
#lm without outlier
# Consider as outliers observations that exceed 3 standard deviations in the residuals vs leverage plot.
#High influential points is considered cook's distance 4/(n - p - 1) (Bruce, Peter, and Andrew Bruce. 2017. Practical Statistics for Data Scientists. O’Reilly Media.)
#p is the number of predictors. 4/(45-2-1)=0,09
# NOTE(review): unlike the other outcomes, no rows are removed here — the OLS
# sensitivity model is fitted on the full data. Confirm no influential points
# were identified for this outcome.
mental.health.lm3=lm(t3~t1+group, data = cor_mental.health)
summary(mental.health.lm3)
par(mfrow=c(2,2))
plot(mental.health.lm3, id.n=5)
plot(mental.health.lm3,4)
#imputed
# Sensitivity analysis on multiply-imputed data: robust fit per imputation, then pooled.
mental.health.lm3.imputed=with(imputed_sf.36,lmrob(sf_36_mental.health_t3~sf_36_mental.health_t1+group_intervention))
summary(mental.health.lm3.imputed)
mental.health.lm3.imputed.pool=summary(pool(mental.health.lm3.imputed), conf.int = TRUE)
tibble(mental.health.lm3.imputed.pool)
#Table sf_36 models----
# Helper: extract the tidy row for the intervention-group term from a fitted model
# and attach the adjusted R^2 (values read off the corresponding summary() output).
tidy_intervention = function(model, r2_adj) {
  model %>%
    tidy(conf.int = TRUE) %>%
    filter(term == "groupintervention") %>%
    mutate(r2.adj = r2_adj)
}
#lm robust mm-estimator method----
#t2
physical.sum.lm.rob2.tab=tidy_intervention(physical.sum.lm.rob2, 0.226)
mental.sum.lm.rob2.tab=tidy_intervention(mental.sum.lm.rob2, 0.24)
phys.functioning.lm.adj2.tab=tidy_intervention(phys.functioning.lm.adj2, 0.590)
vitality.lm.rob2.tab=tidy_intervention(vitality.lm.rob2, 0.175)
social.function.lm.rob2.tab=tidy_intervention(social.function.lm.rob2, 0.188)
mental.health.lm.rob2.tab=tidy_intervention(mental.health.lm.rob2, 0.256)
#t3
physical.sum.lm.adj3.tab=tidy_intervention(physical.sum.lm.adj3, 0.190)
mental.sum.lm.adj3.tab=tidy_intervention(mental.sum.lm.adj3, 0.310)
phys.functioning.lm.adj3.tab=tidy_intervention(phys.functioning.lm.adj3, 0.676)
vitality.lm.rob3.tab=tidy_intervention(vitality.lm.rob3, 0.304)
social.function.lm.rob3.tab=tidy_intervention(social.function.lm.rob3, 0.185)
mental.health.lm.rob3.tab=tidy_intervention(mental.health.lm.rob3, 0.399)
#lm OLS method without outliers----
# Helper (redefined so this section is self-contained): tidy row for the
# intervention-group term plus the model's adjusted R^2.
tidy_intervention = function(model, r2_adj) {
  model %>%
    tidy(conf.int = TRUE) %>%
    filter(term == "groupintervention") %>%
    mutate(r2.adj = r2_adj)
}
#t2
physical.sum.lm2.tab=tidy_intervention(physical.sum.lm2, 0.312)
mental.sum.lm2.tab=tidy_intervention(mental.sum.lm2, 0.215)
phys.functioning.lm.adj2.out.tab=tidy_intervention(phys.functioning.lm.adj2.out, 0.644)
# NOTE(review): 0.644 repeats the value used for physical functioning above —
# possible copy-paste; verify against summary(vitality.lm.adj2.out).
vitality.lm.adj2.out.tab=tidy_intervention(vitality.lm.adj2.out, 0.644)
social.function.lm.adj2.out.tab=tidy_intervention(social.function.lm.adj2.out, 0.315)
mental.health.lm.adj2.out.tab=tidy_intervention(mental.health.lm.adj2.out, 0.406)
#t3
physical.sum.lm3.tab=tidy_intervention(physical.sum.lm3, 0.253)
mental.sum.lm3.tab=tidy_intervention(mental.sum.lm3, 0.290)
phys.functioning.lm.adj3.out.tab=tidy_intervention(phys.functioning.lm.adj3.out, 0.719)
vitality.lm.adj3.out.tab=tidy_intervention(vitality.lm.adj3.out, 0.240)
social.function.lm.rob3.out.tab=tidy_intervention(social.function.lm.rob3.out, 0.221)
mental.health.lm3.tab=tidy_intervention(mental.health.lm3, 0.411)
#lm.imputaded.rob----
# Helper: keep only the pooled intervention-effect row from a mice pooled summary.
pool_intervention = function(pooled) {
  pooled %>% filter(term == "group_intervention")
}
#t2
physical.sum.lm2.imputed.pool.tab=pool_intervention(physical.sum.lm2.imputed.pool)
mental.sum.lm2.imputed.pool.tab=pool_intervention(mental.sum.lm2.imputed.pool)
phys.functioning.lm2.imputed.pool.tab=pool_intervention(phys.functioning.lm2.imputed.pool)
vitality.lm2.imputed.pool.tab=pool_intervention(vitality.lm2.imputed.pool)
social.function.lm2.imputed.pool.tab=pool_intervention(social.function.lm2.imputed.pool)
mental.health.lm2.imputed.pool.tab=pool_intervention(mental.health.lm2.imputed.pool)
#t3
physical.sum.lm3.imputed.pool.tab=pool_intervention(physical.sum.lm3.imputed.pool)
mental.sum.lm3.imputed.pool.tab=pool_intervention(mental.sum.lm3.imputed.pool)
phys.functioning.lm3.imputed.pool.tab=pool_intervention(phys.functioning.lm3.imputed.pool)
vitality.lm3.imputed.pool.tab=pool_intervention(vitality.lm3.imputed.pool)
social.function.lm3.imputed.pool.tab=pool_intervention(social.function.lm3.imputed.pool)
mental.health.lm3.imputed.pool.tab=pool_intervention(mental.health.lm3.imputed.pool)
##table lm.robust----
# Combine the 12 robust-model rows into one flextable (t2 rows first, then t3),
# relabelling row names via compose().
sf.36.robust=physical.sum.lm.rob2.tab %>%
bind_rows( mental.sum.lm.rob2.tab,phys.functioning.lm.adj2.tab,
vitality.lm.rob2.tab,social.function.lm.rob2.tab,
mental.health.lm.rob2.tab,physical.sum.lm.adj3.tab,
mental.sum.lm.adj3.tab,phys.functioning.lm.adj3.tab,
vitality.lm.rob3.tab, social.function.lm.rob3.tab,
mental.health.lm.rob3.tab) %>%
mutate(estimate=round(estimate,3),std.error=round(std.error,3),
statistic=round(statistic,3),p.value=round(p.value,3),
conf.low=round(conf.low,3),conf.high=round(conf.high,3)) %>%
# FIX: the column created upstream is `r2.adj`; the original listed "R2.adj.",
# which any_of() silently ignored, leaving the column at the end of the table.
relocate(any_of(c("term", "estimate", "std.error","statistic","conf.low","conf.high","r2.adj","p.value"))) %>%
rename(variable=term) %>%
flextable() %>%
compose(i=c(1,2,3,4,5,6,7,8,9,10,11,12),j=1, value = as_paragraph (c("physical summary t2", "mental summary t2",
"physical functioning t2", "vitality t2", "social functioning t2", "mental health t2","physical summary t3",
"mental summary t3", "physical functioning t3", "vitality t3", "social functioning t3", "mental health t3"))) %>%
autofit()
#table lm.OLS----
# Combine the 12 OLS (outlier-removed) rows into one flextable (t2 first, then t3).
sf.36.ols=physical.sum.lm2.tab %>%
bind_rows(mental.sum.lm2.tab,phys.functioning.lm.adj2.out.tab,
vitality.lm.adj2.out.tab,
social.function.lm.adj2.out.tab,mental.health.lm.adj2.out.tab,
physical.sum.lm3.tab,mental.sum.lm3.tab,phys.functioning.lm.adj3.out.tab,
vitality.lm.adj3.out.tab,social.function.lm.rob3.out.tab,
mental.health.lm3.tab) %>%
mutate(estimate=round(estimate,3),std.error=round(std.error,3),
statistic=round(statistic,3),p.value=round(p.value,3),
conf.low=round(conf.low,3),conf.high=round(conf.high,3)) %>%
# FIX: the column created upstream is `r2.adj`; the original listed "R2.adj.",
# which any_of() silently ignored, leaving the column at the end of the table.
relocate(any_of(c("term", "estimate", "std.error","statistic","conf.low","conf.high","r2.adj","p.value"))) %>%
rename(variable=term) %>%
flextable() %>%
compose(i=c(1,2,3,4,5,6,7,8,9,10,11,12),j=1, value = as_paragraph (c("physical summary t2", "mental summary t2",
"physical functioning t2", "vitality t2", "social functioning t2", "mental health t2","physical summary t3",
"mental summary t3", "physical functioning t3", "vitality t3", "social functioning t3", "mental health t3"))) %>%
autofit()
#table lm.imputaded.rob----
# Combine the 12 pooled imputed-data rows into one flextable (t2 first, then t3).
sf.36.lm.imputed.rob=physical.sum.lm2.imputed.pool.tab %>%
bind_rows(mental.sum.lm2.imputed.pool.tab,phys.functioning.lm2.imputed.pool.tab,
vitality.lm2.imputed.pool.tab,social.function.lm2.imputed.pool.tab,
mental.health.lm2.imputed.pool.tab, physical.sum.lm3.imputed.pool.tab,
mental.sum.lm3.imputed.pool.tab,phys.functioning.lm3.imputed.pool.tab,
vitality.lm3.imputed.pool.tab,social.function.lm3.imputed.pool.tab,
mental.health.lm3.imputed.pool.tab) %>%
# Pooled summaries name the CI columns "2.5 %"/"97.5 %"; renamed here via mutate.
mutate(estimate=round(estimate,3),std.error=round(std.error,3),
statistic=round(statistic,3),conf.low=round(`2.5 %`,3),conf.high=round(`97.5 %`,3) ,
df=round(df,3),p.value=round(p.value,3)) %>%
select(-df,-`2.5 %`,-`97.5 %`) %>%
# NOTE(review): "R2.adj." matches no column here (pooled results carry no adjusted
# R^2), so any_of() drops it silently — harmless, but confirm it is intentional.
relocate(any_of(c("term", "estimate", "std.error","statistic","conf.low","conf.high","p.value","R2.adj."))) %>%
rename(variable=term) %>%
flextable() %>%
compose(i=c(1,2,3,4,5,6,7,8,9,10,11,12),j=1, value = as_paragraph (c("physical summary t2", "mental summary t2",
"physical functioning t2", "vitality t2", "social functioning t2", "mental health t2","physical summary t3",
"mental summary t3", "physical functioning t3", "vitality t3", "social functioning t3", "mental health t3"))) %>%
autofit()
#PLOT MLR SIGNIFICANT----
# Refit (t2) the models whose intervention effect was significant, so their tidy
# output can feed the forest plots below.
#T2 ----
#pelvic pain
p.pain.lm.adj2=lmrob(t2~t1+group, data = cor_pelvic.pain)
p.pain.lm.adj2.out=lm(t2~t1+group, data = cor_pelvic.pain2.out)
p.pain.lm.imputed.pool=summary(pool(p.pain.lm.imputed), conf.int = TRUE)
#dyschezia
dyschezia.lm.adj2=lmrob(t2~t1+group, data = cor_dyschezia)
dyschezia.lm.adj.out=lm(t2~t1+group, data = cor_dyschezia2)
dyschezia.lm.imputed.pool=summary(pool(dyschezia.lm.imputed), conf.int = TRUE)
#pain unpleasantness
pain.unpleasantness.lm.adj2=lmrob(t2~t1+group, data = cor_pain.unpleasantness)
pain.unpleasantness.lm.adj.out=lm(t2~t1+group, data = cor_pain.unpleasantness2)
pain.unpleasantness.lm.imputed.pool=summary(pool(pain.unpleasantness.lm.imputed), conf.int = TRUE)
#t3----
# Bare object names: these only print when run interactively (or sourced with
# echo=TRUE); the t3 models are assumed to exist from earlier sections.
p.pain.lm.adj3
p.pain.lm.adj3.out
p.pain.lm.imputed.pool.t3
dysuria.lm.adj3
dysuria.lm.adj3.out
dysuria.lm.imputed.pool.t3
dyspareunia.lm.adj3
dyspareunia.lm.adj3.out
dyspareunia.lm.imputed.pool.t3
dyschezia.lm.adj3
dyschezia.lm.adj3.out
dyschezia.lm.imputed.pool.t3
dysmenorrhea.lm.adj3
dysmenorrhea.lm.adj3.out
dysmenorrhea.lm.imputed.pool.t3
pain.unpleasantness.lm.adj3
pain.unpleasantness.lm.adj3.out
pain.unpleasantness.lm.imputed.pool.t3
#Plot primary outcome t2----
# For each significant t2 outcome, keep only the intervention-effect row of the
# three analyses (primary robust, OLS without outliers, pooled imputed).
p.pain.rob=p.pain.lm.adj2 %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
p.pain.lm=p.pain.lm.adj2.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
# NOTE(review): p.pain.lm.imputed.pool is overwritten in place by its filtered
# version, so this line is not re-runnable without rebuilding the pooled summary.
p.pain.lm.imputed.pool=p.pain.lm.imputed.pool%>%
filter(term=="group_intervention") %>%
rename(conf.low='2.5 %', conf.high= '97.5 %')
dyschezia.lm=dyschezia.lm.adj2 %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
dyschezia.lm.out=dyschezia.lm.adj.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
dyschezia.lm.imputed.pool=dyschezia.lm.imputed.pool %>%
filter(term=="group_intervention") %>%
rename(conf.low='2.5 %', conf.high= '97.5 %')
pain.unpleasantness.lm= pain.unpleasantness.lm.adj2 %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
pain.unpleasantness.lm.out=pain.unpleasantness.lm.adj.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
pain.unpleasantness.lm.imputed=pain.unpleasantness.lm.imputed.pool %>%
filter(term=="group_intervention") %>%
rename(conf.low='2.5 %', conf.high= '97.5 %')
# Stack the 9 posttreatment rows (3 outcomes x 3 analyses) and label them; row
# order must match the Posttreatment/analyses label vectors below.
pains.mlrb=p.pain.rob %>%
bind_rows(p.pain.lm,p.pain.lm.imputed.pool,dyschezia.lm,
dyschezia.lm.out,dyschezia.lm.imputed.pool,
pain.unpleasantness.lm,pain.unpleasantness.lm.out,
pain.unpleasantness.lm.imputed) %>%
select(estimate,conf.low,conf.high) %>%
add_column(Posttreatment=c("Pelvic.pain.primary","Pelvic.pain.OLS","Pelvic.pain.imputed",
"Dyschezia.primary","Dyschezia.OLS","Dyschezia.imputed",
"Pain.unpleasantness.primary","Pain.unpleasantness.OLS","Pain.unpleasantness.imputed"),
.before = "estimate", analyses=c("Primary","Sensitivity.OLS.without.outliers", "Sensitivity.imputed.data",
"Primary","Sensitivity.OLS.without.outliers", "Sensitivity.imputed.data",
"Primary","Sensitivity.OLS.without.outliers", "Sensitivity.imputed.data"))
#t3----
# Intervention-effect rows for each significant follow-up (t3) outcome: primary
# robust model, OLS without outliers, and pooled imputed analysis.
p.pain.lm3=p.pain.lm.adj3 %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
p.pain.lm.out3=p.pain.lm.adj3.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
# NOTE(review): the pooled objects below are overwritten in place by their
# filtered versions, so these lines are not re-runnable without rebuilding them.
p.pain.lm.imputed.pool.t3 = p.pain.lm.imputed.pool.t3 %>%
filter(term=="group_intervention")%>%
rename(conf.low='2.5 %', conf.high= '97.5 %')
dysuria.lm.adj3=dysuria.lm.adj3%>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
dysuria.lm.out3=dysuria.lm.adj3.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
dysuria.lm.imputed.pool.t3 =dysuria.lm.imputed.pool.t3 %>%
filter(term=="group_intervention")%>%
rename(conf.low='2.5 %', conf.high= '97.5 %')
dyspareunia.lm3=dyspareunia.lm.adj3 %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
dyspareunia.lm.out3=dyspareunia.lm.adj3.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
dyspareunia.lm.imputed.pool.t3 = dyspareunia.lm.imputed.pool.t3 %>%
filter(term=="group_intervention")%>%
rename(conf.low='2.5 %', conf.high= '97.5 %')
dyschezia.lm3=dyschezia.lm.adj3 %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
dyschezia.lm.out3=dyschezia.lm.adj3.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
dyschezia.lm.imputed.pool.t3=dyschezia.lm.imputed.pool.t3 %>%
filter(term=="group_intervention")%>%
rename(conf.low='2.5 %', conf.high= '97.5 %')
dysmenorrhea.lm3=dysmenorrhea.lm.adj3 %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
dysmenorrhea.lm.out3=dysmenorrhea.lm.adj3.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
dysmenorrhea.lm.imputed.pool.t3=dysmenorrhea.lm.imputed.pool.t3%>%
filter(term=="group_intervention")%>%
rename(conf.low='2.5 %', conf.high= '97.5 %')
# Pain unpleasantness, follow-up (t3) — primary robust model row.
pain.unpleasantness.lm3= pain.unpleasantness.lm.adj3 %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
# FIX: the original tidied the *t2* OLS model (pain.unpleasantness.lm.adj.out) and
# overwrote the t2 table object `pain.unpleasantness.lm.out`, so the t3 forest data
# silently reused t2 estimates. Use the t3 outlier-removed model under a t3 name.
pain.unpleasantness.lm.out3=pain.unpleasantness.lm.adj3.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
pain.unpleasantness.lm.imputed.t3=pain.unpleasantness.lm.imputed.pool.t3 %>%
filter(term=="group_intervention")%>%
rename(conf.low='2.5 %', conf.high= '97.5 %')
# Stack the 18 follow-up rows (6 outcomes x 3 analyses); row order must match the
# Follow.up/analyses label vectors below.
pain.mlr3b=p.pain.lm3 %>%
bind_rows(p.pain.lm.out3,p.pain.lm.imputed.pool.t3,
dysuria.lm.adj3,dysuria.lm.out3, dysuria.lm.imputed.pool.t3,
dyspareunia.lm3,dyspareunia.lm.out3,dyspareunia.lm.imputed.pool.t3,
dyschezia.lm3,dyschezia.lm.out3, dyschezia.lm.imputed.pool.t3,
dysmenorrhea.lm3,dysmenorrhea.lm.out3,dysmenorrhea.lm.imputed.pool.t3,
pain.unpleasantness.lm3,pain.unpleasantness.lm.out3,pain.unpleasantness.lm.imputed.t3) %>%
select(estimate,conf.low,conf.high) %>%
add_column(Follow.up=c("Pelvic.pain.primary","Pelvic.pain.OLS","Pelvic.pain.imputed",
"Dysuria.primary","Dysuria.OLS","Dysuria.imputed",
"Dyspareunia.primary","Dyspareunia.OLS","Dyspareunia.imputed",
"Dyschezia.primary","Dyschezia.OLS","Dyschezia.imputed",
"Dysmenorrhea.primary","Dysmenorrhea.OLS","Dysmenorrhea.imputed",
"Pain.unpleasantness.primary","Pain.unpleasantness.OLS","Pain.unpleasantness.imputed"),
.before = "estimate", analyses=c("Primary","Sensitivity.OLS.without.outliers", "Sensitivity.imputed.data",
"Primary","Sensitivity.OLS.without.outliers", "Sensitivity.imputed.data",
"Primary","Sensitivity.OLS.without.outliers", "Sensitivity.imputed.data",
"Primary","Sensitivity.OLS.without.outliers", "Sensitivity.imputed.data",
"Primary","Sensitivity.OLS.without.outliers", "Sensitivity.imputed.data",
"Primary","Sensitivity.OLS.without.outliers", "Sensitivity.imputed.data"))
#Plot secondary outcome----
#t2
# Intervention rows for the significant SF-36 t2 outcomes (three analyses for
# mental health; only OLS-without-outliers for vitality).
mental.health.rob2=mental.health.lm.rob2 %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
# NOTE(review): this overwrites the `.tab` object of the same name built for the
# flextable section (there it also carried r2.adj); run the table code first.
mental.health.lm.adj2.out.tab=mental.health.lm.adj2.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
mental.health.lm2.imputed.pool.tab=mental.health.lm2.imputed.pool %>%
filter(term=="group_intervention") %>%
rename(conf.low='2.5 %', conf.high= '97.5 %')
vitality.lm2.out.tab=vitality.lm.adj2.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
# NOTE(review): stress.lm.adj.out is overwritten in place by its own tidy output
# (not re-runnable) and is not used in Sf.36.MLRb below — verify this is intended.
stress.lm.adj.out= stress.lm.adj.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
# Stack the 4 rows; order must match the Posttreatment/analyses labels below.
Sf.36.MLRb=vitality.lm2.out.tab %>%
bind_rows(mental.health.rob2,mental.health.lm.adj2.out.tab,mental.health.lm2.imputed.pool.tab) %>%
select(estimate,conf.low,conf.high) %>%
add_column(Posttreatment=c("SF-36.Vitality.primary","SF-36.Mental.Health.primary","SF-36.Mental.Health.OLS","SF-36.Mental.Health.imputed"),
.before = "estimate", analyses=c("Sensitivity.OLS.without.outliers","Primary","Sensitivity.OLS.without.outliers",
"Sensitivity.imputed.data"))
#t3----
# Intervention rows for the significant SF-36 follow-up (t3) outcomes.
vitality.rob3.tab=vitality.lm.rob3 %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
vitality.lm.out3=vitality.lm.adj3.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
mental.health.rob3.tab=mental.health.lm.rob3 %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
mental.health.lm3.tab= mental.health.lm3 %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
mental.health.lm3.imputed.pool.tab= mental.health.lm3.imputed.pool %>%
filter(term=="group_intervention") %>%
rename(conf.low='2.5 %', conf.high= '97.5 %')
# Stack the 5 rows; order must match the Follow.up/analyses labels below.
Sf.36.MLR3=vitality.rob3.tab %>%
bind_rows(vitality.lm.out3,mental.health.rob3.tab,mental.health.lm3.tab,mental.health.lm3.imputed.pool.tab) %>%
select(estimate,conf.low,conf.high) %>%
add_column(Follow.up=c("SF-36.Vitality.primary","SF-36.Vitality.OLS",
"SF-36.Mental.Health.primary","SF-36.Mental.Health.OLS","SF-36.Mental.Health.imputed"),
# FIX: the original used lowercase "sensitivity.imputed.data" here, unlike every
# other section ("Sensitivity.imputed.data"); ggplot maps shape/colour per factor
# level, so the inconsistent label could mismatch aesthetics across paired panels.
.before = "estimate", analyses=c("Primary","Sensitivity.OLS.without.outliers",
"Primary","Sensitivity.OLS.without.outliers","Sensitivity.imputed.data"))
#t2----
#pain----
library("scales")
# Forest-style plot of posttreatment pain effects: point = estimate, range = 95% CI,
# one shape/colour per analysis; coord_flip() puts outcomes on the vertical axis.
pain.mlr.plot1.2=ggplot(pains.mlrb, mapping = aes(x = Posttreatment, y = estimate,ymin = conf.low, ymax = conf.high, group=analyses)) +
geom_pointrange(aes(shape=analyses,color=analyses)) +
scale_size_manual(values=c(5,5,5))+
theme(axis.text.x = element_text(face="bold"),
axis.text.y = element_text(face="bold"),
axis.text = element_text(size = rel(0.7))) +
theme(legend.position="none") +
scale_y_continuous(breaks=seq(-5.0,0,0.5),labels = label_number(accuracy = 0.1)) +
# Collapse the per-analysis tick labels so each outcome shows a single clean name.
scale_x_discrete(labels = c("Pelvic.pain.primary"="Pelvic pain","Pelvic.pain.OLS"="Pelvic pain","Pelvic.pain.imputed"="Pelvic pain",
"Dyschezia.primary"="Dyschezia","Dyschezia.OLS"="Dyschezia","Dyschezia.imputed"="Dyschezia",
"Pain.unpleasantness.primary"="Pain unpleasantness","Pain.unpleasantness.OLS"="Pain unpleasantness",
"Pain.unpleasantness.imputed"="Pain unpleasantness"))+
labs(y="",x="")+
annotate("text", x = 9.3, y = -4.5, label= "A", size = 4)+
coord_flip()
#SF-36----
# Forest-style plot of the significant SF-36 posttreatment effects.
Sf.36.MLR2.1b.plot=ggplot(Sf.36.MLRb, mapping = aes(x = Posttreatment, y = estimate,ymin = conf.low, ymax = conf.high, group=analyses)) +
geom_pointrange(aes(shape=analyses,color=analyses)) +
scale_size_manual(values=c(5,5,5))+
theme(axis.text.x = element_text(face="bold"),
axis.text.y = element_text(face="bold"),
axis.text = element_text(size = rel(0.8))) +
theme(legend.position="none") +
scale_y_continuous(breaks=c (0.2, 4.5, 6.5,13,10,16,21,27 ,31)) +
# Collapse the per-analysis tick labels so each outcome shows a single clean name.
scale_x_discrete(labels = c("SF-36.Vitality.primary"="SF-36 Vitality",
"SF-36.Mental.Health.primary"="SF-36 Mental health",
"SF-36.Mental.Health.OLS"="SF-36 Mental health",
"SF-36.Mental.Health.imputed"="SF-36 Mental health"))+
labs(y="",x="")+
annotate("text", x = 4.47, y = 0.2, label= "A", size = 4)+
coord_flip()
#t3----
#Pain----
# NOTE(review): `pain.mlr3` is not created in this section (the object built below
# is `pain.mlr3b`); confirm it is defined earlier in the file, otherwise this plot
# errors. Note also it uses geom_point (no CI range), unlike the other panels.
pain.mlr.plot2=ggplot(pain.mlr3, mapping = aes(x = Follow.up, y = estimate, group=analyses)) +
geom_point(aes(shape=analyses,color=analyses)) +
scale_size_manual(values=c(5,5,5))+
theme(axis.text.x = element_text(face="bold"),
axis.text.y = element_text(face="bold"),
axis.text = element_text(size = rel(0.7))) +
theme(legend.position="none") +
scale_y_continuous(breaks=c (-3.2, -2.7,-2.2, -1.7, -1.4)) +
scale_x_discrete(labels = c("Pelvic.pain"="Pelvic pain",
"Pain.unpleasantness"="Pain unpleasantness"))+
labs(y="")+
annotate("text", x = 9.3, y = -4.5, label= "B", size = 4)+
coord_flip()
# Forest-style plot of the follow-up pain effects (estimate + 95% CI per analysis).
pain.mlr.plot2.2=ggplot(pain.mlr3b, mapping = aes(x = Follow.up, y = estimate,ymin = conf.low, ymax = conf.high, group=analyses)) +
geom_pointrange(aes(shape=analyses,color=analyses)) +
scale_size_manual(values=c(5,5,5))+
theme(axis.text.x = element_text(face="bold"),
axis.text.y = element_text(face="bold"),
axis.text = element_text(size = rel(0.7))) +
theme(legend.position="none") +
scale_y_continuous(breaks=seq(-6.0,0,0.5)) +
# Collapse the per-analysis tick labels so each outcome shows a single clean name.
scale_x_discrete(labels = c("Pelvic.pain.primary"="Pelvic pain","Pelvic.pain.OLS"="Pelvic pain","Pelvic.pain.imputed"="Pelvic pain",
"Dysuria.primary"="Dysuria","Dysuria.OLS"="Dysuria","Dysuria.imputed"="Dysuria",
"Dyspareunia.primary"="Dyspareunia","Dyspareunia.OLS"="Dyspareunia","Dyspareunia.imputed"="Dyspareunia",
"Dyschezia.primary"="Dyschezia","Dyschezia.OLS"="Dyschezia","Dyschezia.imputed"="Dyschezia",
"Dysmenorrhea.primary"="Dysmenorrhea","Dysmenorrhea.OLS"="Dysmenorrhea","Dysmenorrhea.imputed"="Dysmenorrhea",
"Pain.unpleasantness.primary"="Pain unpleasantness","Pain.unpleasantness.OLS"="Pain unpleasantness",
"Pain.unpleasantness.imputed"="Pain unpleasantness"))+
labs(y="",x="")+
annotate("text", x = 18.3, y = -6, label= "B", size = 4)+
coord_flip()
#SF-36---
# (Header has only three trailing dashes, so RStudio will not treat it as a section.)
# Forest-style plot of the significant SF-36 follow-up effects.
Sf.36.MLR3.plot=ggplot(Sf.36.MLR3, mapping = aes(x = Follow.up,ymin = conf.low, ymax = conf.high, y = estimate, group=analyses)) +
geom_pointrange(aes(shape=analyses,color=analyses)) +
scale_size_manual(values=c(5,5,5))+
theme(axis.text.x = element_text(face="bold"),
axis.text.y = element_text(face="bold"),
axis.text = element_text(size = rel(0.8))) +
scale_x_discrete(labels = c("SF-36.Vitality.primary"="SF-36 Vitality",
"SF-36.Vitality.OLS"="SF-36 Vitality",
"SF-36.Mental.Health.primary"="SF-36 Mental health",
"SF-36.Mental.Health.OLS"="SF-36 Mental health",
"SF-36.Mental.Health.imputed"="SF-36 Mental health"))+
labs(y="",x="")+
theme(legend.position="none") +
scale_y_continuous(breaks=c (2.5,7, 9, 16, 19,28,31)) +
annotate("text", x = 5.5, y = 2, label= "B", size = 4)+
coord_flip()
#grade plots----
# Stack the posttreatment (A) and follow-up (B) panels vertically.
grid.arrange(pain.mlr.plot1.2, pain.mlr.plot2.2 , ncol=1)
# NOTE(review): `Sf.36.MLR2.plot` is not defined in this section — the t2 plot
# built above is `Sf.36.MLR2.1b.plot`. Confirm the intended object name.
grid.arrange(Sf.36.MLR2.plot, Sf.36.MLR3.plot , ncol=1)
#prepare package to be used----
# Packages used throughout the analysis, attached in one pass.
# FIX: the original vector was a syntax error — a missing comma between "openxlsx"
# and "dplyr" — and listed "openxlsx" twice; both corrected here.
packages.paper= c("openxlsx","dplyr","tidyverse","knitr", "tidyr",
"fastDummies", "rstatix","ggpubr","caret","flextable", "officer", "plotly",
"gtable","egg", "gridExtra","grid","lavaan","robustbase","robustlmm","arsenal","GGally","mice","WRS2")
invisible(lapply(packages.paper, library, character.only = TRUE))
#load data frame----
# FIX: read the spreadsheet before inspecting it — the original called
# names(clinical.trial) one line before clinical.trial was created, which errors
# in a fresh session.
clinical.trial=read.xlsx("RCT.bMBI.endo.pain.xlsx")
names(clinical.trial)
View(clinical.trial)
# Data frame structure----
glimpse(clinical.trial)
# NOTE(review): headTail() comes from the psych package, which is not in
# packages.paper — confirm it is attached elsewhere.
headTail(clinical.trial)
#Socio demographics and baseline outcome variables table----
sociodemographic=read.xlsx("sociodemographic.sub.xlsx")
names(sociodemographic)
#collapse education categories
# NOTE(review): "higher educationl" below matches a (likely misspelled) value in
# the spreadsheet — do not "correct" the spelling here without checking the data.
sociodemographic = sociodemographic %>%
mutate(education=case_when
(education %in% c("incomplete elementary school","elementary school","incomplete high school") ~ "< high school",
education %in% c("high school")~"high school", education %in% c("incomplete higher education","higher educationl","graduate")
~ "university")) %>%
select(id,group,age,race,marital.status,education,BMI,physical.exercise,sleep.hours.per.night,current.endometriosis.medication,n.surgeries,time.chronic.pain,
analgesic,anxiety,depression)
#table sociodemographic
#change default statistic in tableby
# mycontrols: descriptive only (no tests); mycontrols2: adds Wilcoxon for numeric
# and chi-squared for categorical variables, and drops the Total column.
mycontrols= tableby.control( test=FALSE,
numeric.stats=c("mean","sd", "median", "q1q3"),
cat.stats=c("countpct"),
stats.labels=list(mean="Mean",sd="SD", median="Median", q1q3="Q1,Q3"))
mycontrols2= tableby.control(test=TRUE, total=FALSE,
numeric.test="wt", cat.test="chisq",
numeric.stats=c("mean","sd", "median", "q1q3"),
cat.stats=c("countpct"),
stats.labels=list(mean="Mean",sd="SD", median="Median", q1q3="Q1,Q3"))
#table1
tab1=tableby(group~age+race+marital.status+education+BMI+physical.exercise+sleep.hours.per.night+current.endometriosis.medication+n.surgeries+
time.chronic.pain+analgesic+anxiety+depression , data=sociodemographic, control=mycontrols2)
mylabels = list(age="age (y)", BMI="BMI (kg/m2)")
tab1=summary(tab1,text=TRUE, labelTranslations = mylabels)
tab1=as.data.frame(tab1)
# Render as a flextable, keeping only the display columns.
tab1= tab1 %>%
rename(variables="") %>%
flextable(col_keys = c("variables","control (N=32)","intervention (N=31)","Total (N=63)","p value")) %>%
colformat_num(digits=2,j=c("variables","control (N=32)","intervention (N=31)","Total (N=63)","p value")) %>%
autofit()
#test difference in baseline per outcome variable
# Baseline (t1) between-group comparisons: Wilcoxon for the SF-36 scales and pain
# scores, t-test for FFMQ. NOTE(review): melt() comes from reshape2/data.table,
# which is not in packages.paper — confirm it is attached (likely via a dependency).
clinical.trial %>%
filter(time=="t1") %>%
select(id,group, sf.36.physical.sum,sf.36.mental.sum,sf_36_physical.functioning,
sf_36_limitations.physical.functioning,
sf_36_pain, sf_36_general.health, sf_36_vitality, sf_36_social.function, sf_36_emotional.role,
sf_36_mental.health) %>%
melt(id.vars=c("id","group")) %>%
group_by(variable) %>%
wilcox_test(value~group, detailed = TRUE) %>%
adjust_pvalue() %>%
add_significance("p") %>%
mutate(estimate=round(estimate,3),p=round(p,3),conf.low=round(conf.low,3),
conf.high=round(conf.high,3),p.adj=round(p.adj,3)) %>%
select(variable,estimate,statistic,p,conf.low,conf.high,p.adj) %>%
flextable() %>%
autofit()
# Same comparison for the pain outcomes and perceived stress.
clinical.trial %>%
filter(time=="t1") %>%
select(id,group, pelvic.pain,pain.unpleasantness,dysuria,dyspareunia,dyschezia,dysmenorrhea,PSS_10_total) %>%
melt(id.vars=c("id","group")) %>%
group_by(variable) %>%
wilcox_test(value~group, detailed = TRUE) %>%
adjust_pvalue() %>%
add_significance("p") %>%
mutate(estimate=round(estimate,3),p=round(p,3),conf.low=round(conf.low,3),
conf.high=round(conf.high,3),p.adj=round(p.adj,3)) %>%
select(variable,estimate,statistic,p,conf.low,conf.high,p.adj) %>%
flextable() %>%
autofit()
# FFMQ total uses a t-test (distribution presumably adequate — confirm).
clinical.trial %>%
filter(time=="t1") %>%
select(id,group, FFMQ_total) %>%
melt(id.vars=c("id","group")) %>%
group_by(variable) %>%
t_test(value~group, detailed = TRUE) %>%
adjust_pvalue() %>%
add_significance("p") %>%
mutate(estimate=round(estimate,3),p=round(p,3),conf.low=round(conf.low,3),
conf.high=round(conf.high,3),p.adj=round(p.adj,3)) %>%
select(variable,estimate,statistic,p,conf.low,conf.high,p.adj) %>%
flextable() %>%
autofit()
#outcome variables between time and group
# Wide table of every outcome at every time point, then a descriptive tableby.
outcome.table=clinical.trial %>%
select(id,group,time, pelvic.pain,pain.unpleasantness,dysuria,dyspareunia,dyschezia,dysmenorrhea,PSS_10_total,
sf.36.physical.sum,sf.36.mental.sum,sf_36_physical.functioning,sf_36_limitations.physical.functioning,
sf_36_pain, sf_36_general.health, sf_36_vitality, sf_36_social.function, sf_36_emotional.role,
sf_36_mental.health, FFMQ_total) %>%
pivot_wider(names_from = time,values_from=c(pelvic.pain,pain.unpleasantness,dysuria,dyspareunia,dyschezia,dysmenorrhea,PSS_10_total,
sf.36.physical.sum,sf.36.mental.sum,sf_36_physical.functioning,sf_36_limitations.physical.functioning,
sf_36_pain, sf_36_general.health, sf_36_vitality, sf_36_social.function, sf_36_emotional.role,
sf_36_mental.health, FFMQ_total))
#table
# NOTE(review): several terms are repeated in this formula (e.g. dysuria_t1,
# sf_36_physical.functioning_t1/t2, sf_36_general.health_t1-t3) — harmless
# duplication for tableby, but worth cleaning up.
tab2=tableby(group~pelvic.pain_t1+pelvic.pain_t2+pelvic.pain_t3+pain.unpleasantness_t1+pain.unpleasantness_t2+
pain.unpleasantness_t3+dysuria_t1+dysuria_t1+dysuria_t2+dysuria_t3+dyspareunia_t1+dyspareunia_t2+
dyspareunia_t3+dyschezia_t1+dyschezia_t2+dyschezia_t3+dysmenorrhea_t1+dysmenorrhea_t2+dysmenorrhea_t3+
PSS_10_total_t1+PSS_10_total_t2+PSS_10_total_t3+sf.36.physical.sum_t1+sf.36.physical.sum_t2+sf.36.physical.sum_t3+
sf.36.mental.sum_t1+sf.36.mental.sum_t2+sf.36.mental.sum_t3+sf_36_physical.functioning_t1+sf_36_physical.functioning_t2+
sf_36_physical.functioning_t1+sf_36_physical.functioning_t2+sf_36_physical.functioning_t3+
sf_36_limitations.physical.functioning_t1+sf_36_limitations.physical.functioning_t2+sf_36_limitations.physical.functioning_t3+
sf_36_pain_t1+sf_36_pain_t2+sf_36_pain_t3+sf_36_general.health_t1+sf_36_general.health_t2+sf_36_general.health_t3+
sf_36_general.health_t1+sf_36_general.health_t2+sf_36_general.health_t3+sf_36_vitality_t1+sf_36_vitality_t2+sf_36_vitality_t3+
sf_36_social.function_t1+sf_36_social.function_t2+sf_36_social.function_t3+sf_36_emotional.role_t1+sf_36_emotional.role_t2+
sf_36_emotional.role_t3+sf_36_mental.health_t1+sf_36_mental.health_t2+sf_36_mental.health_t3+FFMQ_total_t1+FFMQ_total_t2+
FFMQ_total_t3, data=outcome.table, control=mycontrols)
tab2=summary(tab2,text=TRUE)
tab2=as.data.frame(tab2)
tab2= tab2 %>%
rename(variables="") %>%
flextable(col_keys = c("variables","control (N=32)","intervention (N=31)","Total (N=63)","p value")) %>%
colformat_num(digits=2,j=c("variables","control (N=32)","intervention (N=31)","Total (N=63)","p value")) %>%
autofit()
#Meditation diary, mean of meditation time by week and total----
meditation.diary=read.xlsx("meditation.diary.all.xlsx")
names(meditation.diary)
hist(meditation.diary$mean_total)
# Per-column mean and SD of meditation time, dropping missing entries per column.
meditation.diary %>%
pivot_longer(-id) %>%
group_by(name) %>%
filter(!is.na(value)) %>%
summarise(mean=mean(value),sd=sd(value))
# Missing by Time: T1, T2, T3 per variable----
# Count missing values per variable within each assessment wave, most-missing first.
missing.values.t1= clinical.trial %>%
filter(time=="t1") %>%
gather(key = "key", value = "val") %>%
mutate(is.missing = is.na(val)) %>%
group_by(key, is.missing) %>%
summarise(num.missing = n()) %>%
filter(is.missing==TRUE) %>%
select(-is.missing) %>%
arrange(desc(num.missing))
missing.values.t2= clinical.trial %>%
filter(time=="t2") %>%
gather(key = "key", value = "val") %>%
mutate(is.missing = is.na(val)) %>%
group_by(key, is.missing) %>%
summarise(num.missing = n()) %>%
filter(is.missing==TRUE) %>%
select(-is.missing) %>%
arrange(desc(num.missing))
missing.values.t3= clinical.trial %>%
# FIX: the original filtered time=="t1" here, so "t3" missingness silently
# duplicated the t1 counts.
filter(time=="t3") %>%
gather(key = "key", value = "val") %>%
mutate(is.missing = is.na(val)) %>%
group_by(key, is.missing) %>%
summarise(num.missing = n()) %>%
filter(is.missing==TRUE) %>%
select(-is.missing) %>%
arrange(desc(num.missing))
# Bar chart of missing counts per variable, one plot per wave.
missing.values.t1 %>%
ggplot() +
geom_bar(aes(x=key, y=num.missing), stat = 'identity') +
labs(x='variable', y="number of missing t1", title='Number of missing values') +
theme(axis.text.x = element_text(angle = 45, hjust = 1))
missing.values.t2 %>%
ggplot() +
geom_bar(aes(x=key, y=num.missing), stat = 'identity') +
labs(x='variable', y="number of missing t2", title='Number of missing values') +
theme(axis.text.x = element_text(angle = 45, hjust = 1))
missing.values.t3 %>%
ggplot() +
geom_bar(aes(x=key, y=num.missing), stat = 'identity') +
labs(x='variable', y="number of missing t3", title='Number of missing values') +
theme(axis.text.x = element_text(angle = 45, hjust = 1))
#Percentage of missing per variable, total dataframe----
# Share of present vs missing values per variable across the whole data frame.
missing.values.percentage = clinical.trial %>%
  gather(key = "key", value = "val") %>%
  mutate(isna = is.na(val)) %>%
  group_by(key) %>%
  mutate(total = n()) %>%
  group_by(key, total, isna) %>%
  summarise(num.isna = n()) %>%
  mutate(pct = num.isna / total * 100)
# Variables ordered by % missing; drives the x-axis order of the plots below.
# NOTE: the name `levels` shadows base::levels() from here on.
levels = (missing.values.percentage %>% filter(isna == TRUE) %>% arrange(desc(pct)))$key
# BUG FIX: View(levels) previously ran *before* `levels` was created, so it
# displayed the base R function instead of the ordering vector; moved after
# the assignment. (View() is interactive-only.)
View(levels)
percentage.plot = missing.values.percentage %>%
  ggplot() +
  geom_bar(aes(x = reorder(key, desc(pct)),
               y = pct, fill = isna),
           stat = 'identity', alpha = 0.8) +
  scale_x_discrete(limits = levels) +
  scale_fill_manual(name = "",
                    values = c('steelblue', 'tomato3'), labels = c("Present", "Missing")) +
  coord_flip() +
  labs(title = "Percentage of missing values", x = 'Variable',
       y = "% of missing values")
# Total missing by id----
# Missingness heatmap: one tile per (row, variable), coloured by NA status.
# Uses the `levels` ordering computed above for the variable axis.
row.plot = clinical.trial %>%
  mutate(id = row_number()) %>%
  gather(-id, key = "key", value = "val") %>%
  mutate(isna = is.na(val)) %>%
  ggplot(aes(key, id, fill = isna)) +
  geom_raster(alpha = 0.8) +
  scale_x_discrete(limits = levels) +
  scale_y_continuous(breaks = seq(0, 174, by = 20)) +
  scale_fill_manual(name = "",
                    values = c('steelblue', 'tomato3'),
                    labels = c("Present", "Missing")) +
  labs(x = "Variable",
       y = "Row Number", title = "Missing values in rows") +
  coord_flip()
# Sample size by time: t1, t2, t3----
# Number of participants contributing data per (time, group) cell, as a table.
sample.by.time = clinical.trial %>%
  select(id, time, group) %>%
  group_by(time, group) %>%
  summarise(participants = n()) %>%
  flextable()
# Participants dropout by time: t1, t2, t3----
# Per-group dropout counts and percentages relative to the t1 sample size,
# rendered as a flextable.
dropout = clinical.trial %>%
  select(id, time, group) %>%
  group_by(time, group) %>%
  summarise(id = n()) %>%
  # one row per group; columns t1/t2/t3 hold the sample size at each time
  pivot_wider(names_from = time, values_from = id) %>%
  mutate(dropout.t1.t2 = t1 - t2, dropout.t1.t3 = t1 - t3) %>%
  group_by(group) %>%
  mutate(drop.out.t1.t2.percentage = dropout.t1.t2 / t1 * 100,
         drop.out.t1.t3.percentage = dropout.t1.t3 / t1 * 100) %>%
  # `group` survives the select() because the data are grouped by it
  select(dropout.t1.t2, dropout.t1.t3, drop.out.t1.t2.percentage,
         drop.out.t1.t3.percentage) %>%
  mutate(drop.out.t1.t2.percentage = round(drop.out.t1.t2.percentage, 2),
         drop.out.t1.t3.percentage = round(drop.out.t1.t3.percentage, 2)) %>%
  flextable() %>%
  # BUG FIX: the count-column labels used the keys `drop.out.t1.t2` /
  # `drop.out.t1.t3`, which do not exist (the columns are dropout.t1.t2 /
  # dropout.t1.t3), so the "n t1-t2"/"n t1-t3" headers were never applied.
  set_header_labels(dropout.t1.t2 = "n t1-t2", dropout.t1.t3 = "n t1-t3",
                    drop.out.t1.t2.percentage = "% t1-t2",
                    drop.out.t1.t3.percentage = "% t1-t3") %>%
  set_caption(caption = "Dropout")
# Endometriosis-related pain and perceived stress Distribution between group----
# raw scores
# Boxplot + jittered points of each raw outcome by group, one facet per
# outcome, with the median marked in black. Previously triplicated for
# t1/t2/t3; factored into one helper that produces the identical plot.
plot.pain.stress.raw = function(timepoint) {
  clinical.trial %>%
    filter(time == timepoint) %>%
    select(group, pelvic.pain, dysuria, dyspareunia, dyschezia, dysmenorrhea,
           pain.unpleasantness, PSS_10_total) %>%
    pivot_longer(-group) %>%
    ggplot(aes(x = group, y = value, color = group)) +
    facet_wrap(~ name, scales = "free", nrow = 4) +
    coord_flip() +
    geom_boxplot() +
    geom_jitter(shape = 10, position = position_jitter(0.1)) +
    stat_summary(fun = median, geom = "pointrange", color = "black") +
    ylab("Value") +
    theme(axis.text.y = element_blank())
}
#t1
plot.pain.stress.raw("t1")
#t2
plot.pain.stress.raw("t2")
#t3
plot.pain.stress.raw("t3")
# Endometriosis-related pain and perceived stress distribution between group on gain scores----
#compute changes in variables of interest from t1 to t2 and t1 to t3
# One row per participant: each outcome is widened to *_t1/_t2/_t3 columns and
# change scores are computed as follow-up minus baseline. NOTE(review): sign
# convention assumed to be "positive = increase from baseline" — confirm
# against the instruments' scoring direction.
pain.PSS.gain=clinical.trial %>%
select(id, group, time,pelvic.pain,dysuria,dyspareunia,dyschezia,dysmenorrhea,pain.unpleasantness,
PSS_10_total) %>%
pivot_wider(names_from = time,values_from = c(pelvic.pain,dysuria,dyspareunia,
dyschezia,dysmenorrhea,pain.unpleasantness,PSS_10_total)) %>%
# t1 -> t2 changes
mutate (pelvic.pain.change.t1_t2 = pelvic.pain_t2 - pelvic.pain_t1,
dysuria.change.t1_t2 = dysuria_t2 - dysuria_t1,
dyspareunia.change.t1_t2 =dyspareunia_t2- dyspareunia_t1,
dyschezia.change.t1_t2 =dyschezia_t2- dyschezia_t1,
dysmenorrhea.change.t1_t2 =dysmenorrhea_t2 - dysmenorrhea_t1,
pain.unpleasantness.change.t1_t2 =pain.unpleasantness_t2 - pain.unpleasantness_t1,
PSS.change.t1_t2 =PSS_10_total_t2 - PSS_10_total_t1,
# t1 -> t3 changes
pelvic.pain.change.t1_t3 = pelvic.pain_t3 - pelvic.pain_t1,
dysuria.change.t1_t3 =dysuria_t3 - dysuria_t1,dyspareunia.change.t1_t3 = dyspareunia_t3 - dyspareunia_t1,
dyschezia.change.t1_t3 = dyschezia_t3 - dyschezia_t1,
dysmenorrhea.change.t1_t3 = dysmenorrhea_t3 - dysmenorrhea_t1,
pain.unpleasantness.change.t1_t3 =pain.unpleasantness_t3 - pain.unpleasantness_t1,
PSS.change.t1_t3 = PSS_10_total_t3 - PSS_10_total_t1)
#t1-t2
# Gain-score (t2 - t1) distributions by group.
# FIX: dysmenorrhea.change.t1_t2 appeared twice in select(); duplicate removed
# (dplyr::select() only returned it once, so the plot is unchanged).
pain.PSS.gain %>%
  select(group, pelvic.pain.change.t1_t2, dysuria.change.t1_t2, dyspareunia.change.t1_t2,
         dyschezia.change.t1_t2, dysmenorrhea.change.t1_t2, pain.unpleasantness.change.t1_t2,
         PSS.change.t1_t2) %>%
  pivot_longer(-group) %>%
  ggplot(aes(x = group, y = value, color = group)) +
  facet_wrap(~ name, scales = "free", nrow = 4) +
  coord_flip() +
  geom_boxplot() +
  geom_jitter(shape = 10, position = position_jitter(0.1)) +
  stat_summary(fun = median, geom = "pointrange", color = "black") +
  ylab("Value") +
  theme(axis.text.y = element_blank())
#t1-t3
# Gain-score (t3 - t1) distributions by group.
# FIX: dysuria.change.t1_t3 is computed above but was missing from this plot,
# while the matching t1-t2 panel includes dysuria; added for consistency.
pain.PSS.gain %>%
  select(group, pelvic.pain.change.t1_t3, dysuria.change.t1_t3,
         dyspareunia.change.t1_t3, dyschezia.change.t1_t3, dysmenorrhea.change.t1_t3,
         pain.unpleasantness.change.t1_t3, PSS.change.t1_t3) %>%
  pivot_longer(-group) %>%
  ggplot(aes(x = group, y = value, color = group)) +
  facet_wrap(~ name, scales = "free", nrow = 4) +
  coord_flip() +
  geom_boxplot() +
  geom_jitter(shape = 10, position = position_jitter(0.1)) +
  stat_summary(fun = median, geom = "pointrange", color = "black") +
  ylab("Value") +
  theme(axis.text.y = element_blank())
# Multidimensional quality of life (SF_36) distribution----
# Boxplot + jittered points of SF-36 scores by group at a given time point.
# Previously six near-identical pipelines (summary scores / subscales at
# t1/t2/t3); factored into two helpers that return the same ggplot objects.
plot.sf36.summary = function(timepoint) {
  # SF-36 physical and mental component summaries
  clinical.trial %>%
    filter(time == timepoint) %>%
    select(group, sf.36.physical.sum, sf.36.mental.sum) %>%
    pivot_longer(-group) %>%
    ggplot(aes(x = group, y = value, color = group)) +
    facet_wrap(~ name, scales = "free", nrow = 4) +
    coord_flip() +
    geom_boxplot() +
    geom_jitter(shape = 10, position = position_jitter(0.1)) +
    stat_summary(fun = median, geom = "pointrange", color = "black") +
    ylab("Value") +
    theme(axis.text.y = element_blank())
}
plot.sf36.subscales = function(timepoint) {
  # The eight SF-36 subscales
  clinical.trial %>%
    filter(time == timepoint) %>%
    select(group, sf_36_physical.functioning, sf_36_limitations.physical.functioning,
           sf_36_pain, sf_36_general.health, sf_36_vitality, sf_36_social.function,
           sf_36_emotional.role, sf_36_mental.health) %>%
    pivot_longer(-group) %>%
    ggplot(aes(x = group, y = value, color = group)) +
    facet_wrap(~ name, scales = "free", nrow = 4) +
    coord_flip() +
    geom_boxplot() +
    geom_jitter(shape = 10, position = position_jitter(0.1)) +
    stat_summary(fun = median, geom = "pointrange", color = "black") +
    ylab("Value") +
    theme(axis.text.y = element_blank())
}
#distribution between group t1
dist.sum.sf_36.t1 = plot.sf36.summary("t1")
dist.sub.sf_36.t1 = plot.sf36.subscales("t1")
# NOTE: sf_36_emotional.role and sf_36_limitations.physical.functioning show
# extremely low variance in the control group.
#distribution between group t2
dist.sum.sf_36.t2 = plot.sf36.summary("t2")
dist.sub.sf_36.t2 = plot.sf36.subscales("t2")
#distribution between group t3
dist.sum.sf_36.t3 = plot.sf36.summary("t3")
dist.sub.sf_36.t3 = plot.sf36.subscales("t3")
#compute changes in variables of interest from t1 to t2 and t1 to t3----
# SF-36 gain scores: widen every summary score and subscale to *_t1/_t2/_t3,
# then compute follow-up minus baseline for both follow-ups.
sf.36.gain=clinical.trial %>%
select(id, group, time,sf.36.physical.sum, sf.36.mental.sum, sf_36_physical.functioning,sf_36_limitations.physical.functioning,
sf_36_pain,sf_36_general.health,sf_36_vitality, sf_36_social.function,
sf_36_emotional.role, sf_36_mental.health) %>%
pivot_wider(names_from = time,values_from = c( sf.36.physical.sum, sf.36.mental.sum,sf_36_physical.functioning,
sf_36_limitations.physical.functioning,
sf_36_pain,sf_36_general.health,sf_36_vitality, sf_36_social.function,
sf_36_emotional.role, sf_36_mental.health)) %>%
# t1 -> t2 changes
mutate (sf_36.physical.t1.t2=sf.36.physical.sum_t2-sf.36.physical.sum_t1,
sf.36.mental.sum.t1.t2=sf.36.mental.sum_t2-sf.36.mental.sum_t1,
sf_36_physical.functioning.t1_t2 = sf_36_physical.functioning_t2 - sf_36_physical.functioning_t1,
sf_36_limitations.physical.t1_t2 = sf_36_limitations.physical.functioning_t2 - sf_36_limitations.physical.functioning_t1,
sf_36_pain.t1_t2 =sf_36_pain_t2- sf_36_pain_t1,
sf_36_general.health.t1_t2 =sf_36_general.health_t2- sf_36_general.health_t1,
sf_36_vitality.t1_t2 =sf_36_vitality_t2 - sf_36_vitality_t1,
sf_36_social.function.t1_t2 =sf_36_social.function_t2 - sf_36_social.function_t1,
sf_36_emotional.role.t1_t2 =sf_36_emotional.role_t2 - sf_36_emotional.role_t1,
sf_36_mental.health.t1_t2 =sf_36_mental.health_t2 - sf_36_mental.health_t1,
# t1 -> t3 changes
sf_36.physical.t1.t3=sf.36.physical.sum_t3-sf.36.physical.sum_t1,
sf.36.mental.sum.t1.t3=sf.36.mental.sum_t3-sf.36.mental.sum_t1,
sf_36_physical.functioning.t1_t3 = sf_36_physical.functioning_t3 - sf_36_physical.functioning_t1,
sf_36_limitations.physical.t1_t3 = sf_36_limitations.physical.functioning_t3 - sf_36_limitations.physical.functioning_t1,
sf_36_pain.t1_t3 =sf_36_pain_t3- sf_36_pain_t1,
sf_36_general.health.t1_t3 =sf_36_general.health_t3- sf_36_general.health_t1,
sf_36_vitality.t1_t3 =sf_36_vitality_t3 - sf_36_vitality_t1,
sf_36_social.function.t1_t3 =sf_36_social.function_t3 - sf_36_social.function_t1,
sf_36_emotional.role.t1_t3 =sf_36_emotional.role_t3 - sf_36_emotional.role_t1,
sf_36_mental.health.t1_t3 =sf_36_mental.health_t3 - sf_36_mental.health_t1)
# Mindfulness (FFMQ) distribution----
#distribution between group and time
# FFMQ total score by group, one facet per time point.
dist.FFMQ = clinical.trial %>%
  select(group, time, FFMQ_total) %>%
  pivot_longer(FFMQ_total) %>%
  ggplot(aes(x = group, y = value, color = group)) +
  facet_wrap(~ time, scales = "free", nrow = 4) +
  coord_flip() +
  geom_boxplot() +
  geom_jitter(shape = 10, position = position_jitter(0.1)) +
  stat_summary(fun = median, geom = "pointrange", color = "black") +
  ylab("Value") +
  theme(axis.text.y = element_blank())
#compute changes in variables of interest from t1 to t2 and t1 to t3
# FFMQ total widened to one column per time; gain = follow-up minus baseline.
FFMQ.gain = clinical.trial %>%
  select(id, group, time, FFMQ_total) %>%
  pivot_wider(names_from = time, values_from = FFMQ_total) %>%
  mutate(FFMQ_total.t1_t2 = t2 - t1,
         FFMQ_total.t1_t3 = t3 - t1)
# distribution between group on gain scores (both t1-t2 and t1-t3 panels)
gain.FFMQ.t1.t2 = FFMQ.gain %>%
  select(group, FFMQ_total.t1_t2, FFMQ_total.t1_t3) %>%
  pivot_longer(-group) %>%
  ggplot(aes(x = group, y = value, color = group)) +
  facet_wrap(~ name, scales = "free", nrow = 4) +
  coord_flip() +
  geom_boxplot() +
  geom_jitter(shape = 10, position = position_jitter(0.1)) +
  stat_summary(fun = median, geom = "pointrange", color = "black") +
  ylab("Value") +
  theme(axis.text.y = element_blank())
# Dropout analysis----
# Dropout analysis t2
# One row per participant with every outcome widened to *_t1/*_t2; a
# participant is classed as a t2 dropout when pelvic.pain_t2 is NA (used as a
# proxy for a missed t2 assessment).
drop.out.analysis = clinical.trial %>%
  filter(time == "t1" | time == "t2") %>%
  select(id, time, group, pelvic.pain, pain.unpleasantness, dysuria, dyspareunia, dyschezia, dysmenorrhea,
         PSS_10_total, sf_36_physical.functioning, sf_36_limitations.physical.functioning, sf_36_pain,
         sf_36_general.health, sf_36_vitality, sf_36_social.function, sf_36_emotional.role, sf_36_mental.health) %>%
  pivot_wider(names_from = time,
              values_from = c(pelvic.pain, pain.unpleasantness, dysuria, dyspareunia, dyschezia, dysmenorrhea,
                              PSS_10_total, sf_36_physical.functioning, sf_36_limitations.physical.functioning, sf_36_pain,
                              sf_36_general.health, sf_36_vitality, sf_36_social.function, sf_36_emotional.role, sf_36_mental.health)) %>%
  mutate(drop.out = is.na(pelvic.pain_t2)) %>%
  mutate(drop.out = factor(drop.out, levels = c("FALSE", "TRUE"),
                           labels = c("no", "yes")))
# BUG FIX: View(drop.out.analysis) previously appeared *before* the assignment
# above and errored on a fresh run; moved after creation. (View() is
# interactive-only.)
View(drop.out.analysis)
# Dropout analysis t3 (same construction, keyed on pelvic.pain_t3)
drop.out.analysis.t3 = clinical.trial %>%
  filter(time == "t1" | time == "t3") %>%
  select(id, time, group, pelvic.pain, pain.unpleasantness, dysuria, dyspareunia, dyschezia, dysmenorrhea,
         PSS_10_total, sf_36_physical.functioning, sf_36_limitations.physical.functioning, sf_36_pain,
         sf_36_general.health, sf_36_vitality, sf_36_social.function, sf_36_emotional.role, sf_36_mental.health) %>%
  pivot_wider(names_from = time,
              values_from = c(pelvic.pain, pain.unpleasantness, dysuria, dyspareunia, dyschezia, dysmenorrhea,
                              PSS_10_total, sf_36_physical.functioning, sf_36_limitations.physical.functioning, sf_36_pain,
                              sf_36_general.health, sf_36_vitality, sf_36_social.function, sf_36_emotional.role, sf_36_mental.health)) %>%
  mutate(drop.out = is.na(pelvic.pain_t3)) %>%
  mutate(drop.out = factor(drop.out, levels = c("FALSE", "TRUE"),
                           labels = c("no", "yes")))
View(drop.out.analysis.t3)
#Dropout analyses by variable using wilcoxon_t2
# Compare baseline (t1) pain scores between completers and t2 dropouts with
# Wilcoxon rank-sum tests (rstatix::wilcox_test), one test per variable.
# melt() (reshape2) stacks the outcome columns into variable/value pairs;
# p-values are adjusted across tests and the rounded results rendered as a
# flextable.
drop.out.test=drop.out.analysis %>%
select(id,group,drop.out,pelvic.pain_t1,dysuria_t1,dyspareunia_t1,
dyschezia_t1,dysmenorrhea_t1,pain.unpleasantness_t1) %>%
melt(id.vars=c("id", "drop.out","group")) %>%
group_by(variable) %>%
wilcox_test(value~drop.out, detailed = TRUE) %>%
adjust_pvalue() %>%
add_significance("p") %>%
mutate(estimate=round(estimate,3),p=round(p,3),conf.low=round(conf.low,3),
conf.high=round(conf.high,3),p.adj=round(p.adj,3)) %>%
select(variable,estimate,statistic,p,conf.low,conf.high,p.adj) %>%
flextable() %>%
autofit()
#Dropout analyses by condition using wilcoxon_t2
# Same comparison stratified by treatment group (one test per variable within
# each group).
drop.out.test.condition=drop.out.analysis %>%
select(id,group,drop.out,pelvic.pain_t1,dysuria_t1,dyspareunia_t1,
dyschezia_t1,dysmenorrhea_t1,pain.unpleasantness_t1) %>%
melt(id.vars=c("id", "drop.out","group")) %>%
group_by(variable,group) %>%
wilcox_test(value~drop.out, detailed = TRUE) %>%
adjust_pvalue() %>%
add_significance("p") %>%
mutate(estimate=round(estimate,3),p=round(p,3),conf.low=round(conf.low,3),
conf.high=round(conf.high,3),p.adj=round(p.adj,3)) %>%
select(variable,estimate,statistic,p,conf.low,conf.high,p.adj) %>%
flextable() %>%
autofit()
#logistic regression enter outcomes and condition as predictors of dropout----
# For each baseline (t1) outcome: fit a logistic model predicting t2 dropout
# from the outcome plus treatment group, print the model summary, then
# overwrite the object with its gtsummary table (exponentiate = TRUE reports
# odds ratios). NOTE: after each tbl_regression() line the *.drop.out.log name
# holds a table, not the fitted glm.
#t2----
pelvic.pain.drop.out.log=glm(drop.out~pelvic.pain_t1+group, data = drop.out.analysis, family = binomial)
summary(pelvic.pain.drop.out.log)
pelvic.pain.drop.out.log=tbl_regression(pelvic.pain.drop.out.log, exponentiate = TRUE)
unpleasantness.drop.out.log=glm(drop.out~pain.unpleasantness_t1+group, data = drop.out.analysis, family = binomial)
summary(unpleasantness.drop.out.log)
unpleasantness.drop.out.log=tbl_regression(unpleasantness.drop.out.log, exponentiate = TRUE)
dysuria.drop.out.log=glm(drop.out~dysuria_t1+group, data = drop.out.analysis, family = binomial)
summary(dysuria.drop.out.log)
dysuria.drop.out.log=tbl_regression(dysuria.drop.out.log, exponentiate = TRUE)
dyschezia.drop.out.log=glm(drop.out~dyschezia_t1+group, data = drop.out.analysis, family = binomial)
summary(dyschezia.drop.out.log)
dyschezia.drop.out.log=tbl_regression(dyschezia.drop.out.log, exponentiate = TRUE)
dyspareunia.drop.out.log=glm(drop.out~dyspareunia_t1+group, data = drop.out.analysis, family = binomial)
summary(dyspareunia.drop.out.log)
dyspareunia.drop.out.log=tbl_regression(dyspareunia.drop.out.log, exponentiate = TRUE)
dysmenorrhea.drop.out.log=glm(drop.out~dysmenorrhea_t1+group, data = drop.out.analysis, family = binomial)
summary(dysmenorrhea.drop.out.log)
dysmenorrhea.drop.out.log=tbl_regression(dysmenorrhea.drop.out.log, exponentiate = TRUE)
ps.drop.out.log=glm(drop.out~PSS_10_total_t1+group, data = drop.out.analysis, family = binomial)
summary(ps.drop.out.log)
ps.drop.out.log=tbl_regression(ps.drop.out.log, exponentiate = TRUE)
# SF-36 predictors
physical.drop.out.log=glm(drop.out~sf_36_physical.functioning_t1+group, data = drop.out.analysis, family = binomial)
summary(physical.drop.out.log)
physical.drop.out.log=tbl_regression(physical.drop.out.log, exponentiate = TRUE)
lim.physical.drop.out.log=glm(drop.out~sf_36_limitations.physical.functioning_t1+group, data = drop.out.analysis, family = binomial)
summary(lim.physical.drop.out.log)
lim.physical.drop.out.log=tbl_regression(lim.physical.drop.out.log, exponentiate = TRUE)
pain.drop.out.log=glm(drop.out~sf_36_pain_t1+group, data = drop.out.analysis, family = binomial)
summary(pain.drop.out.log)
pain.drop.out.log=tbl_regression(pain.drop.out.log, exponentiate = TRUE)
health.drop.out.log=glm(drop.out~sf_36_general.health_t1+group, data = drop.out.analysis, family = binomial)
summary(health.drop.out.log)
health.drop.out.log=tbl_regression(health.drop.out.log, exponentiate = TRUE)
vitality.drop.out.log=glm(drop.out~sf_36_vitality_t1+group, data = drop.out.analysis, family = binomial)
summary(vitality.drop.out.log)
vitality.drop.out.log=tbl_regression(vitality.drop.out.log, exponentiate = TRUE)
social.drop.out.log=glm(drop.out~sf_36_social.function_t1+group, data = drop.out.analysis, family = binomial)
summary(social.drop.out.log)
social.drop.out.log=tbl_regression(social.drop.out.log, exponentiate = TRUE)
emotional.drop.out.log=glm(drop.out~sf_36_emotional.role_t1+group, data = drop.out.analysis, family = binomial)
summary(emotional.drop.out.log)
emotional.drop.out.log=tbl_regression(emotional.drop.out.log, exponentiate = TRUE)
mental.drop.out.log=glm(drop.out~sf_36_mental.health_t1+group, data = drop.out.analysis, family = binomial)
summary(mental.drop.out.log)
mental.drop.out.log=tbl_regression(mental.drop.out.log, exponentiate = TRUE)
# t3----
# Same models predicting dropout at t3 from each baseline outcome plus group;
# each object is again overwritten with its gtsummary odds-ratio table.
pelvic.pain.drop.out.log3=glm(drop.out~pelvic.pain_t1+group, data = drop.out.analysis.t3, family = binomial)#pelvic pain predicted drop out
summary(pelvic.pain.drop.out.log3)
pelvic.pain.drop.out.log3=tbl_regression(pelvic.pain.drop.out.log3, exponentiate = TRUE)
unpleasantness.drop.out.log3=glm(drop.out~pain.unpleasantness_t1+group, data = drop.out.analysis.t3, family = binomial)
summary(unpleasantness.drop.out.log3)
unpleasantness.drop.out.log3=tbl_regression(unpleasantness.drop.out.log3, exponentiate = TRUE)
dysuria.drop.out.log3=glm(drop.out~dysuria_t1+group, data = drop.out.analysis.t3, family = binomial)
summary(dysuria.drop.out.log3)
dysuria.drop.out.log3=tbl_regression(dysuria.drop.out.log3, exponentiate = TRUE)
dyschezia.drop.out.log3=glm(drop.out~dyschezia_t1+group, data = drop.out.analysis.t3, family = binomial)
summary(dyschezia.drop.out.log3)
dyschezia.drop.out.log3=tbl_regression(dyschezia.drop.out.log3, exponentiate = TRUE)
dyspareunia.drop.out.log3=glm(drop.out~dyspareunia_t1+group, data = drop.out.analysis.t3, family = binomial)
summary(dyspareunia.drop.out.log3)
dyspareunia.drop.out.log3=tbl_regression(dyspareunia.drop.out.log3, exponentiate = TRUE)
dysmenorrhea.drop.out.log3=glm(drop.out~dysmenorrhea_t1+group, data = drop.out.analysis.t3, family = binomial)#dysmenorrhea predicted drop out
summary(dysmenorrhea.drop.out.log3)
dysmenorrhea.drop.out.log3=tbl_regression(dysmenorrhea.drop.out.log3, exponentiate = TRUE)
ps.drop.out.log3=glm(drop.out~PSS_10_total_t1+group, data = drop.out.analysis.t3, family = binomial)
summary(ps.drop.out.log3)
ps.drop.out.log3=tbl_regression(ps.drop.out.log3, exponentiate = TRUE)
# SF-36 predictors
physical.drop.out.log3=glm(drop.out~sf_36_physical.functioning_t1+group, data = drop.out.analysis.t3, family = binomial)
summary(physical.drop.out.log3)
physical.drop.out.log3=tbl_regression(physical.drop.out.log3, exponentiate = TRUE)
lim.physical.drop.out.log3=glm(drop.out~sf_36_limitations.physical.functioning_t1+group, data = drop.out.analysis.t3, family = binomial)
summary(lim.physical.drop.out.log3)
lim.physical.drop.out.log3=tbl_regression(lim.physical.drop.out.log3, exponentiate = TRUE)
pain.drop.out.log3=glm(drop.out~sf_36_pain_t1+group, data = drop.out.analysis.t3, family = binomial)
summary(pain.drop.out.log3)
pain.drop.out.log3=tbl_regression(pain.drop.out.log3, exponentiate = TRUE)
health.drop.out.log3=glm(drop.out~sf_36_general.health_t1+group, data = drop.out.analysis.t3, family = binomial)
summary(health.drop.out.log3)
health.drop.out.log3=tbl_regression(health.drop.out.log3, exponentiate = TRUE)
vitality.drop.out.log3=glm(drop.out~sf_36_vitality_t1+group, data = drop.out.analysis.t3, family = binomial)
summary(vitality.drop.out.log3)
vitality.drop.out.log3=tbl_regression(vitality.drop.out.log3, exponentiate = TRUE)
social.drop.out.log3=glm(drop.out~sf_36_social.function_t1+group, data = drop.out.analysis.t3, family = binomial)
summary(social.drop.out.log3)
social.drop.out.log3=tbl_regression(social.drop.out.log3, exponentiate = TRUE)
emotional.drop.out.log3=glm(drop.out~sf_36_emotional.role_t1+group, data = drop.out.analysis.t3, family = binomial)
summary(emotional.drop.out.log3)
emotional.drop.out.log3=tbl_regression(emotional.drop.out.log3, exponentiate = TRUE)
mental.drop.out.log3=glm(drop.out~sf_36_mental.health_t1+group, data = drop.out.analysis.t3, family = binomial)
summary(mental.drop.out.log3)
mental.drop.out.log3=tbl_regression(mental.drop.out.log3, exponentiate = TRUE)
#table----
# Merge each outcome's two dropout models (dropout at t2 vs dropout at t3)
# side by side with gtsummary::tbl_merge, then stack all outcomes into one
# table.
# NOTE(review): the spanner labels read "Time 1"/"Time 2" although the merged
# columns are the t2- and t3-dropout models — confirm the intended wording
# (possibly "Time 2"/"Time 3").
# NOTE(review): library(gtsummary) is loaded here mid-script, but
# tbl_regression() above already required it; the call belongs at the top of
# the file.
library(gtsummary)
tbl_p.pain =
tbl_merge(tbls = list(pelvic.pain.drop.out.log, pelvic.pain.drop.out.log3),
tab_spanner = c("**Time 1**", "**Time 2**"))
tbl_unpleasantness =
tbl_merge(tbls = list(unpleasantness.drop.out.log, unpleasantness.drop.out.log3),
tab_spanner = c("**Time 1**", "**Time 2**"))
tbl_dysuria =
tbl_merge(tbls = list(dysuria.drop.out.log, dysuria.drop.out.log3),
tab_spanner = c("**Time 1**", "**Time 2**"))
tbl_dyschezia =
tbl_merge(tbls = list(dyschezia.drop.out.log, dyschezia.drop.out.log3),
tab_spanner = c("**Time 1**", "**Time 2**"))
tbl_dyspareunia =
tbl_merge(tbls = list(dyspareunia.drop.out.log, dyspareunia.drop.out.log3),
tab_spanner = c("**Time 1**", "**Time 2**"))
tbl_dysmenorrhea =
tbl_merge(tbls = list(dysmenorrhea.drop.out.log, dysmenorrhea.drop.out.log3),
tab_spanner = c("**Time 1**", "**Time 2**"))
tbl_ps =
tbl_merge(tbls = list(ps.drop.out.log, ps.drop.out.log3),
tab_spanner = c("**Time 1**", "**Time 2**"))
tbl_physical =
tbl_merge(tbls = list(physical.drop.out.log, physical.drop.out.log3),
tab_spanner = c("**Time 1**", "**Time 2**"))
tbl_lim.physical =
tbl_merge(tbls = list(lim.physical.drop.out.log, lim.physical.drop.out.log3),
tab_spanner = c("**Time 1**", "**Time 2**"))
tbl_pain =
tbl_merge(tbls = list(pain.drop.out.log, pain.drop.out.log3),
tab_spanner = c("**Time 1**", "**Time 2**"))
tbl_health =
tbl_merge(tbls = list(health.drop.out.log, health.drop.out.log3),
tab_spanner = c("**Time 1**", "**Time 2**"))
tbl_vitality =
tbl_merge(tbls = list(vitality.drop.out.log, vitality.drop.out.log3),
tab_spanner = c("**Time 1**", "**Time 2**"))
tbl_social =
tbl_merge(tbls = list(social.drop.out.log, social.drop.out.log3),
tab_spanner = c("**Time 1**", "**Time 2**"))
tbl_emotional =
tbl_merge(tbls = list(emotional.drop.out.log, emotional.drop.out.log3),
tab_spanner = c("**Time 1**", "**Time 2**"))
tbl_mental =
tbl_merge(tbls = list(mental.drop.out.log, mental.drop.out.log3),
tab_spanner = c("**Time 1**", "**Time 2**"))
#Join tables----
# Stack every outcome's merged table into one combined gtsummary table.
tbl_stack(list(tbl_p.pain,tbl_unpleasantness,tbl_dysuria,tbl_dyschezia,tbl_dyspareunia,tbl_dysmenorrhea,tbl_ps,tbl_physical,
tbl_lim.physical,tbl_pain,tbl_health,tbl_vitality,tbl_social,
tbl_emotional,tbl_mental))
#Sociodemographic dropout----
# Append the dropout indicator (derived above) to the sociodemographic data.
# NOTE(review): bind_cols() pairs rows purely by position — this assumes the
# sociodemographic table and the dropout tables share the same participant
# order; confirm upstream.
#t2
drop.out.analysis.socio <- select(drop.out.analysis, drop.out)
sociodemographic.drop.out <- bind_cols(sociodemographic, drop.out.analysis.socio)
#t3
drop.out.analysis.t3.socio <- select(drop.out.analysis.t3, drop.out)
sociodemographic.drop.out3 <- bind_cols(sociodemographic, drop.out.analysis.t3.socio)
#logistic regression
# Logistic models predicting dropout (t2 section, then t3) from each
# sociodemographic/clinical covariate plus treatment group. As above, each
# object is overwritten with its gtsummary odds-ratio table after the model
# summary is printed.
#t2
age.drop.out.log=glm(drop.out~age+group, data = sociodemographic.drop.out, family = binomial)
summary(age.drop.out.log)
age.drop.out.log=tbl_regression(age.drop.out.log, exponentiate = TRUE)
education.drop.out.log=glm(drop.out~education+group, data = sociodemographic.drop.out, family = binomial)
summary(education.drop.out.log)
education.drop.out.log=tbl_regression(education.drop.out.log, exponentiate = TRUE)
marital.status.drop.out.log=glm(drop.out~marital.status+group, data = sociodemographic.drop.out, family = binomial)
summary(marital.status.drop.out.log)
marital.status.drop.out.log=tbl_regression(marital.status.drop.out.log, exponentiate = TRUE)
BMI.drop.out.log=glm(drop.out~BMI+group, data = sociodemographic.drop.out, family = binomial)
summary(BMI.drop.out.log)
BMI.drop.out.log=tbl_regression(BMI.drop.out.log, exponentiate = TRUE)
physical.exercise.drop.out.log=glm(drop.out~physical.exercise+group, data = sociodemographic.drop.out, family = binomial)
summary(physical.exercise.drop.out.log)
physical.exercise.drop.out.log=tbl_regression(physical.exercise.drop.out.log, exponentiate = TRUE)
current.endometriosis.medication.drop.out.log=glm(drop.out~current.endometriosis.medication+group, data = sociodemographic.drop.out, family = binomial)
summary(current.endometriosis.medication.drop.out.log)
current.endometriosis.medication.drop.out.log=tbl_regression(current.endometriosis.medication.drop.out.log, exponentiate = TRUE)
sleep.hours.per.night.drop.out.log=glm(drop.out~sleep.hours.per.night+group, data = sociodemographic.drop.out, family = binomial)
summary(sleep.hours.per.night.drop.out.log)
sleep.hours.per.night.drop.out.log=tbl_regression(sleep.hours.per.night.drop.out.log, exponentiate = TRUE)
time.chronic.pain.drop.out.log=glm(drop.out~time.chronic.pain+group, data = sociodemographic.drop.out, family = binomial)
summary(time.chronic.pain.drop.out.log)
time.chronic.pain.drop.out.log=tbl_regression(time.chronic.pain.drop.out.log, exponentiate = TRUE)
anxiety.drop.out.log=glm(drop.out~anxiety+group, data = sociodemographic.drop.out, family = binomial)
summary(anxiety.drop.out.log)
anxiety.drop.out.log=tbl_regression(anxiety.drop.out.log, exponentiate = TRUE)
depression.drop.out.log=glm(drop.out~depression+group, data = sociodemographic.drop.out, family = binomial)
summary(depression.drop.out.log)
depression.drop.out.log=tbl_regression(depression.drop.out.log, exponentiate = TRUE)
#t3
age.drop.out.log3=glm(drop.out~age+group, data = sociodemographic.drop.out3, family = binomial)#marginally significant
summary(age.drop.out.log3)
age.drop.out.log3=tbl_regression(age.drop.out.log3, exponentiate = TRUE)
education.drop.out.log3=glm(drop.out~education+group, data = sociodemographic.drop.out3, family = binomial)
summary(education.drop.out.log3)
education.drop.out.log3=tbl_regression(education.drop.out.log3, exponentiate = TRUE)
marital.status.drop.out.log3=glm(drop.out~marital.status+group, data = sociodemographic.drop.out3, family = binomial)
summary(marital.status.drop.out.log3)
marital.status.drop.out.log3=tbl_regression(marital.status.drop.out.log3, exponentiate = TRUE)
BMI.drop.out.log3=glm(drop.out~BMI+group, data = sociodemographic.drop.out3, family = binomial)
summary(BMI.drop.out.log3)
BMI.drop.out.log3=tbl_regression(BMI.drop.out.log3, exponentiate = TRUE)
physical.exercise.drop.out.log3=glm(drop.out~physical.exercise+group, data = sociodemographic.drop.out3, family = binomial)
summary(physical.exercise.drop.out.log3)
physical.exercise.drop.out.log3=tbl_regression(physical.exercise.drop.out.log3, exponentiate = TRUE)
current.endometriosis.medication.drop.out.log3=glm(drop.out~current.endometriosis.medication+group, data = sociodemographic.drop.out3, family = binomial)
summary(current.endometriosis.medication.drop.out.log3)
current.endometriosis.medication.drop.out.log3=tbl_regression(current.endometriosis.medication.drop.out.log3, exponentiate = TRUE)
sleep.hours.per.night.drop.out.log3=glm(drop.out~sleep.hours.per.night+group, data = sociodemographic.drop.out3, family = binomial)
summary(sleep.hours.per.night.drop.out.log3)
sleep.hours.per.night.drop.out.log3=tbl_regression(sleep.hours.per.night.drop.out.log3, exponentiate = TRUE)
time.chronic.pain.drop.out.log3=glm(drop.out~time.chronic.pain+group, data = sociodemographic.drop.out3, family = binomial)
summary(time.chronic.pain.drop.out.log3)
time.chronic.pain.drop.out.log3=tbl_regression(time.chronic.pain.drop.out.log3, exponentiate = TRUE)
anxiety.drop.out.log3=glm(drop.out~anxiety+group, data = sociodemographic.drop.out3, family = binomial)
summary(anxiety.drop.out.log3)
anxiety.drop.out.log3=tbl_regression(anxiety.drop.out.log3, exponentiate = TRUE)
depression.drop.out.log3=glm(drop.out~depression+group, data = sociodemographic.drop.out3, family = binomial)
summary(depression.drop.out.log3)
depression.drop.out.log3=tbl_regression(depression.drop.out.log3, exponentiate = TRUE)
#table----
# Merge each covariate's t2- and t3-dropout models side by side, then stack.
# NOTE(review): spanner labels read "Time 1"/"Time 2" although the merged
# models are the t2- and t3-dropout fits — confirm intended wording.
# NOTE(review): the current.endometriosis.medication models are fitted above
# but never merged or stacked here — confirm the omission is intentional.
tbl_age =
tbl_merge(tbls = list(age.drop.out.log,age.drop.out.log3),
tab_spanner = c("**Time 1**", "**Time 2**"))
tbl_education =
tbl_merge(tbls = list(education.drop.out.log,education.drop.out.log3),
tab_spanner = c("**Time 1**", "**Time 2**"))
tbl_marital.status =
tbl_merge(tbls = list(marital.status.drop.out.log,marital.status.drop.out.log3),
tab_spanner = c("**Time 1**", "**Time 2**"))
tbl_BMI =
tbl_merge(tbls = list(BMI.drop.out.log,BMI.drop.out.log3),
tab_spanner = c("**Time 1**", "**Time 2**"))
tbl_physical.exercise =
tbl_merge(tbls = list(physical.exercise.drop.out.log,physical.exercise.drop.out.log3),
tab_spanner = c("**Time 1**", "**Time 2**"))
tbl_sleep.hours.per.night =
tbl_merge(tbls = list(sleep.hours.per.night.drop.out.log,sleep.hours.per.night.drop.out.log3),
tab_spanner = c("**Time 1**", "**Time 2**"))
tbl_time.chronic.pain =
tbl_merge(tbls = list(time.chronic.pain.drop.out.log,time.chronic.pain.drop.out.log3),
tab_spanner = c("**Time 1**", "**Time 2**"))
tbl_anxiety =
tbl_merge(tbls = list(anxiety.drop.out.log,anxiety.drop.out.log3),
tab_spanner = c("**Time 1**", "**Time 2**"))
tbl_depression =
tbl_merge(tbls = list(depression.drop.out.log,depression.drop.out.log3),
tab_spanner = c("**Time 1**", "**Time 2**"))
#join tables----
tbl_stack(list(tbl_age ,tbl_education ,tbl_marital.status ,tbl_BMI ,
tbl_physical.exercise ,tbl_sleep.hours.per.night ,
tbl_time.chronic.pain ,tbl_anxiety ,tbl_depression))
#multiple imputation endometriosis-related pain variables----
# Reshape pain outcomes to wide format (one column per variable x time point),
# dummy-code `group`, and keep only the intervention indicator
# (group_intervention; control is the implicit reference level).
clinical.trial.pain2=clinical.trial %>%
select(id,group,time,pelvic.pain,dysuria,dyspareunia,dyschezia, dysmenorrhea,
pain.unpleasantness) %>%
pivot_wider(names_from = time,values_from=c(pelvic.pain,dysuria,dyspareunia,dyschezia, dysmenorrhea,
pain.unpleasantness)) %>%
dummy_cols(select_columns ="group") %>%
select(-group_control,-group)
#correlation between variables
# Spearman correlations (psych::lowerCor, lower triangle, complete cases)
# among the wide pain variables, dropping identifiers.
# FIX: clinical.trial.pain2 no longer contains a `group` column (it was dropped
# after dummy-coding above), so `select(-group)` errored; drop the remaining
# group_intervention indicator instead, matching the sf-36 section below.
triangular.cor.pain=clinical.trial.pain2 %>%
select(-id,-group_intervention) %>%
lowerCor(use = "complete.obs", method="spearman")
#plot missing pattern
# Inspect missingness structure (mice::md.pattern / md.pairs) and visualize it
# with VIM::aggr before imputing.
md.pattern(clinical.trial.pain2)
md.pairs(clinical.trial.pain2)
pain_plot.missing.pain = aggr(clinical.trial.pain2 , col=c('navyblue','yellow'),
numbers=TRUE, sortVars=TRUE,
labels=names(clinical.trial.pain2 ), cex.axis=.7,
gap=3, ylab=c("Missing data","Pattern"))
#construct the predictor matrix setting: -2 to indicate the cluster variable, 1 imputation model with a fixed effect and a random intercept(default)
# group_intervention is flagged as the cluster variable (-2) in the mice
# predictor matrix; m = 5 imputed datasets, fixed seed for reproducibility.
pain.p.matrix=make.predictorMatrix(clinical.trial.pain2)
pain.p.matrix[,"group_intervention"]=-2
imputed_pain2=mice(clinical.trial.pain2, m=5, predictorMatrix = pain.p.matrix, seed=125)
summary(imputed_pain2)
#multiple imputation perceived stress variable----
# Same wide-format prep as the pain section, for PSS-10 total stress plus two
# pain variables used as auxiliary predictors in the imputation model.
clinical.trial.stress=clinical.trial %>%
select(id,group,time,pelvic.pain,pain.unpleasantness,PSS_10_total) %>%
pivot_wider(names_from = time,values_from=c(pelvic.pain,pain.unpleasantness,PSS_10_total)) %>%
dummy_cols(select_columns ="group") %>%
select(-group_control,-group)
names(clinical.trial.stress)
#correlation between variables
# Spearman correlations among the wide stress/pain variables.
# FIX: clinical.trial.stress has no `group` column (dropped after dummy-coding),
# so `select(-group)` errored; drop the group_intervention indicator instead,
# consistent with the sf-36 correlation section.
triangular.cor.pain.stress=clinical.trial.stress %>%
select(-id,-group_intervention) %>%
lowerCor(use = "complete.obs", method="spearman")
#plot missing pattern
# Missingness inspection and mice imputation for the stress dataset
# (same recipe as the pain section; seed fixed at 124).
md.pattern( clinical.trial.stress)
md.pairs( clinical.trial.stress)
stress_plot.missing.stress = aggr( clinical.trial.stress, col=c('navyblue','yellow'),
numbers=TRUE, sortVars=TRUE,
labels=names( clinical.trial.stress), cex.axis=.7,
gap=3, ylab=c("Missing data","Pattern"))
#construct the predictor matrix setting: -2 to indicate the cluster variable, 1 imputation model with a fixed effect and a random intercept(default)
stress.p.matrix=make.predictorMatrix(clinical.trial.stress)
stress.p.matrix[,"group_intervention"]=-2
imputed_stress=mice(clinical.trial.stress, m=5, predictorMatrix = stress.p.matrix, seed=124)
summary(imputed_stress)
#multiple imputation sf_36----
# Wide-format SF-36 subscales for imputation. Two variants are built:
# - clinical.trial.impute.sf.36: dummy-coded group (for mice with cluster var)
# - clinical.trial.impute.sf.36.wcx: keeps the original `group` factor
#   (used later for the Wilcoxon-test imputation)
clinical.trial.impute.sf.36=clinical.trial %>%
select(id,group,time, sf_36_physical.functioning,sf_36_limitations.physical.functioning,
sf_36_pain, sf_36_general.health,sf_36_vitality,sf_36_social.function,
sf_36_emotional.role,sf_36_mental.health) %>%
pivot_wider(names_from = time,values_from=c(sf_36_physical.functioning,sf_36_limitations.physical.functioning,
sf_36_pain, sf_36_general.health,sf_36_vitality,sf_36_social.function,
sf_36_emotional.role,sf_36_mental.health)) %>%
dummy_cols(select_columns ="group") %>%
select(-group_control,-group)
clinical.trial.impute.sf.36.wcx=clinical.trial %>%
select(id,group,time, sf_36_physical.functioning,sf_36_limitations.physical.functioning,
sf_36_pain, sf_36_general.health,sf_36_vitality,sf_36_social.function,
sf_36_emotional.role,sf_36_mental.health) %>%
pivot_wider(names_from = time,values_from=c(sf_36_physical.functioning,sf_36_limitations.physical.functioning,
sf_36_pain, sf_36_general.health,sf_36_vitality,sf_36_social.function,
sf_36_emotional.role,sf_36_mental.health))
#summary physical and mental health
# Composite physical/mental SF-36 summary scores, same wide-format treatment.
clinical.trial.impute.sf.36.sum=clinical.trial %>%
select(id,group,time, sf.36.physical.sum,sf.36.mental.sum) %>%
pivot_wider(names_from = time,values_from=c( sf.36.physical.sum,sf.36.mental.sum)) %>%
dummy_cols(select_columns ="group") %>%
select(-group_control,-group)
#correlation between variables
triangular.cor.sf.36=clinical.trial.impute.sf.36 %>%
select(-id,-group_intervention) %>%
lowerCor(use = "complete.obs", method="spearman")
triangular.cor.sf.36.sum=clinical.trial.impute.sf.36.sum %>%
select(-id,-group_intervention) %>%
lowerCor(use = "complete.obs", method="spearman")
#plot missing pattern
md.pattern( clinical.trial.impute.sf.36)
md.pairs( clinical.trial.impute.sf.36)
md.pattern( clinical.trial.impute.sf.36.sum)
md.pairs( clinical.trial.impute.sf.36.sum)
plot.missing.sf.36 = aggr( clinical.trial.impute.sf.36, col=c('navyblue','yellow'),
numbers=TRUE, sortVars=TRUE,
labels=names( clinical.trial.impute.sf.36), cex.axis=.7,
gap=3, ylab=c("Missing data","Pattern"))
plot.missing.sf.36.sum = aggr( clinical.trial.impute.sf.36.sum, col=c('navyblue','yellow'),
numbers=TRUE, sortVars=TRUE,
labels=names( clinical.trial.impute.sf.36.sum), cex.axis=.7,
gap=3, ylab=c("Missing data","Pattern"))
#construct the predictor matrix setting: -2 to indicate the cluster variable, 1 imputation model with a fixed effect and a random intercept(default)
sf.36.p.matrix=make.predictorMatrix( clinical.trial.impute.sf.36)
sf.36.p.matrix[,"group_intervention"]=-2
imputed_sf.36=mice(clinical.trial.impute.sf.36, m=5, predictorMatrix = sf.36.p.matrix, seed=123)
summary(imputed_sf.36)
# Impute the SF-36 composite summary scores with the same cluster-variable
# predictor matrix recipe (seed 128).
sf.36.p.matrix.sum=make.predictorMatrix( clinical.trial.impute.sf.36.sum)
sf.36.p.matrix.sum[,"group_intervention"]=-2
imputed_sf.36.sum=mice(clinical.trial.impute.sf.36.sum, m=5, predictorMatrix = sf.36.p.matrix.sum,seed=128)
# FIX: was summary(imputed_sf.36) — a copy-paste slip that re-summarized the
# subscale imputation instead of the composite one just created.
summary(imputed_sf.36.sum)
#imputed to wilcox test
# Default mice imputation (no custom predictor matrix) of the factor-group
# SF-36 dataset, used downstream for Wilcoxon tests.
impute.sf.36.wcx=mice(clinical.trial.impute.sf.36.wcx, seed = 167)
summary(impute.sf.36.wcx)
#Robust linear (MM-type estimators) raw scores, imputed data and Ordinary Least Squares without outliers models on endometriosis-related pain ----
# Evaluate the presence of near zero variance endometriosis related pain---
#convert data from long to wide by time and group
var.test.pain= clinical.trial %>%
select(id, group, time, pelvic.pain, dysuria,dyschezia,dyspareunia,dysmenorrhea,pain.unpleasantness,PSS_10_total) %>%
pivot_wider(names_from = time|group,values_from = c( pelvic.pain, dysuria,dyschezia,dyspareunia,
dysmenorrhea,pain.unpleasantness,PSS_10_total))
# caret::nearZeroVar screens each wide column for (near-)zero variance;
# the which() calls print the offending column indices, if any.
nearzero.pain=var.test.pain %>%
nearZeroVar(saveMetrics = TRUE)
which(nearzero.pain$zeroVar=='TRUE') # variable with zero var
which(nearzero.pain$nzv=='TRUE') # variable with near zero var
#no near zero variable
#pelvic pain----
# Pelvic pain wide by time (t1/t2/t3 columns) for the baseline-adjusted models;
# plot.pelvic.pain keeps group + times for the ggpairs correlation plot.
cor_pelvic.pain=clinical.trial %>%
select(id,group,time,pelvic.pain) %>%
pivot_wider(names_from = time, values_from = pelvic.pain)
plot.pelvic.pain=cor_pelvic.pain %>%
select(group,t1,t2,t3)
library("GGally")
# FIX: gate the install — an unconditional install.packages() re-downloads the
# package on every run of the script (and fails without network/CRAN access).
if (!requireNamespace("effectsize", quietly = TRUE)) install.packages("effectsize")
library(effectsize)
ggpairs(plot.pelvic.pain, ggplot2::aes(colour=group)) #evaluate correlation between group and time
#t2
#robust
# Baseline-adjusted robust regression (robustbase::lmrob, MM-estimator):
# t2 outcome ~ baseline (t1) + group.
p.pain.lm.adj2=lmrob(t2~t1+group, data = cor_pelvic.pain)
summary(p.pain.lm.adj2)
effectsize(p.pain.lm.adj2)
par(mfrow=c(2,2))
plot(p.pain.lm.adj2)
tidy(p.pain.lm.adj2, conf.int = TRUE)
#lm without outlier
#considerar outlier observation that exceed 3 standard deviation in the residual vs leverage plot
#High influential points is considered 4/(n - p - 1) (Bruce, Peter, and Andrew Bruce. 2017. Practical Statistics for Data Scientists. O’Reilly Media.)
#p is the number of predictors. 4/(52-2-1)=0,08
# NOTE(review): row indices below were chosen manually from the diagnostic
# plots above — re-verify them if the input data changes.
cor_pelvic.pain2.out=cor_pelvic.pain[-c(48,49),]#without outlier
p.pain.lm.adj2.out=lm(t2~t1+group, data = cor_pelvic.pain2.out)
summary(p.pain.lm.adj2.out)
par(mfrow=c(2,2))
plot(p.pain.lm.adj2.out)
plot(p.pain.lm.adj2.out,4)
#t3
# Same pair of models (robust, then OLS without hand-picked outliers) at t3.
p.pain.lm.adj3=lmrob(t3~t1+group, data = cor_pelvic.pain)
summary(p.pain.lm.adj3)
par(mfrow=c(2,2))
plot(p.pain.lm.adj3)
#cook's distance 4/(45-2-1)=0,09
cor_pelvic.pain3.out=cor_pelvic.pain[-c(58,54),]#without outlier
p.pain.lm.adj3.out=lm(t3~t1+group, data = cor_pelvic.pain3.out)
summary(p.pain.lm.adj3.out)
par(mfrow=c(2,2))
plot(p.pain.lm.adj3.out,id.n=5)
plot(p.pain.lm.adj3.out,4,id.n=5)
#pelvic pain imputed dataframe----
names(clinical.trial.imputed.pain)
#t2
# Fit the robust model in each of the 5 imputed datasets and pool with
# Rubin's rules (mice::pool), with 95% CIs.
p.pain.lm.imputed=with(imputed_pain2,lmrob(pelvic.pain_t2~pelvic.pain_t1+group_intervention))
summary(p.pain.lm.imputed)
p.pain.lm.imputed.pool=summary(pool(p.pain.lm.imputed), conf.int = TRUE)
tibble(p.pain.lm.imputed.pool)
#t3
p.pain.lm.imputed.t3=with(imputed_pain2,lmrob(pelvic.pain_t3~pelvic.pain_t1+group_intervention))
summary(p.pain.lm.imputed.t3)
p.pain.lm.imputed.pool.t3=summary(pool(p.pain.lm.imputed.t3), conf.int = TRUE)
tibble(p.pain.lm.imputed.pool.t3)
#dysuria----
# Same modelling recipe as pelvic pain: wide data, robust lmrob adjusted for
# baseline, OLS refit after manual outlier removal, and pooled imputed fits.
cor_dysuria=clinical.trial %>%
select(id,group,time,dysuria) %>%
pivot_wider(names_from = time, values_from = dysuria)
plot.dysuria=cor_dysuria %>%
select(group,t1,t2,t3)
GGally::ggpairs(plot.dysuria, ggplot2::aes(colour=group)) #evaluate correlation between group and times
#robust
#t2
dysuria.lm.adj2=lmrob(t2~t1+group, data = cor_dysuria)
summary(dysuria.lm.adj2)
plot(dysuria.lm.adj2)
#lm without outlier
#considerar outlier observation that exceed 3 standard deviation in the residual vs leverage plot
#High influential points is considered 4/(n - p - 1) (Bruce, Peter, and Andrew Bruce. 2017. Practical Statistics for Data Scientists. O’Reilly Media.)
#p is the number of predictors. 4/(53-2-1)=0,08
# NOTE(review): hand-picked outlier rows — re-verify if data changes.
cor_dysuria2.out=cor_dysuria[-c(6,15,62,40),]#without outlier
dysuria.lm.adj2.out=lm(t2~t1+group, data = cor_dysuria2.out)
summary(dysuria.lm.adj2.out)
par(mfrow=c(2,2))
plot(dysuria.lm.adj2.out, id.n=5)
plot(dysuria.lm.adj2.out,4, id.n=5)
#t3
#robust
dysuria.lm.adj3=lmrob(t3~t1+group, data = cor_dysuria)
summary(dysuria.lm.adj3)
plot(dysuria.lm.adj3)
#lm
#cook's distance 4/(46-2-1)=0,09
cor_dysuria3.out=cor_dysuria[-c(15,40, 58,42),]#without outlier
dysuria.lm.adj3.out=lm(t3~t1+group, data = cor_dysuria3.out)
summary(dysuria.lm.adj3.out)
par(mfrow=c(2,2))
plot(dysuria.lm.adj3.out, id.n=5)
plot(dysuria.lm.adj3.out,4, id.n=6)
#dysuria imputed dataframe----
#t2
dysuria.lm.imputed=with(imputed_pain2,lmrob(dysuria_t2~dysuria_t1+group_intervention))
summary(dysuria.lm.imputed)
dysuria.lm.imputed.pool=summary(pool(dysuria.lm.imputed), conf.int = TRUE)
tibble(dysuria.lm.imputed.pool)
#t3
dysuria.lm.imputed.t3=with(imputed_pain2,lmrob(dysuria_t3~dysuria_t1+group_intervention))
summary(dysuria.lm.imputed.t3)
dysuria.lm.imputed.pool.t3=summary(pool(dysuria.lm.imputed.t3), conf.int = TRUE)# marginally significant
#dyspareunia----
# Same recipe as above for dyspareunia.
cor_dyspareunia=clinical.trial %>%
select(id,group,time,dyspareunia) %>%
pivot_wider(names_from = time, values_from = dyspareunia)
plot.dyspareunia=cor_dyspareunia %>%
select(group,t1,t2,t3)
GGally::ggpairs(plot.dyspareunia, ggplot2::aes(colour=group)) #evaluate correlation between group and times
#model adjusted for baseline
#t2
#robust
dyspareunia.lm.adj2=lmrob(t2~t1+group, data = cor_dyspareunia)
summary(dyspareunia.lm.adj2)
plot(dyspareunia.lm.adj2)
#lm without outlier
#considerar outlier observation that exceed 3 standard deviation in the residual vs leverage plot
#High influential points is considered 4/(n - p - 1) (Bruce, Peter, and Andrew Bruce. 2017. Practical Statistics for Data Scientists. O’Reilly Media.)
#p is the number of predictors. 4/(54-2-1)=0,07
cor_dyspareunia2.out=cor_dyspareunia[-c(10,45),]#without outlier
dyspareunia.lm.adj2.out=lm(t2~t1+group, data = cor_dyspareunia2.out)
summary(dyspareunia.lm.adj2.out)
par(mfrow=c(2,2))
plot(dyspareunia.lm.adj2.out, id.n=5)
plot(dyspareunia.lm.adj2.out, 4)
#t3
#robust
dyspareunia.lm.adj3=lmrob(t3~t1+group, data = cor_dyspareunia)
summary(dyspareunia.lm.adj3)
plot(dyspareunia.lm.adj3)
#lm without outlier
#4/(46-2-1)=0,09
cor_dyspareunia3=cor_dyspareunia[-c(54,32,4),]#without outlier
dyspareunia.lm.adj3.out=lm(t3~t1+group, data = cor_dyspareunia3)
summary(dyspareunia.lm.adj3.out)
par(mfrow=c(2,2))
plot(dyspareunia.lm.adj3.out)
plot(dyspareunia.lm.adj3.out,4)
#dyspareunia imputed dataframe----
#t2
dyspareunia.lm.imputed=with(imputed_pain2,lmrob(dyspareunia_t2~dyspareunia_t1+group_intervention))
summary(dyspareunia.lm.imputed)
dyspareunia.lm.imputed.pool=summary(pool(dyspareunia.lm.imputed), conf.int = TRUE)
tibble(dyspareunia.lm.imputed.pool)
#t3
# NOTE(review): this t3 fit uses plain lm() while every other imputed fit uses
# lmrob() — confirm whether that is intentional (e.g. lmrob convergence issues).
dyspareunia.lm.imputed.t3=with(imputed_pain2,lm(dyspareunia_t3~dyspareunia_t1+group_intervention))
summary(dyspareunia.lm.imputed.t3)
dyspareunia.lm.imputed.pool.t3=summary(pool(dyspareunia.lm.imputed.t3), conf.int = TRUE)
#dyschezia----
# Same recipe as above for dyschezia.
cor_dyschezia=clinical.trial %>%
select(id,group,time,dyschezia) %>%
pivot_wider(names_from = time, values_from = dyschezia)
plot.dyschezia=cor_dyschezia %>%
select(group,t1,t2,t3)
GGally::ggpairs(plot.dyschezia, ggplot2::aes(colour=group)) #evaluate correlation between group and times
#model adjusted for baseline
#t2
#robust
dyschezia.lm.adj2=lmrob(t2~t1+group, data = cor_dyschezia)
summary(dyschezia.lm.adj2)
par(mfrow=c(2,2))
plot(dyschezia.lm.adj2)
#lm without outlier
#considerar outlier observation that exceed 3 standard deviation in the residual vs leverage plot
#High influential points is considered 4/(n - p - 1) (Bruce, Peter, and Andrew Bruce. 2017. Practical Statistics for Data Scientists. O’Reilly Media.)
#p is the number of predictors. 4/(56-2-1)=0,07
cor_dyschezia2=cor_dyschezia[-c(40,32,47),]#without outlier
dyschezia.lm.adj.out=lm(t2~t1+group, data = cor_dyschezia2)
summary(dyschezia.lm.adj.out)
par(mfrow=c(2,2))
plot(dyschezia.lm.adj.out, id.n=5)
plot(dyschezia.lm.adj.out, 4)
#t3
#robust
dyschezia.lm.adj3=lmrob(t3~t1+group, data = cor_dyschezia)
summary(dyschezia.lm.adj3)
plot(dyschezia.lm.adj3)
#lm without outlier 4/(56-2-1)=0,07
cor_dyschezia3=cor_dyschezia[-c(8,13,32,40,44),]#without outlier
dyschezia.lm.adj3.out=lm(t3~t1+group, data = cor_dyschezia3)
summary(dyschezia.lm.adj3.out)
par(mfrow=c(2,2))
plot(dyschezia.lm.adj3.out, id.n=5)
plot(dyschezia.lm.adj3.out,4)
#dyschezia imputed dataframe----
#t2
dyschezia.lm.imputed=with(imputed_pain2,lmrob(dyschezia_t2~dyschezia_t1+group_intervention))
summary(dyschezia.lm.imputed)
dyschezia.lm.imputed.pool=summary(pool(dyschezia.lm.imputed), conf.int = TRUE)
tibble(dyschezia.lm.imputed.pool)
#t3
# NOTE(review): plain lm() here vs lmrob() elsewhere — confirm intentional.
dyschezia.lm.imputed.t3=with(imputed_pain2,lm(dyschezia_t3~dyschezia_t1+group_intervention))
summary(dyschezia.lm.imputed.t3)
dyschezia.lm.imputed.pool.t3=summary(pool(dyschezia.lm.imputed.t3), conf.int = TRUE)
#dysmenorrhea----
# Same recipe as above for dysmenorrhea.
cor_dysmenorrhea=clinical.trial %>%
select(id,group,time,dysmenorrhea) %>%
pivot_wider(names_from = time, values_from = dysmenorrhea)
plot.dysmenorrhea=cor_dysmenorrhea %>%
select(group,t1,t2,t3)
GGally::ggpairs(plot.dysmenorrhea, ggplot2::aes(colour=group)) #evaluate correlation between group and times
#model adjusted for baseline
#t2
#robust
dysmenorrhea.lm.adj2=lmrob(t2~t1+group, data = cor_dysmenorrhea)
summary(dysmenorrhea.lm.adj2)
plot(dysmenorrhea.lm.adj2)
#lm without outlier
#considerar outlier observation that exceed 3 standard deviation in the residual vs leverage plot
#High influential points is considered 4/(n - p - 1) (Bruce, Peter, and Andrew Bruce. 2017. Practical Statistics for Data Scientists. O’Reilly Media.)
#p is the number of predictors. 4/(56-2-1)=0,07
cor_dysmenorrhea2=cor_dysmenorrhea[-c(52,62,36),]#without outlier
dysmenorrhea.lm.adj.out=lm(t2~t1+group, data = cor_dysmenorrhea2)
summary(dysmenorrhea.lm.adj.out)
par(mfrow=c(2,2))
plot(dysmenorrhea.lm.adj.out, id.n=5)
plot(dysmenorrhea.lm.adj.out,4)
#t3
#robust
dysmenorrhea.lm.adj3=lmrob(t3~t1+group, data = cor_dysmenorrhea)
summary(dysmenorrhea.lm.adj3)
plot(dysmenorrhea.lm.adj3)
#lm without outlier 4/(46-2-1)=0,09
cor_dysmenorrhea3=cor_dysmenorrhea[-c(36,50,63,15),]#without outlier
dysmenorrhea.lm.adj3.out=lm(t3~t1+group, data = cor_dysmenorrhea3)
summary(dysmenorrhea.lm.adj3.out)
par(mfrow=c(2,2))
plot(dysmenorrhea.lm.adj3.out, id.n=5)
plot(dysmenorrhea.lm.adj3.out, 4)
#dysmenorrhea imputed dataframe----
#t2
dysmenorrhea.lm.imputed=with(imputed_pain2,lmrob(dysmenorrhea_t2~dysmenorrhea_t1+group_intervention))
summary(dysmenorrhea.lm.imputed)
dysmenorrhea.lm.imputed.pool=summary(pool(dysmenorrhea.lm.imputed), conf.int = TRUE)
tibble(dysmenorrhea.lm.imputed.pool)
#t3
dysmenorrhea.lm.imputed.t3=with(imputed_pain2,lmrob(dysmenorrhea_t3~dysmenorrhea_t1+group_intervention))
summary(dysmenorrhea.lm.imputed.t3)
dysmenorrhea.lm.imputed.pool.t3=summary(pool(dysmenorrhea.lm.imputed.t3), conf.int = TRUE)
#pain.unpleasantness----
# Same recipe as above for pain unpleasantness.
cor_pain.unpleasantness=clinical.trial %>%
select(id,group,time,pain.unpleasantness) %>%
pivot_wider(names_from = time, values_from = pain.unpleasantness)
plot.pain.unpleasantness=cor_pain.unpleasantness %>%
select(group,t1,t2,t3)
GGally::ggpairs(plot.pain.unpleasantness, ggplot2::aes(colour=group)) #evaluate correlation between group and times
#model adjusted for baseline
#t2
pain.unpleasantness.lm.adj2=lmrob(t2~t1+group, data = cor_pain.unpleasantness)
summary(pain.unpleasantness.lm.adj2)
par(mfrow=c(2,2))
plot(pain.unpleasantness.lm.adj2)
#lm without outlier
# cook's distance > 0,07
cor_pain.unpleasantness2=cor_pain.unpleasantness[-c(8,39,43,50),]#without outlier
pain.unpleasantness.lm.adj.out=lm(t2~t1+group, data = cor_pain.unpleasantness2)
summary(pain.unpleasantness.lm.adj.out)
par(mfrow=c(2,2))
plot(pain.unpleasantness.lm.adj.out, id.n=5)
plot(pain.unpleasantness.lm.adj.out,4)
#t3
#robust
pain.unpleasantness.lm.adj3=lmrob(t3~t1+group, data = cor_pain.unpleasantness)
summary(pain.unpleasantness.lm.adj3)
plot(pain.unpleasantness.lm.adj3)
#lm without outlier
# cook's distance > 0,09
cor_pain.unpleasantness3=cor_pain.unpleasantness[-c(50,32),]#without outlier
pain.unpleasantness.lm.adj3.out=lm(t3~t1+group, data = cor_pain.unpleasantness3)
summary(pain.unpleasantness.lm.adj3.out)
par(mfrow=c(2,2))
plot(pain.unpleasantness.lm.adj3.out,id.n=5)
plot(pain.unpleasantness.lm.adj3.out,4)
#pain.unpleasantness imputed dataframe----
#t2
pain.unpleasantness.lm.imputed=with(imputed_pain2,lmrob(pain.unpleasantness_t2~pain.unpleasantness_t1+group_intervention))
summary(pain.unpleasantness.lm.imputed)
pain.unpleasantness.lm.imputed.pool=summary(pool(pain.unpleasantness.lm.imputed), conf.int = TRUE)
tibble(pain.unpleasantness.lm.imputed.pool)
#t3
pain.unpleasantness.lm.imputed.t3=with(imputed_pain2,lmrob(pain.unpleasantness_t3~pain.unpleasantness_t1+group_intervention))
summary(pain.unpleasantness.lm.imputed.t3)
pain.unpleasantness.lm.imputed.pool.t3=summary(pool(pain.unpleasantness.lm.imputed.t3), conf.int = TRUE)
#perceived stress----
# Same recipe as the pain outcomes, applied to PSS-10 total stress;
# imputed fits use the dedicated imputed_stress object.
cor_stress=clinical.trial %>%
select(id,group,time,PSS_10_total) %>%
pivot_wider(names_from = time, values_from = PSS_10_total)
plot.stress=cor_stress %>%
select(group,t1,t2,t3)
GGally::ggpairs(plot.stress, ggplot2::aes(colour=group)) #evaluate correlation between group and times
#model adjusted for baseline
#t2
stress.lm.adj2=lmrob(t2~t1+group, data = cor_stress)
summary(stress.lm.adj2)
plot(stress.lm.adj2)
#lm without outlier
# cook's distance > 0,07
cor_stress2=cor_stress[-c(39),]#without outlier
stress.lm.adj.out=lm(t2~t1+group, data = cor_stress2)
summary(stress.lm.adj.out)
par(mfrow=c(2,2))
plot(stress.lm.adj.out, id.n=5)
plot(stress.lm.adj.out, 4)
#t3
#robust
stress.lm.adj3=lmrob(t3~t1+group, data = cor_stress)
summary(stress.lm.adj3)
plot(stress.lm.adj3)
#lm without outlier
# cook's distance > 0,09
cor_stress3=cor_stress[-c(6,26),]#without outlier
stress.lm.adj3.out=lm(t3~t1+group, data = cor_stress3)
summary(stress.lm.adj3.out)
par(mfrow=c(2,2))
plot(stress.lm.adj3.out, id.n=5)
plot(stress.lm.adj3.out,4)
#stress imputed dataframe----
#t2
stress.lm.imputed=with(imputed_stress,lmrob(PSS_10_total_t2~PSS_10_total_t1+group_intervention))
summary(stress.lm.imputed)
stress.lm.imputed.pool=summary(pool(stress.lm.imputed), conf.int = TRUE)
tibble(stress.lm.imputed.pool)
#t3
stress.lm.imputed3=with(imputed_stress,lmrob(PSS_10_total_t3~PSS_10_total_t1+group_intervention))
summary(stress.lm.imputed3)
stress.lm.imputed.pool3=summary(pool(stress.lm.imputed3), conf.int = TRUE)
tibble(stress.lm.imputed.pool3)
#Table endometriosis-related pain models----
#lm robust mm-estimator method----
# Each fitted lmrob object is DESTRUCTIVELY replaced by a one-row tidy tibble
# (intervention term only) — run this section once, after all model fitting.
# NOTE(review): the r2.adj values are hard-coded, apparently transcribed from
# the summary() output above — verify them if the data or models change.
#t2
p.pain.lm.adj2=p.pain.lm.adj2 %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj=0.276)
dysuria.lm.adj2=dysuria.lm.adj2 %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj=0.707)
dyspareunia.lm.adj2=dyspareunia.lm.adj2 %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj=0.451)
dyschezia.lm.adj2=dyschezia.lm.adj2 %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj=0.618)
dysmenorrhea.lm.adj2=dysmenorrhea.lm.adj2 %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj=0.633)
pain.unpleasantness.lm.adj2=pain.unpleasantness.lm.adj2 %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj=0.474)
#t3
p.pain.lm.adj3=p.pain.lm.adj3 %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj=0.208)
dysuria.lm.adj3=dysuria.lm.adj3 %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj=0.493)
dyspareunia.lm.adj3=dyspareunia.lm.adj3 %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj=0.422)
dyschezia.lm.adj3=dyschezia.lm.adj3 %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj=0.512)
dysmenorrhea.lm.adj3=dysmenorrhea.lm.adj3 %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj=0.364)
pain.unpleasantness.lm.adj3=pain.unpleasantness.lm.adj3 %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj=0.278)
#lm OLS method without outliers----
# Same destructive tidy-and-filter treatment for the OLS models fitted after
# manual outlier removal; r2.adj again hard-coded from the summaries.
#t2
p.pain.lm.adj2.out=p.pain.lm.adj2.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj=0.341)
dysuria.lm.adj2.out=dysuria.lm.adj2.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj=0.629)
dyspareunia.lm.adj2.out=dyspareunia.lm.adj2.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj=0.364)
dyschezia.lm.adj.out=dyschezia.lm.adj.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj=0.657)
dysmenorrhea.lm.adj.out=dysmenorrhea.lm.adj.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj=0.328)
pain.unpleasantness.lm.adj.out=pain.unpleasantness.lm.adj.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj=0.538)
#t3
p.pain.lm.adj3.out=p.pain.lm.adj3.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj=0.236 )
dysuria.lm.adj3.out=dysuria.lm.adj3.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj=0.492 )
dyspareunia.lm.adj3.out=dyspareunia.lm.adj3.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj=0.461)
dyschezia.lm.adj3.out=dyschezia.lm.adj3.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj=0.651)
dysmenorrhea.lm.adj3.out=dysmenorrhea.lm.adj3.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj=0.460)
pain.unpleasantness.lm.adj3.out=pain.unpleasantness.lm.adj3.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj=0.403)
#lm using multiple imputed dataset----
# Keep only the intervention row of each pooled (Rubin's rules) summary;
# note the dummy-coded term name differs ("group_intervention" vs
# "groupintervention" in the raw-data models above).
#t2
p.pain.lm.imputed.pool=p.pain.lm.imputed.pool %>%
filter(term=="group_intervention")
dysuria.lm.imputed.pool=dysuria.lm.imputed.pool %>%
filter(term=="group_intervention")
dyspareunia.lm.imputed.pool=dyspareunia.lm.imputed.pool %>%
filter(term=="group_intervention")
dyschezia.lm.imputed.pool=dyschezia.lm.imputed.pool %>%
filter(term=="group_intervention")
dysmenorrhea.lm.imputed.pool=dysmenorrhea.lm.imputed.pool %>%
filter(term=="group_intervention")
pain.unpleasantness.lm.imputed.pool=pain.unpleasantness.lm.imputed.pool %>%
filter(term=="group_intervention")
#t3
p.pain.lm.imputed.pool.t3=p.pain.lm.imputed.pool.t3 %>%
filter(term=="group_intervention")
dysuria.lm.imputed.pool.t3=dysuria.lm.imputed.pool.t3 %>%
filter(term=="group_intervention")
dyspareunia.lm.imputed.pool.t3=dyspareunia.lm.imputed.pool.t3 %>%
filter(term=="group_intervention")
dyschezia.lm.imputed.pool.t3=dyschezia.lm.imputed.pool.t3 %>%
filter(term=="group_intervention")
dysmenorrhea.lm.imputed.pool.t3=dysmenorrhea.lm.imputed.pool.t3 %>%
filter(term=="group_intervention")
pain.unpleasantness.lm.imputed.pool.t3=pain.unpleasantness.lm.imputed.pool.t3 %>%
filter(term=="group_intervention")
#table lm.rob----
# NOTE(review): this histogram looks unrelated to the table below — confirm
# whether it belongs in another section.
hist(clinical.trial$FFMQ_total)
# Stack the 12 tidied robust-model rows (t2 then t3) into one flextable;
# compose() relabels the term column with readable outcome names.
# FIX: relocate() referenced "R2.adj.", but the column created above is named
# r2.adj — any_of() silently ignored the missing name, leaving the adjusted-R²
# column out of its intended position.
endo.related.pain.lm.rob=p.pain.lm.adj2 %>%
bind_rows(dysuria.lm.adj2,dyspareunia.lm.adj2,
dyschezia.lm.adj2,dysmenorrhea.lm.adj2,
pain.unpleasantness.lm.adj2,p.pain.lm.adj3,
dysuria.lm.adj3,dyspareunia.lm.adj3, dyschezia.lm.adj3,
dysmenorrhea.lm.adj3,pain.unpleasantness.lm.adj3) %>%
mutate(estimate=round(estimate,3),std.error=round(std.error,3),
statistic=round(statistic,3),p.value=round(p.value,3),
conf.low=round(conf.low,3),conf.high=round(conf.high,3)) %>%
relocate(any_of(c("term", "estimate", "std.error","statistic","conf.low","conf.high","r2.adj","p.value"))) %>%
rename(variable=term) %>%
flextable() %>%
compose(i=c(1,2,3,4,5,6,7,8,9,10,11,12),j=1, value = as_paragraph (c("Pelvic pain t2", "Dysuria t2",
"Dyspareunia t2","Dyschezia t2",
"Dysmenorrhea t2","Pain unpleasantness t2",
"Pelvic pain t3", "Dysuria t3",
"Dyspareunia t3","Dyschezia t3",
"Dysmenorrhea t3","Pain unpleasantness t3"))) %>%
autofit()
#table lm.ols----
# OLS-without-outliers counterpart of the robust table above.
# FIX: relocate() referenced "R2.adj." but the column is named r2.adj;
# any_of() silently skipped it, so the column was never repositioned.
endo.related.pain.lm.ols=p.pain.lm.adj2.out %>%
bind_rows(dysuria.lm.adj2.out,dyspareunia.lm.adj2.out,
dyschezia.lm.adj.out,dysmenorrhea.lm.adj.out, pain.unpleasantness.lm.adj.out,
p.pain.lm.adj3.out,dysuria.lm.adj3.out,dyspareunia.lm.adj3.out,
dyschezia.lm.adj3.out,dysmenorrhea.lm.adj3.out,pain.unpleasantness.lm.adj3.out) %>%
mutate(estimate=round(estimate,3),std.error=round(std.error,3),
statistic=round(statistic,3),p.value=round(p.value,3),
conf.low=round(conf.low,3),conf.high=round(conf.high,3)) %>%
relocate(any_of(c("term", "estimate", "std.error","statistic","conf.low","conf.high","r2.adj","p.value"))) %>%
rename(variable=term) %>%
flextable() %>%
compose(i=c(1,2,3,4,5,6,7,8,9,10,11,12),j=1, value = as_paragraph (c("Pelvic pain t2", "Dysuria t2",
"Dyspareunia t2","Dyschezia t2",
"Dysmenorrhea t2","Pain unpleasantness t2",
"Pelvic pain t3", "Dysuria t3",
"Dyspareunia t3","Dyschezia t3",
"Dysmenorrhea t3","Pain unpleasantness t3"))) %>%
autofit()
#table lm.imputaded.rob----
# Flextable of the pooled imputed-model rows. The mice pool summary names its
# CI columns `2.5 %` / `97.5 %`; they are renamed to conf.low/conf.high and
# dropped along with df. "R2.adj." in relocate() matches nothing here (pooled
# summaries carry no r2 column) and is silently ignored by any_of().
endo.related.pain.lm.imputed.rob=p.pain.lm.imputed.pool %>%
bind_rows(dysuria.lm.imputed.pool,
dyspareunia.lm.imputed.pool,dyschezia.lm.imputed.pool,
dysmenorrhea.lm.imputed.pool,pain.unpleasantness.lm.imputed.pool,
p.pain.lm.imputed.pool.t3,dysuria.lm.imputed.pool.t3,
dyspareunia.lm.imputed.pool.t3,dyschezia.lm.imputed.pool.t3,
dysmenorrhea.lm.imputed.pool.t3,pain.unpleasantness.lm.imputed.pool.t3) %>%
mutate(estimate=round(estimate,3),std.error=round(std.error,3),
statistic=round(statistic,3),conf.low=round(`2.5 %`,3),conf.high=round(`97.5 %`,3) ,
df=round(df,3),p.value=round(p.value,3)) %>%
select(-df,-`2.5 %`,-`97.5 %`) %>%
relocate(any_of(c("term", "estimate", "std.error","statistic","conf.low","conf.high","p.value","R2.adj."))) %>%
rename(variable=term) %>%
flextable() %>%
compose(i=c(1,2,3,4,5,6,7,8,9,10,11,12),j=1, value = as_paragraph (c("Pelvic pain t2", "Dysuria t2",
"Dyspareunia t2","Dyschezia t2",
"Dysmenorrhea t2","Pain unpleasantness t2",
"Pelvic pain t3", "Dysuria t3",
"Dyspareunia t3","Dyschezia t3",
"Dysmenorrhea t3","Pain unpleasantness t3"))) %>%
autofit()
effect.size
#Table perceived stress models----
#lm robust mm-estimator method----
# As with the pain models: each fitted stress model is destructively replaced
# by a one-row tidy tibble (intervention term) with hard-coded adjusted R².
#t2
stress.lm.adj2=stress.lm.adj2 %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj=0.271)
#t3
stress.lm.adj3=stress.lm.adj3 %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj= 0.338)
#lm OLS method without outliers----
#t2
stress.lm.adj.out= stress.lm.adj.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj=0.243)
#t3
stress.lm.adj3.out=stress.lm.adj3.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention") %>%
mutate(r2.adj=0.306)
#lm using multiple imputed dataset----
#t2
stress.lm.imputed.pool=stress.lm.imputed.pool %>%
filter(term=="group_intervention")
#t3
stress.lm.imputed.pool3=stress.lm.imputed.pool3 %>%
filter(term=="group_intervention")
#table lm.rob----
# Flextable of the two tidied robust stress rows (t2, t3).
# FIXES: row-label typo "strress" -> "stress"; relocate() referenced "R2.adj."
# but the column is named r2.adj, so any_of() silently skipped it.
stress.lm.rob=stress.lm.adj2 %>%
bind_rows(stress.lm.adj3) %>%
mutate(estimate=round(estimate,3),std.error=round(std.error,3),
statistic=round(statistic,3),p.value=round(p.value,3),
conf.low=round(conf.low,3),conf.high=round(conf.high,3)) %>%
relocate(any_of(c("term", "estimate", "std.error","statistic","conf.low","conf.high","r2.adj","p.value"))) %>%
rename(variable=term) %>%
flextable() %>%
compose(i=c(1,2),j=1, value = as_paragraph (c("Perceived stress t2", "Perceived stress t3"))) %>%
autofit()
#table lm.OLS----
# OLS-without-outliers counterpart of the robust stress table.
# FIXES: row-label typo "strress" -> "stress"; "R2.adj." -> r2.adj so the
# adjusted-R² column is actually relocated (any_of() ignored the bad name).
stress.lm.ols=stress.lm.adj.out %>%
bind_rows(stress.lm.adj3.out) %>%
mutate(estimate=round(estimate,3),std.error=round(std.error,3),
statistic=round(statistic,3),p.value=round(p.value,3),
conf.low=round(conf.low,3),conf.high=round(conf.high,3)) %>%
relocate(any_of(c("term", "estimate", "std.error","statistic","conf.low","conf.high","r2.adj","p.value"))) %>%
rename(variable=term) %>%
flextable() %>%
compose(i=c(1,2),j=1, value = as_paragraph (c("Perceived stress t2", "Perceived stress t3"))) %>%
autofit()
#table lm.imputaded.rob----
# Pooled imputed stress rows (t2, t3) as a flextable; mice's `2.5 %`/`97.5 %`
# CI columns are renamed and dropped with df. ("R2.adj." matches nothing in
# pooled output and is ignored by any_of().)
# FIX: row-label typo "strress" -> "stress".
stress.lm.imputed.rob=stress.lm.imputed.pool %>%
bind_rows(stress.lm.imputed.pool3) %>%
mutate(estimate=round(estimate,3),std.error=round(std.error,3),
statistic=round(statistic,3),conf.low=round(`2.5 %`,3),conf.high=round(`97.5 %`,3) ,
df=round(df,3),p.value=round(p.value,3)) %>%
select(-df,-`2.5 %`,-`97.5 %`) %>%
relocate(any_of(c("term", "estimate", "std.error","statistic","conf.low","conf.high","p.value","R2.adj."))) %>%
rename(variable=term) %>%
flextable() %>%
compose(i=c(1,2),j=1, value = as_paragraph (c("Perceived stress t2", "Perceived stress t3"))) %>%
autofit()
#sf_36 subscales with low variance Wilcox test----
#subscales
# Group comparison of t1->t2 / t1->t3 change scores for the low-variance
# SF-36 subscales via rstatix::wilcox_test, one test per subscale
# (group_by(name)), with p-value adjustment across subscales.
sf.36.wcx.t2=sf.36.gain %>%
select(id,group, sf_36_limitations.physical.t1_t2 ,
sf_36_pain.t1_t2 ,sf_36_general.health.t1_t2,
sf_36_emotional.role.t1_t2) %>%
pivot_longer(-c("id","group")) %>%
group_by(name) %>%
wilcox_test(value~group ,detailed = TRUE) %>%
adjust_pvalue()
sf.36.wcx.t3=sf.36.gain %>%
select(id,group,
sf_36_limitations.physical.t1_t3,sf_36_pain.t1_t3 ,
sf_36_general.health.t1_t3 ,
sf_36_emotional.role.t1_t3) %>%
pivot_longer(-c("id","group")) %>%
group_by(name) %>%
wilcox_test(value~group ,detailed = TRUE) %>%
adjust_pvalue()
# table wilcox sf-36-----
# Combined t2+t3 results rounded into a flextable.
sf.36.wcx=sf.36.wcx.t2 %>%
bind_rows(sf.36.wcx.t3) %>%
select(name, estimate,statistic, p, conf.low, conf.high, p.adj) %>%
mutate(estimate=round(estimate,3),statistic=round(statistic,3),
p=round(p,3),p.adj=round(p.adj,3), conf.low=round(conf.low,3),conf.high=round(conf.high,3)) %>%
relocate(any_of(c("name", "estimate","statistic","conf.low","conf.high","p","p.adj"))) %>%
flextable() %>%
autofit()
View(sf.36.wcx)
#imputed
# Same tests on the imputed change-score dataset (sf.36.gain.imputed).
sf.36.wcx.t2.imputed= sf.36.gain.imputed %>%
select(id,group, sf_36_limitations.physical.t1_t2 ,
sf_36_pain.t1_t2 ,sf_36_general.health.t1_t2 ,
sf_36_emotional.role.t1_t2) %>%
pivot_longer(-c("id","group")) %>%
group_by(name) %>%
wilcox_test(value~group ,detailed = TRUE) %>%
adjust_pvalue()
sf.36.wcx.t3.imputed=sf.36.gain.imputed %>%
select(id,group,
sf_36_limitations.physical.t1_t3,sf_36_pain.t1_t3 ,
sf_36_general.health.t1_t3,
sf_36_emotional.role.t1_t3) %>%
pivot_longer(-c("id","group")) %>%
group_by(name) %>%
wilcox_test(value~group ,detailed = TRUE) %>%
adjust_pvalue()
# table wilcox sf-36 imputed-----
sf.36.wcx.imputed=sf.36.wcx.t2.imputed %>%
bind_rows(sf.36.wcx.t3.imputed) %>%
select(name, estimate,statistic, p, conf.low, conf.high, p.adj) %>%
mutate(estimate=round(estimate,3),statistic=round(statistic,3),
p=round(p,3),p.adj=round(p.adj,3), conf.low=round(conf.low,3),conf.high=round(conf.high,3)) %>%
relocate(any_of(c("name", "estimate","statistic","conf.low","conf.high","p","p.adj"))) %>%
flextable() %>%
autofit()
# compare trimmed means of the sf_36 subscales with low variance using two sample Yuen's test between group pain change score----
library("WRS2")
# One WRS2::yuenbt (bootstrap Yuen trimmed-means test) per change score.
# Each result object is flattened via t(sapply(x, c)) into a one-row data
# frame, keeping only test statistic, CI, p-value, and trimmed-mean difference.
#sf_36_limitations.physical.t1_t2
robust.lim.phys= yuenbt(sf_36_limitations.physical.t1_t2~group, data = sf.36.gain)
robust.lim.phys=data.frame(t(sapply(robust.lim.phys,c)))
robust.lim.phys=robust.lim.phys %>%
select(test,conf.int,p.value,diff)
#sf_36_pain.t1_t2
robust.pain= yuenbt(sf_36_pain.t1_t2~group, data = sf.36.gain)
robust.pain=data.frame(t(sapply(robust.pain,c)))
robust.pain=robust.pain %>%
select(test,conf.int,p.value,diff)
#sf_36_general.health.t1_t2
robust.general.health= yuenbt(sf_36_general.health.t1_t2~group, data = sf.36.gain)
robust.general.health=data.frame(t(sapply(robust.general.health,c)))
robust.general.health=robust.general.health %>%
select(test,conf.int,p.value,diff)
#sf_36_emotional.role.t1_t2
robust.emotional.role= yuenbt(sf_36_emotional.role.t1_t2~group, data = sf.36.gain)
robust.emotional.role=data.frame(t(sapply(robust.emotional.role,c)))
robust.emotional.role=robust.emotional.role %>%
select(test,conf.int,p.value,diff)
#sf_36_limitations.physical.t1_t3
robust.lim.phys3= yuenbt(sf_36_limitations.physical.t1_t3~group, data = sf.36.gain)
robust.lim.phys3=data.frame(t(sapply(robust.lim.phys3,c)))
robust.lim.phys3=robust.lim.phys3 %>%
select(test,conf.int,p.value,diff)
#sf_36_pain.t1_t3
robust.pain3= yuenbt(sf_36_pain.t1_t3~group, data = sf.36.gain)
robust.pain3=data.frame(t(sapply(robust.pain3,c)))
robust.pain3=robust.pain3 %>%
select(test,conf.int,p.value,diff)
#sf_36_general.health.t1_t3
robust.general.health3= yuenbt(sf_36_general.health.t1_t3~group, data = sf.36.gain)
robust.general.health3=data.frame(t(sapply(robust.general.health3,c)))
robust.general.health3=robust.general.health3 %>%
select(test,conf.int,p.value,diff)
#sf_36_emotional.role.t1_t3
robust.emotional.role3= yuenbt(sf_36_emotional.role.t1_t3~group, data = sf.36.gain)
robust.emotional.role3=data.frame(t(sapply(robust.emotional.role3,c)))
robust.emotional.role3=robust.emotional.role3 %>%
select(test,conf.int,p.value,diff)
#table Yuen's test----
# Label each Yuen-test row with its subscale (order must match the bind order)
# and arrange columns for presentation.
#t2
sf.36.Yuen.test <- robust.lim.phys %>%
  bind_rows(robust.pain, robust.general.health, robust.emotional.role) %>%
  add_column(variable = c(
    "SF-36.limitation.physical.functioning", "SF-36.pain",
    "SF-36.general.health", "SF-36.emotional.role"
  )) %>%
  relocate(any_of(c("variable", "diff", "test", "conf.int", "p.value"))) %>%
  flextable() %>%
  autofit()
#t3
sf.36.Yuen.test3 <- robust.lim.phys3 %>%
  bind_rows(robust.pain3, robust.general.health3, robust.emotional.role3) %>%
  add_column(variable = c(
    "SF-36.limitation.physical.functioning", "SF-36.pain",
    "SF-36.general.health", "SF-36.emotional.role"
  )) %>%
  relocate(any_of(c("variable", "diff", "test", "conf.int", "p.value"))) %>%
  flextable() %>%
  autofit()
# Robust linear (MM-type estimators) raw scores, imputed data and Ordinary Least Squares without outliers models on sf 36 raw score----
# Evaluate the presence of sf 36 near zero variance----
# Convert from long to wide so every time x group combination of each subscale
# becomes its own column, then screen those columns for (near-)zero variance.
# names_from: the documented c(time, group) form replaces the tidyselect union
# `time|group` (identical selection and column naming, clearer intent).
var.test.sf36 <- clinical.trial %>%
  select(
    id, group, time, sf.36.physical.sum, sf.36.mental.sum,
    sf_36_physical.functioning, sf_36_limitations.physical.functioning,
    sf_36_pain, sf_36_general.health, sf_36_vitality, sf_36_social.function,
    sf_36_emotional.role, sf_36_mental.health
  ) %>%
  pivot_wider(
    names_from = c(time, group),
    values_from = c(
      sf_36_physical.functioning, sf_36_limitations.physical.functioning,
      sf_36_pain, sf_36_general.health, sf_36_vitality, sf_36_social.function,
      sf_36_emotional.role, sf_36_mental.health
    )
  )
nearzero.sf36 <- var.test.sf36 %>%
  nearZeroVar(saveMetrics = TRUE)
which(nearzero.sf36$zeroVar == 'TRUE') # variables with zero variance
which(nearzero.sf36$nzv == 'TRUE') # variables with near-zero variance
# sf.36.physical.sum----
# Reshape to one row per participant with t1/t2/t3 columns for the SF-36
# physical summary score, then fit baseline-adjusted models (follow-up ~ baseline + group).
cor_physical.sum=clinical.trial %>%
select(id,group,time,sf.36.physical.sum) %>%
pivot_wider(names_from = time, values_from = sf.36.physical.sum)
plot.physical.sum=cor_physical.sum %>%
select(group,t1,t2,t3)
ggpairs( plot.physical.sum, ggplot2::aes(colour=group)) #evaluate correlation between group and time
#model adjusted for baseline
#t2
#robust (MM-type estimator via lmrob)
physical.sum.lm.rob2=lmrob(t2~t1+group, data = cor_physical.sum)
summary(physical.sum.lm.rob2)
par(mfrow=c(2,2))
plot(physical.sum.lm.rob2)
#lm without outlier
#consider as outliers observations that exceed 3 standard deviations in the residuals vs leverage plot
#high-influence points: Cook's distance > 4/(n - p - 1) (Bruce, Peter, and Andrew Bruce. 2017. Practical Statistics for Data Scientists. O'Reilly Media.)
#p is the number of predictors: 4/(52-2-1) = 0.08
cor_physical.sum.out=cor_physical.sum[-c(55,24),]
physical.sum.lm2=lm(t2~t1+group, data = cor_physical.sum.out)
summary(physical.sum.lm2)
par(mfrow=c(2,2))
plot(physical.sum.lm2, id.n=5)
plot(physical.sum.lm2, 4) # which = 4: Cook's distance plot
#imputed data: fit within each imputation (imputed_sf.36.sum is presumably a
#mice mids object - confirm), then pool the estimates
physical.sum.lm2.imputed=with(imputed_sf.36.sum,lmrob(sf.36.physical.sum_t2~sf.36.physical.sum_t1+group_intervention))
summary(physical.sum.lm2.imputed)
physical.sum.lm2.imputed.pool=summary(pool(physical.sum.lm2.imputed), conf.int = TRUE)
tibble(physical.sum.lm2.imputed.pool)
#t3
#robust
physical.sum.lm.adj3=lmrob(t3~t1+group, data = cor_physical.sum)
summary(physical.sum.lm.adj3)
par(mfrow=c(2,2))
plot(physical.sum.lm.adj3)
#lm without outlier (same Cook's distance rule; 4/(45-2-1) = 0.09)
#same rows 55 and 24 excluded as at t2
cor_physical.sum.out3=cor_physical.sum[-c(55,24),]
physical.sum.lm3=lm(t3~t1+group, data = cor_physical.sum.out3)
summary(physical.sum.lm3)
par(mfrow=c(2,2))
plot(physical.sum.lm3, id.n=5)
plot(physical.sum.lm3, 4)
#imputed data
physical.sum.lm3.imputed=with(imputed_sf.36.sum,lmrob(sf.36.physical.sum_t3~sf.36.physical.sum_t1+group_intervention))
summary(physical.sum.lm3.imputed)
physical.sum.lm3.imputed.pool=summary(pool(physical.sum.lm3.imputed), conf.int = TRUE)
tibble(physical.sum.lm3.imputed.pool)
# sf.36.mental.sum----
# One row per participant with t1/t2/t3 columns for the SF-36 mental summary
# score, then baseline-adjusted models (follow-up ~ baseline + group).
cor_mental.sum <- clinical.trial %>%
  select(id, group, time, sf.36.mental.sum) %>%
  pivot_wider(names_from = time, values_from = sf.36.mental.sum)
plot.mental.sum <- cor_mental.sum %>%
  select(group, t1, t2, t3)
ggpairs(plot.mental.sum, ggplot2::aes(colour = group)) # evaluate correlation between group and time
# model adjusted for baseline
# t2
# robust, fitted without the influential rows (plain lm fit was poor and the
# robust fit was still affected by outliers)
cor_mental.sum.out2 <- cor_mental.sum[-c(60, 33, 47, 59, 26), ]
mental.sum.lm.rob2 <- lmrob(t2 ~ t1 + group, data = cor_mental.sum.out2)
summary(mental.sum.lm.rob2)
par(mfrow = c(2, 2))
plot(mental.sum.lm.rob2, id.n = 5)
plot(mental.sum.lm.rob2, 3)
# lm without outliers
# consider as outliers observations exceeding 3 standard deviations in the residuals vs leverage plot
# high influence: Cook's distance > 4/(n - p - 1); p = number of predictors, 4/(52-2-1) = 0.08
# (Bruce & Bruce 2017, Practical Statistics for Data Scientists)
# NOTE: the original script re-created cor_mental.sum.out2 here with the exact
# same rows removed; the duplicate assignment was dropped.
mental.sum.lm2 <- lm(t2 ~ t1 + group, data = cor_mental.sum.out2) # OLS without outliers fitted better than the robust lm
summary(mental.sum.lm2)
par(mfrow = c(2, 2))
plot(mental.sum.lm2, id.n = 5)
plot(mental.sum.lm2, 4)
# imputed
mental.sum.lm2.imputed <- with(imputed_sf.36.sum, lm(sf.36.mental.sum_t2 ~ sf.36.mental.sum_t1 + group_intervention))
summary(mental.sum.lm2.imputed)
mental.sum.lm2.imputed.pool <- summary(pool(mental.sum.lm2.imputed), conf.int = TRUE)
tibble(mental.sum.lm2.imputed.pool)
# t3
mental.sum.lm.adj3 <- lmrob(t3 ~ t1 + group, data = cor_mental.sum)
summary(mental.sum.lm.adj3)
par(mfrow = c(2, 2))
plot(mental.sum.lm.adj3, id.n = 5)
plot(mental.sum.lm.adj3, 4)
# lm without outliers (same Cook's distance rule; 4/(45-2-1) = 0.09)
cor_mental.sum.out3 <- cor_mental.sum[-c(26), ]
mental.sum.lm3 <- lm(t3 ~ t1 + group, data = cor_mental.sum.out3)
summary(mental.sum.lm3)
par(mfrow = c(2, 2))
plot(mental.sum.lm3, id.n = 5)
plot(mental.sum.lm3, 4)
# imputed
mental.sum.lm3.imputed <- with(imputed_sf.36.sum, lmrob(sf.36.mental.sum_t3 ~ sf.36.mental.sum_t1 + group_intervention))
summary(mental.sum.lm3.imputed)
mental.sum.lm3.imputed.pool <- summary(pool(mental.sum.lm3.imputed), conf.int = TRUE)
tibble(mental.sum.lm3.imputed.pool)
# sf_36_physical functioning----
cor_phys.functioning <- clinical.trial %>%
  select(id, group, time, sf_36_physical.functioning) %>%
  pivot_wider(names_from = time, values_from = sf_36_physical.functioning)
plot.phys.functioning <- cor_phys.functioning %>%
  select(group, t1, t2, t3)
ggpairs(plot.phys.functioning, ggplot2::aes(colour = group)) # evaluate correlation between group and time
# model adjusted for baseline
# t2
# robust
phys.functioning.lm.adj2 <- lmrob(t2 ~ t1 + group, data = cor_phys.functioning)
summary(phys.functioning.lm.adj2)
par(mfrow = c(2, 2))
plot(phys.functioning.lm.adj2)
# lm without outliers
# consider as outliers observations exceeding 3 SD in the residuals vs leverage plot
# high influence: Cook's distance > 4/(n - p - 1); p = number of predictors, 4/(52-2-1) = 0.08
# (Bruce & Bruce 2017, Practical Statistics for Data Scientists)
cor_phys.functioning.out <- cor_phys.functioning[-c(10), ]
phys.functioning.lm.adj2.out <- lm(t2 ~ t1 + group, data = cor_phys.functioning.out)
summary(phys.functioning.lm.adj2.out)
par(mfrow = c(2, 2))
plot(phys.functioning.lm.adj2.out, id.n = 5)
plot(phys.functioning.lm.adj2.out, 4)
# imputed data
phys.functioning.lm2.imputed <- with(imputed_sf.36, lmrob(sf_36_physical.functioning_t2 ~ sf_36_physical.functioning_t1 + group_intervention))
summary(phys.functioning.lm2.imputed)
phys.functioning.lm2.imputed.pool <- summary(pool(phys.functioning.lm2.imputed), conf.int = TRUE)
tibble(phys.functioning.lm2.imputed.pool)
# t3
# robust
phys.functioning.lm.adj3 <- lmrob(t3 ~ t1 + group, data = cor_phys.functioning)
summary(phys.functioning.lm.adj3)
par(mfrow = c(2, 2))
plot(phys.functioning.lm.adj3)
# lm without outliers (same Cook's distance rule; 4/(45-2-1) = 0.09)
phys.functioning.out3 <- cor_phys.functioning[-c(10, 31, 30), ]
phys.functioning.lm.adj3.out <- lm(t3 ~ t1 + group, data = phys.functioning.out3)
summary(phys.functioning.lm.adj3.out)
par(mfrow = c(2, 2))
plot(phys.functioning.lm.adj3.out, id.n = 5)
plot(phys.functioning.lm.adj3.out, 4)
# imputed data
phys.functioning.lm3.imputed <- with(imputed_sf.36, lmrob(sf_36_physical.functioning_t3 ~ sf_36_physical.functioning_t1 + group_intervention))
summary(phys.functioning.lm3.imputed)
phys.functioning.lm3.imputed.pool <- summary(pool(phys.functioning.lm3.imputed), conf.int = TRUE)
# BUG FIX: previously printed tibble(phys.functioning.lm.imputed.pool), an
# object that is never created; print the t3 pooled summary built above.
tibble(phys.functioning.lm3.imputed.pool)
# sf_36_limitations physical functioning----
cor_lim.phys.functioning <- clinical.trial %>%
  select(id, group, time, sf_36_limitations.physical.functioning) %>%
  pivot_wider(names_from = time, values_from = sf_36_limitations.physical.functioning)
plot.lim.phys.functioning <- cor_lim.phys.functioning %>%
  select(group, t1, t2, t3)
ggpairs(plot.lim.phys.functioning, ggplot2::aes(colour = group)) # evaluate correlation between group and time
# model adjusted for baseline
# t2
# robust - very poor fit: little variance in the intervention group at t1
# and no linear relation
lim.phys.functioning.lm.rob2 <- lmrob(t2 ~ t1 + group, data = cor_lim.phys.functioning)
summary(lim.phys.functioning.lm.rob2)
par(mfrow = c(2, 2))
plot(lim.phys.functioning.lm.rob2)
# lm
# outlier rule for reference: residuals > 3 SD in the residuals vs leverage
# plot; Cook's distance > 4/(n - p - 1) = 4/(52-2-1) = 0.08 (Bruce & Bruce 2017)
lim.phys.functioning.lm.adj2 <- lm(t2 ~ t1 + group, data = cor_lim.phys.functioning)
summary(lim.phys.functioning.lm.adj2)
par(mfrow = c(2, 2))
# BUG FIX: the diagnostics previously plotted phys.functioning.lm.adj2 (a
# different subscale's model); plot the model fitted just above instead.
plot(lim.phys.functioning.lm.adj2, id.n = 5)
plot(lim.phys.functioning.lm.adj2, 4)
# t3
# robust - very poor fit: little variance in the intervention group at t1
lim.phys.functioning.lm.adj3 <- lmrob(t3 ~ t1 + group, data = cor_lim.phys.functioning)
summary(lim.phys.functioning.lm.adj3)
par(mfrow = c(2, 2))
plot(lim.phys.functioning.lm.adj3)
# sf_36_pain----
cor_sf36_pain <- clinical.trial %>%
  select(id, group, time, sf_36_pain) %>%
  pivot_wider(names_from = time, values_from = sf_36_pain)
plot.sf36_pain <- cor_sf36_pain %>%
  select(group, t1, t2, t3)
ggpairs(plot.sf36_pain, ggplot2::aes(colour = group)) # evaluate correlation between group and time
# model adjusted for baseline
# t2
# NOTE(review): despite the ".rob" suffix this is an OLS lm(), not lmrob();
# names are kept for downstream compatibility.
sf36_pain.lm.rob2 <- lm(t2 ~ t1 + group, data = cor_sf36_pain)
summary(sf36_pain.lm.rob2)
# lm without outliers
# outlier rule: residuals > 3 SD in the residuals vs leverage plot;
# Cook's distance > 4/(n - p - 1) = 4/(52-2-1) = 0.08 (Bruce & Bruce 2017)
cor_sf36_pain.out <- cor_sf36_pain[-55, ]
sf36_pain.lm.adj2 <- lm(t2 ~ t1 + group, data = cor_sf36_pain.out)
summary(sf36_pain.lm.adj2)
par(mfrow = c(2, 2))
plot(sf36_pain.lm.adj2, id.n = 5)
plot(sf36_pain.lm.adj2, 4)
# imputed
pain.lm2.imputed <- with(imputed_sf.36, lmrob(sf_36_pain_t2 ~ sf_36_pain_t1 + group_intervention))
# BUG FIX: previously summarised phys.functioning.lm2.imputed (wrong object)
summary(pain.lm2.imputed)
# conf.int = TRUE added for consistency with the other pooled summaries
pain.lm2.imputed.pool <- summary(pool(pain.lm2.imputed), conf.int = TRUE)
tibble(pain.lm2.imputed.pool)
# t3
sf36_pain.lm.rob3.out <- lm(t3 ~ t1 + group, data = cor_sf36_pain) # relation not well fitted by the robust model
summary(sf36_pain.lm.rob3.out)
# lm without outliers (Cook's rule 4/(45-2-1) = 0.09); cor_sf36_pain.out
# (row 55 removed) is reused from the t2 section - the original re-created
# the identical object here.
sf36_pain.lm.adj3.out <- lm(t3 ~ t1 + group, data = cor_sf36_pain.out) # relation not well fitted by OLS either
summary(sf36_pain.lm.adj3.out)
par(mfrow = c(2, 2))
plot(sf36_pain.lm.adj3.out, id.n = 5)
# BUG FIX: previously plotted sf36_pain.lm.adj3, which is never created
plot(sf36_pain.lm.adj3.out, 4)
# imputed
pain.lm3.imputed <- with(imputed_sf.36, lm(sf_36_pain_t3 ~ sf_36_pain_t1 + group_intervention))
# BUG FIX: previously summarised phys.functioning.lm3.imputed (wrong object)
summary(pain.lm3.imputed)
pain.lm3.imputed.pool <- summary(pool(pain.lm3.imputed), conf.int = TRUE)
tibble(pain.lm3.imputed.pool)
# sf_36_general.health----
cor_general.health <- clinical.trial %>%
  select(id, group, time, sf_36_general.health) %>%
  pivot_wider(names_from = time, values_from = sf_36_general.health)
plot.general.health <- cor_general.health %>%
  select(group, t1, t2, t3)
ggpairs(plot.general.health, ggplot2::aes(colour = group)) # evaluate correlation between group and time
# model adjusted for baseline
# t2
# robust (the plain lm fit was very poor)
general.health.lm.rob2 <- lmrob(t2 ~ t1 + group, data = cor_general.health)
summary(general.health.lm.rob2)
par(mfrow = c(2, 2))
plot(general.health.lm.rob2)
# lm without outliers
# outlier rule: residuals > 3 SD in the residuals vs leverage plot;
# Cook's distance > 4/(n - p - 1) = 4/(52-2-1) = 0.08 (Bruce & Bruce 2017)
cor_general.health.out <- cor_general.health[-c(4, 40, 48, 54), ]
general.health.lm.adj2 <- lm(t2 ~ t1 + group, data = cor_general.health.out) # very poor lm fit
summary(general.health.lm.adj2)
par(mfrow = c(2, 2))
plot(general.health.lm.adj2, id.n = 5)
plot(general.health.lm.adj2, 4)
# imputed
general.health.lm2.imputed <- with(imputed_sf.36, lmrob(sf_36_general.health_t2 ~ sf_36_general.health_t1 + group_intervention))
summary(general.health.lm2.imputed)
# CONSISTENCY FIX: conf.int = TRUE added; every sibling section pools with CIs
general.health.lm2.imputed.pool <- summary(pool(general.health.lm2.imputed), conf.int = TRUE)
tibble(general.health.lm2.imputed.pool)
# t3
# robust
general.health.lm.adj3 <- lmrob(t3 ~ t1 + group, data = cor_general.health)
summary(general.health.lm.adj3)
par(mfrow = c(2, 2))
plot(general.health.lm.adj3)
# lm without outliers (same Cook's distance rule; 4/(45-2-1) = 0.09)
cor_general.health.out3 <- cor_general.health[-c(26, 22), ]
general.health.out3 <- lm(t3 ~ t1 + group, data = cor_general.health.out3)
summary(general.health.out3)
par(mfrow = c(2, 2))
plot(general.health.out3, id.n = 5)
plot(general.health.out3, 4)
# imputed
general.health.lm3.imputed <- with(imputed_sf.36, lmrob(sf_36_general.health_t3 ~ sf_36_general.health_t1 + group_intervention))
summary(general.health.lm3.imputed)
# CONSISTENCY FIX: conf.int = TRUE added
general.health.lm3.imputed.pool <- summary(pool(general.health.lm3.imputed), conf.int = TRUE)
tibble(general.health.lm3.imputed.pool)
# sf_36_vitality----
# One row per participant with t1/t2/t3 columns for the vitality subscale,
# then baseline-adjusted models (follow-up ~ baseline + group).
cor_vitality=clinical.trial %>%
select(id,group,time,sf_36_vitality) %>%
pivot_wider(names_from = time, values_from = sf_36_vitality)
plot.vitality=cor_vitality %>%
select(group,t1,t2,t3)
ggpairs( plot.vitality, ggplot2::aes(colour=group)) #evaluate correlation between group and time
#model adjusted for baseline
#t2
#robust, fitted after removing rows 59 and 21
cor_vitality.rob.out=cor_vitality[-c(59,21),]
vitality.lm.rob2=lmrob(t2~t1+group, data = cor_vitality.rob.out)
summary(vitality.lm.rob2)
par(mfrow=c(2,2))
plot(vitality.lm.rob2)
tidy(vitality.lm.rob2, conf.int = TRUE)
#lm without outlier
#consider as outliers observations that exceed 3 standard deviations in the residuals vs leverage plot
#high-influence points: Cook's distance > 4/(n - p - 1) (Bruce, Peter, and Andrew Bruce. 2017. Practical Statistics for Data Scientists. O'Reilly Media.)
#p is the number of predictors: 4/(52-2-1) = 0.08
#NOTE(review): vitality.out removes the same rows as cor_vitality.rob.out
#above - the two objects are identical
vitality.out=cor_vitality[-c(21,59),]
vitality.lm.adj2.out=lm(t2~t1+group, data = vitality.out)
summary(vitality.lm.adj2.out)
par(mfrow=c(2,2))
plot(vitality.lm.adj2.out, id.n=5)
plot(vitality.lm.adj2.out,4)
#imputed: fit within each imputation, then pool
vitality.lm2.imputed=with(imputed_sf.36,lmrob(sf_36_vitality_t2~sf_36_vitality_t1+group_intervention))
summary(vitality.lm2.imputed)
vitality.lm2.imputed.pool=summary(pool(vitality.lm2.imputed), conf.int = TRUE)
tibble(vitality.lm2.imputed.pool)
#t3
vitality.lm.rob3=lmrob(t3~t1+group, data = cor_vitality)
summary(vitality.lm.rob3)
par(mfrow=c(2,2))
plot(vitality.lm.rob3)
#lm without outlier (same Cook's distance rule; 4/(45-2-1) = 0.09)
cor_vitality.out=cor_vitality[-c(59),]
vitality.lm.adj3.out=lm(t3~t1+group, data = cor_vitality.out)
summary(vitality.lm.adj3.out)
par(mfrow=c(2,2))
plot(vitality.lm.adj3.out,id.n=5)
plot(vitality.lm.adj3.out,4)
#imputed
vitality.lm3.imputed=with(imputed_sf.36,lmrob(sf_36_vitality_t3~sf_36_vitality_t1+group_intervention))
summary(vitality.lm3.imputed)
vitality.lm3.imputed.pool=summary(pool(vitality.lm3.imputed), conf.int = TRUE)
tibble(vitality.lm3.imputed.pool)
# sf_36_social.function----
# One row per participant with t1/t2/t3 columns for the social-function
# subscale, then baseline-adjusted models (follow-up ~ baseline + group).
cor_social.function=clinical.trial %>%
select(id,group,time,sf_36_social.function) %>%
pivot_wider(names_from = time, values_from = sf_36_social.function)
plot.social.function=cor_social.function %>%
select(group,t1,t2,t3)
ggpairs( plot.social.function, ggplot2::aes(colour=group)) #evaluate correlation between group and time
#model adjusted for baseline
#t2
#robust
social.function.lm.rob2=lmrob(t2~t1+group, data = cor_social.function)
summary(social.function.lm.rob2)
par(mfrow=c(2,2))
plot(social.function.lm.rob2)
#lm without outlier
#consider as outliers observations that exceed 3 standard deviations in the residuals vs leverage plot
#high-influence points: Cook's distance > 4/(n - p - 1) (Bruce, Peter, and Andrew Bruce. 2017. Practical Statistics for Data Scientists. O'Reilly Media.)
#p is the number of predictors: 4/(52-2-1) = 0.08
cor_social.function.out=cor_social.function[-c(26,15,34),]
social.function.lm.adj2.out=lm(t2~t1+group, data = cor_social.function.out)
summary(social.function.lm.adj2.out)
par(mfrow=c(2,2))
plot(social.function.lm.adj2.out, id.n=5)
plot(social.function.lm.adj2.out,4)
#imputed: fit within each imputation, then pool
social.function.lm2.imputed=with(imputed_sf.36,lmrob(sf_36_social.function_t2~sf_36_social.function_t1+group_intervention))
summary(social.function.lm2.imputed)
social.function.lm2.imputed.pool=summary(pool(social.function.lm2.imputed), conf.int = TRUE)
tibble(social.function.lm2.imputed.pool)
#t3
#robust
social.function.lm.rob3=lmrob(t3~t1+group, data = cor_social.function)
summary(social.function.lm.rob3)
par(mfrow=c(2,2))
plot(social.function.lm.rob3)
#lm without outlier (same Cook's distance rule; 4/(45-2-1) = 0.09)
cor_social.function.out3=cor_social.function[-c(26,34,55),]
#NOTE(review): OLS lm() despite the ".rob" suffix; name kept because it is
#referenced later when the OLS table is built
social.function.lm.rob3.out=lm(t3~t1+group, data = cor_social.function.out3)
summary(social.function.lm.rob3.out)
par(mfrow=c(2,2))
plot(social.function.lm.rob3.out, id.n=5)
plot(social.function.lm.rob3.out, 4)
#imputed
social.function.lm3.imputed=with(imputed_sf.36,lmrob(sf_36_social.function_t3~sf_36_social.function_t1+group_intervention))
summary(social.function.lm3.imputed)
social.function.lm3.imputed.pool=summary(pool(social.function.lm3.imputed), conf.int = TRUE)
tibble(social.function.lm3.imputed.pool)
#sf_36_emotional.role----
cor_emotional.role <- clinical.trial %>%
  select(id, group, time, sf_36_emotional.role) %>%
  pivot_wider(names_from = time, values_from = sf_36_emotional.role)
plot.emotional.role <- cor_emotional.role %>%
  select(group, t1, t2, t3)
ggpairs(plot.emotional.role, ggplot2::aes(colour = group)) # evaluate correlation between group and time
# Baseline in the intervention arm has very low variance and no linear relation
# with follow-up, so the distribution is inappropriate for a baseline-adjusted
# lm; models below use group only, and the Wilcoxon test is used instead.
#t2
#robust
emotional.role.rob2 <- lmrob(t2 ~ group, data = cor_emotional.role)
summary(emotional.role.rob2)
#OLS
emotional.role.lm2 <- lm(t2 ~ group, data = cor_emotional.role)
summary(emotional.role.lm2)
# sf_36_mental.health----
cor_mental.health <- clinical.trial %>%
  select(id, group, time, sf_36_mental.health) %>%
  pivot_wider(names_from = time, values_from = sf_36_mental.health)
plot.mental.health <- cor_mental.health %>%
  select(group, t1, t2, t3)
ggpairs(plot.mental.health, ggplot2::aes(colour = group)) # evaluate correlation between group and time
# model adjusted for baseline
# t2
# robust
mental.health.lm.rob2 <- lmrob(t2 ~ t1 + group, data = cor_mental.health)
summary(mental.health.lm.rob2)
par(mfrow = c(2, 2))
plot(mental.health.lm.rob2)
# lm without outliers
# outlier rule: residuals > 3 SD in the residuals vs leverage plot;
# Cook's distance > 4/(n - p - 1) = 4/(52-2-1) = 0.08 (Bruce & Bruce 2017)
cor_mental.health.out <- cor_mental.health[-c(60, 59, 22), ]
mental.health.lm.adj2.out <- lm(t2 ~ t1 + group, data = cor_mental.health.out)
summary(mental.health.lm.adj2.out)
par(mfrow = c(2, 2))
plot(mental.health.lm.adj2.out, id.n = 5)
plot(mental.health.lm.adj2.out, 4)
# imputed
mental.health.lm2.imputed <- with(imputed_sf.36, lmrob(sf_36_mental.health_t2 ~ sf_36_mental.health_t1 + group_intervention))
summary(mental.health.lm2.imputed)
mental.health.lm2.imputed.pool <- summary(pool(mental.health.lm2.imputed), conf.int = TRUE)
tibble(mental.health.lm2.imputed.pool)
# t3
mental.health.lm.rob3 <- lmrob(t3 ~ t1 + group, data = cor_mental.health)
# BUG FIX: summary/plot previously referenced mental.health.lm.adj3, which is
# never created; inspect the model fitted on the previous line instead.
summary(mental.health.lm.rob3)
par(mfrow = c(2, 2))
plot(mental.health.lm.rob3)
# lm (no rows excluded at t3; Cook's rule would be 4/(45-2-1) = 0.09)
mental.health.lm3 <- lm(t3 ~ t1 + group, data = cor_mental.health)
summary(mental.health.lm3)
par(mfrow = c(2, 2))
plot(mental.health.lm3, id.n = 5)
plot(mental.health.lm3, 4)
# imputed
mental.health.lm3.imputed <- with(imputed_sf.36, lmrob(sf_36_mental.health_t3 ~ sf_36_mental.health_t1 + group_intervention))
summary(mental.health.lm3.imputed)
mental.health.lm3.imputed.pool <- summary(pool(mental.health.lm3.imputed), conf.int = TRUE)
tibble(mental.health.lm3.imputed.pool)
#Table sf_36 models----
#lm robust mm-estimator method----
# Extract the intervention-group coefficient (tidy + CI) from a fitted model
# and attach its adjusted R^2, transcribed by hand from the model summaries
# above (tidy() does not carry it). Replaces 12 copy-pasted
# tidy/filter/mutate chains.
extract_group_term <- function(model, r2_adj) {
  model %>%
    tidy(conf.int = TRUE) %>%
    filter(term == "groupintervention") %>%
    mutate(r2.adj = r2_adj)
}
#t2
physical.sum.lm.rob2.tab <- extract_group_term(physical.sum.lm.rob2, 0.226)
mental.sum.lm.rob2.tab <- extract_group_term(mental.sum.lm.rob2, 0.24)
phys.functioning.lm.adj2.tab <- extract_group_term(phys.functioning.lm.adj2, 0.590)
vitality.lm.rob2.tab <- extract_group_term(vitality.lm.rob2, 0.175)
social.function.lm.rob2.tab <- extract_group_term(social.function.lm.rob2, 0.188)
mental.health.lm.rob2.tab <- extract_group_term(mental.health.lm.rob2, 0.256)
#t3
physical.sum.lm.adj3.tab <- extract_group_term(physical.sum.lm.adj3, 0.190)
mental.sum.lm.adj3.tab <- extract_group_term(mental.sum.lm.adj3, 0.310)
phys.functioning.lm.adj3.tab <- extract_group_term(phys.functioning.lm.adj3, 0.676)
vitality.lm.rob3.tab <- extract_group_term(vitality.lm.rob3, 0.304)
social.function.lm.rob3.tab <- extract_group_term(social.function.lm.rob3, 0.185)
mental.health.lm.rob3.tab <- extract_group_term(mental.health.lm.rob3, 0.399)
#lm OLS method without outliers----
# Per model: tidy the coefficients with CIs, keep only the intervention-group
# term, and attach the adjusted R^2 transcribed from the model summaries above.
#t2
physical.sum.lm2.tab <- physical.sum.lm2 %>%
  tidy(conf.int = TRUE) %>%
  filter(term == "groupintervention") %>%
  mutate(r2.adj = 0.312)
mental.sum.lm2.tab <- mental.sum.lm2 %>%
  tidy(conf.int = TRUE) %>%
  filter(term == "groupintervention") %>%
  mutate(r2.adj = 0.215)
phys.functioning.lm.adj2.out.tab <- phys.functioning.lm.adj2.out %>%
  tidy(conf.int = TRUE) %>%
  filter(term == "groupintervention") %>%
  mutate(r2.adj = 0.644)
vitality.lm.adj2.out.tab <- vitality.lm.adj2.out %>%
  tidy(conf.int = TRUE) %>%
  filter(term == "groupintervention") %>%
  mutate(r2.adj = 0.644)
social.function.lm.adj2.out.tab <- social.function.lm.adj2.out %>%
  tidy(conf.int = TRUE) %>%
  filter(term == "groupintervention") %>%
  mutate(r2.adj = 0.315)
mental.health.lm.adj2.out.tab <- mental.health.lm.adj2.out %>%
  tidy(conf.int = TRUE) %>%
  filter(term == "groupintervention") %>%
  mutate(r2.adj = 0.406)
#t3
physical.sum.lm3.tab <- physical.sum.lm3 %>%
  tidy(conf.int = TRUE) %>%
  filter(term == "groupintervention") %>%
  mutate(r2.adj = 0.253)
mental.sum.lm3.tab <- mental.sum.lm3 %>%
  tidy(conf.int = TRUE) %>%
  filter(term == "groupintervention") %>%
  mutate(r2.adj = 0.290)
phys.functioning.lm.adj3.out.tab <- phys.functioning.lm.adj3.out %>%
  tidy(conf.int = TRUE) %>%
  filter(term == "groupintervention") %>%
  mutate(r2.adj = 0.719)
vitality.lm.adj3.out.tab <- vitality.lm.adj3.out %>%
  tidy(conf.int = TRUE) %>%
  filter(term == "groupintervention") %>%
  mutate(r2.adj = 0.240)
social.function.lm.rob3.out.tab <- social.function.lm.rob3.out %>%
  tidy(conf.int = TRUE) %>%
  filter(term == "groupintervention") %>%
  mutate(r2.adj = 0.221)
mental.health.lm3.tab <- mental.health.lm3 %>%
  tidy(conf.int = TRUE) %>%
  filter(term == "groupintervention") %>%
  mutate(r2.adj = 0.411)
#lm.imputaded.rob----
# Keep only the pooled intervention-group row from each imputed-model summary.
#t2
physical.sum.lm2.imputed.pool.tab <- physical.sum.lm2.imputed.pool %>%
  filter(term == "group_intervention")
mental.sum.lm2.imputed.pool.tab <- mental.sum.lm2.imputed.pool %>%
  filter(term == "group_intervention")
phys.functioning.lm2.imputed.pool.tab <- phys.functioning.lm2.imputed.pool %>%
  filter(term == "group_intervention")
vitality.lm2.imputed.pool.tab <- vitality.lm2.imputed.pool %>%
  filter(term == "group_intervention")
social.function.lm2.imputed.pool.tab <- social.function.lm2.imputed.pool %>%
  filter(term == "group_intervention")
mental.health.lm2.imputed.pool.tab <- mental.health.lm2.imputed.pool %>%
  filter(term == "group_intervention")
#t3
physical.sum.lm3.imputed.pool.tab <- physical.sum.lm3.imputed.pool %>%
  filter(term == "group_intervention")
mental.sum.lm3.imputed.pool.tab <- mental.sum.lm3.imputed.pool %>%
  filter(term == "group_intervention")
phys.functioning.lm3.imputed.pool.tab <- phys.functioning.lm3.imputed.pool %>%
  filter(term == "group_intervention")
vitality.lm3.imputed.pool.tab <- vitality.lm3.imputed.pool %>%
  filter(term == "group_intervention")
social.function.lm3.imputed.pool.tab <- social.function.lm3.imputed.pool %>%
  filter(term == "group_intervention")
mental.health.lm3.imputed.pool.tab <- mental.health.lm3.imputed.pool %>%
  filter(term == "group_intervention")
##table lm.robust----
# One table row per robust (MM-estimator) model: the intervention-group
# coefficient, t2 models first then t3. The compose() labels below map
# positionally to this bind order -- keep them in sync.
sf.36.robust <- physical.sum.lm.rob2.tab %>%
  bind_rows(
    mental.sum.lm.rob2.tab, phys.functioning.lm.adj2.tab,
    vitality.lm.rob2.tab, social.function.lm.rob2.tab,
    mental.health.lm.rob2.tab, physical.sum.lm.adj3.tab,
    mental.sum.lm.adj3.tab, phys.functioning.lm.adj3.tab,
    vitality.lm.rob3.tab, social.function.lm.rob3.tab,
    mental.health.lm.rob3.tab
  ) %>%
  mutate(
    estimate = round(estimate, 3), std.error = round(std.error, 3),
    statistic = round(statistic, 3), p.value = round(p.value, 3),
    conf.low = round(conf.low, 3), conf.high = round(conf.high, 3)
  ) %>%
  # BUG FIX: the column is named r2.adj; the old "R2.adj." entry never matched
  # anything, so the adjusted R^2 column was silently left out of the intended
  # position (any_of() ignores unmatched names).
  relocate(any_of(c("term", "estimate", "std.error", "statistic", "conf.low", "conf.high", "r2.adj", "p.value"))) %>%
  rename(variable = term) %>%
  flextable() %>%
  compose(
    i = c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), j = 1,
    value = as_paragraph(c(
      "physical summary t2", "mental summary t2", "physical functioning t2",
      "vitality t2", "social functioning t2", "mental health t2",
      "physical summary t3", "mental summary t3", "physical functioning t3",
      "vitality t3", "social functioning t3", "mental health t3"
    ))
  ) %>%
  autofit()
#table lm.OLS----
# Same presentation table for the OLS-without-outliers models; the compose()
# labels map positionally to the bind order.
sf.36.ols <- physical.sum.lm2.tab %>%
  bind_rows(
    mental.sum.lm2.tab, phys.functioning.lm.adj2.out.tab,
    vitality.lm.adj2.out.tab,
    social.function.lm.adj2.out.tab, mental.health.lm.adj2.out.tab,
    physical.sum.lm3.tab, mental.sum.lm3.tab, phys.functioning.lm.adj3.out.tab,
    vitality.lm.adj3.out.tab, social.function.lm.rob3.out.tab,
    mental.health.lm3.tab
  ) %>%
  mutate(
    estimate = round(estimate, 3), std.error = round(std.error, 3),
    statistic = round(statistic, 3), p.value = round(p.value, 3),
    conf.low = round(conf.low, 3), conf.high = round(conf.high, 3)
  ) %>%
  # BUG FIX: column is r2.adj, not "R2.adj." -- the old entry never matched,
  # leaving the adjusted R^2 column out of the intended ordering.
  relocate(any_of(c("term", "estimate", "std.error", "statistic", "conf.low", "conf.high", "r2.adj", "p.value"))) %>%
  rename(variable = term) %>%
  flextable() %>%
  compose(
    i = c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), j = 1,
    value = as_paragraph(c(
      "physical summary t2", "mental summary t2", "physical functioning t2",
      "vitality t2", "social functioning t2", "mental health t2",
      "physical summary t3", "mental summary t3", "physical functioning t3",
      "vitality t3", "social functioning t3", "mental health t3"
    ))
  ) %>%
  autofit()
#table lm.imputaded.rob----
sf.36.lm.imputed.rob=physical.sum.lm2.imputed.pool.tab %>%
bind_rows(mental.sum.lm2.imputed.pool.tab,phys.functioning.lm2.imputed.pool.tab,
vitality.lm2.imputed.pool.tab,social.function.lm2.imputed.pool.tab,
mental.health.lm2.imputed.pool.tab, physical.sum.lm3.imputed.pool.tab,
mental.sum.lm3.imputed.pool.tab,phys.functioning.lm3.imputed.pool.tab,
vitality.lm3.imputed.pool.tab,social.function.lm3.imputed.pool.tab,
mental.health.lm3.imputed.pool.tab) %>%
mutate(estimate=round(estimate,3),std.error=round(std.error,3),
statistic=round(statistic,3),conf.low=round(`2.5 %`,3),conf.high=round(`97.5 %`,3) ,
df=round(df,3),p.value=round(p.value,3)) %>%
select(-df,-`2.5 %`,-`97.5 %`) %>%
relocate(any_of(c("term", "estimate", "std.error","statistic","conf.low","conf.high","p.value","R2.adj."))) %>%
rename(variable=term) %>%
flextable() %>%
compose(i=c(1,2,3,4,5,6,7,8,9,10,11,12),j=1, value = as_paragraph (c("physical summary t2", "mental summary t2",
"physical functioning t2", "vitality t2", "social functioning t2", "mental health t2","physical summary t3",
"mental summary t3", "physical functioning t3", "vitality t3", "social functioning t3", "mental health t3"))) %>%
autofit()
#PLOT MLR SIGNIFICANT----
#T2 ----
# For each pain outcome at posttreatment (t2), three models are prepared:
#   * primary analysis: robust regression (lmrob) of t2 on baseline t1 + group;
#   * sensitivity: OLS (lm) on the outlier-removed data set;
#   * sensitivity: pooled estimates from the multiply-imputed fits.
#pelvic pain
p.pain.lm.adj2=lmrob(t2~t1+group, data = cor_pelvic.pain)
p.pain.lm.adj2.out=lm(t2~t1+group, data = cor_pelvic.pain2.out)
p.pain.lm.imputed.pool=summary(pool(p.pain.lm.imputed), conf.int = TRUE)
#dyschezia
dyschezia.lm.adj2=lmrob(t2~t1+group, data = cor_dyschezia)
dyschezia.lm.adj.out=lm(t2~t1+group, data = cor_dyschezia2)
dyschezia.lm.imputed.pool=summary(pool(dyschezia.lm.imputed), conf.int = TRUE)
#pain unpleasantness
pain.unpleasantness.lm.adj2=lmrob(t2~t1+group, data = cor_pain.unpleasantness)
pain.unpleasantness.lm.adj.out=lm(t2~t1+group, data = cor_pain.unpleasantness2)
pain.unpleasantness.lm.imputed.pool=summary(pool(pain.unpleasantness.lm.imputed), conf.int = TRUE)
#t3----
# Bare object names below only print previously fitted follow-up (t3) models to
# the console; the objects are presumably created earlier in the script -- they
# are not defined in this section (TODO confirm).
p.pain.lm.adj3
p.pain.lm.adj3.out
p.pain.lm.imputed.pool.t3
dysuria.lm.adj3
dysuria.lm.adj3.out
dysuria.lm.imputed.pool.t3
dyspareunia.lm.adj3
dyspareunia.lm.adj3.out
dyspareunia.lm.imputed.pool.t3
dyschezia.lm.adj3
dyschezia.lm.adj3.out
dyschezia.lm.imputed.pool.t3
dysmenorrhea.lm.adj3
dysmenorrhea.lm.adj3.out
dysmenorrhea.lm.imputed.pool.t3
pain.unpleasantness.lm.adj3
pain.unpleasantness.lm.adj3.out
pain.unpleasantness.lm.imputed.pool.t3
#Plot primary outcome t2----
# Tidy each model and keep only the intervention-group coefficient.  The robust
# and OLS models use broom::tidy() (term "groupintervention"); the pooled imputed
# summaries are already data frames and use term "group_intervention" with CI
# columns "2.5 %"/"97.5 %" renamed to conf.low/conf.high.
p.pain.rob=p.pain.lm.adj2 %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
p.pain.lm=p.pain.lm.adj2.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
p.pain.lm.imputed.pool=p.pain.lm.imputed.pool%>%
filter(term=="group_intervention") %>%
rename(conf.low='2.5 %', conf.high= '97.5 %')
dyschezia.lm=dyschezia.lm.adj2 %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
dyschezia.lm.out=dyschezia.lm.adj.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
dyschezia.lm.imputed.pool=dyschezia.lm.imputed.pool %>%
filter(term=="group_intervention") %>%
rename(conf.low='2.5 %', conf.high= '97.5 %')
pain.unpleasantness.lm= pain.unpleasantness.lm.adj2 %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
pain.unpleasantness.lm.out=pain.unpleasantness.lm.adj.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
pain.unpleasantness.lm.imputed=pain.unpleasantness.lm.imputed.pool %>%
filter(term=="group_intervention") %>%
rename(conf.low='2.5 %', conf.high= '97.5 %')
# Assemble the 9-row (3 outcomes x 3 analyses) data frame for the t2 forest plot:
# estimate + CI per row, labelled by outcome/analysis for the plot axes.
pains.mlrb=p.pain.rob %>%
bind_rows(p.pain.lm,p.pain.lm.imputed.pool,dyschezia.lm,
dyschezia.lm.out,dyschezia.lm.imputed.pool,
pain.unpleasantness.lm,pain.unpleasantness.lm.out,
pain.unpleasantness.lm.imputed) %>%
select(estimate,conf.low,conf.high) %>%
add_column(Posttreatment=c("Pelvic.pain.primary","Pelvic.pain.OLS","Pelvic.pain.imputed",
"Dyschezia.primary","Dyschezia.OLS","Dyschezia.imputed",
"Pain.unpleasantness.primary","Pain.unpleasantness.OLS","Pain.unpleasantness.imputed"),
.before = "estimate", analyses=c("Primary","Sensitivity.OLS.without.outliers", "Sensitivity.imputed.data",
"Primary","Sensitivity.OLS.without.outliers", "Sensitivity.imputed.data",
"Primary","Sensitivity.OLS.without.outliers", "Sensitivity.imputed.data"))
#t3----
# Follow-up (t3): as for t2, tidy each model and keep only the intervention-group
# coefficient.  Robust/OLS models go through broom::tidy(); pooled imputed
# summaries are filtered on term "group_intervention" and their "2.5 %"/"97.5 %"
# CI columns renamed to conf.low/conf.high.  Note that several pipelines assign
# back to the same name (e.g. dysuria.lm.adj3), so re-running this section
# without refitting the models upstream will fail.
p.pain.lm3=p.pain.lm.adj3 %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
p.pain.lm.out3=p.pain.lm.adj3.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
p.pain.lm.imputed.pool.t3 = p.pain.lm.imputed.pool.t3 %>%
filter(term=="group_intervention")%>%
rename(conf.low='2.5 %', conf.high= '97.5 %')
dysuria.lm.adj3=dysuria.lm.adj3%>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
dysuria.lm.out3=dysuria.lm.adj3.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
dysuria.lm.imputed.pool.t3 =dysuria.lm.imputed.pool.t3 %>%
filter(term=="group_intervention")%>%
rename(conf.low='2.5 %', conf.high= '97.5 %')
dyspareunia.lm3=dyspareunia.lm.adj3 %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
dyspareunia.lm.out3=dyspareunia.lm.adj3.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
dyspareunia.lm.imputed.pool.t3 = dyspareunia.lm.imputed.pool.t3 %>%
filter(term=="group_intervention")%>%
rename(conf.low='2.5 %', conf.high= '97.5 %')
dyschezia.lm3=dyschezia.lm.adj3 %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
dyschezia.lm.out3=dyschezia.lm.adj3.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
dyschezia.lm.imputed.pool.t3=dyschezia.lm.imputed.pool.t3 %>%
filter(term=="group_intervention")%>%
rename(conf.low='2.5 %', conf.high= '97.5 %')
dysmenorrhea.lm3=dysmenorrhea.lm.adj3 %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
dysmenorrhea.lm.out3=dysmenorrhea.lm.adj3.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
dysmenorrhea.lm.imputed.pool.t3=dysmenorrhea.lm.imputed.pool.t3%>%
filter(term=="group_intervention")%>%
rename(conf.low='2.5 %', conf.high= '97.5 %')
# Pain unpleasantness at follow-up (t3): primary robust fit, OLS sensitivity fit,
# and pooled imputed fit, each reduced to the intervention-group coefficient.
pain.unpleasantness.lm3= pain.unpleasantness.lm.adj3 %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
# BUG FIX: this row previously tidied pain.unpleasantness.lm.adj.out, which is
# the POSTTREATMENT (t2) OLS model fitted above; every other t3 OLS row uses the
# corresponding *.adj3.out model, so the t3 forest plot was mixing in a t2
# estimate.  Use the t3 OLS-without-outliers fit instead.  The result keeps the
# name pain.unpleasantness.lm.out because pain.mlr3b below binds that name
# (note: as in the original code, this shadows the t2 object of the same name).
pain.unpleasantness.lm.out=pain.unpleasantness.lm.adj3.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
pain.unpleasantness.lm.imputed.t3=pain.unpleasantness.lm.imputed.pool.t3 %>%
filter(term=="group_intervention")%>%
rename(conf.low='2.5 %', conf.high= '97.5 %')
# Assemble the 18-row (6 outcomes x 3 analyses) data frame for the t3 forest
# plot: estimate + CI per row, labelled by outcome (Follow.up) and analysis type.
pain.mlr3b=p.pain.lm3 %>%
bind_rows(p.pain.lm.out3,p.pain.lm.imputed.pool.t3,
dysuria.lm.adj3,dysuria.lm.out3, dysuria.lm.imputed.pool.t3,
dyspareunia.lm3,dyspareunia.lm.out3,dyspareunia.lm.imputed.pool.t3,
dyschezia.lm3,dyschezia.lm.out3, dyschezia.lm.imputed.pool.t3,
dysmenorrhea.lm3,dysmenorrhea.lm.out3,dysmenorrhea.lm.imputed.pool.t3,
pain.unpleasantness.lm3,pain.unpleasantness.lm.out,pain.unpleasantness.lm.imputed.t3) %>%
select(estimate,conf.low,conf.high) %>%
add_column(Follow.up=c("Pelvic.pain.primary","Pelvic.pain.OLS","Pelvic.pain.imputed",
"Dysuria.primary","Dysuria.OLS","Dysuria.imputed",
"Dyspareunia.primary","Dyspareunia.OLS","Dyspareunia.imputed",
"Dyschezia.primary","Dyschezia.OLS","Dyschezia.imputed",
"Dysmenorrhea.primary","Dysmenorrhea.OLS","Dysmenorrhea.imputed",
"Pain.unpleasantness.primary","Pain.unpleasantness.OLS","Pain.unpleasantness.imputed"),
.before = "estimate", analyses=c("Primary","Sensitivity.OLS.without.outliers", "Sensitivity.imputed.data",
"Primary","Sensitivity.OLS.without.outliers", "Sensitivity.imputed.data",
"Primary","Sensitivity.OLS.without.outliers", "Sensitivity.imputed.data",
"Primary","Sensitivity.OLS.without.outliers", "Sensitivity.imputed.data",
"Primary","Sensitivity.OLS.without.outliers", "Sensitivity.imputed.data",
"Primary","Sensitivity.OLS.without.outliers", "Sensitivity.imputed.data"))
#Plot secondary outcome----
#t2
# Secondary outcomes (SF-36 subscales) at posttreatment: tidy the models that
# showed significant effects and keep the intervention-group coefficient.
mental.health.rob2=mental.health.lm.rob2 %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
mental.health.lm.adj2.out.tab=mental.health.lm.adj2.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
mental.health.lm2.imputed.pool.tab=mental.health.lm2.imputed.pool %>%
filter(term=="group_intervention") %>%
rename(conf.low='2.5 %', conf.high= '97.5 %')
vitality.lm2.out.tab=vitality.lm.adj2.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
# NOTE(review): stress.lm.adj.out is overwritten with its own tidied summary, so
# this line cannot be re-run without refitting the model; the tidied result is
# also not used in Sf.36.MLRb below -- confirm whether it is needed elsewhere.
stress.lm.adj.out= stress.lm.adj.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
# 4-row data frame (vitality OLS row + three mental-health rows) for the t2
# SF-36 forest plot.
Sf.36.MLRb=vitality.lm2.out.tab %>%
bind_rows(mental.health.rob2,mental.health.lm.adj2.out.tab,mental.health.lm2.imputed.pool.tab) %>%
select(estimate,conf.low,conf.high) %>%
add_column(Posttreatment=c("SF-36.Vitality.primary","SF-36.Mental.Health.primary","SF-36.Mental.Health.OLS","SF-36.Mental.Health.imputed"),
.before = "estimate", analyses=c("Sensitivity.OLS.without.outliers","Primary","Sensitivity.OLS.without.outliers",
"Sensitivity.imputed.data"))
#t3----
# SF-36 subscales at follow-up: same tidy/filter pattern for vitality and
# mental health, then a 5-row data frame for the t3 SF-36 forest plot.
vitality.rob3.tab=vitality.lm.rob3 %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
vitality.lm.out3=vitality.lm.adj3.out %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
mental.health.rob3.tab=mental.health.lm.rob3 %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
mental.health.lm3.tab= mental.health.lm3 %>%
tidy(conf.int = TRUE) %>%
filter(term=="groupintervention")
mental.health.lm3.imputed.pool.tab= mental.health.lm3.imputed.pool %>%
filter(term=="group_intervention") %>%
rename(conf.low='2.5 %', conf.high= '97.5 %')
Sf.36.MLR3=vitality.rob3.tab %>%
bind_rows(vitality.lm.out3,mental.health.rob3.tab,mental.health.lm3.tab,mental.health.lm3.imputed.pool.tab) %>%
select(estimate,conf.low,conf.high) %>%
add_column(Follow.up=c("SF-36.Vitality.primary","SF-36.Vitality.OLS",
"SF-36.Mental.Health.primary","SF-36.Mental.Health.OLS","SF-36.Mental.Health.imputed"),
.before = "estimate", analyses=c("Primary","Sensitivity.OLS.without.outliers",
"Primary","Sensitivity.OLS.without.outliers","sensitivity.imputed.data"))
#t2----
#pain----
library("scales")
# Forest plot of the t2 pain outcomes (pains.mlrb): point + CI range per
# outcome/analysis combination, flipped so outcomes run down the y axis.
# NOTE(review): scale_size_manual() has no effect here -- no size aesthetic is
# mapped in aes(); only shape and colour are mapped.
pain.mlr.plot1.2=ggplot(pains.mlrb, mapping = aes(x = Posttreatment, y = estimate,ymin = conf.low, ymax = conf.high, group=analyses)) +
geom_pointrange(aes(shape=analyses,color=analyses)) +
scale_size_manual(values=c(5,5,5))+
theme(axis.text.x = element_text(face="bold"),
axis.text.y = element_text(face="bold"),
axis.text = element_text(size = rel(0.7))) +
theme(legend.position="none") +
scale_y_continuous(breaks=seq(-5.0,0,0.5),labels = label_number(accuracy = 0.1)) +
scale_x_discrete(labels = c("Pelvic.pain.primary"="Pelvic pain","Pelvic.pain.OLS"="Pelvic pain","Pelvic.pain.imputed"="Pelvic pain",
"Dyschezia.primary"="Dyschezia","Dyschezia.OLS"="Dyschezia","Dyschezia.imputed"="Dyschezia",
"Pain.unpleasantness.primary"="Pain unpleasantness","Pain.unpleasantness.OLS"="Pain unpleasantness",
"Pain.unpleasantness.imputed"="Pain unpleasantness"))+
labs(y="",x="")+
annotate("text", x = 9.3, y = -4.5, label= "A", size = 4)+
coord_flip()
#SF-36----
# Forest plot of the t2 SF-36 outcomes (Sf.36.MLRb); panel label "A".
Sf.36.MLR2.1b.plot=ggplot(Sf.36.MLRb, mapping = aes(x = Posttreatment, y = estimate,ymin = conf.low, ymax = conf.high, group=analyses)) +
geom_pointrange(aes(shape=analyses,color=analyses)) +
scale_size_manual(values=c(5,5,5))+
theme(axis.text.x = element_text(face="bold"),
axis.text.y = element_text(face="bold"),
axis.text = element_text(size = rel(0.8))) +
theme(legend.position="none") +
scale_y_continuous(breaks=c (0.2, 4.5, 6.5,13,10,16,21,27 ,31)) +
scale_x_discrete(labels = c("SF-36.Vitality.primary"="SF-36 Vitality",
"SF-36.Mental.Health.primary"="SF-36 Mental health",
"SF-36.Mental.Health.OLS"="SF-36 Mental health",
"SF-36.Mental.Health.imputed"="SF-36 Mental health"))+
labs(y="",x="")+
annotate("text", x = 4.47, y = 0.2, label= "A", size = 4)+
coord_flip()
#t3----
#Pain----
# Point-only plot (no CI range) of t3 pain estimates.
# NOTE(review): this uses pain.mlr3, which is not defined in this section (the
# data frame assembled above is pain.mlr3b); presumably pain.mlr3 is created
# earlier in the script -- confirm, otherwise this plot errors.  This plot is
# also not used in the grid.arrange() calls below (pain.mlr.plot2.2 is).
pain.mlr.plot2=ggplot(pain.mlr3, mapping = aes(x = Follow.up, y = estimate, group=analyses)) +
geom_point(aes(shape=analyses,color=analyses)) +
scale_size_manual(values=c(5,5,5))+
theme(axis.text.x = element_text(face="bold"),
axis.text.y = element_text(face="bold"),
axis.text = element_text(size = rel(0.7))) +
theme(legend.position="none") +
scale_y_continuous(breaks=c (-3.2, -2.7,-2.2, -1.7, -1.4)) +
scale_x_discrete(labels = c("Pelvic.pain"="Pelvic pain",
"Pain.unpleasantness"="Pain unpleasantness"))+
labs(y="")+
annotate("text", x = 9.3, y = -4.5, label= "B", size = 4)+
coord_flip()
# Forest plot of the t3 pain outcomes (pain.mlr3b), panel label "B"; paired with
# pain.mlr.plot1.2 in grid.arrange() below.
pain.mlr.plot2.2=ggplot(pain.mlr3b, mapping = aes(x = Follow.up, y = estimate,ymin = conf.low, ymax = conf.high, group=analyses)) +
geom_pointrange(aes(shape=analyses,color=analyses)) +
scale_size_manual(values=c(5,5,5))+
theme(axis.text.x = element_text(face="bold"),
axis.text.y = element_text(face="bold"),
axis.text = element_text(size = rel(0.7))) +
theme(legend.position="none") +
scale_y_continuous(breaks=seq(-6.0,0,0.5)) +
scale_x_discrete(labels = c("Pelvic.pain.primary"="Pelvic pain","Pelvic.pain.OLS"="Pelvic pain","Pelvic.pain.imputed"="Pelvic pain",
"Dysuria.primary"="Dysuria","Dysuria.OLS"="Dysuria","Dysuria.imputed"="Dysuria",
"Dyspareunia.primary"="Dyspareunia","Dyspareunia.OLS"="Dyspareunia","Dyspareunia.imputed"="Dyspareunia",
"Dyschezia.primary"="Dyschezia","Dyschezia.OLS"="Dyschezia","Dyschezia.imputed"="Dyschezia",
"Dysmenorrhea.primary"="Dysmenorrhea","Dysmenorrhea.OLS"="Dysmenorrhea","Dysmenorrhea.imputed"="Dysmenorrhea",
"Pain.unpleasantness.primary"="Pain unpleasantness","Pain.unpleasantness.OLS"="Pain unpleasantness",
"Pain.unpleasantness.imputed"="Pain unpleasantness"))+
labs(y="",x="")+
annotate("text", x = 18.3, y = -6, label= "B", size = 4)+
coord_flip()
#SF-36---
# Forest plot of the t3 SF-36 outcomes (Sf.36.MLR3), panel label "B".
Sf.36.MLR3.plot=ggplot(Sf.36.MLR3, mapping = aes(x = Follow.up,ymin = conf.low, ymax = conf.high, y = estimate, group=analyses)) +
geom_pointrange(aes(shape=analyses,color=analyses)) +
scale_size_manual(values=c(5,5,5))+
theme(axis.text.x = element_text(face="bold"),
axis.text.y = element_text(face="bold"),
axis.text = element_text(size = rel(0.8))) +
scale_x_discrete(labels = c("SF-36.Vitality.primary"="SF-36 Vitality",
"SF-36.Vitality.OLS"="SF-36 Vitality",
"SF-36.Mental.Health.primary"="SF-36 Mental health",
"SF-36.Mental.Health.OLS"="SF-36 Mental health",
"SF-36.Mental.Health.imputed"="SF-36 Mental health"))+
labs(y="",x="")+
theme(legend.position="none") +
scale_y_continuous(breaks=c (2.5,7, 9, 16, 19,28,31)) +
annotate("text", x = 5.5, y = 2, label= "B", size = 4)+
coord_flip()
#grade plots----
# Stack the posttreatment (panel A) and follow-up (panel B) forest plots.
grid.arrange(pain.mlr.plot1.2, pain.mlr.plot2.2 , ncol=1)
# BUG FIX: this previously referenced Sf.36.MLR2.plot, but the t2 SF-36 plot
# built above is named Sf.36.MLR2.1b.plot (and is otherwise unused); use the
# existing object so the call does not fail with "object not found".
grid.arrange(Sf.36.MLR2.1b.plot, Sf.36.MLR3.plot , ncol=1)
|
library(EngrExpt)
### Name: oven
### Title: Drying of silica
### Aliases: oven
### Keywords: datasets
### ** Examples
# Print the structure (columns and types) of the 'oven' dataset from EngrExpt.
str(oven)
| /data/genthat_extracted_code/EngrExpt/examples/oven.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 135 | r | library(EngrExpt)
### Name: oven
### Title: Drying of silica
### Aliases: oven
### Keywords: datasets
### ** Examples
str(oven)
|
library(rcellminer)
# Fix the RNG so the sample() of oncogenes below is reproducible.
set.seed(1)
molData <- getMolDataMatrices()
selectedOncogenes <- c("ABL1", "ALK", "BRAF", "CCND1", "CCND3", "CCNE1", "CCNE2",
"CDC25A", "EGFR", "ERBB2", "EZH2", "FOS", "FOXL2", "HRAS",
"IDH1", "IDH2", "JAK2", "KIT", "KRAS", "MET", "MOS", "MYC",
"NRAS", "PDGFB", "PDGFRA", "PIK3CA", "PIK3CB", "PIK3CD",
"PIK3CG", "PIM1", "PML", "RAF1", "REL", "RET", "SRC", "STK11",
"TP63", "WNT10B", "WNT4", "WNT2B", "WNT9A", "WNT3", "WNT5A",
"WNT5B", "WNT10A", "WNT11", "WNT2", "WNT1", "WNT7B", "WISP1",
"WNT8B", "WNT7A", "WNT16", "WISP2", "WISP3", "FZD5", "FZD1")
selectedOncogenes <- sample(selectedOncogenes, 20)
# Generate the appropriate rownames
# Row labels in molData are prefixed by data type ("exp" = expression).
# NOTE(review): copGeneLabels is built but never used in this script.
expGeneLabels <- paste0("exp", selectedOncogenes)
copGeneLabels <- paste0("cop", selectedOncogenes)
a <- molData[["exp"]][expGeneLabels,]
# Strip the 3-character "exp" prefix from each rowname, leaving the gene symbol.
d <- sapply(rownames(a), function(x) {
substr(x, 4, nchar(x))
})
e <- unname(d)
rownames(a) <- e
# Keep the first 20 samples, draw a heatmap, and save the matrix as TSV.
b <- a[, 1:20]
heatmap(b, cexCol=0.75)
write.table(b, file="heatmapExample.txt", sep="\t", quote=FALSE)
| /presentations/files/genHeatmapExample.R | no_license | cannin/rTutorial | R | false | false | 1,180 | r | library(rcellminer)
set.seed(1)
molData <- getMolDataMatrices()
selectedOncogenes <- c("ABL1", "ALK", "BRAF", "CCND1", "CCND3", "CCNE1", "CCNE2",
"CDC25A", "EGFR", "ERBB2", "EZH2", "FOS", "FOXL2", "HRAS",
"IDH1", "IDH2", "JAK2", "KIT", "KRAS", "MET", "MOS", "MYC",
"NRAS", "PDGFB", "PDGFRA", "PIK3CA", "PIK3CB", "PIK3CD",
"PIK3CG", "PIM1", "PML", "RAF1", "REL", "RET", "SRC", "STK11",
"TP63", "WNT10B", "WNT4", "WNT2B", "WNT9A", "WNT3", "WNT5A",
"WNT5B", "WNT10A", "WNT11", "WNT2", "WNT1", "WNT7B", "WISP1",
"WNT8B", "WNT7A", "WNT16", "WISP2", "WISP3", "FZD5", "FZD1")
selectedOncogenes <- sample(selectedOncogenes, 20)
# Generate the appropriate rownames
expGeneLabels <- paste0("exp", selectedOncogenes)
copGeneLabels <- paste0("cop", selectedOncogenes)
a <- molData[["exp"]][expGeneLabels,]
d <- sapply(rownames(a), function(x) {
substr(x, 4, nchar(x))
})
e <- unname(d)
rownames(a) <- e
b <- a[, 1:20]
heatmap(b, cexCol=0.75)
write.table(b, file="heatmapExample.txt", sep="\t", quote=FALSE)
|
## Reading the data
# Read the full household power consumption file; "?" encodes missing values.
dat<-read.table("household_power_consumption.txt", header=TRUE, sep=";", dec=".", na.string="?")
## Extract data from the dates 2007-02-01 and 2007-02-02
data<-dat[with(dat, Date=="1/2/2007"|Date=="2/2/2007"), ]
##Change date, time format
# Combine the Date and Time columns into one timestamp for the x axis.
dateTime <- strptime( paste(data$Date,data$Time), format= "%d/%m/%Y %H:%M:%S")
##Create a png
png('plot 4.png')
##Plot
# 2x2 panel: global active power, voltage, energy sub-metering (3 series),
# and global reactive power, each against time.
par(mfrow = c(2, 2))
with(data, {
plot(dateTime, Global_active_power, xlab = "", ylab = "Global Active Power", type="l")
plot(dateTime, Voltage, ylab = "voltage", type="l")
plot(dateTime, Sub_metering_1, xlab = "", ylab = "Energy sub metering", type="l")
lines (x= dateTime, y= Sub_metering_2, col ="red")
lines (x= dateTime, y= Sub_metering_3, col ="blue")
# BUG FIX: legend()'s bty argument recognizes only "o" (box) and "n" (no box);
# the original bty="N" is treated as != "n", so a border was drawn around the
# legend.  Use the documented lowercase "n" to suppress the box.
legend("topright", col = c("black", "red", "blue")
, lty= "solid", bty ="n"
, legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
plot(dateTime, Global_reactive_power, type="l")
})
dev.off() ## Close the PNG device!
| /Data-Science/4_Exploratory_Data_Analysis/Lab/Lab_1/Solution/plot 4.R | permissive | shanky0507/Coursera-John-Hopkins | R | false | false | 1,027 | r | ## Reading the data
dat<-read.table("household_power_consumption.txt", header=TRUE, sep=";", dec=".", na.string="?")
## Extract data from the dates 2007-02-01 and 2007-02-02
data<-dat[with(dat, Date=="1/2/2007"|Date=="2/2/2007"), ]
##Change date, time format
dateTime <- strptime( paste(data$Date,data$Time), format= "%d/%m/%Y %H:%M:%S")
##Create a png
png('plot 4.png')
##Plot
par(mfrow = c(2, 2))
with(data, {
plot(dateTime, Global_active_power, xlab = "", ylab = "Global Active Power", type="l")
plot(dateTime, Voltage, ylab = "voltage", type="l")
plot(dateTime, Sub_metering_1, xlab = "", ylab = "Energy sub metering", type="l")
lines (x= dateTime, y= Sub_metering_2, col ="red")
lines (x= dateTime, y= Sub_metering_3, col ="blue")
legend("topright", col = c("black", "red", "blue")
, lty= "solid", bty ="N"
, legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
plot(dateTime, Global_reactive_power, type="l")
})
dev.off() ## Close the PNG device!
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/heading.R
\name{heading}
\alias{heading}
\title{Add a Heading to an RTF Document}
\usage{
heading(words, htype = 1, rtf = doc)
}
\arguments{
\item{words}{Character scalar text of heading to add to document.}
\item{htype}{Integer scalar heading type, 1=bold and font size 12, 2=bold and
font size 10, 3=italics and font size 10, default 1.}
\item{rtf}{An rtf object, default \code{doc}.}
}
\description{
Add a text heading to an rtf (rich text format) document.
}
\details{
The specified heading is written to the rtf file.
}
\seealso{
\code{\link{startrtf}} for an example, \code{\link{para}},
\code{\link{tabl}}, \code{\link{figu}},
\code{\link{figbig}}, \code{\link{endrtf}},
\code{\link[rtf]{RTF}}.
}
| /man/heading.Rd | no_license | JVAdams/GLFC | R | false | true | 790 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/heading.R
\name{heading}
\alias{heading}
\title{Add a Heading to an RTF Document}
\usage{
heading(words, htype = 1, rtf = doc)
}
\arguments{
\item{words}{Character scalar text of heading to add to document.}
\item{htype}{Integer scalar heading type, 1=bold and font size 12, 2=bold and
font size 10, 3=italics and font size 10, default 1.}
\item{rtf}{An rtf object, default \code{doc}.}
}
\description{
Add a text heading to an rtf (rich text format) document.
}
\details{
The specified heading is written to the rtf file.
}
\seealso{
\code{\link{startrtf}} for an example, \code{\link{para}},
\code{\link{tabl}}, \code{\link{figu}},
\code{\link{figbig}}, \code{\link{endrtf}},
\code{\link[rtf]{RTF}}.
}
|
#' Code
#'
#' Computer programming source code. Example: Full (compile ready) solutions, code snippet samples, scripts, templates.
#'
#'
#' @param id identifier for the object (URI)
#' @param workExample (CreativeWork type.) Example/instance/realization/derivation of the concept of this creative work. eg. The paperback edition, first edition, or eBook.
#' @param video (VideoObject type.) An embedded video object.
#' @param version (Text or Number type.) The version of the CreativeWork embodied by a specified resource.
#' @param typicalAgeRange (Text or Text type.) The typical expected age range, e.g. '7-9', '11-'.
#' @param translator (Person or Organization or Person or Organization type.) Organization or person who adapts a creative work to different languages, regional differences and technical requirements of a target market, or that translates during some event.
#' @param timeRequired (Duration type.) Approximate or typical time it takes to work with or through this learning resource for the typical intended target audience, e.g. 'P30M', 'P1H25M'.
#' @param thumbnailUrl (URL type.) A thumbnail image relevant to the Thing.
#' @param text (Text type.) The textual content of this CreativeWork.
#' @param temporalCoverage (URL or Text or DateTime type.) The temporalCoverage of a CreativeWork indicates the period that the content applies to, i.e. that it describes, either as a DateTime or as a textual string indicating a time period in [ISO 8601 time interval format](https://en.wikipedia.org/wiki/ISO_8601#Time_intervals). In the case of a Dataset it will typically indicate the relevant time period in a precise notation (e.g. for a 2011 census dataset, the year 2011 would be written "2011/2012"). Other forms of content e.g. ScholarlyArticle, Book, TVSeries or TVEpisode may indicate their temporalCoverage in broader terms - textually or via well-known URL. Written works such as books may sometimes have precise temporal coverage too, e.g. a work set in 1939 - 1945 can be indicated in ISO 8601 interval format format via "1939/1945".
#' @param sponsor (Person or Organization or Person or Organization or Person or Organization or Person or Organization type.) A person or organization that supports a thing through a pledge, promise, or financial contribution. e.g. a sponsor of a Medical Study or a corporate sponsor of an event.
#' @param spatialCoverage (Place type.) The spatialCoverage of a CreativeWork indicates the place(s) which are the focus of the content. It is a subproperty of contentLocation intended primarily for more technical and detailed materials. For example with a Dataset, it indicates areas that the dataset describes: a dataset of New York weather would have spatialCoverage which was the place: the state of New York.
#' @param sourceOrganization (Organization type.) The Organization on whose behalf the creator was working.
#' @param schemaVersion (URL or Text type.) Indicates (by URL or string) a particular version of a schema used in some CreativeWork. For example, a document could declare a schemaVersion using an URL such as http://schema.org/version/2.0/ if precise indication of schema version was required by some application.
#' @param reviews (Review or Review or Review or Review or Review type.) Review of the item.
#' @param review (Review or Review or Review or Review or Review or Review or Review or Review type.) A review of the item.
#' @param releasedEvent (PublicationEvent type.) The place and time the release was issued, expressed as a PublicationEvent.
#' @param recordedAt (Event type.) The Event where the CreativeWork was recorded. The CreativeWork may capture all or part of the event.
#' @param publishingPrinciples (URL or CreativeWork or URL or CreativeWork or URL or CreativeWork type.) The publishingPrinciples property indicates (typically via [[URL]]) a document describing the editorial principles of an [[Organization]] (or individual e.g. a [[Person]] writing a blog) that relate to their activities as a publisher, e.g. ethics or diversity policies. When applied to a [[CreativeWork]] (e.g. [[NewsArticle]]) the principles are those of the party primarily responsible for the creation of the [[CreativeWork]].While such policies are most typically expressed in natural language, sometimes related information (e.g. indicating a [[funder]]) can be expressed using schema.org terminology.
#' @param publisher (Person or Organization type.) The publisher of the creative work.
#' @param publication (PublicationEvent type.) A publication event associated with the item.
#' @param provider (Person or Organization or Person or Organization or Person or Organization or Person or Organization or Person or Organization or Person or Organization type.) The service provider, service operator, or service performer; the goods producer. Another party (a seller) may offer those services or goods on behalf of the provider. A provider may also serve as the seller.
#' @param producer (Person or Organization type.) The person or organization who produced the work (e.g. music album, movie, tv/radio series etc.).
#' @param position (Text or Integer or Text or Integer type.) The position of an item in a series or sequence of items.
#' @param offers (Offer or Offer or Offer or Offer or Offer or Offer or Offer type.) An offer to provide this item—for example, an offer to sell a product, rent the DVD of a movie, perform a service, or give away tickets to an event.
#' @param mentions (Thing type.) Indicates that the CreativeWork contains a reference to, but is not necessarily about a concept.
#' @param material (URL or Text or Product or URL or Text or Product type.) A material that something is made from, e.g. leather, wool, cotton, paper.
#' @param mainEntity (Thing type.) Indicates the primary entity described in some page or other CreativeWork.
#' @param locationCreated (Place type.) The location where the CreativeWork was created, which may not be the same as the location depicted in the CreativeWork.
#' @param license (URL or CreativeWork type.) A license document that applies to this content, typically indicated by URL.
#' @param learningResourceType (Text type.) The predominant type or kind characterizing the learning resource. For example, 'presentation', 'handout'.
#' @param keywords (Text type.) Keywords or tags used to describe this content. Multiple entries in a keywords list are typically delimited by commas.
#' @param isPartOf (CreativeWork type.) Indicates an item or CreativeWork that this item, or CreativeWork (in some sense), is part of.
#' @param isFamilyFriendly (Boolean type.) Indicates whether this content is family friendly.
#' @param isBasedOnUrl (URL or Product or CreativeWork type.) A resource that was used in the creation of this resource. This term can be repeated for multiple sources. For example, http://example.com/great-multiplication-intro.html.
#' @param isBasedOn (URL or Product or CreativeWork type.) A resource that was used in the creation of this resource. This term can be repeated for multiple sources. For example, http://example.com/great-multiplication-intro.html.
#' @param isAccessibleForFree (Boolean or Boolean or Boolean or Boolean type.) A flag to signal that the item, event, or place is accessible for free.
#' @param interactivityType (Text type.) The predominant mode of learning supported by the learning resource. Acceptable values are 'active', 'expositive', or 'mixed'.
#' @param interactionStatistic (InteractionCounter type.) The number of interactions for the CreativeWork using the WebSite or SoftwareApplication. The most specific child type of InteractionCounter should be used.
#' @param inLanguage (Text or Language or Text or Language or Text or Language or Text or Language type.) The language of the content or performance or used in an action. Please use one of the language codes from the [IETF BCP 47 standard](http://tools.ietf.org/html/bcp47). See also [[availableLanguage]].
#' @param headline (Text type.) Headline of the article.
#' @param hasPart (CreativeWork type.) Indicates an item or CreativeWork that is part of this item, or CreativeWork (in some sense).
#' @param genre (URL or Text or URL or Text or URL or Text type.) Genre of the creative work, broadcast channel or group.
#' @param funder (Person or Organization or Person or Organization or Person or Organization or Person or Organization type.) A person or organization that supports (sponsors) something through some kind of financial contribution.
#' @param fileFormat (URL or Text type.) Media type, typically MIME format (see [IANA site](http://www.iana.org/assignments/media-types/media-types.xhtml)) of the content e.g. application/zip of a SoftwareApplication binary. In cases where a CreativeWork has several media type representations, 'encoding' can be used to indicate each MediaObject alongside particular fileFormat information. Unregistered or niche file formats can be indicated instead via the most appropriate URL, e.g. defining Web page or a Wikipedia entry.
#' @param expires (Date type.) Date the content expires and is no longer useful or available. For example a [[VideoObject]] or [[NewsArticle]] whose availability or relevance is time-limited, or a [[ClaimReview]] fact check whose publisher wants to indicate that it may no longer be relevant (or helpful to highlight) after some date.
#' @param exampleOfWork (CreativeWork type.) A creative work that this work is an example/instance/realization/derivation of.
#' @param encodings (MediaObject type.) A media object that encodes this CreativeWork.
#' @param encodingFormat (URL or Text or URL or Text type.) Media type typically expressed using a MIME format (see [IANA site](http://www.iana.org/assignments/media-types/media-types.xhtml) and [MDN reference](https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/MIME_types)) e.g. application/zip for a SoftwareApplication binary, audio/mpeg for .mp3 etc.).In cases where a [[CreativeWork]] has several media type representations, [[encoding]] can be used to indicate each [[MediaObject]] alongside particular [[encodingFormat]] information.Unregistered or niche encoding and file formats can be indicated instead via the most appropriate URL, e.g. defining Web page or a Wikipedia/Wikidata entry.
#' @param encoding (MediaObject type.) A media object that encodes this CreativeWork. This property is a synonym for associatedMedia.
#' @param educationalUse (Text type.) The purpose of a work in the context of education; for example, 'assignment', 'group work'.
#' @param educationalAlignment (AlignmentObject type.) An alignment to an established educational framework.
#' @param editor (Person type.) Specifies the Person who edited the CreativeWork.
#' @param discussionUrl (URL type.) A link to the page containing the comments of the CreativeWork.
#' @param datePublished (Date type.) Date of first broadcast/publication.
#' @param dateModified (DateTime or Date or DateTime or Date type.) The date on which the CreativeWork was most recently modified or when the item's entry was modified within a DataFeed.
#' @param dateCreated (DateTime or Date or DateTime or Date type.) The date on which the CreativeWork was created or the item was added to a DataFeed.
#' @param creator (Person or Organization or Person or Organization type.) The creator/author of this CreativeWork. This is the same as the Author property for CreativeWork.
#' @param copyrightYear (Number type.) The year during which the claimed copyright for the CreativeWork was first asserted.
#' @param copyrightHolder (Person or Organization type.) The party holding the legal copyright to the CreativeWork.
#' @param contributor (Person or Organization type.) A secondary contributor to the CreativeWork or Event.
#' @param contentRating (Text type.) Official rating of a piece of content—for example,'MPAA PG-13'.
#' @param contentLocation (Place type.) The location depicted or described in the content. For example, the location in a photograph or painting.
#' @param commentCount (Integer type.) The number of comments this CreativeWork (e.g. Article, Question or Answer) has received. This is most applicable to works published in Web sites with commenting system; additional comments may exist elsewhere.
#' @param comment (Comment or Comment type.) Comments, typically from users.
#' @param citation (Text or CreativeWork type.) A citation or reference to another creative work, such as another publication, web page, scholarly article, etc.
#' @param character (Person type.) Fictional person connected with a creative work.
#' @param awards (Text type.) Awards won by or for this item.
#' @param award (Text type.) An award won by or for this item.
#' @param author (Person or Organization type.) The author of this content or rating. Please note that author is special in that HTML 5 provides a special mechanism for indicating authorship via the rel tag. That is equivalent to this and may be used interchangeably.
#' @param audio (AudioObject type.) An embedded audio object.
#' @param audience (Audience type.) An intended audience, i.e. a group for whom something was created.
#' @param associatedMedia (MediaObject type.) A media object that encodes this CreativeWork. This property is a synonym for encoding.
#' @param alternativeHeadline (Text type.) A secondary title of the CreativeWork.
#' @param aggregateRating (AggregateRating type.) The overall rating, based on a collection of reviews or ratings, of the item.
#' @param accountablePerson (Person type.) Specifies the Person that is legally accountable for the CreativeWork.
#' @param accessibilitySummary (Text type.) A human-readable summary of specific accessibility features or deficiencies, consistent with the other accessibility metadata but expressing subtleties such as "short descriptions are present but long descriptions will be needed for non-visual users" or "short descriptions are present and no long descriptions are needed."
#' @param accessibilityHazard (Text type.) A characteristic of the described resource that is physiologically dangerous to some users. Related to WCAG 2.0 guideline 2.3 ([WebSchemas wiki lists possible values](http://www.w3.org/wiki/WebSchemas/Accessibility)).
#' @param accessibilityFeature (Text type.) Content features of the resource, such as accessible media, alternatives and supported enhancements for accessibility ([WebSchemas wiki lists possible values](http://www.w3.org/wiki/WebSchemas/Accessibility)).
#' @param accessibilityControl (Text type.) Identifies input methods that are sufficient to fully control the described resource ([WebSchemas wiki lists possible values](http://www.w3.org/wiki/WebSchemas/Accessibility)).
#' @param accessibilityAPI (Text type.) Indicates that the resource is compatible with the referenced accessibility API ([WebSchemas wiki lists possible values](http://www.w3.org/wiki/WebSchemas/Accessibility)).
#' @param accessModeSufficient (Text type.) A list of single or combined accessModes that are sufficient to understand all the intellectual content of a resource. Expected values include: auditory, tactile, textual, visual.
#' @param accessMode (Text type.) The human sensory perceptual system or cognitive faculty through which a person may process or perceive information. Expected values include: auditory, tactile, textual, visual, colorDependent, chartOnVisual, chemOnVisual, diagramOnVisual, mathOnVisual, musicOnVisual, textOnVisual.
#' @param about (Thing or Thing or Thing type.) The subject matter of the content.
#' @param url (URL type.) URL of the item.
#' @param sameAs (URL type.) URL of a reference Web page that unambiguously indicates the item's identity. E.g. the URL of the item's Wikipedia page, Wikidata entry, or official website.
#' @param potentialAction (Action type.) Indicates a potential Action, which describes an idealized action in which this thing would play an 'object' role.
#' @param name (Text type.) The name of the item.
#' @param mainEntityOfPage (URL or CreativeWork type.) Indicates a page (or other CreativeWork) for which this thing is the main entity being described. See [background notes](/docs/datamodel.html#mainEntityBackground) for details.
#' @param image (URL or ImageObject type.) An image of the item. This can be a [[URL]] or a fully described [[ImageObject]].
#' @param identifier (URL or Text or PropertyValue type.) The identifier property represents any kind of identifier for any kind of [[Thing]], such as ISBNs, GTIN codes, UUIDs etc. Schema.org provides dedicated properties for representing many of these, either as textual strings or as URL (URI) links. See [background notes](/docs/datamodel.html#identifierBg) for more details.
#' @param disambiguatingDescription (Text type.) A sub property of description. A short description of the item used to disambiguate from other, similar items. Information from other properties (in particular, name) may be necessary for the description to be useful for disambiguation.
#' @param description (Text type.) A description of the item.
#' @param alternateName (Text type.) An alias for the item.
#' @param additionalType (URL type.) An additional type for the item, typically used for adding more specific types from external vocabularies in microdata syntax. This is a relationship between something and a class that the thing is in. In RDFa syntax, it is better to use the native RDFa syntax - the 'typeof' attribute - for multiple types. Schema.org tools may have only weaker understanding of extra types, in particular those defined externally.
#'
#' @return a list object corresponding to a schema:Code
#'
#' @export
Code <- function(id = NULL,
                 workExample = NULL,
                 video = NULL,
                 version = NULL,
                 typicalAgeRange = NULL,
                 translator = NULL,
                 timeRequired = NULL,
                 thumbnailUrl = NULL,
                 text = NULL,
                 temporalCoverage = NULL,
                 sponsor = NULL,
                 spatialCoverage = NULL,
                 sourceOrganization = NULL,
                 schemaVersion = NULL,
                 reviews = NULL,
                 review = NULL,
                 releasedEvent = NULL,
                 recordedAt = NULL,
                 publishingPrinciples = NULL,
                 publisher = NULL,
                 publication = NULL,
                 provider = NULL,
                 producer = NULL,
                 position = NULL,
                 offers = NULL,
                 mentions = NULL,
                 material = NULL,
                 mainEntity = NULL,
                 locationCreated = NULL,
                 license = NULL,
                 learningResourceType = NULL,
                 keywords = NULL,
                 isPartOf = NULL,
                 isFamilyFriendly = NULL,
                 isBasedOnUrl = NULL,
                 isBasedOn = NULL,
                 isAccessibleForFree = NULL,
                 interactivityType = NULL,
                 interactionStatistic = NULL,
                 inLanguage = NULL,
                 headline = NULL,
                 hasPart = NULL,
                 genre = NULL,
                 funder = NULL,
                 fileFormat = NULL,
                 expires = NULL,
                 exampleOfWork = NULL,
                 encodings = NULL,
                 encodingFormat = NULL,
                 encoding = NULL,
                 educationalUse = NULL,
                 educationalAlignment = NULL,
                 editor = NULL,
                 discussionUrl = NULL,
                 datePublished = NULL,
                 dateModified = NULL,
                 dateCreated = NULL,
                 creator = NULL,
                 copyrightYear = NULL,
                 copyrightHolder = NULL,
                 contributor = NULL,
                 contentRating = NULL,
                 contentLocation = NULL,
                 commentCount = NULL,
                 comment = NULL,
                 citation = NULL,
                 character = NULL,
                 awards = NULL,
                 award = NULL,
                 author = NULL,
                 audio = NULL,
                 audience = NULL,
                 associatedMedia = NULL,
                 alternativeHeadline = NULL,
                 aggregateRating = NULL,
                 accountablePerson = NULL,
                 accessibilitySummary = NULL,
                 accessibilityHazard = NULL,
                 accessibilityFeature = NULL,
                 accessibilityControl = NULL,
                 accessibilityAPI = NULL,
                 accessModeSufficient = NULL,
                 accessMode = NULL,
                 about = NULL,
                 url = NULL,
                 sameAs = NULL,
                 potentialAction = NULL,
                 name = NULL,
                 mainEntityOfPage = NULL,
                 image = NULL,
                 identifier = NULL,
                 disambiguatingDescription = NULL,
                 description = NULL,
                 alternateName = NULL,
                 additionalType = NULL) {
  # Build a schema.org "Code" node as a plain named list.
  #
  # Rather than re-enumerating every argument by hand, gather all formal
  # arguments except `id` by name with mget(); names(formals()) preserves
  # the declaration order, so the resulting list has exactly the same
  # element order as an explicit list(workExample = workExample, ...).
  property_values <- mget(names(formals())[-1])
  # Prepend the fixed type tag and the id, then drop every property the
  # caller left unset (NULL), so only supplied fields appear in the output.
  Filter(Negate(is.null),
         c(list(type = "Code", id = id), property_values))
}
| /R/Code.R | no_license | cboettig/schemar | R | false | false | 22,721 | r | #' Code
#'
#' Computer programming source code. Example: Full (compile ready) solutions, code snippet samples, scripts, templates.
#'
#'
#' @param id identifier for the object (URI)
#' @param workExample (CreativeWork type.) Example/instance/realization/derivation of the concept of this creative work. eg. The paperback edition, first edition, or eBook.
#' @param video (VideoObject type.) An embedded video object.
#' @param version (Text or Number type.) The version of the CreativeWork embodied by a specified resource.
#' @param typicalAgeRange (Text or Text type.) The typical expected age range, e.g. '7-9', '11-'.
#' @param translator (Person or Organization or Person or Organization type.) Organization or person who adapts a creative work to different languages, regional differences and technical requirements of a target market, or that translates during some event.
#' @param timeRequired (Duration type.) Approximate or typical time it takes to work with or through this learning resource for the typical intended target audience, e.g. 'P30M', 'P1H25M'.
#' @param thumbnailUrl (URL type.) A thumbnail image relevant to the Thing.
#' @param text (Text type.) The textual content of this CreativeWork.
#' @param temporalCoverage (URL or Text or DateTime type.) The temporalCoverage of a CreativeWork indicates the period that the content applies to, i.e. that it describes, either as a DateTime or as a textual string indicating a time period in [ISO 8601 time interval format](https://en.wikipedia.org/wiki/ISO_8601#Time_intervals). In the case of a Dataset it will typically indicate the relevant time period in a precise notation (e.g. for a 2011 census dataset, the year 2011 would be written "2011/2012"). Other forms of content e.g. ScholarlyArticle, Book, TVSeries or TVEpisode may indicate their temporalCoverage in broader terms - textually or via well-known URL. Written works such as books may sometimes have precise temporal coverage too, e.g. a work set in 1939 - 1945 can be indicated in ISO 8601 interval format format via "1939/1945".
#' @param sponsor (Person or Organization or Person or Organization or Person or Organization or Person or Organization type.) A person or organization that supports a thing through a pledge, promise, or financial contribution. e.g. a sponsor of a Medical Study or a corporate sponsor of an event.
#' @param spatialCoverage (Place type.) The spatialCoverage of a CreativeWork indicates the place(s) which are the focus of the content. It is a subproperty of contentLocation intended primarily for more technical and detailed materials. For example with a Dataset, it indicates areas that the dataset describes: a dataset of New York weather would have spatialCoverage which was the place: the state of New York.
#' @param sourceOrganization (Organization type.) The Organization on whose behalf the creator was working.
#' @param schemaVersion (URL or Text type.) Indicates (by URL or string) a particular version of a schema used in some CreativeWork. For example, a document could declare a schemaVersion using an URL such as http://schema.org/version/2.0/ if precise indication of schema version was required by some application.
#' @param reviews (Review or Review or Review or Review or Review type.) Review of the item.
#' @param review (Review or Review or Review or Review or Review or Review or Review or Review type.) A review of the item.
#' @param releasedEvent (PublicationEvent type.) The place and time the release was issued, expressed as a PublicationEvent.
#' @param recordedAt (Event type.) The Event where the CreativeWork was recorded. The CreativeWork may capture all or part of the event.
#' @param publishingPrinciples (URL or CreativeWork or URL or CreativeWork or URL or CreativeWork type.) The publishingPrinciples property indicates (typically via [[URL]]) a document describing the editorial principles of an [[Organization]] (or individual e.g. a [[Person]] writing a blog) that relate to their activities as a publisher, e.g. ethics or diversity policies. When applied to a [[CreativeWork]] (e.g. [[NewsArticle]]) the principles are those of the party primarily responsible for the creation of the [[CreativeWork]].While such policies are most typically expressed in natural language, sometimes related information (e.g. indicating a [[funder]]) can be expressed using schema.org terminology.
#' @param publisher (Person or Organization type.) The publisher of the creative work.
#' @param publication (PublicationEvent type.) A publication event associated with the item.
#' @param provider (Person or Organization or Person or Organization or Person or Organization or Person or Organization or Person or Organization or Person or Organization type.) The service provider, service operator, or service performer; the goods producer. Another party (a seller) may offer those services or goods on behalf of the provider. A provider may also serve as the seller.
#' @param producer (Person or Organization type.) The person or organization who produced the work (e.g. music album, movie, tv/radio series etc.).
#' @param position (Text or Integer or Text or Integer type.) The position of an item in a series or sequence of items.
#' @param offers (Offer or Offer or Offer or Offer or Offer or Offer or Offer type.) An offer to provide this item—for example, an offer to sell a product, rent the DVD of a movie, perform a service, or give away tickets to an event.
#' @param mentions (Thing type.) Indicates that the CreativeWork contains a reference to, but is not necessarily about a concept.
#' @param material (URL or Text or Product or URL or Text or Product type.) A material that something is made from, e.g. leather, wool, cotton, paper.
#' @param mainEntity (Thing type.) Indicates the primary entity described in some page or other CreativeWork.
#' @param locationCreated (Place type.) The location where the CreativeWork was created, which may not be the same as the location depicted in the CreativeWork.
#' @param license (URL or CreativeWork type.) A license document that applies to this content, typically indicated by URL.
#' @param learningResourceType (Text type.) The predominant type or kind characterizing the learning resource. For example, 'presentation', 'handout'.
#' @param keywords (Text type.) Keywords or tags used to describe this content. Multiple entries in a keywords list are typically delimited by commas.
#' @param isPartOf (CreativeWork type.) Indicates an item or CreativeWork that this item, or CreativeWork (in some sense), is part of.
#' @param isFamilyFriendly (Boolean type.) Indicates whether this content is family friendly.
#' @param isBasedOnUrl (URL or Product or CreativeWork type.) A resource that was used in the creation of this resource. This term can be repeated for multiple sources. For example, http://example.com/great-multiplication-intro.html.
#' @param isBasedOn (URL or Product or CreativeWork type.) A resource that was used in the creation of this resource. This term can be repeated for multiple sources. For example, http://example.com/great-multiplication-intro.html.
#' @param isAccessibleForFree (Boolean or Boolean or Boolean or Boolean type.) A flag to signal that the item, event, or place is accessible for free.
#' @param interactivityType (Text type.) The predominant mode of learning supported by the learning resource. Acceptable values are 'active', 'expositive', or 'mixed'.
#' @param interactionStatistic (InteractionCounter type.) The number of interactions for the CreativeWork using the WebSite or SoftwareApplication. The most specific child type of InteractionCounter should be used.
#' @param inLanguage (Text or Language or Text or Language or Text or Language or Text or Language type.) The language of the content or performance or used in an action. Please use one of the language codes from the [IETF BCP 47 standard](http://tools.ietf.org/html/bcp47). See also [[availableLanguage]].
#' @param headline (Text type.) Headline of the article.
#' @param hasPart (CreativeWork type.) Indicates an item or CreativeWork that is part of this item, or CreativeWork (in some sense).
#' @param genre (URL or Text or URL or Text or URL or Text type.) Genre of the creative work, broadcast channel or group.
#' @param funder (Person or Organization or Person or Organization or Person or Organization or Person or Organization type.) A person or organization that supports (sponsors) something through some kind of financial contribution.
#' @param fileFormat (URL or Text type.) Media type, typically MIME format (see [IANA site](http://www.iana.org/assignments/media-types/media-types.xhtml)) of the content e.g. application/zip of a SoftwareApplication binary. In cases where a CreativeWork has several media type representations, 'encoding' can be used to indicate each MediaObject alongside particular fileFormat information. Unregistered or niche file formats can be indicated instead via the most appropriate URL, e.g. defining Web page or a Wikipedia entry.
#' @param expires (Date type.) Date the content expires and is no longer useful or available. For example a [[VideoObject]] or [[NewsArticle]] whose availability or relevance is time-limited, or a [[ClaimReview]] fact check whose publisher wants to indicate that it may no longer be relevant (or helpful to highlight) after some date.
#' @param exampleOfWork (CreativeWork type.) A creative work that this work is an example/instance/realization/derivation of.
#' @param encodings (MediaObject type.) A media object that encodes this CreativeWork.
#' @param encodingFormat (URL or Text or URL or Text type.) Media type typically expressed using a MIME format (see [IANA site](http://www.iana.org/assignments/media-types/media-types.xhtml) and [MDN reference](https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/MIME_types)) e.g. application/zip for a SoftwareApplication binary, audio/mpeg for .mp3 etc.).In cases where a [[CreativeWork]] has several media type representations, [[encoding]] can be used to indicate each [[MediaObject]] alongside particular [[encodingFormat]] information.Unregistered or niche encoding and file formats can be indicated instead via the most appropriate URL, e.g. defining Web page or a Wikipedia/Wikidata entry.
#' @param encoding (MediaObject type.) A media object that encodes this CreativeWork. This property is a synonym for associatedMedia.
#' @param educationalUse (Text type.) The purpose of a work in the context of education; for example, 'assignment', 'group work'.
#' @param educationalAlignment (AlignmentObject type.) An alignment to an established educational framework.
#' @param editor (Person type.) Specifies the Person who edited the CreativeWork.
#' @param discussionUrl (URL type.) A link to the page containing the comments of the CreativeWork.
#' @param datePublished (Date type.) Date of first broadcast/publication.
#' @param dateModified (DateTime or Date or DateTime or Date type.) The date on which the CreativeWork was most recently modified or when the item's entry was modified within a DataFeed.
#' @param dateCreated (DateTime or Date or DateTime or Date type.) The date on which the CreativeWork was created or the item was added to a DataFeed.
#' @param creator (Person or Organization or Person or Organization type.) The creator/author of this CreativeWork. This is the same as the Author property for CreativeWork.
#' @param copyrightYear (Number type.) The year during which the claimed copyright for the CreativeWork was first asserted.
#' @param copyrightHolder (Person or Organization type.) The party holding the legal copyright to the CreativeWork.
#' @param contributor (Person or Organization or Person or Organization type.) A secondary contributor to the CreativeWork or Event.
#' @param contentRating (Text type.) Official rating of a piece of content—for example,'MPAA PG-13'.
#' @param contentLocation (Place type.) The location depicted or described in the content. For example, the location in a photograph or painting.
#' @param commentCount (Integer type.) The number of comments this CreativeWork (e.g. Article, Question or Answer) has received. This is most applicable to works published in Web sites with commenting system; additional comments may exist elsewhere.
#' @param comment (Comment or Comment type.) Comments, typically from users.
#' @param citation (Text or CreativeWork type.) A citation or reference to another creative work, such as another publication, web page, scholarly article, etc.
#' @param character (Person type.) Fictional person connected with a creative work.
#' @param awards (Text or Text or Text or Text type.) Awards won by or for this item.
#' @param award (Text or Text or Text or Text or Text type.) An award won by or for this item.
#' @param author (Person or Organization or Person or Organization type.) The author of this content or rating. Please note that author is special in that HTML 5 provides a special mechanism for indicating authorship via the rel tag. That is equivalent to this and may be used interchangeably.
#' @param audio (AudioObject type.) An embedded audio object.
#' @param audience (Audience or Audience or Audience or Audience or Audience or Audience type.) An intended audience, i.e. a group for whom something was created.
#' @param associatedMedia (MediaObject type.) A media object that encodes this CreativeWork. This property is a synonym for encoding.
#' @param alternativeHeadline (Text type.) A secondary title of the CreativeWork.
#' @param aggregateRating (AggregateRating or AggregateRating or AggregateRating or AggregateRating or AggregateRating or AggregateRating or AggregateRating or AggregateRating type.) The overall rating, based on a collection of reviews or ratings, of the item.
#' @param accountablePerson (Person type.) Specifies the Person that is legally accountable for the CreativeWork.
#' @param accessibilitySummary (Text type.) A human-readable summary of specific accessibility features or deficiencies, consistent with the other accessibility metadata but expressing subtleties such as "short descriptions are present but long descriptions will be needed for non-visual users" or "short descriptions are present and no long descriptions are needed."
#' @param accessibilityHazard (Text type.) A characteristic of the described resource that is physiologically dangerous to some users. Related to WCAG 2.0 guideline 2.3 ([WebSchemas wiki lists possible values](http://www.w3.org/wiki/WebSchemas/Accessibility)).
#' @param accessibilityFeature (Text type.) Content features of the resource, such as accessible media, alternatives and supported enhancements for accessibility ([WebSchemas wiki lists possible values](http://www.w3.org/wiki/WebSchemas/Accessibility)).
#' @param accessibilityControl (Text type.) Identifies input methods that are sufficient to fully control the described resource ([WebSchemas wiki lists possible values](http://www.w3.org/wiki/WebSchemas/Accessibility)).
#' @param accessibilityAPI (Text type.) Indicates that the resource is compatible with the referenced accessibility API ([WebSchemas wiki lists possible values](http://www.w3.org/wiki/WebSchemas/Accessibility)).
#' @param accessModeSufficient (Text type.) A list of single or combined accessModes that are sufficient to understand all the intellectual content of a resource. Expected values include: auditory, tactile, textual, visual.
#' @param accessMode (Text type.) The human sensory perceptual system or cognitive faculty through which a person may process or perceive information. Expected values include: auditory, tactile, textual, visual, colorDependent, chartOnVisual, chemOnVisual, diagramOnVisual, mathOnVisual, musicOnVisual, textOnVisual.
#' @param about (Thing or Thing or Thing type.) The subject matter of the content.
#' @param url (URL type.) URL of the item.
#' @param sameAs (URL type.) URL of a reference Web page that unambiguously indicates the item's identity. E.g. the URL of the item's Wikipedia page, Wikidata entry, or official website.
#' @param potentialAction (Action type.) Indicates a potential Action, which describes an idealized action in which this thing would play an 'object' role.
#' @param name (Text type.) The name of the item.
#' @param mainEntityOfPage (URL or CreativeWork type.) Indicates a page (or other CreativeWork) for which this thing is the main entity being described. See [background notes](/docs/datamodel.html#mainEntityBackground) for details.
#' @param image (URL or ImageObject type.) An image of the item. This can be a [[URL]] or a fully described [[ImageObject]].
#' @param identifier (URL or Text or PropertyValue type.) The identifier property represents any kind of identifier for any kind of [[Thing]], such as ISBNs, GTIN codes, UUIDs etc. Schema.org provides dedicated properties for representing many of these, either as textual strings or as URL (URI) links. See [background notes](/docs/datamodel.html#identifierBg) for more details.
#' @param disambiguatingDescription (Text type.) A sub property of description. A short description of the item used to disambiguate from other, similar items. Information from other properties (in particular, name) may be necessary for the description to be useful for disambiguation.
#' @param description (Text type.) A description of the item.
#' @param alternateName (Text type.) An alias for the item.
#' @param additionalType (URL type.) An additional type for the item, typically used for adding more specific types from external vocabularies in microdata syntax. This is a relationship between something and a class that the thing is in. In RDFa syntax, it is better to use the native RDFa syntax - the 'typeof' attribute - for multiple types. Schema.org tools may have only weaker understanding of extra types, in particular those defined externally.
#'
#' @return a list object corresponding to a schema:Code
#'
#' @export
Code <- function(id = NULL, workExample = NULL, video = NULL, version = NULL,
                 typicalAgeRange = NULL, translator = NULL, timeRequired = NULL,
                 thumbnailUrl = NULL, text = NULL, temporalCoverage = NULL,
                 sponsor = NULL, spatialCoverage = NULL,
                 sourceOrganization = NULL, schemaVersion = NULL,
                 reviews = NULL, review = NULL, releasedEvent = NULL,
                 recordedAt = NULL, publishingPrinciples = NULL,
                 publisher = NULL, publication = NULL, provider = NULL,
                 producer = NULL, position = NULL, offers = NULL,
                 mentions = NULL, material = NULL, mainEntity = NULL,
                 locationCreated = NULL, license = NULL,
                 learningResourceType = NULL, keywords = NULL,
                 isPartOf = NULL, isFamilyFriendly = NULL, isBasedOnUrl = NULL,
                 isBasedOn = NULL, isAccessibleForFree = NULL,
                 interactivityType = NULL, interactionStatistic = NULL,
                 inLanguage = NULL, headline = NULL, hasPart = NULL,
                 genre = NULL, funder = NULL, fileFormat = NULL,
                 expires = NULL, exampleOfWork = NULL, encodings = NULL,
                 encodingFormat = NULL, encoding = NULL,
                 educationalUse = NULL, educationalAlignment = NULL,
                 editor = NULL, discussionUrl = NULL, datePublished = NULL,
                 dateModified = NULL, dateCreated = NULL, creator = NULL,
                 copyrightYear = NULL, copyrightHolder = NULL,
                 contributor = NULL, contentRating = NULL,
                 contentLocation = NULL, commentCount = NULL, comment = NULL,
                 citation = NULL, character = NULL, awards = NULL,
                 award = NULL, author = NULL, audio = NULL, audience = NULL,
                 associatedMedia = NULL, alternativeHeadline = NULL,
                 aggregateRating = NULL, accountablePerson = NULL,
                 accessibilitySummary = NULL, accessibilityHazard = NULL,
                 accessibilityFeature = NULL, accessibilityControl = NULL,
                 accessibilityAPI = NULL, accessModeSufficient = NULL,
                 accessMode = NULL, about = NULL, url = NULL, sameAs = NULL,
                 potentialAction = NULL, name = NULL,
                 mainEntityOfPage = NULL, image = NULL, identifier = NULL,
                 disambiguatingDescription = NULL, description = NULL,
                 alternateName = NULL, additionalType = NULL) {
  # Collect every formal argument by name, in declaration order, straight
  # from this call's evaluation frame. This replaces the original 90+-line
  # hand-written `list(id = id, workExample = workExample, ...)` and stays
  # in sync with the signature automatically.
  props <- mget(names(formals(sys.function())), envir = environment())
  # Prepend the schema.org type tag, then drop every property that is
  # still at its NULL default so only explicitly supplied fields remain.
  Filter(Negate(is.null), c(list(type = "Code"), props))
}
|
# Overlay phyloP conservation density and cumulative-frequency (ECDF)
# curves for a set of sample prefixes and write PDF + PNG versions of
# both plots. The last command-line argument is a comma-separated list of
# prefixes; for each prefix the first column of <prefix>.phyloP.txt is
# read as the vector of phyloP scores.
vars.tmp <- commandArgs(T)
vars <- vars.tmp[length(vars.tmp)]
split.vars <- unlist(strsplit(vars, ','))
len <- length(split.vars)
output <- split.vars[len]
dataall <- lapply(seq_len(len), function(x) {
  data <- read.table(paste(split.vars[x], '.phyloP.txt', sep = ''), head = T)
  as.numeric(data[, 1])
})
names(dataall) <- split.vars
#color=rainbow(len)
# Fixed palette: supports at most 9 samples before colours run out.
color <- c("black","blue","red","purple","brown","orange","green","yellow","gray")
#save.image('test.Rdata')
# Shared y-axis ceiling: the tallest density peak across all samples
# (replaces growing the vector with c() in a loop).
denall <- vapply(dataall, function(d) max(density(d)$y), numeric(1))
ymax <- max(denall)
#print(ymax)

# Draw the density overlay on the currently open graphics device.
plot_density <- function() {
  plot(density(dataall[[1]]), col = color[1], ylab = 'density',
       xlab = 'PhyloP Score', main = '', xaxs = 'i', ylim = c(0, ymax))
  # seq_len(len)[-1] is empty when there is only one sample; the original
  # `2:len` yielded c(2, 1) in that case and crashed on dataall[[2]].
  for (i in seq_len(len)[-1]) {
    lines(density(dataall[[i]]), col = color[i])
  }
  legend("right", legend = split.vars, lty = 1, col = color)
}

# Draw the cumulative-frequency (ECDF) overlay on the current device.
plot_ecdf <- function() {
  plot(ecdf(dataall[[1]]), verticals = T, do.points = F, col = color[1],
       xlab = 'PhyloP Score', ylab = 'cumulative frequency', main = '')
  for (i in seq_len(len)[-1]) {
    lines(ecdf(dataall[[i]]), col = color[i], do.points = F)
  }
  legend("right", legend = split.vars, lty = 1, col = color)
}

# Render each plot to both a PDF and a PNG device (previously the same
# plotting code was written out four times).
pdf("conservation.density.plot.pdf")
plot_density()
dev.off()
pdf("conservation.cumulative.plot.pdf")
plot_ecdf()
dev.off()
png("conservation.density.plot.png")
plot_density()
dev.off()
png("conservation.cumulative.plot.png")
plot_ecdf()
dev.off()
| /bin/lncRNA_bin/R_plot/conservation/phyloP_density.R | permissive | IM-han/lncRNA | R | false | false | 1,798 | r | vars.tmp <- commandArgs(T)
# (Duplicate copy of the phyloP density/cumulative plotting script;
# `vars.tmp` holds commandArgs(T), captured on the preceding line.)
# The last command-line argument is a comma-separated list of sample
# prefixes; the final element also serves as the output name.
vars <- vars.tmp[length(vars.tmp)]
split.vars <- unlist(strsplit(vars,','))
len=length(split.vars)
output=split.vars[len]
# Read the first column of <prefix>.phyloP.txt for every prefix as the
# vector of phyloP conservation scores.
dataall=lapply(1:(len),function(x) { data =read.table(paste(split.vars[x],'.phyloP.txt',sep=''),head=T)
data=as.numeric(data[,1])} )
names(dataall)= split.vars
#color=rainbow(len)
# Fixed palette: only supports up to 9 samples before colours run out.
color=c("black","blue","red","purple","brown","orange","green","yellow","gray")
#save.image('test.Rdata')
# Shared y-axis ceiling: the tallest density peak across all samples.
denall=as.numeric()
for(i in 1:len) {
den=density(dataall[[i]])
denall = c(denall,max(den$y))
}
ymax=max(denall)
#print(ymax)
# NOTE(review): every `2:len` loop below errors when len == 1, because
# 2:1 yields c(2, 1) and dataall[[2]] does not exist.
# Density overlay, PDF device.
pdf("conservation.density.plot.pdf")
plot(density(dataall[[1]]),col=color[1],ylab='density',xlab='PhyloP Score',main='',xaxs='i',ylim=c(0,ymax))
for( i in 2:len) {
lines(density(dataall[[i]]),col= color[i])
}
legend("right", legend = split.vars,lty = 1,col=color)
dev.off()
# Cumulative-frequency (ECDF) overlay, PDF device.
pdf("conservation.cumulative.plot.pdf")
plot(ecdf(dataall[[1]]),verticals = T, do.points = F,col=color[1],xlab='PhyloP Score',ylab='cumulative frequency',main='')
for( i in 2:len) {
lines(ecdf(dataall[[i]]),col= color[i],do.points = F)
}
legend("right", legend = split.vars,lty = 1,col=color)
dev.off()
# Same two plots rendered again to PNG devices.
png("conservation.density.plot.png")
plot(density(dataall[[1]]),col=color[1],ylab='density',xlab='PhyloP Score',main='',xaxs='i',ylim=c(0,ymax))
for( i in 2:len) {
lines(density(dataall[[i]]),col= color[i])
}
legend("right", legend = split.vars,lty = 1,col=color)
dev.off()
png("conservation.cumulative.plot.png")
plot(ecdf(dataall[[1]]),verticals = T, do.points = F,col=color[1],xlab='PhyloP Score',ylab='cumulative frequency',main='')
for( i in 2:len) {
lines(ecdf(dataall[[i]]),col= color[i],do.points = F)
}
legend("right", legend = split.vars,lty = 1,col=color)
dev.off()
|
# Train a MOFA model on endoderm RNA expression data. The trial number
# (--trial) seeds the run so several random restarts can be compared.
suppressMessages(library(MOFA))
suppressMessages(library(data.table))
suppressMessages(library(purrr))
suppressMessages(library(ggplot2))
suppressMessages(library(scater))
suppressMessages(library(reticulate))
suppressMessages(library(argparse))
# print(py_config())
# print(.libPaths())

# Define arguments
p <- ArgumentParser(description='')
p$add_argument('-i','--trial', type="integer", help='Trial number')
args <- p$parse_args(commandArgs(TRUE))
if (is.null(args$trial)) args$trial <- 1

# Resolve machine-specific paths (local laptop vs. cluster) once, instead
# of repeating the hostname test around every sourced file.
on_laptop <- grepl("ricard", Sys.info()['nodename'])
script_dir <- if (on_laptop) {
  "/Users/ricard/gastrulation/metaccrna/mofa/primitive_streak/endoderm"
} else {
  "/homes/ricard/gastrulation/metaccrna/mofa/primitive_streak/endoderm"
}
python_bin <- if (on_laptop) {
  "/Users/ricard/anaconda3/bin/python"
} else {
  "/nfs/software/stegle/users/ricard/anaconda/bin/python"
}
# Load settings (defines `io`, used for the output path below) and point
# reticulate at the matching Python interpreter, then load the data
# (defines `rna_matrix`).
source(file.path(script_dir, "load_settings.R"))
use_python(python_bin)
source(file.path(script_dir, "load_data.R"))

# Create MOFAobject from the single RNA view.
all_matrix_list <- c(rna=list(rna_matrix))
MOFAobject <- createMOFAobject(all_matrix_list)
# Data processing options
DataOptions <- getDefaultDataOptions()
# Model options
ModelOptions <- getDefaultModelOptions(MOFAobject)
ModelOptions$numFactors <- 5
# Training options; the seed is the trial number so each trial is a
# reproducible independent restart.
TrainOptions <- getDefaultTrainOptions()
TrainOptions$maxiter <- 5000
TrainOptions$tolerance <- 0.25
TrainOptions$DropFactorThreshold <- 0
TrainOptions$seed <- args$trial

# Prepare MOFAobject for training and run; the fitted model is written to
# <io$outdir>/hdf5/model_endoderm.hdf5.
MOFAmodel <- prepareMOFA(MOFAobject,
  DataOptions = DataOptions,
  ModelOptions = ModelOptions,
  TrainOptions = TrainOptions
)
outfile <- sprintf("%s/hdf5/model_endoderm.hdf5", io$outdir)
model <- runMOFA(MOFAmodel, outfile)
| /metaccrna/mesendoderm_commitment/endoderm/run.R | no_license | BioBuild/scnmt_gastrulation | R | false | false | 1,885 | r | suppressMessages(library(MOFA))
# (Duplicate copy of the MOFA endoderm training script; library(MOFA) is
# attached on the preceding line.)
suppressMessages(library(data.table))
suppressMessages(library(purrr))
suppressMessages(library(ggplot2))
suppressMessages(library(scater))
suppressMessages(library(reticulate))
suppressMessages(library(argparse))
# print(py_config())
# print(.libPaths())
# Define arguments: --trial seeds the run (defaults to 1).
p <- ArgumentParser(description='')
p$add_argument('-i','--trial', type="integer", help='Trial number')
args <- p$parse_args(commandArgs(TRUE))
if (is.null(args$trial)) args$trial <- 1
# Load settings
# The hostname test distinguishes a local laptop from the cluster; the
# sourced file defines `io` (used for the output path below).
if (grepl("ricard",Sys.info()['nodename'])) {
source("/Users/ricard/gastrulation/metaccrna/mofa/primitive_streak/endoderm/load_settings.R")
use_python("/Users/ricard/anaconda3/bin/python")
} else {
source("/homes/ricard/gastrulation/metaccrna/mofa/primitive_streak/endoderm/load_settings.R")
use_python("/nfs/software/stegle/users/ricard/anaconda/bin/python")
}
# Load data
# The sourced file defines `rna_matrix`.
if (grepl("ricard",Sys.info()['nodename'])) {
source("/Users/ricard/gastrulation/metaccrna/mofa/primitive_streak/endoderm/load_data.R")
} else {
source("/homes/ricard/gastrulation/metaccrna/mofa/primitive_streak/endoderm/load_data.R")
}
# Create MOFAobject from the single RNA view.
all_matrix_list <- c(rna=list(rna_matrix))
MOFAobject <- createMOFAobject(all_matrix_list)
# Data processing options
DataOptions <- getDefaultDataOptions()
# Model options
ModelOptions <- getDefaultModelOptions(MOFAobject)
ModelOptions$numFactors <- 5
# Training options; the seed is the trial number so each trial is a
# reproducible independent restart.
TrainOptions <- getDefaultTrainOptions()
TrainOptions$maxiter <- 5000
TrainOptions$tolerance <- 0.25
TrainOptions$DropFactorThreshold <- 0
TrainOptions$seed <- args$trial
# Prepare MOFAobject for training
MOFAmodel <- prepareMOFA(MOFAobject,
DataOptions = DataOptions,
ModelOptions = ModelOptions,
TrainOptions = TrainOptions
)
# Train the model; output is written to <io$outdir>/hdf5/model_endoderm.hdf5.
outfile <- sprintf("%s/hdf5/model_endoderm.hdf5",io$outdir)
model <- runMOFA(MOFAmodel, outfile)
|
# Exploratory regression on the BlogFeedback data set (predictors in
# columns 1:280, response in column 281): OLS, ridge and lasso fits with
# test-set error estimates.
library(hdi)
library(glmnet)

# Root-mean-squared error helper. The original script called rmse()
# without defining it anywhere, which fails unless a package providing it
# (e.g. Metrics) happens to be attached.
rmse <- function(pred, obs) sqrt(mean((pred - obs)^2))

setwd("C:/Users/Farees Patel/Downloads")
data <- read.csv("blogData_train.csv", header = FALSE)
#try<- lasso.cv(as.matrix(data[,1:280]), data[,281],nfolds = 10)
test <- read.csv("blogData_test-2012.03.31.01_00.csv", header = FALSE)

# 10-fold cross-validated ridge (alpha = 0) and a fixed-lambda lasso fit.
glmnet1 <- cv.glmnet(as.matrix(data[, 1:280]), data[, 281],
                     type.measure = 'mse', nfolds = 10, alpha = 0)
fit <- glmnet(as.matrix(data[, 1:280]), data[, 281], alpha = 1, lambda = 100)
# Use the generic predict() rather than calling the unexported S3 method
# predict.cv.glmnet() directly. The second assignment deliberately
# overwrites the ridge predictions with the lasso ones (kept from the
# original flow).
pred <- predict(glmnet1, as.matrix(test[, 1:280]))
pred <- predict(fit, as.matrix(test[, 1:280]))
sum((pred - test[, 281])^2)^0.5

cv.ridge <- cv.glmnet(as.matrix(data[, 1:280]), data[, 281], alpha = 0)
plot(cv.ridge)

# Baseline ordinary least squares on all 280 predictors.
fit_lm <- lm(V281 ~ ., data = data)
pred_lm <- predict(fit_lm, test[, 1:280])
rmse(pred_lm, test[, 281])

# Ridge and lasso over their full lambda paths.
fit_ridge <- glmnet(as.matrix(data[, 1:280]), data[, 281], alpha = 0)
pred_ridge <- predict(fit_ridge, as.matrix(test[, 1:280]))
rmse(pred_ridge, test[, 281])
fit_lasso <- glmnet(as.matrix(data[, 1:280]), data[, 281], alpha = 1)
# Fixed typo: the original called predct(), which errors at runtime.
pred_lasso <- predict(fit_lasso, as.matrix(test[, 1:280]))
rmse(pred_lasso, test[, 281])
sum((pred - test[, 281])^2)^0.5

# Per-lambda test RMSE along the ridge path; named rmse_path so it does
# not shadow the rmse() helper defined above.
rmse_path <- sqrt(apply((pred_ridge - test[, 281])^2, 2, mean))
plot(log(fit_ridge$lambda), rmse_path, type = "b", xlab = "Log(lambda)")
lam.best <- fit_ridge$lambda[order(rmse_path)[1]]
lam.best
coef(fit_ridge, s = lam.best)
| /Regression Analysis 2.R | no_license | fareespatel/BlogFeedback-Regression-Analysis | R | false | false | 1,315 | r | library(hdi)
# (Duplicate copy of the BlogFeedback regression script; library(hdi) is
# attached on the preceding line. Predictors are columns 1:280 of the
# CSV, the response is column 281.)
library(glmnet)
setwd("C:/Users/Farees Patel/Downloads")
data<- read.csv("blogData_train.csv", header = FALSE)
#try<- lasso.cv(as.matrix(data[,1:280]), data[,281],nfolds = 10)
test<- read.csv("blogData_test-2012.03.31.01_00.csv", header=FALSE)
# 10-fold cross-validated ridge (alpha = 0) and a fixed-lambda lasso fit.
glmnet1<-cv.glmnet(as.matrix(data[,1:280]), data[,281],type.measure='mse',nfolds=10,alpha=0)
fit<- glmnet(as.matrix(data[,1:280]), data[,281], alpha = 1, lambda = 100 )
# NOTE(review): predict.cv.glmnet is an unexported S3 method; prefer the
# generic predict(). The second assignment overwrites the first.
pred<- predict.cv.glmnet(glmnet1,as.matrix(test[,1:280]))
pred<- predict(fit,as.matrix(test[,1:280]))
sum((pred-test[,281])^2)^0.5
cv.ridge=cv.glmnet(as.matrix(data[,1:280]), data[,281],alpha=0)
plot(cv.ridge)
# Baseline ordinary least squares on all 280 predictors.
fit_lm<- lm(V281~., data=data)
pred_lm<- predict(fit_lm,test[,1:280])
# NOTE(review): rmse() is never defined in this script; these calls fail
# unless a package providing rmse (e.g. Metrics) is attached.
rmse(pred_lm, test[,281])
fit_ridge<- glmnet(as.matrix(data[,1:280]), data[,281],alpha=0)
pred_ridge<- predict(fit_ridge,as.matrix(test[,1:280]))
rmse(pred_ridge, test[,281])
fit_lasso<- glmnet(as.matrix(data[,1:280]), data[,281],alpha=1)
# NOTE(review): 'predct' is a typo for 'predict' — this line errors at runtime.
pred_lasso<- predct(fit_lasso,as.matrix(test[,1:280]))
rmse(pred_lasso, test[,281])
sum((pred-test[,281])^2)^0.5
# Per-lambda test RMSE along the ridge path (reassigns the name `rmse`
# to a numeric vector).
rmse= sqrt(apply((pred_ridge-test[,281])^2,2,mean))
plot(log(fit_ridge$lambda),rmse,type="b",xlab="Log(lambda)")
lam.best=fit_ridge$lambda[order(rmse)[1]]
lam.best
coef(fit_ridge,s=lam.best)
|
#====================================================================
# SETUP
#====================================================================
#INFO
#account required https://urs.earthdata.nasa.gov/profile
#DEPENDENCY
require(raster)
#SOURCE
#====================================================================
# PARAMETERS/ARGS
#====================================================================
# Command-line arguments:
#   1 = working directory, 2 = DEM download/cache directory,
#   3..6 = bounding box as w, s, e, n (decimal degrees).
args <- commandArgs(trailingOnly = TRUE)
wd <- args[1]
demDir <- args[2]
#runtype=args[2]
#box=as.numeric(unlist(strsplit( args[3],",")))
bbox <- c(as.numeric(args[3]), as.numeric(args[4]),
          as.numeric(args[5]), as.numeric(args[6])) # w,s,e,n
#grid=args[3]
#points input
#lon=c(85.52 ,85.68,84.2)
#lat=c(28.1 ,28.3, 27.8)
#====================================================================
# PARAMETERS FIXED
#====================================================================
# NASA Earthdata credentials (account: https://urs.earthdata.nasa.gov/profile).
# ~/.netrc can be created with MODIS::lpdaacLogin(). Read the file once
# instead of three times; each line is "<key> <value>", so the second
# whitespace-separated token holds the value.
netrc <- readLines("~/.netrc")
netrc_value <- function(line) unlist(strsplit(line, " "))[2]
SERVICE <- netrc_value(netrc[[1]])
print(paste0('using credentials for: ', SERVICE))
USER <- netrc_value(netrc[[2]])
PWD <- netrc_value(netrc[[3]])
#bbox=(e,s,w,n)
#bbox=c(85.1, 27.7, 85.8, 27.8)
#compute_svf<-FALSE
#********************** SCRIPT BEGIN *******************************
# Ensure the predictors output directory exists, then work inside the
# DEM cache directory for all downloads.
dir.create(paste0(wd, '/predictors'), showWarnings = FALSE)
setwd(demDir)
#====================================================================
# DEM retrieval based on set of points:
#====================================================================
# if (runtype == "points"){
# setwd(demDir)
# df=data.frame(lon,lat)
# #find unique llcorner
# df2=unique(floor(df))
# #clean up
# system("rm SRTMDAT*")
# system("rm *.hgt")
# for (i in 1:(dim(df2)[1])){
# if (sign(df2$lat[i])==-1){LATVAL<-"S"}
# if (sign(df2$lat[i])==1){LATVAL<-"N"}
# if (sign(df2$lon[i])==-1){LONVAL<-"W"}
# if (sign(df2$lon[i])==1){LONVAL<-"E"}
# lon_pretty=formatC(df2$lon[i],width=3,flag="0")
# #get tile
# filetoget=paste0(LATVAL,df2$lat[i],LONVAL,lon_pretty,".SRTMGL1.hgt.zip")
# filetogetUNZIP=paste0(LATVAL,df2$lat[i],LONVAL,lon_pretty,".hgt")
# if (file.exists(filetoget)){ #dont download again
# print(paste0(filetoget, " exists"))
# system(paste0("unzip ", filetoget))
# system(paste0("gdal_translate -q -co TILED=YES -co COMPRESS=DEFLATE -co ZLEVEL=9 -co PREDICTOR=2 ", filetogetUNZIP, " SRTMDAT",i,".tif"))
# } else {
# system(paste0("wget --user ", USER , " --password " ,PWD, " http://e4ftl01.cr.usgs.gov//MODV6_Dal_D/SRTM/SRTMGL1.003/2000.02.11/",filetoget))
# # extract
# system(paste0("unzip ", filetoget))
# system(paste0("gdal_translate -q -co TILED=YES -co COMPRESS=DEFLATE -co ZLEVEL=9 -co PREDICTOR=2 ", filetogetUNZIP, " SRTMDAT",i,".tif"))
# }
# }
# }
#
#====================================================================
# DEM retrieval based on bbox
#====================================================================
#if (runtype == "bbox"){
# ---- SRTMGL1 (30 m) tile download for the bounding box ----
setwd(demDir)
floorbox <- floor(bbox)
# Tile names refer to the lower-left corner of each 1x1 deg tile, so the
# upper bounds are reduced by 1 to avoid fetching neighbouring tiles:
# bbox 8 46 9 47 should yield only the single tile with ll corner (8, 46).
floorbox[3] <- floorbox[3] - 1
floorbox[4] <- floorbox[4] - 1
lonseq <- seq(floorbox[1], floorbox[3], 1)
latseq <- seq(floorbox[2], floorbox[4], 1)
# All lower-left corners covering the box.
df2 <- expand.grid(lonseq, latseq)
names(df2) <- c("lon", "lat")
ngrids <- nrow(df2)
print(paste0("Retrieving ", ngrids, " SRTM30 grids (1x1 deg)"))
# Remove artefacts from previous runs.
system("rm SRTMDAT*")
system("rm *.hgt")
for (i in seq_len(nrow(df2))) {
  # Hemisphere letters of the SRTM tile name; coordinate 0 counts as N/E
  # (the original `sign(x)==1/-1` tests left LATVAL/LONVAL unset at 0).
  LATVAL <- if (df2$lat[i] < 0) "S" else "N"
  LONVAL <- if (df2$lon[i] < 0) "W" else "E"
  lon_pretty <- formatC(abs(df2$lon[i]), width = 3, flag = "0")
  # Tile archive and the .hgt file it contains.
  filetoget <- paste0(LATVAL, abs(df2$lat[i]), LONVAL, lon_pretty, ".SRTMGL1.hgt.zip")
  filetogetUNZIP <- paste0(LATVAL, abs(df2$lat[i]), LONVAL, lon_pretty, ".hgt")
  if (file.exists(filetoget)) { # don't download again
    print(paste0(filetoget, " exists"))
  } else {
    system(paste0("wget --user ", USER, " --password ", PWD,
                  " http://e4ftl01.cr.usgs.gov//MODV6_Dal_D/SRTM/SRTMGL1.003/2000.02.11/", filetoget))
  }
  # Extract and convert to a tiled, DEFLATE-compressed GeoTIFF. These two
  # commands were previously duplicated in both branches above.
  system(paste0("unzip ", filetoget))
  system(paste0("gdal_translate -q -co TILED=YES -co COMPRESS=DEFLATE -co ZLEVEL=9 -co PREDICTOR=2 ",
                filetogetUNZIP, " SRTMDAT", i, ".tif"))
}
#}
#====================================================================
# MERGE RASTER
#====================================================================
# Mosaic all converted tiles into one DEM (mean over overlapping cells).
# Note: list.files() treats the pattern as a regular expression, not a
# shell glob; "SRTMDAT*" still matches the SRTMDAT<i>.tif files above.
demfiles <- list.files(pattern = "SRTMDAT*")
if (length(demfiles) > 1) {
  rasters1 <- list.files(pattern = "SRTMDAT*", full.names = TRUE, recursive = FALSE)
  # Load every tile functionally (replaces the index loop that grew the
  # list one element at a time), then hand the list plus the aggregation
  # function to mosaic() via do.call().
  rast.list <- lapply(rasters1, raster)
  rast.list$fun <- mean
  dem <- do.call(mosaic, rast.list)
} else {
  # Single tile: nothing to mosaic.
  dem <- raster(demfiles)
}
setwd(wd)
writeRaster(dem, 'predictors/dem.tif', overwrite = TRUE)
#outline = rasterToPolygons(dem)
#shapefile(outline,"extentRequest.shp")
#====================================================================
# CLIP TO NEAREST ERA EXTENT
#====================================================================
# #parameters
# ele=dem
# tol=0 #must be greater than 0.5*box resolution to get correct extent in degrees
# xtent=extent(ele)
# n=xtent@ymax+tol
# s=xtent@ymin-tol
# e=xtent@xmax+tol
# w=xtent@xmin-tol
# ar= paste(n,w,s,e,sep='/')# region of interest N/W/S/E this corresponds to box centres
# t='00/12'#00/12 gives 3hr data for sfc retrieval ; 00/06/12/18 gives 6hr data for pl retrieval (3hr not possible) ; 00/12 for accumulated
# stp='3/6/9/12'#3/6/9/12 gives 3hr data for sfc ; 0 gives 6hr data for pl retrieval (3hr not possible)
# lt='sfc'# sfc=surface or pl=pressure level
# typ='fc'#an=analysis or fc=forecast, depends on parameter - check on ERA gui.
# par= 168# parameter code - check on ERA gui.
# tar='eraExtent.nc'
# grd=paste0(grid,'/',grid)
# dd="20121230/to/20121231"
# #request
# getERA(dd=dd, t=t, grd=grd, stp=stp, lt=lt,typ=typ,par=par,ar=ar,tar=tar,plev=NULL,workd=wd)
# eraExtent=raster('eraExtent.nc')
# # crop domain to era grids completely covered by DEM - this will lose margin of dem
# # accuracy of these two extents is around half DEm pixel = 15m ie can be 15m difference in boudaries
# newExtent=crop(eraExtent,ele,snap='in')
# newDEM=crop(ele,newExtent)
# writeRaster(newExtent, 'eraExtent.tif', overwrite=TRUE)
# dem<-newDEM
# #plot of simulation domain
# pdf('extentMap.pdf')
# plot(extent(eraExtent),col='green', lwd=2, main='New extent of ERA-grids overlaid input DEM.' , sub='New DEM outline (blue). Original ERA request (green)')
# plot(ele,add=TRUE, lwd=2)
# plot(rasterToPolygons(newExtent),add=TRUE, lwd=2)
# plot(extent(newDEM),add=TRUE, col='blue', lwd=2)
# dev.off()
#====================================================================
# EXTRACT SVF
#====================================================================
# #https://cran.r-project.org/web/packages/horizon/horizon.pdf
# #http://onlinelibrary.wiley.com/doi/10.1002/joc.3523/pdf
# if (compute_svf == TRUE){
# r <- dem
# s <- svf(r, nAngles=4, maxDist=500, ll=TRUE)
# setwd(paste0(wd,'/predictors'))
# writeRaster(round(s,2), "svf.tif", overwrite=TRUE) #write and reduce precision
# }
# #perhaps need to do this on indiv tiles for memory issues?
# #====================================================================
# # EXTRACT SLP/ASP
# #================================================================= ==
# slp=terrain(dem, opt="slope", unit="degrees", neighbors=8)
# asp=terrain(dem, opt="aspect", unit="degrees", neighbors=8)
# #====================================================================
# # WRITE OUTPUTS
# #====================================================================
# setwd(paste0(wd,'/predictors'))
# writeRaster(round(slp,0), "slp.tif", overwrite=TRUE) #write and reduce precision
# writeRaster(round(asp,0), "asp.tif", overwrite=TRUE) #write and reduce precision
# writeRaster(dem, "ele.tif", overwrite=TRUE)
# #====================================================================
# # CLEANUP
# #====================================================================
# rm(list = ls())
#====================================================================
# OUTPUT
#====================================================================
# dem.tif in longlat
# asp.tif in degrees
# slp.tif in degrees
# svf.tif
#write these to predictors dir
#====================================================================
# OLDSTUFF
#====================================================================
# elevation: https://pypi.python.org/pypi/elevation #perhaps not anymore?
#erathdata login: https://urs.earthdata.nasa.gov/profile
#gdal_translate
#pypi/elevation
# eio clip -o yala.tif --bounds 12.35 41.8 12.65 42
# eio clip -o test1.tif --bounds 12.35 31.8 12.65 32.1
# eio clip -o langtang.tif --bounds 85.52 28.1 85.68 28.3
# 85.52 28.1 85.68 28.2
#this works
# wget --user USER --password PWD http://e4ftl01.cr.usgs.gov//MODV6_Dal_D/SRTM/SRTMGL1.003/2000.02.11/N28E085.SRTMGL1.hgt.zip
# unzip N28E085.SRTMGL1.hgt.zip
# gdal_translate -q -co TILED=YES -co COMPRESS=DEFLATE -co ZLEVEL=9 -co PREDICTOR=2 N28E085.hgt SRTMDAT.tif
#naming convention http://e4ftl01.cr.usgs.gov//MODV6_Dal_D/SRTM/SRTMGL1.003/2000.02.11/
#name of dem file is llcorner 1deg tiles
# to find tile need to round down long/let to nearest degree
#points by google-api
#wget https://maps.googleapis.com/maps/api/elevation/json?locations=43.7391536,8.9847034&key=AIzaSyCTCRL-sszoCWqzNHcKz4FrAwJLvh7A3x8
| /rsrc/getDEM.R | permissive | joelfiddes/topoMAPP | R | false | false | 10,172 | r | #====================================================================
# SETUP
#====================================================================
#INFO
#account required https://urs.earthdata.nasa.gov/profile
#DEPENDENCY
require(raster)
#SOURCE
#====================================================================
# PARAMETERS/ARGS
#====================================================================
# (Duplicate copy of getDEM.R.)
# Command-line arguments: 1 = working dir, 2 = DEM download dir,
# 3..6 = bounding box as w, s, e, n.
args = commandArgs(trailingOnly=TRUE)
wd=args[1]
demDir=args[2]
#runtype=args[2]
#box=as.numeric(unlist(strsplit( args[3],",")))
bbox=c(as.numeric(args[3]),as.numeric(args[4]),as.numeric(args[5]),as.numeric(args[6])) # w,s,e,n
#grid=args[3]
#points input
#lon=c(85.52 ,85.68,84.2)
#lat=c(28.1 ,28.3, 27.8)
#====================================================================
# PARAMETERS FIXED
#====================================================================
#DEM Data dir
#parse credentials file to get user/pwd: https://urs.earthdata.nasa.gov/profile
#to create ~/.netrc credentials file run lpdaacLogin() (install.package('MODIS')
# Each ~/.netrc line is "<key> <value>"; the second whitespace-separated
# token is the value. The file is re-read for every field here.
SERVICE=unlist(strsplit(readLines("~/.netrc")[[1]]," "))[2]
print(paste0('using credentials for: ', SERVICE))
USER=unlist(strsplit(readLines("~/.netrc")[[2]]," "))[2]
PWD=unlist(strsplit(readLines("~/.netrc")[[3]]," "))[2]
#bbox=(e,s,w,n)
#bbox=c(85.1, 27.7, 85.8, 27.8)
#compute_svf<-FALSE
#********************** SCRIPT BEGIN *******************************
# Ensure the predictors output dir exists, then work in the DEM cache dir.
dir.create(paste0(wd,'/predictors'), showWarnings=FALSE)
setwd(demDir)
#====================================================================
# DEM retrieval based on set of points:
#====================================================================
# if (runtype == "points"){
# setwd(demDir)
# df=data.frame(lon,lat)
# #find unique llcorner
# df2=unique(floor(df))
# #clean up
# system("rm SRTMDAT*")
# system("rm *.hgt")
# for (i in 1:(dim(df2)[1])){
# if (sign(df2$lat[i])==-1){LATVAL<-"S"}
# if (sign(df2$lat[i])==1){LATVAL<-"N"}
# if (sign(df2$lon[i])==-1){LONVAL<-"W"}
# if (sign(df2$lon[i])==1){LONVAL<-"E"}
# lon_pretty=formatC(df2$lon[i],width=3,flag="0")
# #get tile
# filetoget=paste0(LATVAL,df2$lat[i],LONVAL,lon_pretty,".SRTMGL1.hgt.zip")
# filetogetUNZIP=paste0(LATVAL,df2$lat[i],LONVAL,lon_pretty,".hgt")
# if (file.exists(filetoget)){ #dont download again
# print(paste0(filetoget, " exists"))
# system(paste0("unzip ", filetoget))
# system(paste0("gdal_translate -q -co TILED=YES -co COMPRESS=DEFLATE -co ZLEVEL=9 -co PREDICTOR=2 ", filetogetUNZIP, " SRTMDAT",i,".tif"))
# } else {
# system(paste0("wget --user ", USER , " --password " ,PWD, " http://e4ftl01.cr.usgs.gov//MODV6_Dal_D/SRTM/SRTMGL1.003/2000.02.11/",filetoget))
# # extract
# system(paste0("unzip ", filetoget))
# system(paste0("gdal_translate -q -co TILED=YES -co COMPRESS=DEFLATE -co ZLEVEL=9 -co PREDICTOR=2 ", filetogetUNZIP, " SRTMDAT",i,".tif"))
# }
# }
# }
#
#====================================================================
# DEM retrieval based on bbox
#====================================================================
#if (runtype == "bbox"){
# ---- SRTMGL1 tile download for the bounding box (duplicate copy) ----
setwd(demDir)
floorbox=floor(bbox)
#reduce by 1 longlat seq to prevent neighbouring tile being downloaded - coords are ll corner. bbox with coord = 8 46 9 47 means just give me a sing 1x1 deg tile with coords in ll corner of 8,46. Without the subtraction of 1 we would get 4 1*1 deg tiles ie 8,46 + 8,47 + 9,46 + 9,47
floorbox[3] <- floorbox[3] -1
floorbox[4] <- floorbox[4] -1
lonseq=seq(floorbox[1],floorbox[3],1)
latseq=seq(floorbox[2],floorbox[4],1)
# All lon/lat lower-left corners covering the box.
gridstoget=expand.grid(lonseq,latseq)
names(gridstoget)<-c("lon", "lat")
df2<-gridstoget
ngrids=length(df2[,1])
print (paste0("Retrieving ",ngrids, " SRTM30 grids (1x1 deg)"))
#cleanup
system("rm SRTMDAT*")
system("rm *.hgt")
for (i in 1:(dim(df2)[1])){
# NOTE(review): for coordinate 0 sign() returns 0, so neither branch
# runs and LATVAL/LONVAL stay unset (error on the first iteration).
if (sign(df2$lat[i])==-1){LATVAL<-"S"}
if (sign(df2$lat[i])==1){LATVAL<-"N"}
if (sign(df2$lon[i])==-1){LONVAL<-"W"}
if (sign(df2$lon[i])==1){LONVAL<-"E"}
lon_pretty=formatC(abs(df2$lon[i]),width=3,flag="0")
#get tile
filetoget=paste0(LATVAL,abs(df2$lat[i]),LONVAL,lon_pretty,".SRTMGL1.hgt.zip")
filetogetUNZIP=paste0(LATVAL,abs(df2$lat[i]),LONVAL,lon_pretty,".hgt")
# Reuse a cached zip if present; otherwise download with the Earthdata
# credentials. Both branches then unzip and convert the .hgt tile to a
# compressed GeoTIFF named SRTMDAT<i>.tif.
if (file.exists(filetoget)){ #dont download again
print(paste0(filetoget, " exists"))
system(paste0("unzip ", filetoget))
system(paste0("gdal_translate -q -co TILED=YES -co COMPRESS=DEFLATE -co ZLEVEL=9 -co PREDICTOR=2 ", filetogetUNZIP, " SRTMDAT",i,".tif"))
} else {
system(paste0("wget --user ", USER , " --password " ,PWD, " http://e4ftl01.cr.usgs.gov//MODV6_Dal_D/SRTM/SRTMGL1.003/2000.02.11/",filetoget))
# extract
system(paste0("unzip ", filetoget))
system(paste0("gdal_translate -q -co TILED=YES -co COMPRESS=DEFLATE -co ZLEVEL=9 -co PREDICTOR=2 ", filetogetUNZIP, " SRTMDAT",i,".tif"))
}
}
#}
#====================================================================
# MERGE RASTER
#====================================================================
# Mosaic all converted tiles into one DEM (mean over overlapping cells);
# with a single tile, read it directly. (Duplicate copy.)
demfiles=list.files(pattern="SRTMDAT*")
if(length(demfiles)>1){
rasters1 <- list.files(pattern="SRTMDAT*",full.names=TRUE, recursive=FALSE)
rast.list <- list()
for(i in 1:length(rasters1)) { rast.list[i] <- raster(rasters1[i]) }
# And then use do.call on the list of raster objects
rast.list$fun <- mean
rast.mosaic <- do.call(mosaic,rast.list)
dem<-rast.mosaic
}else{
dem <- raster(demfiles)
}
# Write the merged DEM into the predictors output directory.
setwd(wd)
writeRaster(dem, 'predictors/dem.tif', overwrite=TRUE)
#outline = rasterToPolygons(dem)
#shapefile(outline,"extentRequest.shp")
#====================================================================
# CLIP TO NEAREST ERA EXTENT
#====================================================================
# #parameters
# ele=dem
# tol=0 #must be greater than 0.5*box resolution to get correct extent in degrees
# xtent=extent(ele)
# n=xtent@ymax+tol
# s=xtent@ymin-tol
# e=xtent@xmax+tol
# w=xtent@xmin-tol
# ar= paste(n,w,s,e,sep='/')# region of interest N/W/S/E this corresponds to box centres
# t='00/12'#00/12 gives 3hr data for sfc retrieval ; 00/06/12/18 gives 6hr data for pl retrieval (3hr not possible) ; 00/12 for accumulated
# stp='3/6/9/12'#3/6/9/12 gives 3hr data for sfc ; 0 gives 6hr data for pl retrieval (3hr not possible)
# lt='sfc'# sfc=surface or pl=pressure level
# typ='fc'#an=analysis or fc=forecast, depends on parameter - check on ERA gui.
# par= 168# parameter code - check on ERA gui.
# tar='eraExtent.nc'
# grd=paste0(grid,'/',grid)
# dd="20121230/to/20121231"
# #request
# getERA(dd=dd, t=t, grd=grd, stp=stp, lt=lt,typ=typ,par=par,ar=ar,tar=tar,plev=NULL,workd=wd)
# eraExtent=raster('eraExtent.nc')
# # crop domain to era grids completely covered by DEM - this will lose margin of dem
# # accuracy of these two extents is around half DEm pixel = 15m ie can be 15m difference in boudaries
# newExtent=crop(eraExtent,ele,snap='in')
# newDEM=crop(ele,newExtent)
# writeRaster(newExtent, 'eraExtent.tif', overwrite=TRUE)
# dem<-newDEM
# #plot of simulation domain
# pdf('extentMap.pdf')
# plot(extent(eraExtent),col='green', lwd=2, main='New extent of ERA-grids overlaid input DEM.' , sub='New DEM outline (blue). Original ERA request (green)')
# plot(ele,add=TRUE, lwd=2)
# plot(rasterToPolygons(newExtent),add=TRUE, lwd=2)
# plot(extent(newDEM),add=TRUE, col='blue', lwd=2)
# dev.off()
#====================================================================
# EXTRACT SVF
#====================================================================
# #https://cran.r-project.org/web/packages/horizon/horizon.pdf
# #http://onlinelibrary.wiley.com/doi/10.1002/joc.3523/pdf
# if (compute_svf == TRUE){
# r <- dem
# s <- svf(r, nAngles=4, maxDist=500, ll=TRUE)
# setwd(paste0(wd,'/predictors'))
# writeRaster(round(s,2), "svf.tif", overwrite=TRUE) #write and reduce precision
# }
# #perhaps need to do this on indiv tiles for memory issues?
# #====================================================================
# # EXTRACT SLP/ASP
# #================================================================= ==
# slp=terrain(dem, opt="slope", unit="degrees", neighbors=8)
# asp=terrain(dem, opt="aspect", unit="degrees", neighbors=8)
# #====================================================================
# # WRITE OUTPUTS
# #====================================================================
# setwd(paste0(wd,'/predictors'))
# writeRaster(round(slp,0), "slp.tif", overwrite=TRUE) #write and reduce precision
# writeRaster(round(asp,0), "asp.tif", overwrite=TRUE) #write and reduce precision
# writeRaster(dem, "ele.tif", overwrite=TRUE)
# #====================================================================
# # CLEANUP
# #====================================================================
# rm(list = ls())
#====================================================================
# OUTPUT
#====================================================================
# dem.tif in longlat
# asp.tif in degrees
# slp.tif in degrees
# svf.tif
#write these to predictors dir
#====================================================================
# OLDSTUFF
#====================================================================
# elevation: https://pypi.python.org/pypi/elevation #perhaps not anymore?
#erathdata login: https://urs.earthdata.nasa.gov/profile
#gdal_translate
#pypi/elevation
# eio clip -o yala.tif --bounds 12.35 41.8 12.65 42
# eio clip -o test1.tif --bounds 12.35 31.8 12.65 32.1
# eio clip -o langtang.tif --bounds 85.52 28.1 85.68 28.3
# 85.52 28.1 85.68 28.2
#this works
# wget --user USER --password PWD http://e4ftl01.cr.usgs.gov//MODV6_Dal_D/SRTM/SRTMGL1.003/2000.02.11/N28E085.SRTMGL1.hgt.zip
# unzip N28E085.SRTMGL1.hgt.zip
# gdal_translate -q -co TILED=YES -co COMPRESS=DEFLATE -co ZLEVEL=9 -co PREDICTOR=2 N28E085.hgt SRTMDAT.tif
#naming convention http://e4ftl01.cr.usgs.gov//MODV6_Dal_D/SRTM/SRTMGL1.003/2000.02.11/
#name of dem file is llcorner 1deg tiles
# to find tile need to round down long/let to nearest degree
#points by google-api
#wget https://maps.googleapis.com/maps/api/elevation/json?locations=43.7391536,8.9847034&key=<YOUR_API_KEY>
# SECURITY NOTE(review): a live-looking Google API key was previously committed in the
# line above; it has been redacted. Revoke/rotate the key in the Google Cloud console
# and keep credentials out of source control (use environment variables instead).
|
# Shiny app: spatial and temporal analysis of liquor sales in Iowa (Story County).
library(tidyverse)
library(ggrepel)
library(shiny)
library(lubridate)
# Pre-processed inputs, paths relative to the app directory. Columns referenced
# later in this app:
#   datacleaned.rds   - transaction-level records (long, lat, City)
#   storedata.rds     - per-store aggregates (City, long, lat, sale_dollars, `Store Name`)
#   storedatayear.rds - per-city yearly aggregates (City, Date, sale_dollars, volume_liters)
iowaliquor <- read_rds("../data/datacleaned.rds")
storedata <- read_rds("../data/storedata.rds")
storedatayear <- read_rds("../data/storedatayear.rds")
# UI: navbar layout with a styled page banner and two analysis tabs.
#   "Temporal" - yearly sales trends, driven by the "city_time" selector.
#   "Spatial"  - store-location maps, driven by the "city" selector.
ui <- navbarPage(
  theme = "yeti",
  tags$title(" "),
  # Page banner.
  div(
    tags$header(p("Spatial and Temporal Analysis of Liquor Sale in Iowa", style="font-size:40px"),
                p("group 4", style="font-size:30px")),
    align = "center", style="color:#ffffff; background-color: #4d728d"),
  tabPanel("Temporal",
           sidebarLayout(sidebarPanel(
             selectInput("city_time", "City",
                         choices = c("ames", "story city",
                                     "nevada", "huxley",
                                     "slater", "cambridge",
                                     "maxwell", "colo", "roland"),
                         selected = "ames")),
             mainPanel(tabsetPanel(
               tabPanel("Sales_dollars_by_year_and_city",
                        plotOutput("Sales_dollars_by_year_and_city")),
               tabPanel("Sales_volume_by_year_and_city",
                        plotOutput("Sales_volume_by_year_and_city")))))),
  tabPanel("Spatial",
           sidebarLayout(sidebarPanel(
             selectInput("city", "City",
                         choices = c("ames", "story city",
                                     "nevada", "huxley",
                                     "slater", "cambridge",
                                     "maxwell", "colo", "roland"),
                         selected = "ames")),
             mainPanel(
               tabsetPanel(
                 # BUG FIX: display label typo "Stroy" corrected to "Story".
                 # The plotOutput id must remain "Stroy_County_Liquor_Sales" because
                 # it is the output name the server assigns to.
                 tabPanel("Story County Liquor Sales",
                          plotOutput("Stroy_County_Liquor_Sales")),
                 tabPanel("City Liquor Sales",
                          plotOutput("City_Liquor_Sales")),
                 tabPanel("City Top Five Liquor Sales' Stores",
                          plotOutput("City_Liquor_Sales_T5")),
                 tabPanel("City Liquor Sales Income in Each Stores",
                          plotOutput("City_Liquor_Sales_income"))
               ))
           ))
)
# Server: renders the two temporal trend plots and the four spatial store maps.
server <- function(input, output) {
  # Temporal: dollar sales over time for the city chosen in the Temporal tab.
  output$Sales_dollars_by_year_and_city <- renderPlot({
    storedatayear %>% dplyr::filter(City == input$city_time) %>%
      ggplot(aes(x = Date, y = sale_dollars)) +
      geom_point() +
      # BUG FIX: title previously interpolated input$city (the Spatial tab's
      # selector) while the data is filtered on input$city_time; a separating
      # space is also added so the title no longer reads "amesSale dollars...".
      ggtitle(paste0(input$city_time, " Sale dollars by year and city"))
  })
  # Temporal: sold volume (liters) over time for the selected city.
  output$Sales_volume_by_year_and_city <- renderPlot({
    storedatayear %>% dplyr::filter(City == input$city_time) %>%
      ggplot(aes(x = Date, y = volume_liters)) +
      geom_point() +
      # BUG FIX: same input$city -> input$city_time correction as above.
      ggtitle(paste0(input$city_time, " Sale volume by year and city"))
  })
  # Spatial: Story County outline with one bubble per location, sized by count.
  output$Stroy_County_Liquor_Sales <- renderPlot({
    story <- map_data("county") %>%
      dplyr::filter(subregion == "story")
    story %>%
      ggplot() +
      geom_path(aes(x = long, y = lat, group = group)) +
      geom_count(data = iowaliquor, aes(x = long, y = lat, color = City), alpha = 0.5) +
      ggtitle("Story County Liquor Sales Volume") +
      theme_light()
  })
  # Spatial: observation counts for the selected city only.
  output$City_Liquor_Sales <- renderPlot({
    ggplot() +
      geom_count(data = iowaliquor %>% dplyr::filter(City == input$city),
                 aes(x = long, y = lat), alpha = 0.5, color = "pink2") +
      ggtitle(paste0(input$city, " Liquor Sales Volume"))
  })
  # Spatial: city observations with the first five stores labelled.
  # NOTE(review): head(n = 5) yields the "top five" only if storedata is
  # pre-sorted by sales - confirm against the data-preparation script.
  output$City_Liquor_Sales_T5 <- renderPlot({
    ggplot(data = iowaliquor %>% dplyr::filter(City == input$city),
           aes(x = long, y = lat)) +
      geom_count(alpha = 0.5, color = "pink2") +
      geom_label_repel(data = storedata %>% dplyr::filter(City == input$city) %>% head(n = 5),
                       aes(label = `Store Name`),
                       hjust = 1, vjust = -7) +
      ggtitle(paste0(input$city, " #observations and top five stores"))
  })
  # Spatial: store locations sized by total sales income.
  output$City_Liquor_Sales_income <- renderPlot({
    storedata %>% dplyr::filter(City == input$city) %>%
      ggplot(aes(x = long, y = lat, size = sale_dollars)) +
      geom_point() +
      ggtitle(paste0(input$city, " Store and Income"))
  })
}
shinyApp(ui, server)
| /shiny/app.R | no_license | yudizhangzyd/STAT585Lab4Group4 | R | false | false | 4,176 | r | library(tidyverse)
# Shiny app (Iowa liquor sales): package attachments and pre-processed inputs.
# NOTE(review): library(tidyverse) appears to have been split onto the mangled
# line above during data extraction - confirm against the original app.R.
library(ggrepel)
library(shiny)
library(lubridate)
# Paths relative to the app directory. Columns referenced later in this app:
#   datacleaned.rds   - transaction-level records (long, lat, City)
#   storedata.rds     - per-store aggregates (City, long, lat, sale_dollars, `Store Name`)
#   storedatayear.rds - per-city yearly aggregates (City, Date, sale_dollars, volume_liters)
iowaliquor <- read_rds("../data/datacleaned.rds")
storedata <- read_rds("../data/storedata.rds")
storedatayear <- read_rds("../data/storedatayear.rds")
# UI: navbar layout with a styled page banner and two analysis tabs.
#   "Temporal" - yearly sales trends, driven by the "city_time" selector.
#   "Spatial"  - store-location maps, driven by the "city" selector.
ui <- navbarPage(
  theme = "yeti",
  tags$title(" "),
  # Page banner.
  div(
    tags$header(p("Spatial and Temporal Analysis of Liquor Sale in Iowa", style="font-size:40px"),
                p("group 4", style="font-size:30px")),
    align = "center", style="color:#ffffff; background-color: #4d728d"),
  tabPanel("Temporal",
           sidebarLayout(sidebarPanel(
             selectInput("city_time", "City",
                         choices = c("ames", "story city",
                                     "nevada", "huxley",
                                     "slater", "cambridge",
                                     "maxwell", "colo", "roland"),
                         selected = "ames")),
             mainPanel(tabsetPanel(
               tabPanel("Sales_dollars_by_year_and_city",
                        plotOutput("Sales_dollars_by_year_and_city")),
               tabPanel("Sales_volume_by_year_and_city",
                        plotOutput("Sales_volume_by_year_and_city")))))),
  tabPanel("Spatial",
           sidebarLayout(sidebarPanel(
             selectInput("city", "City",
                         choices = c("ames", "story city",
                                     "nevada", "huxley",
                                     "slater", "cambridge",
                                     "maxwell", "colo", "roland"),
                         selected = "ames")),
             mainPanel(
               tabsetPanel(
                 # BUG FIX: display label typo "Stroy" corrected to "Story".
                 # The plotOutput id must remain "Stroy_County_Liquor_Sales" because
                 # it is the output name the server assigns to.
                 tabPanel("Story County Liquor Sales",
                          plotOutput("Stroy_County_Liquor_Sales")),
                 tabPanel("City Liquor Sales",
                          plotOutput("City_Liquor_Sales")),
                 tabPanel("City Top Five Liquor Sales' Stores",
                          plotOutput("City_Liquor_Sales_T5")),
                 tabPanel("City Liquor Sales Income in Each Stores",
                          plotOutput("City_Liquor_Sales_income"))
               ))
           ))
)
# Server: renders the two temporal trend plots and the four spatial store maps.
server <- function(input, output) {
  # Temporal: dollar sales over time for the city chosen in the Temporal tab.
  output$Sales_dollars_by_year_and_city <- renderPlot({
    storedatayear %>% dplyr::filter(City == input$city_time) %>%
      ggplot(aes(x = Date, y = sale_dollars)) +
      geom_point() +
      # BUG FIX: title previously interpolated input$city (the Spatial tab's
      # selector) while the data is filtered on input$city_time; a separating
      # space is also added so the title no longer reads "amesSale dollars...".
      ggtitle(paste0(input$city_time, " Sale dollars by year and city"))
  })
  # Temporal: sold volume (liters) over time for the selected city.
  output$Sales_volume_by_year_and_city <- renderPlot({
    storedatayear %>% dplyr::filter(City == input$city_time) %>%
      ggplot(aes(x = Date, y = volume_liters)) +
      geom_point() +
      # BUG FIX: same input$city -> input$city_time correction as above.
      ggtitle(paste0(input$city_time, " Sale volume by year and city"))
  })
  # Spatial: Story County outline with one bubble per location, sized by count.
  output$Stroy_County_Liquor_Sales <- renderPlot({
    story <- map_data("county") %>%
      dplyr::filter(subregion == "story")
    story %>%
      ggplot() +
      geom_path(aes(x = long, y = lat, group = group)) +
      geom_count(data = iowaliquor, aes(x = long, y = lat, color = City), alpha = 0.5) +
      ggtitle("Story County Liquor Sales Volume") +
      theme_light()
  })
  # Spatial: observation counts for the selected city only.
  output$City_Liquor_Sales <- renderPlot({
    ggplot() +
      geom_count(data = iowaliquor %>% dplyr::filter(City == input$city),
                 aes(x = long, y = lat), alpha = 0.5, color = "pink2") +
      ggtitle(paste0(input$city, " Liquor Sales Volume"))
  })
  # Spatial: city observations with the first five stores labelled.
  # NOTE(review): head(n = 5) yields the "top five" only if storedata is
  # pre-sorted by sales - confirm against the data-preparation script.
  output$City_Liquor_Sales_T5 <- renderPlot({
    ggplot(data = iowaliquor %>% dplyr::filter(City == input$city),
           aes(x = long, y = lat)) +
      geom_count(alpha = 0.5, color = "pink2") +
      geom_label_repel(data = storedata %>% dplyr::filter(City == input$city) %>% head(n = 5),
                       aes(label = `Store Name`),
                       hjust = 1, vjust = -7) +
      ggtitle(paste0(input$city, " #observations and top five stores"))
  })
  # Spatial: store locations sized by total sales income.
  output$City_Liquor_Sales_income <- renderPlot({
    storedata %>% dplyr::filter(City == input$city) %>%
      ggplot(aes(x = long, y = lat, size = sale_dollars)) +
      geom_point() +
      ggtitle(paste0(input$city, " Store and Income"))
  })
}
shinyApp(ui, server)
|
#' AMARETTO_HTMLreport
#'
#' Retrieve an interactive html report, including gene set enrichment analysis if asked for.
#'
#' @param AMARETTOinit AMARETTO initialize output
#' @param AMARETTOresults AMARETTO results output
#' @param ProcessedData List of processed input data
#' @param SAMPLE_annotation SAMPLE annotation will be added to heatmap
#' @param ID ID column of the SAMPLE annotation data frame
#' @param hyper_geo_reference Either GMT file address for genesets or computed GSEA dataframe using HyperGeoEnrichmentTest()
#' @param output_address Output directory for the html files.
#' @param show_row_names if True, sample names will appear in the heatmap
#' @param driverGSEA if TRUE, module drivers will also be included in the hypergeometric test.
#' @param phenotype_association_table Optional, Phenotype Association table.
#' @param genetic_pert_hyper_geo_reference Either a GMT file address for genetic-perturbation genesets or a computed GSEA dataframe using HyperGeoEnrichmentTest()
#' @param chem_pert_hyper_geo_reference Either a GMT file address for chemical-perturbation genesets or a computed GSEA dataframe using HyperGeoEnrichmentTest()
#' @param imaging_phenotypes_keywords a character vector of keywords distinguishing imaging phenotypes.
#'
#' @import dplyr
#' @importFrom doParallel registerDoParallel
#' @importFrom DT datatable formatRound formatSignif formatStyle styleColorBar styleInterval
#' @importFrom reshape2 melt
#' @importFrom dplyr arrange group_by left_join mutate select summarise rename filter case_when
#' @importFrom foreach foreach %dopar% %do%
#' @importFrom parallel makeCluster stopCluster detectCores
#' @importFrom knitr knit_meta
#' @importFrom utils write.table
#' @importFrom tibble rownames_to_column
#' @importFrom stats p.adjust phyper
#' @importFrom rmarkdown render
#' @return result
#' @export
#' @examples
#'\dontrun{
#' data('ProcessedDataLIHC')
#' AMARETTOinit <- AMARETTO_Initialize(ProcessedData = ProcessedDataLIHC,
#' NrModules = 2, VarPercentage = 50)
#'
#' AMARETTOresults <- AMARETTO_Run(AMARETTOinit)
#'
#' AMARETTO_HTMLreport(AMARETTOinit= AMARETTOinit,AMARETTOresults= AMARETTOresults,
#' ProcessedData = ProcessedDataLIHC,
#' output_address='./')
#'}
AMARETTO_HTMLreport <- function(AMARETTOinit,
AMARETTOresults,
ProcessedData,
show_row_names = FALSE,
SAMPLE_annotation = NULL,
ID = NULL,
hyper_geo_reference = NULL,
genetic_pert_hyper_geo_reference = NULL,
chem_pert_hyper_geo_reference = NULL,
output_address = './',
driverGSEA = TRUE,
phenotype_association_table = NULL,
imaging_phenotypes_keywords = NULL){
`%dopar%` <- foreach::`%dopar%`
`%do%` <- foreach::`%do%`
CNV_matrix <- ProcessedData[[2]]
MET_matrix <- ProcessedData[[3]]
NrModules <- AMARETTOresults$NrModules
VarPercentage <- AMARETTOinit$Parameters$VarPercentage
#set number of cores and check
NrCores <- AMARETTOinit$NrCores
MaxCores <- parallel::detectCores(all.tests = FALSE, logical = TRUE)
options('DT.warn.size'=FALSE)
if(MaxCores < NrCores){
stop(paste0("The number of cores that is asked for (",NrCores,"), is more than what's avalaible. Changes can be made on AMARETTOinit$NrCores."))
}
#check directory
if (!dir.exists(output_address)){
stop("Output directory is not existing.")
}
report_address <- file.path(output_address)
dir.create(paste0(report_address, "/AMARETTOhtmls/modules"), recursive = TRUE, showWarnings = FALSE)
cat("The output folder structure is created.\n")
#==============================================================================================================
hyper_geo_test_bool<-TRUE
if(is.null(hyper_geo_reference)){
hyper_geo_test_bool<-FALSE
}else if (is.data.frame(hyper_geo_reference)){
output_hgt<-hyper_geo_reference
}else if(is.character(hyper_geo_reference)&file.exists(hyper_geo_reference[1])){
output_hgt <-HyperGeoEnrichmentTest(AMARETTOinit, AMARETTOresults, hyper_geo_reference, driverGSEA, NrCores)
}else {
stop("The hyper_geo_reference is not properly provided. It should be either an address to an existing .gmt file or a hyper-geo-test dataframe table\n")
}
#======================================
genetic_pert_hyper_geo_test_bool<-TRUE
if(is.null(genetic_pert_hyper_geo_reference)){
genetic_pert_hyper_geo_test_bool<-FALSE
}else if (is.data.frame(genetic_pert_hyper_geo_reference)){
genetic_pert_output_hgt<-genetic_pert_hyper_geo_reference
}else if(is.character(genetic_pert_hyper_geo_reference)&file.exists(genetic_pert_hyper_geo_reference[1])){
genetic_pert_output_hgt <-HyperGeoEnrichmentTest(AMARETTOinit, AMARETTOresults, genetic_pert_hyper_geo_reference, driverGSEA, NrCores)
}else {
stop("The genetic_pert_hyper_geo_reference is not properly provided. It should be either an address to an existing .gmt file or a hyper-geo-test dataframe table\n")
}
#======================================
chem_pert_hyper_geo_test_bool<-TRUE
if(is.null(genetic_pert_hyper_geo_reference)){
chem_pert_hyper_geo_test_bool<-FALSE
}else if (is.data.frame(chem_pert_hyper_geo_reference)){
chem_pert_output_hgt<-chem_pert_hyper_geo_reference
}else if(is.character(chem_pert_hyper_geo_reference)&file.exists(chem_pert_hyper_geo_reference[1])){
chem_pert_output_hgt <-HyperGeoEnrichmentTest(AMARETTOinit, AMARETTOresults, chem_pert_hyper_geo_reference, driverGSEA, NrCores)
}else{
stop("The chem_pert_hyper_geo_reference is not properly provided. It should be either an address to an existing .gmt file or a hyper-geo-test dataframe table\n")
}
#==============================================================================================================
#Parallelizing
cluster <- parallel::makeCluster(c(rep("localhost", NrCores)), type = "SOCK")
doParallel::registerDoParallel(cluster,cores=NrCores)
full_path <- normalizePath(report_address)
unlink(paste0(full_path,"/*"))
ModuleOverviewTable <- NULL
yml_file <- paste0(full_path,"/AMARETTOhtmls/modules/_site.yml")
file.copy(system.file("templates/module_templates/_site.yml",package="AMARETTO"),yml_file)
ModuleOverviewTable<-foreach (ModuleNr = 1:NrModules, .packages = c('AMARETTO','tidyverse','DT','rmarkdown')) %dopar% {
#for(ModuleNr in 1:NrModules){
#get heatmap
print(paste0("ModuleNr = ",ModuleNr))
heatmap_module <- AMARETTO_VisualizeModule(AMARETTOinit, AMARETTOresults, ProcessedData, show_row_names = show_row_names, SAMPLE_annotation=SAMPLE_annotation, ID=ID, ModuleNr=ModuleNr,printHM = FALSE)
print("The Heatmap is visualised.")
# create datables that are supplied to the RMarkdown file
ModuleRegulators <- AMARETTOresults$RegulatoryPrograms[ModuleNr,which(AMARETTOresults$RegulatoryPrograms[ModuleNr,] != 0)]
print("ModuleRegulators are defined.")
filename_table <- paste0("regulators_module",ModuleNr)
buttons_list <- list(list(extend ='csv',filename=filename_table), list(extend ='excel',filename=filename_table), list(extend = 'pdf', pageSize = 'A4', orientation = 'landscape',filename=filename_table),list(extend ='print'), list(extend ='colvis'))
column_markup <- list(list(width = '200px', className = 'dt-head-center', targets = "_all"), list(className = 'text-left', targets = "_all"))
dt_regulators <- DT::datatable(tibble::rownames_to_column(as.data.frame(ModuleRegulators),"RegulatorIDs") %>%
dplyr::rename(Weights="ModuleRegulators") %>%
dplyr::mutate(Weights=signif(Weights, digits = 3)) %>%
dplyr::mutate(RegulatorIDs=paste0('<a href="https://www.genecards.org/cgi-bin/carddisp.pl?gene=',RegulatorIDs,'">',RegulatorIDs,'</a>')) %>%
dplyr::arrange(-Weights),
class = 'display', filter = 'top', extensions = c('Buttons','KeyTable'), rownames = FALSE,
options = list(columnDefs = column_markup, pageLength = 10, lengthMenu = c(5, 10, 20, 50, 100),
keys = TRUE, dom = 'Blfrtip', buttons = buttons_list),
colnames = c("Driver Gene", "Weight"), escape = 'Weight') %>%
DT::formatStyle('Weights',color = DT::styleInterval(0, c('darkblue', 'darkred')))
print("Data tabel of regulators is created.")
filename_table <- paste0("targets_module",ModuleNr)
buttons_list <- list(list(extend ='csv',filename=filename_table), list(extend ='excel',filename=filename_table), list(extend = 'pdf', pageSize = 'A4', orientation = 'landscape',filename=filename_table),list(extend ='print'), list(extend ='colvis'))
dt_targets <- DT::datatable(as.data.frame(AMARETTOresults$ModuleMembership) %>%
tibble::rownames_to_column("TargetIDs") %>%
dplyr::arrange(TargetIDs) %>%
dplyr::rename(moduleNr=ModuleNr) %>%
dplyr::filter(moduleNr==ModuleNr) %>%
dplyr::select(-moduleNr) %>%
dplyr::mutate(TargetIDs=paste0('<a href="https://www.genecards.org/cgi-bin/carddisp.pl?gene=',TargetIDs,'">',TargetIDs,'</a>')),
class = 'display', filter = 'top', extensions = c('Buttons','KeyTable'), rownames = FALSE,
options = list(columnDefs = column_markup, pageLength = 10, lengthMenu = c(5, 10, 20, 50, 100),
keys = TRUE, dom = 'Blfrtip', buttons = buttons_list),
colnames = c("Target Gene"),escape = FALSE)
print("Data tabel of targets is created.")
#=========================================================================================================
# create GSEA output table, taking into account the resource of the GMT file (eg. MSIGDB)
if (hyper_geo_test_bool){
dt_genesets_list<-create_hgt_datatable(output_hgt=output_hgt, module_table=TRUE, ModuleNr = ModuleNr)
dt_genesets<-dt_genesets_list$dt_genesets
ngenesets <- dt_genesets_list$ngenesets
} else {
dt_genesets <- "Genesets were not analysed as they were not provided."
ngenesets<-"NA"
#ngenesets <- "NA"
}
print("Data Table for GSEA results is created.")
#=========================================================
# create GSEA output table, taking into account the resource of the GMT file (eg. MSIGDB)
if (genetic_pert_hyper_geo_test_bool){
dt_genesets_genetic_pert<-create_hgt_datatable(output_hgt=genetic_pert_output_hgt, module_table=TRUE, ModuleNr = ModuleNr)
dt_genesets_genetic_pert<-dt_genesets_genetic_pert$dt_genesets
} else {
dt_genesets_genetic_pert <- "Genesets were not analysed as they were not provided."
#ngenesets <- "NA"
}
print("Data Table for GSEA results is created.")
#=========================================================
# create GSEA output table, taking into account the resource of the GMT file (eg. MSIGDB)
if (chem_pert_hyper_geo_test_bool){
dt_genesets_chem_pert<-create_hgt_datatable(output_hgt=chem_pert_output_hgt, module_table=TRUE, ModuleNr = ModuleNr)
dt_genesets_chem_pert<-dt_genesets_chem_pert$dt_genesets
} else {
dt_genesets_chem_pert <- "Genesets were not analysed as they were not provided."
#ngenesets <- "NA"
}
print("Data Table for GSEA results is created.")
#=========================================================
#created datatable for phenotype associations
if (!is.null(phenotype_association_table)){
filename_table <- paste0("phenotypes_module",ModuleNr)
buttons_list <- list(list(extend ='csv',filename=filename_table), list(extend ='excel',filename=filename_table), list(extend = 'pdf', pageSize = 'A4', orientation = 'landscape',filename=filename_table),list(extend ='print'), list(extend ='colvis'))
module_phenotype_association_table<-phenotype_association_table %>% dplyr::filter(ModuleNr==paste0("Module ",!!ModuleNr)) %>%
dplyr::mutate(p.value = signif(p.value, digits = 3), q.value = signif(q.value, digits = 3)) %>% dplyr::arrange(q.value) %>%dplyr::select(-ModuleNr)%>%mutate(Phenotypes=as.factor(Phenotypes))%>%mutate(Statistical_Test=as.factor(Statistical_Test))
if(is.null(imaging_phenotypes_keywords)){
module_phenotype_association_table_molecular_clinical<-module_phenotype_association_table
module_phenotype_association_table_imaging<-data.frame(Phenotypes="There is no imaging phenotype.")
dt_phenotype_association_img<-DT::datatable(data.frame(Phenotype = "There is no imaging phenotype association."))
}
else{
imaging_phenotypes<-paste(imaging_phenotypes_keywords,collapse = "|")
module_phenotype_association_table_molecular_clinical<-module_phenotype_association_table%>%dplyr::filter(!grepl(imaging_phenotypes,Phenotypes,ignore.case = TRUE))
module_phenotype_association_table_imaging<-module_phenotype_association_table%>%dplyr::filter(grepl(imaging_phenotypes,Phenotypes,ignore.case = TRUE))
dt_phenotype_association_img <- DT::datatable(module_phenotype_association_table_imaging,
class='display', filter = 'top', extensions = c('Buttons','KeyTable'), rownames = FALSE,
options = list(pageLength = 10, lengthMenu = c(5, 10, 20, 50, 100), keys = TRUE, dom = 'Blfrtip',buttons = buttons_list),
colnames=c("Phenotype","Statistics Test","P-value","FDR Q-value","Descriptive Statistics"),escape = FALSE) %>%
DT::formatSignif(c('p.value','q.value'), 2)
}
dt_phenotype_association_mc <- DT::datatable(module_phenotype_association_table_molecular_clinical,
class='display', filter = 'top', extensions = c('Buttons','KeyTable'), rownames = FALSE,
options = list(pageLength = 10, lengthMenu = c(5, 10, 20, 50, 100), keys = TRUE, dom = 'Blfrtip',buttons = buttons_list),
colnames=c("Phenotype","Statistics Test","P-value","FDR Q-value","Descriptive Statistics"),escape = FALSE) %>%
DT::formatSignif(c('p.value','q.value'), 2)
} else{
dt_phenotype_association_mc <- data.frame(Phenotype = "Phenotype association resuls were not provided.")
}
print("The datatable with phenotype association results is created.")
#copy the template file, needed when parallelized
modulemd <- paste0(full_path,"/AMARETTOhtmls/modules/module",ModuleNr,".rmd")
file.copy(system.file("templates/module_templates/TemplateReportModule.Rmd",package="AMARETTO"),modulemd)
print("The copy of the template file is created.")
# output_format<-system.file("templates/module_templates/TemplateReportModule.Rmd",package="AMARETTO")
knitr::knit_meta(class=NULL, clean = TRUE)
rmarkdown::render(modulemd,
output_file = paste0("module",ModuleNr,".html"),
params = list(
report_address = report_address,
ModuleNr = ModuleNr,
heatmap_module = heatmap_module,
dt_regulators = dt_regulators,
dt_targets = dt_targets,
dt_phenotype_association_mc = dt_phenotype_association_mc,
dt_phenotype_association_img = dt_phenotype_association_img,
dt_genesets = dt_genesets,
dt_genesets_genetic_pert = dt_genesets_genetic_pert,
dt_genesets_chem_pert = dt_genesets_chem_pert), knit_meta=knitr::knit_meta(class=NULL, clean = TRUE),quiet = TRUE)
print("Rmarkdown created the module html page.")
#remove rmd copy of template
file.remove(modulemd)
#file.remove(paste0(full_path,"/AMARETTOhtmls/modules/module",ModuleNr,"_files"))
print("file removed successfully :) Done!")
#ModuleOverviewTable<-rbind(ModuleOverviewTable,c(ModuleNr,length(which(AMARETTOresults$ModuleMembership==ModuleNr)),length(ModuleRegulators),ngenesets))
while (!is.null(dev.list())) dev.off()
return(c(ModuleNr, length(which(AMARETTOresults$ModuleMembership==ModuleNr)), length(ModuleRegulators),ngenesets))
# },error=function(e){message(paste("an error occured for Module", ModuleNr))})
}
suppressWarnings(suppressMessages(file.remove(paste0(full_path,"/AMARETTOhtmls/modules/_site.yml"))))
file_remove<-suppressWarnings(suppressMessages(file.remove(paste0(full_path,"/AMARETTOhtmls/modules/module",c(1:NrModules),"_files"))))
parallel::stopCluster(cluster)
cat("All module htmls are created.\n")
ModuleOverviewTable <- data.frame(matrix(unlist(ModuleOverviewTable), byrow=TRUE, ncol=4), stringsAsFactors=FALSE)
colnames(ModuleOverviewTable)<-c("ModuleNr","NrTarGenes","NrRegGenes","SignGS")
if (!is.null(CNV_matrix)){
nCNV = ncol(CNV_matrix)
} else {nCNV = NA}
if (!is.null(MET_matrix)){
nMET = ncol(MET_matrix)
} else {nMET = NA}
nExp = ncol(AMARETTOresults$RegulatoryProgramData)
nGenes = length(AMARETTOresults$AllGenes)
nMod = AMARETTOresults$NrModules
options('DT.warn.size'=FALSE) # avoid showing datatable size-related warnings.
filename_table <- "overview_modules"
buttons_list <- list(list(extend ='csv',filename=filename_table), list(extend ='excel',filename=filename_table), list(extend = 'pdf', pageSize = 'A4', orientation = 'landscape',filename=filename_table),list(extend ='print'), list(extend ='colvis'))
dt_overview<-DT::datatable(ModuleOverviewTable %>%
dplyr::mutate(ModuleNr=paste0('<a href="./modules/module',ModuleNr,'.html">Module ',ModuleNr,'</a>')),
class = 'display', filter = 'top', extensions = c('Buttons','KeyTable'), rownames = FALSE, colnames =c("Module","# Target Genes", "# Driver Genes", "# Gene Sets"),
options = list(pageLength = 10, lengthMenu = c(5, 10, 20, 50, 100, 200), keys = TRUE, dom = 'Blfrtip',buttons = buttons_list,columnDefs = list(list(className = 'dt-head-center', targets = "_all"),list(className = 'text-left', targets = "_all"))),
escape = FALSE)
all_targets<-tibble::rownames_to_column(data.frame(AMARETTOresults$ModuleMembership),"Genes") %>%
dplyr::rename(Module="ModuleNr") %>%
dplyr::mutate(value=0) %>%
dplyr::mutate(Type="Target") %>%
dplyr::select(Genes,Module,value,Type)
all_regulators <- reshape2::melt(tibble::rownames_to_column(as.data.frame(AMARETTOresults$RegulatoryPrograms),"Module"),id.vars = "Module") %>%
dplyr::filter(value!=0) %>% dplyr::mutate(Module=sub("Module_","",Module),Type="Driver") %>%
dplyr::rename(Genes='variable') %>%
dplyr::select(Genes,Module,value,Type)
all_genes <- rbind(all_targets,all_regulators) %>%
dplyr::arrange(Genes) %>%
dplyr::mutate(Genes=paste0('<a href="https://www.genecards.org/cgi-bin/carddisp.pl?gene=',Genes,'">',Genes,'</a>')) %>%
dplyr::mutate(Module=paste0('<a href="./modules/module',Module,'.html">Module ',Module,'</a>'))
all_genes <- all_genes %>%
dplyr::mutate(Color=dplyr::case_when(
is.na(as.numeric(value))~"",
as.numeric(value)>0~"darkred",
as.numeric(value)<0~"darkblue",
TRUE~"darkgreen")) %>%
dplyr::mutate(Type=paste0('<font color=',Color,'>',Type,'</font>')) %>%
dplyr::select(-Color,-value)
filename_table <- "genes_to_modules"
buttons_list <- list(list(extend ='csv',filename=filename_table), list(extend ='excel',filename=filename_table), list(extend = 'pdf', pageSize = 'A4', orientation = 'landscape',filename=filename_table),list(extend ='print'), list(extend ='colvis'))
dt_genes <- DT::datatable(all_genes,
class = 'display', filter = 'top', extensions = c('Buttons','KeyTable'), rownames = FALSE,colnames =c("Gene","Module","Gene Type"),
options = list(deferRender=TRUE,columnDefs = list(list(className = 'dt-head-center', targets = "_all"), list(className = 'text-left', targets = "_all")), pageLength = 10, lengthMenu = c(5, 10, 20, 50, 100), keys = TRUE, dom = 'Blfrtip',buttons = buttons_list),
escape = FALSE)
#=================================================================================
if (hyper_geo_test_bool){
dt_genesetsall <- create_hgt_datatable(output_hgt = output_hgt, module_table = FALSE)
} else {
dt_genesetsall <- data.frame(Hyper_Geometric_Test="Genesets were not analysed as they were not provided.")
}
#=============================
if (genetic_pert_hyper_geo_test_bool){
dt_genesetsall_genetic_pert <- create_hgt_datatable(output_hgt = genetic_pert_output_hgt, module_table = FALSE)
} else {
dt_genesetsall_genetic_pert <- data.frame(Hyper_Geometric_Test="Genesets were not analysed as they were not provided.")
}
#=============================
if (chem_pert_hyper_geo_test_bool){
dt_genesetsall_chem_pert <- create_hgt_datatable(output_hgt = chem_pert_output_hgt, module_table = FALSE)
} else {
dt_genesetsall_chem_pert <- data.frame(Hyper_Geometric_Test="Genesets were not analysed as they were not provided.")
}
#=============================
#created phenotype table for index page
if (!is.null(phenotype_association_table)){
filename_table <- "phenotypes_all_modules"
buttons_list <- list(list(extend ='csv',filename=filename_table), list(extend ='excel',filename=filename_table), list(extend = 'pdf', pageSize = 'A4', orientation = 'landscape',filename=filename_table),list(extend ='print'), list(extend ='colvis'))
phenotype_association_all<-phenotype_association_table %>% dplyr::mutate(p.value=signif(p.value, digits = 3), q.value=signif(q.value, digits = 3)) %>%
dplyr::mutate(ModuleNr=paste0('<a href="./modules/module',gsub("Module ","",ModuleNr),'.html">',ModuleNr,'</a>'))%>%dplyr::arrange(q.value)%>%mutate(Phenotypes=as.factor(Phenotypes))%>%mutate(Statistical_Test=as.factor(Statistical_Test))
if(is.null(imaging_phenotypes_keywords)){
all_phenotype_association_table_molecular_clinical<-phenotype_association_all
all_phenotype_association_table_imaging<-data.frame(Phenotypes="There is no imaging phenotype.")
dt_phenotype_association_img_all<-DT::datatable(data.frame(Phenotype = "There is no imaging phenotype association."))
}
else{
imaging_phenotypes<-paste(imaging_phenotypes_keywords,collapse = "|")
all_phenotype_association_table_molecular_clinical<-phenotype_association_all%>%dplyr::filter(!grepl(imaging_phenotypes,Phenotypes,ignore.case = TRUE))
all_phenotype_association_table_imaging<-phenotype_association_all%>%dplyr::filter(grepl(imaging_phenotypes,Phenotypes,ignore.case = TRUE))
dt_phenotype_association_img_all <- DT::datatable(all_phenotype_association_table_imaging,
class='display', filter = 'top', extensions = c('Buttons','KeyTable'), rownames = FALSE,
options = list(pageLength = 10, lengthMenu = c(5, 10, 20, 50, 100), keys = TRUE, dom = 'Blfrtip',buttons = buttons_list),
colnames=c("Phenotype","Statistics Test","P-value","FDR Q-value","Descriptive Statistics"),escape = FALSE) %>%
DT::formatSignif(c('p.value','q.value'), 2)
}
dt_phenotype_association_mc_all<- DT::datatable(all_phenotype_association_table_molecular_clinical, class='display',filter = 'top', extensions = c('Buttons','KeyTable'),rownames = FALSE,
options = list(pageLength = 10, lengthMenu = c(5, 10, 20, 50, 100), keys = TRUE, dom = 'Blfrtip',buttons = buttons_list, columnDefs = list(list(className = 'dt-head-center', targets = "_all"),list(className = 'text-left', targets = "_all"))),colnames=c("Module","Phenotype","Statistics Test","P-value","FDR Q-value","Descriptive Statistics"),
escape = FALSE) %>% DT::formatSignif(c('p.value','q.value'),2)
}
else{
dt_phenotype_association_mc_all <- data.frame(Phenotype_Association="Phenotype association resuls were not provided.")
dt_phenotype_association_img_all<-data.frame(Phenotypes="There is no imaging phenotype.")
}
#Render index page
rmarkdown::render(system.file("templates/TemplateIndexPage.Rmd",package="AMARETTO"), output_dir=paste0(full_path,"/AMARETTOhtmls/"),output_file= "index.html", params = list(
nExp = nExp,
nCNV = nCNV,
nMET = nMET,
nGenes = nGenes,
VarPercentage = VarPercentage,
nMod = nMod,
dt_overview = dt_overview),quiet = TRUE)
rmarkdown::render(system.file("templates/TemplateIndexPage_Overview.Rmd",package="AMARETTO"), output_dir=paste0(full_path,"/AMARETTOhtmls/"),output_file= "index_Overview.html", params = list(
dt_overview = dt_overview),quiet = TRUE)
rmarkdown::render(system.file("templates/TemplateIndexPage_AllGenes.Rmd",package="AMARETTO"), output_dir=paste0(full_path,"/AMARETTOhtmls/"),output_file= "index_AllGenes.html", params = list(
dt_genes = dt_genes),quiet = TRUE)
rmarkdown::render(system.file("templates/TemplateIndexPage_GenesetsEnrichment.Rmd",package="AMARETTO"), output_dir=paste0(full_path,"/AMARETTOhtmls/"),output_file= "index_GenesetsEnrichment.html", params = list(
dt_gensesetsall = dt_gensesetsall),quiet = TRUE)
rmarkdown::render(system.file("templates/TemplateIndexPage_GenesetsEnrichment_gp.Rmd",package="AMARETTO"), output_dir=paste0(full_path,"/AMARETTOhtmls/"),output_file= "index_GenesetsEnrichment_gp.html", params = list(
dt_genesetsall_genetic_pert = dt_genesetsall_genetic_pert),quiet = TRUE)
rmarkdown::render(system.file("templates/TemplateIndexPage_GenesetsEnrichment_cp.Rmd",package="AMARETTO"), output_dir=paste0(full_path,"/AMARETTOhtmls/"),output_file= "index_GenesetsEnrichment_cp.html", params = list(
dt_genesetsall_chem_pert = dt_genesetsall_chem_pert),quiet = TRUE)
rmarkdown::render(system.file("templates/TemplateIndexPage_PhenoAssociation.Rmd",package="AMARETTO"), output_dir=paste0(full_path,"/AMARETTOhtmls/"),output_file= "index_PhenoAssociation.html", params = list(
dt_phenotype_association_all = dt_phenotype_association_mc_all),quiet = TRUE)
rmarkdown::render(system.file("templates/TemplateIndexPage_PhenoAssociationImg.Rmd",package="AMARETTO"), output_dir=paste0(full_path,"/AMARETTOhtmls/"),output_file= "index_PhenoAssociationImg.html", params = list(
dt_phenotype_association_all = dt_phenotype_association_img_all),quiet = TRUE)
dir.create(paste0(report_address, "/AMARETTOhtmls/Report_data"), recursive = TRUE, showWarnings = FALSE)
report_data <- list(nExp = nExp,
nCNV = nCNV,
nMET = nMET,
nGenes = nGenes,
VarPercentage = VarPercentage,
nMod = nMod,
ModuleOverviewTable = ModuleOverviewTable,
all_genes = all_genes,
dt_genesetsall = dt_genesetsall,
dt_genesetsall_genetic_pert = dt_genesetsall_genetic_pert,
dt_genesetsall_chem_pert = dt_genesetsall_chem_pert,
dt_phenotype_association_mc_all = dt_phenotype_association_mc_all,
dt_phenotype_association_img_all = dt_phenotype_association_img_all,
AMARETTOinit = AMARETTOinit,
AMARETTOresults = AMARETTOresults)
saveRDS(report_data, file = paste0(report_address, "/AMARETTOhtmls/Report_data/AMARETTOreport_data.rds"))
cat("The full report is created and ready to use.\n")
return(report_data)
}
#' Hyper Geometric Geneset Enrichement Test
#'
#' Calculates the p-values for unranked gene set enrichment based on two gmt
#' files as input and the hyper geometric test.
#'
#' @param gmtfile The gmt file with reference gene set.
#' @param testgmtfile The gmt file with gene sets to test. In our case, the gmt file of the modules.
#' @param NrCores Number of cores used for parallelization.
#' @param ref.numb.genes The total number of genes tested, standard equal to 45 956 (MSIGDB standard).
#' @return A data frame with one row per (reference set, test set) pair that
#'   overlaps in at least one gene: set names, reference set length,
#'   hypergeometric p-value, overlap size, overlapping genes and the
#'   BH-adjusted p-value (padj).
#' @importFrom foreach foreach
#' @importFrom parallel makeCluster stopCluster
#' @importFrom doParallel registerDoParallel
#' @keywords internal
HyperGTestGeneEnrichment <- function(gmtfile, testgmtfile, NrCores, ref.numb.genes = 45956) {
  `%dopar%` <- foreach::`%dopar%`
  `%do%` <- foreach::`%do%`
  test.gmt <- readGMT(testgmtfile) # our gmt_file_output_from Amaretto
  gmt.path <- readGMT(gmtfile) # the hallmarks_and_co2...
  # Parallelizing: "PSOCK" is the socket cluster type supported natively by the
  # parallel package; type = "SOCK" is delegated to the optional snow package.
  cluster <- parallel::makeCluster(rep("localhost", NrCores), type = "PSOCK")
  doParallel::registerDoParallel(cluster, cores = NrCores)
  # Make sure the workers are released even if the loop below errors.
  on.exit(parallel::stopCluster(cluster), add = TRUE)
  resultloop <- foreach::foreach(j = seq_along(test.gmt), .combine = "rbind") %do% {
    foreach::foreach(i = seq_along(gmt.path), .combine = "rbind") %dopar% {
      l <- length(gmt.path[[i]])                 # reference gene set size
      k <- sum(gmt.path[[i]] %in% test.gmt[[j]]) # overlap size
      m <- ref.numb.genes                        # gene universe size
      n <- length(test.gmt[[j]])                 # test gene set size
      # P(overlap >= k) under the hypergeometric null distribution.
      p1 <- stats::phyper(k - 1, l, m - l, n, lower.tail = FALSE)
      if (k > 0) {
        overlapping.genes <- gmt.path[[i]][gmt.path[[i]] %in% test.gmt[[j]]]
        overlapping.genes <- paste(overlapping.genes, collapse = ', ')
        c(Geneset = names(gmt.path[i]), Testset = names(test.gmt[j]),
          Geneset_length = l, p_value = p1, n_Overlapping = k,
          Overlapping_genes = overlapping.genes)
      }
    }
  }
  # No pair overlapped at all: return an empty, correctly-typed table instead
  # of failing on as.data.frame(NULL).
  if (is.null(resultloop)) {
    return(data.frame(Geneset = character(0), Testset = character(0),
                      Geneset_length = numeric(0), p_value = numeric(0),
                      n_Overlapping = numeric(0), Overlapping_genes = character(0),
                      padj = numeric(0), stringsAsFactors = FALSE))
  }
  resultloop <- as.data.frame(resultloop, stringsAsFactors = FALSE)
  # rbind of named character vectors yields a character matrix; restore the
  # numeric columns before multiple-testing correction.
  resultloop$p_value <- as.numeric(resultloop$p_value)
  resultloop$n_Overlapping <- as.numeric(resultloop$n_Overlapping)
  resultloop$Geneset_length <- as.numeric(resultloop$Geneset_length)
  resultloop[, "padj"] <- stats::p.adjust(resultloop[, "p_value"], method = 'BH')
  return(resultloop)
}
#' GmtFromModules
#'
#' Writes the AMARETTO module gene memberships to a gmt file
#' ("./Modules_genes.gmt" in the current working directory), one line per module.
#'
#' @param AMARETTOresults List output from AMARETTO_Run().
#' @param driverGSEA if TRUE , module driver genes will also be added to module target genes for GSEA.
#' @return Called for its side effect of writing "./Modules_genes.gmt".
#'
#' @importFrom tibble rownames_to_column
#' @importFrom reshape2 melt
#' @importFrom dplyr arrange mutate select rename filter
#' @importFrom utils write.table
#' @keywords internal
GmtFromModules <- function(AMARETTOresults, driverGSEA) {
  # Target genes with their module assignment (gene name, module number).
  ModuleMembership <- tibble::rownames_to_column(as.data.frame(AMARETTOresults$ModuleMembership), "GeneNames")
  if (driverGSEA) {
    # Add each module's driver (regulator) genes to its member list.
    all_regulators <- reshape2::melt(tibble::rownames_to_column(as.data.frame(AMARETTOresults$RegulatoryPrograms), "Module"), id.vars = "Module") %>%
      dplyr::filter(value != 0) %>% dplyr::select(variable, Module) %>%
      dplyr::mutate(Module = sub("Module_", "", Module)) %>%
      dplyr::rename(GeneNames = "variable") %>%
      dplyr::rename(ModuleNr = "Module")
    ModuleMembership <- rbind(ModuleMembership, all_regulators)
  }
  ModuleMembership <- ModuleMembership %>% dplyr::arrange(GeneNames)
  # One character vector of gene names per module number.
  ModuleMembers_list <- split(ModuleMembership$GeneNames, ModuleMembership$ModuleNr)
  names(ModuleMembers_list) <- paste0("Module_", names(ModuleMembers_list))
  gmt_file <- "./Modules_genes.gmt"
  # gmt line format: name<TAB>description<TAB>gene1<TAB>...; the row name
  # written by write.table supplies the description field, which readGMT drops.
  utils::write.table(sapply(names(ModuleMembers_list), function(x) paste(x, paste(ModuleMembers_list[[x]], collapse = "\t"), sep = "\t")), gmt_file, quote = FALSE, row.names = TRUE, col.names = FALSE, sep = '\t')
}
#' readGMT
#'
#' Reads a gmt file (tab separated: set name, description, genes...) into a
#' named list with one character vector of gene names per gene set.
#'
#' @param filename Path to the gmt file.
#'
#' @return A named list of character vectors; names are the gene set names.
#' @keywords internal
readGMT <- function(filename) {
  gmtLines <- strsplit(readLines(filename), "\t")
  # Drop the first two fields (set name and description), keep the genes.
  gmtLines_genes <- lapply(gmtLines, utils::tail, -2)
  # vapply instead of sapply: guarantees a character vector of set names
  # regardless of the number of lines in the file.
  names(gmtLines_genes) <- vapply(gmtLines, function(fields) fields[[1]], character(1))
  return(gmtLines_genes)
}
#' Title plot_run_history
#'
#' Plots the AMARETTO run convergence history: the mean square error per
#' iteration (with standard-deviation error bars) and the number of target
#' gene reassignments per iteration on a log2 scale.
#'
#' @param AMARETTOinit AMARETTO initialize output.
#' @param AMARETTOResults AMARETTO results output.
#'
#' @import ggplot2
#' @importFrom gridExtra grid.arrange
#' @importFrom stats sd
#' @return plot
#' @export
#'
#' @examples plot_run_history(AMARETTOinit,AMARETTOResults)
plot_run_history <- function(AMARETTOinit, AMARETTOResults) {
  # Per-iteration mean and spread of the model error.
  means <- unlist(lapply(AMARETTOResults$run_history$error_history, mean))
  stds <- unlist(lapply(AMARETTOResults$run_history$error_history, stats::sd))
  iterationNr <- seq_along(means)
  # Drop the first entry: reassignments only exist from iteration 1 onwards.
  NrReassignGenes <- AMARETTOResults$run_history$NrReassignGenes_history[-1]
  # Convergence threshold: fraction of the total gene count.
  threshold <- AMARETTOinit$Parameters$convergence_cutoff * nrow(AMARETTOinit$MA_matrix_Var)
  TotGenesNr <- nrow(AMARETTOinit$MA_matrix_Var)
  df <- data.frame(iterationNr = iterationNr,
                   means = means,
                   stds = stds,
                   NrReassignGenes = NrReassignGenes,
                   threshold = threshold,
                   TotGenesNr = TotGenesNr,
                   stringsAsFactors = FALSE)
  # ggplot() + aes() instead of the deprecated ggplot2::qplot(); both plots now
  # also take their data explicitly from df instead of environment capture.
  p1 <- ggplot2::ggplot(df, ggplot2::aes(x = iterationNr, y = means)) +
    ggplot2::geom_errorbar(ggplot2::aes(ymin = means - stds, ymax = means + stds), width = 0.25) +
    ggplot2::xlab("Iteration Number") + ggplot2::ylab("Mean Square Error") +
    ggplot2::geom_line() +
    ggplot2::geom_point()
  p2 <- ggplot2::ggplot(df, ggplot2::aes(x = iterationNr, y = NrReassignGenes)) +
    ggplot2::geom_hline(yintercept = TotGenesNr, linetype = "dashed", color = "blue") +
    ggplot2::geom_hline(yintercept = threshold, linetype = "dashed", color = "red") +
    ggplot2::xlab("Iteration Number") +
    ggplot2::ylab("Target Gene Reassignments Number") +
    ggplot2::geom_line() +
    ggplot2::geom_point() +
    ggplot2::scale_y_continuous(trans = 'log2')
  gridExtra::grid.arrange(p1, p2, nrow = 2)
}
#' Title HyperGeoEnrichmentTest
#'
#' Runs the hyper geometric gene set enrichment test of the AMARETTO modules
#' against one or more reference gmt files and row-binds the results.
#'
#' @param AMARETTOresults AMARETTO results output
#' @param hyper_geo_reference GMT file(s) with gene sets to compare with.
#' @param driverGSEA if TRUE, module drivers will also be included in the hypergeometric test.
#' @param NrCores Number of cores for parallel processing.
#'
#' @return Hyper-Geometric Enrichment Test table
#' @export
#'
#' @examples HyperGeoEnrichmentTest(AMARETTOresults=NULL, hyper_geo_reference=NULL, driverGSEA=TRUE, NrCores=4)
HyperGeoEnrichmentTest <- function(AMARETTOresults, hyper_geo_reference, driverGSEA = TRUE, NrCores = 4) {
  # Preserve the historical contract: missing results means returning 1.
  if (is.null(AMARETTOresults)) {
    return(1)
  }
  # The module gmt file and the MSigDB link mapping are loop-invariant:
  # build them once instead of on every reference file.
  GmtFromModules(AMARETTOresults, driverGSEA)
  utils::data(MsigdbMapping)
  MsigdbMapping <- MsigdbMapping %>% dplyr::mutate(url = paste0('<a href="http://software.broadinstitute.org/gsea/msigdb/cards/', geneset, '">', gsub("_", " ", geneset), '</a>'))
  output_hgt_all <- NULL
  # seq_along (not 1:length) so a zero-length reference input runs no iterations.
  for (i in seq_along(hyper_geo_reference)) {
    output_hgt <- HyperGTestGeneEnrichment(hyper_geo_reference[i], "./Modules_genes.gmt", NrCores)
    # Attach the MSigDB description and card link; fall back to the raw gene
    # set name when the set is not known to MSigDB.
    output_hgt <- output_hgt %>% dplyr::left_join(MsigdbMapping, by = c("Geneset" = "geneset")) %>%
      dplyr::mutate(description = ifelse(is.na(description), Geneset, description)) %>%
      dplyr::mutate(Geneset = ifelse(is.na(url), Geneset, url)) %>% dplyr::rename("Description" = "description") %>% dplyr::select(-url)
    cat("The hyper geometric test results are calculated.\n")
    output_hgt_all <- rbind(output_hgt_all, output_hgt)
  }
  return(output_hgt_all)
}
#' Title create_hgt_datatable
#'
#' Builds an interactive DT datatable from the GSEA results: either the table
#' for a single module's html page, or the overview table for the index page.
#'
#' @param output_hgt GSEA test dataframe from HyperGeoEnrichmentTest function.
#' @param module_table If TRUE, makes the ModuleNr datatable, If FALSE, makes the index page datatable.
#' @param ModuleNr The module number.
#'
#' @return If module_table is TRUE, a list with the module datatable
#'   (dt_genesets) and the number of significant gene sets (ngenesets);
#'   otherwise the index-page datatable.
#' @keywords internal
create_hgt_datatable<-function(output_hgt, module_table, ModuleNr = 1){
if (module_table){
##################################################################
# filter results from module from all datatable with all GSEA results
output_hgt_filter <- output_hgt %>% dplyr::filter(Testset==paste0("Module_",as.character(ModuleNr))) %>% dplyr::arrange(padj)
# add the overlap fraction and keep integer counts for display
output_hgt_filter <- output_hgt_filter %>% dplyr::mutate(overlap_perc=n_Overlapping/Geneset_length) %>%
mutate(overlap_perc=signif(overlap_perc, digits = 3)) %>%
dplyr::select(Geneset,Description,Geneset_length, n_Overlapping, Overlapping_genes, overlap_perc, p_value,padj) %>%
arrange(padj) %>%
mutate(Geneset_length=as.integer(Geneset_length), n_Overlapping=as.integer(n_Overlapping))
# export buttons (csv/excel/pdf/print/column visibility) share one file name
filename_table <- paste0("gsea_module",ModuleNr)
buttons_list <- list(list(extend ='csv',filename=filename_table), list(extend ='excel',filename=filename_table), list(extend = 'pdf', pageSize = 'A4', orientation = 'landscape',filename=filename_table),list(extend ='print'), list(extend ='colvis'))
#create interactive tables
options('DT.warn.size'=FALSE)
dt_genesets <- DT::datatable(output_hgt_filter,
#dplyr::mutate(Geneset=paste0('<a href="http://software.broadinstitute.org/gsea/msigdb/cards/',Geneset,'">',gsub("_"," ",Geneset),'</a>')),
class = 'display', filter = 'top', extensions = c('Buttons','KeyTable'), rownames = FALSE,
options = list(pageLength = 10, lengthMenu = c(5, 10, 20, 50, 100), keys = TRUE, dom = 'Blfrtip',buttons = buttons_list,columnDefs = list(list(className = 'dt-head-center', targets = "_all"),list(className = 'text-left', targets = "_all"))),
colnames=c("Gene Set Name", "Gene Set Description", "# Genes in Gene Set", "# Genes in Overlap", "Genes in Overlap", "% Genes in overlap", "P-value", "FDR Q-value"), escape = FALSE) %>%
DT::formatSignif(c('p_value','padj','overlap_perc'),2) %>%
# render the overlap fraction as an in-cell bar, and shrink the gene list font
DT::formatStyle('overlap_perc', background = DT::styleColorBar(c(0,1), 'lightblue'), backgroundSize = '98% 88%', backgroundRepeat = 'no-repeat', backgroundPosition = 'center') %>%
DT::formatStyle(columns = c(5), fontSize = '60%')
# number of gene sets significant at FDR 0.05, reported on the module page
ngenesets <- nrow(output_hgt_filter %>% dplyr::filter(padj<0.05))
return(list(dt_genesets=dt_genesets,ngenesets=ngenesets))
}
##################################################################
else{
# index page: all modules in one table, with links to the module pages
genesetsall<-output_hgt %>%
dplyr::mutate(Testset=paste0('<a href="./modules/module',sub("Module_","",Testset),'.html">',paste0(Testset,paste0(rep(" ",14),collapse = "")),'</a>')) %>%
dplyr::mutate(Modules=gsub("_"," ",Testset))%>%dplyr::mutate(overlap_perc=n_Overlapping/Geneset_length) %>%
dplyr::mutate(overlap_perc=signif(overlap_perc, digits = 3))
# keep only sizeable overlaps (> 2 genes) to limit the table size
genesetsall<-genesetsall %>%
select(Modules, Geneset, Description, Geneset_length, n_Overlapping, Overlapping_genes, overlap_perc, p_value, padj) %>%
dplyr::arrange(padj) %>%
dplyr::filter(n_Overlapping>2) %>%
dplyr::mutate(Geneset_length=as.integer(Geneset_length),n_Overlapping=as.integer(n_Overlapping))
genesetsall<-as.matrix(genesetsall)
filename_table <- "gsea_all_modules"
buttons_list <- list(list(extend ='csv',filename=filename_table), list(extend ='excel',filename=filename_table), list(extend = 'pdf', pageSize = 'A4', orientation = 'landscape',filename=filename_table),list(extend ='print'), list(extend ='colvis'))
dt_genesetsall<-DT::datatable(genesetsall,class = 'display',filter = 'top', extensions = c('Buttons'), rownames = FALSE,
options = list(deferRender=TRUE,paging =TRUE, pageLength = 10, lengthMenu = c(5, 10, 20, 50, 100), keys = TRUE, dom = 'Blfrtip',buttons = buttons_list,columnDefs = list(list(className = 'dt-head-center', targets = "_all"),list(className = 'text-left', targets = "_all"))),
colnames=c("Module","Gene Set Name","Gene Set Description","# Genes in Gene Set","# Genes in Overlap","Genes in Overlap","% Genes in overlap","P-value","FDR Q-value"),
escape = FALSE) %>%
DT::formatSignif(c('p_value','padj','overlap_perc'),2) %>%
DT::formatStyle('overlap_perc',background = DT::styleColorBar(c(0,1), 'lightblue'),backgroundSize = '98% 88%',backgroundRepeat = 'no-repeat', backgroundPosition = 'center') %>%
DT::formatStyle(columns = c(6), fontSize = '60%')
return(dt_genesetsall)
}
}
| /R/amaretto_htmlreport.R | permissive | renesugar/ImagingAMARETTO | R | false | false | 40,925 | r | #' AMARETTO_HTMLreport
#'
#' Retrieve an interactive html report, including gene set enrichment analysis if asked for.
#'
#' @param AMARETTOinit AMARETTO initialize output
#' @param AMARETTOresults AMARETTO results output
#' @param ProcessedData List of processed input data
#' @param SAMPLE_annotation SAMPLE annotation will be added to heatmap
#' @param ID ID column of the SAMPLE annotation data frame
#' @param hyper_geo_reference Either GMT file address for genesets or computed GSEA dataframe using HyperGeoEnrichmentTest()
#' @param output_address Output directory for the html files.
#' @param show_row_names if True, sample names will appear in the heatmap
#' @param driverGSEA if TRUE, module drivers will also be included in the hypergeometric test.
#' @param phenotype_association_table Optional, Phenotype Association table.
#' @param genetic_pert_hyper_geo_reference
#' @param chem_pert_hyper_geo_reference
#' @param imaging_phenotypes_keywords a character vector of keywords distinguishing imaging phenotypes.
#'
#' @import dplyr
#' @importFrom doParallel registerDoParallel
#' @importFrom DT datatable formatRound formatSignif formatStyle styleColorBar styleInterval
#' @importFrom reshape2 melt
#' @importFrom dplyr arrange group_by left_join mutate select summarise rename filter case_when
#' @importFrom foreach foreach %dopar% %do%
#' @importFrom parallel makeCluster stopCluster detectCores
#' @importFrom knitr knit_meta
#' @importFrom utils write.table
#' @importFrom tibble rownames_to_column
#' @importFrom stats p.adjust phyper
#' @importFrom rmarkdown render
#' @return result
#' @export
#' @examples
#'\dontrun{
#' data('ProcessedDataLIHC')
#' AMARETTOinit <- AMARETTO_Initialize(ProcessedData = ProcessedDataLIHC,
#' NrModules = 2, VarPercentage = 50)
#'
#' AMARETTOresults <- AMARETTO_Run(AMARETTOinit)
#'
#' AMARETTO_HTMLreport(AMARETTOinit= AMARETTOinit,AMARETTOresults= AMARETTOresults,
#' ProcessedData = ProcessedDataLIHC,
#' hyper_geo_test_bool=FALSE,
#' output_address='./')
#'}
AMARETTO_HTMLreport <- function(AMARETTOinit,
AMARETTOresults,
ProcessedData,
show_row_names = FALSE,
SAMPLE_annotation = NULL,
ID = NULL,
hyper_geo_reference = NULL,
genetic_pert_hyper_geo_reference = NULL,
chem_pert_hyper_geo_reference = NULL,
output_address = './',
driverGSEA = TRUE,
phenotype_association_table = NULL,
imaging_phenotypes_keywords = NULL){
`%dopar%` <- foreach::`%dopar%`
`%do%` <- foreach::`%do%`
CNV_matrix <- ProcessedData[[2]]
MET_matrix <- ProcessedData[[3]]
NrModules <- AMARETTOresults$NrModules
VarPercentage <- AMARETTOinit$Parameters$VarPercentage
#set number of cores and check
NrCores <- AMARETTOinit$NrCores
MaxCores <- parallel::detectCores(all.tests = FALSE, logical = TRUE)
options('DT.warn.size'=FALSE)
if(MaxCores < NrCores){
stop(paste0("The number of cores that is asked for (",NrCores,"), is more than what's avalaible. Changes can be made on AMARETTOinit$NrCores."))
}
#check directory
if (!dir.exists(output_address)){
stop("Output directory is not existing.")
}
report_address <- file.path(output_address)
dir.create(paste0(report_address, "/AMARETTOhtmls/modules"), recursive = TRUE, showWarnings = FALSE)
cat("The output folder structure is created.\n")
#==============================================================================================================
hyper_geo_test_bool<-TRUE
if(is.null(hyper_geo_reference)){
hyper_geo_test_bool<-FALSE
}else if (is.data.frame(hyper_geo_reference)){
output_hgt<-hyper_geo_reference
}else if(is.character(hyper_geo_reference)&file.exists(hyper_geo_reference[1])){
output_hgt <-HyperGeoEnrichmentTest(AMARETTOinit, AMARETTOresults, hyper_geo_reference, driverGSEA, NrCores)
}else {
stop("The hyper_geo_reference is not properly provided. It should be either an address to an existing .gmt file or a hyper-geo-test dataframe table\n")
}
#======================================
genetic_pert_hyper_geo_test_bool<-TRUE
if(is.null(genetic_pert_hyper_geo_reference)){
genetic_pert_hyper_geo_test_bool<-FALSE
}else if (is.data.frame(genetic_pert_hyper_geo_reference)){
genetic_pert_output_hgt<-genetic_pert_hyper_geo_reference
}else if(is.character(genetic_pert_hyper_geo_reference)&file.exists(genetic_pert_hyper_geo_reference[1])){
genetic_pert_output_hgt <-HyperGeoEnrichmentTest(AMARETTOinit, AMARETTOresults, genetic_pert_hyper_geo_reference, driverGSEA, NrCores)
}else {
stop("The genetic_pert_hyper_geo_reference is not properly provided. It should be either an address to an existing .gmt file or a hyper-geo-test dataframe table\n")
}
#======================================
chem_pert_hyper_geo_test_bool<-TRUE
if(is.null(genetic_pert_hyper_geo_reference)){
chem_pert_hyper_geo_test_bool<-FALSE
}else if (is.data.frame(chem_pert_hyper_geo_reference)){
chem_pert_output_hgt<-chem_pert_hyper_geo_reference
}else if(is.character(chem_pert_hyper_geo_reference)&file.exists(chem_pert_hyper_geo_reference[1])){
chem_pert_output_hgt <-HyperGeoEnrichmentTest(AMARETTOinit, AMARETTOresults, chem_pert_hyper_geo_reference, driverGSEA, NrCores)
}else{
stop("The chem_pert_hyper_geo_reference is not properly provided. It should be either an address to an existing .gmt file or a hyper-geo-test dataframe table\n")
}
#==============================================================================================================
#Parallelizing
cluster <- parallel::makeCluster(c(rep("localhost", NrCores)), type = "SOCK")
doParallel::registerDoParallel(cluster,cores=NrCores)
full_path <- normalizePath(report_address)
unlink(paste0(full_path,"/*"))
ModuleOverviewTable <- NULL
yml_file <- paste0(full_path,"/AMARETTOhtmls/modules/_site.yml")
file.copy(system.file("templates/module_templates/_site.yml",package="AMARETTO"),yml_file)
ModuleOverviewTable<-foreach (ModuleNr = 1:NrModules, .packages = c('AMARETTO','tidyverse','DT','rmarkdown')) %dopar% {
#for(ModuleNr in 1:NrModules){
#get heatmap
print(paste0("ModuleNr = ",ModuleNr))
heatmap_module <- AMARETTO_VisualizeModule(AMARETTOinit, AMARETTOresults, ProcessedData, show_row_names = show_row_names, SAMPLE_annotation=SAMPLE_annotation, ID=ID, ModuleNr=ModuleNr,printHM = FALSE)
print("The Heatmap is visualised.")
# create datables that are supplied to the RMarkdown file
ModuleRegulators <- AMARETTOresults$RegulatoryPrograms[ModuleNr,which(AMARETTOresults$RegulatoryPrograms[ModuleNr,] != 0)]
print("ModuleRegulators are defined.")
filename_table <- paste0("regulators_module",ModuleNr)
buttons_list <- list(list(extend ='csv',filename=filename_table), list(extend ='excel',filename=filename_table), list(extend = 'pdf', pageSize = 'A4', orientation = 'landscape',filename=filename_table),list(extend ='print'), list(extend ='colvis'))
column_markup <- list(list(width = '200px', className = 'dt-head-center', targets = "_all"), list(className = 'text-left', targets = "_all"))
dt_regulators <- DT::datatable(tibble::rownames_to_column(as.data.frame(ModuleRegulators),"RegulatorIDs") %>%
dplyr::rename(Weights="ModuleRegulators") %>%
dplyr::mutate(Weights=signif(Weights, digits = 3)) %>%
dplyr::mutate(RegulatorIDs=paste0('<a href="https://www.genecards.org/cgi-bin/carddisp.pl?gene=',RegulatorIDs,'">',RegulatorIDs,'</a>')) %>%
dplyr::arrange(-Weights),
class = 'display', filter = 'top', extensions = c('Buttons','KeyTable'), rownames = FALSE,
options = list(columnDefs = column_markup, pageLength = 10, lengthMenu = c(5, 10, 20, 50, 100),
keys = TRUE, dom = 'Blfrtip', buttons = buttons_list),
colnames = c("Driver Gene", "Weight"), escape = 'Weight') %>%
DT::formatStyle('Weights',color = DT::styleInterval(0, c('darkblue', 'darkred')))
print("Data tabel of regulators is created.")
filename_table <- paste0("targets_module",ModuleNr)
buttons_list <- list(list(extend ='csv',filename=filename_table), list(extend ='excel',filename=filename_table), list(extend = 'pdf', pageSize = 'A4', orientation = 'landscape',filename=filename_table),list(extend ='print'), list(extend ='colvis'))
dt_targets <- DT::datatable(as.data.frame(AMARETTOresults$ModuleMembership) %>%
tibble::rownames_to_column("TargetIDs") %>%
dplyr::arrange(TargetIDs) %>%
dplyr::rename(moduleNr=ModuleNr) %>%
dplyr::filter(moduleNr==ModuleNr) %>%
dplyr::select(-moduleNr) %>%
dplyr::mutate(TargetIDs=paste0('<a href="https://www.genecards.org/cgi-bin/carddisp.pl?gene=',TargetIDs,'">',TargetIDs,'</a>')),
class = 'display', filter = 'top', extensions = c('Buttons','KeyTable'), rownames = FALSE,
options = list(columnDefs = column_markup, pageLength = 10, lengthMenu = c(5, 10, 20, 50, 100),
keys = TRUE, dom = 'Blfrtip', buttons = buttons_list),
colnames = c("Target Gene"),escape = FALSE)
print("Data tabel of targets is created.")
#=========================================================================================================
# create GSEA output table, taking into account the resource of the GMT file (eg. MSIGDB)
if (hyper_geo_test_bool){
dt_genesets_list<-create_hgt_datatable(output_hgt=output_hgt, module_table=TRUE, ModuleNr = ModuleNr)
dt_genesets<-dt_genesets_list$dt_genesets
ngenesets <- dt_genesets_list$ngenesets
} else {
dt_genesets <- "Genesets were not analysed as they were not provided."
ngenesets<-"NA"
#ngenesets <- "NA"
}
print("Data Table for GSEA results is created.")
#=========================================================
# create GSEA output table, taking into account the resource of the GMT file (eg. MSIGDB)
if (genetic_pert_hyper_geo_test_bool){
dt_genesets_genetic_pert<-create_hgt_datatable(output_hgt=genetic_pert_output_hgt, module_table=TRUE, ModuleNr = ModuleNr)
dt_genesets_genetic_pert<-dt_genesets_genetic_pert$dt_genesets
} else {
dt_genesets_genetic_pert <- "Genesets were not analysed as they were not provided."
#ngenesets <- "NA"
}
print("Data Table for GSEA results is created.")
#=========================================================
# create GSEA output table, taking into account the resource of the GMT file (eg. MSIGDB)
if (chem_pert_hyper_geo_test_bool){
dt_genesets_chem_pert<-create_hgt_datatable(output_hgt=chem_pert_output_hgt, module_table=TRUE, ModuleNr = ModuleNr)
dt_genesets_chem_pert<-dt_genesets_chem_pert$dt_genesets
} else {
dt_genesets_chem_pert <- "Genesets were not analysed as they were not provided."
#ngenesets <- "NA"
}
print("Data Table for GSEA results is created.")
#=========================================================
#created datatable for phenotype associations
if (!is.null(phenotype_association_table)){
filename_table <- paste0("phenotypes_module",ModuleNr)
buttons_list <- list(list(extend ='csv',filename=filename_table), list(extend ='excel',filename=filename_table), list(extend = 'pdf', pageSize = 'A4', orientation = 'landscape',filename=filename_table),list(extend ='print'), list(extend ='colvis'))
module_phenotype_association_table<-phenotype_association_table %>% dplyr::filter(ModuleNr==paste0("Module ",!!ModuleNr)) %>%
dplyr::mutate(p.value = signif(p.value, digits = 3), q.value = signif(q.value, digits = 3)) %>% dplyr::arrange(q.value) %>%dplyr::select(-ModuleNr)%>%mutate(Phenotypes=as.factor(Phenotypes))%>%mutate(Statistical_Test=as.factor(Statistical_Test))
if(is.null(imaging_phenotypes_keywords)){
module_phenotype_association_table_molecular_clinical<-module_phenotype_association_table
module_phenotype_association_table_imaging<-data.frame(Phenotypes="There is no imaging phenotype.")
dt_phenotype_association_img<-DT::datatable(data.frame(Phenotype = "There is no imaging phenotype association."))
}
else{
imaging_phenotypes<-paste(imaging_phenotypes_keywords,collapse = "|")
module_phenotype_association_table_molecular_clinical<-module_phenotype_association_table%>%dplyr::filter(!grepl(imaging_phenotypes,Phenotypes,ignore.case = TRUE))
module_phenotype_association_table_imaging<-module_phenotype_association_table%>%dplyr::filter(grepl(imaging_phenotypes,Phenotypes,ignore.case = TRUE))
dt_phenotype_association_img <- DT::datatable(module_phenotype_association_table_imaging,
class='display', filter = 'top', extensions = c('Buttons','KeyTable'), rownames = FALSE,
options = list(pageLength = 10, lengthMenu = c(5, 10, 20, 50, 100), keys = TRUE, dom = 'Blfrtip',buttons = buttons_list),
colnames=c("Phenotype","Statistics Test","P-value","FDR Q-value","Descriptive Statistics"),escape = FALSE) %>%
DT::formatSignif(c('p.value','q.value'), 2)
}
dt_phenotype_association_mc <- DT::datatable(module_phenotype_association_table_molecular_clinical,
class='display', filter = 'top', extensions = c('Buttons','KeyTable'), rownames = FALSE,
options = list(pageLength = 10, lengthMenu = c(5, 10, 20, 50, 100), keys = TRUE, dom = 'Blfrtip',buttons = buttons_list),
colnames=c("Phenotype","Statistics Test","P-value","FDR Q-value","Descriptive Statistics"),escape = FALSE) %>%
DT::formatSignif(c('p.value','q.value'), 2)
} else{
dt_phenotype_association_mc <- data.frame(Phenotype = "Phenotype association resuls were not provided.")
}
print("The datatable with phenotype association results is created.")
#copy the template file, needed when parallelized
modulemd <- paste0(full_path,"/AMARETTOhtmls/modules/module",ModuleNr,".rmd")
file.copy(system.file("templates/module_templates/TemplateReportModule.Rmd",package="AMARETTO"),modulemd)
print("The copy of the template file is created.")
# output_format<-system.file("templates/module_templates/TemplateReportModule.Rmd",package="AMARETTO")
knitr::knit_meta(class=NULL, clean = TRUE)
rmarkdown::render(modulemd,
output_file = paste0("module",ModuleNr,".html"),
params = list(
report_address = report_address,
ModuleNr = ModuleNr,
heatmap_module = heatmap_module,
dt_regulators = dt_regulators,
dt_targets = dt_targets,
dt_phenotype_association_mc = dt_phenotype_association_mc,
dt_phenotype_association_img = dt_phenotype_association_img,
dt_genesets = dt_genesets,
dt_genesets_genetic_pert = dt_genesets_genetic_pert,
dt_genesets_chem_pert = dt_genesets_chem_pert), knit_meta=knitr::knit_meta(class=NULL, clean = TRUE),quiet = TRUE)
print("Rmarkdown created the module html page.")
#remove rmd copy of template
file.remove(modulemd)
#file.remove(paste0(full_path,"/AMARETTOhtmls/modules/module",ModuleNr,"_files"))
print("file removed successfully :) Done!")
#ModuleOverviewTable<-rbind(ModuleOverviewTable,c(ModuleNr,length(which(AMARETTOresults$ModuleMembership==ModuleNr)),length(ModuleRegulators),ngenesets))
while (!is.null(dev.list())) dev.off()
return(c(ModuleNr, length(which(AMARETTOresults$ModuleMembership==ModuleNr)), length(ModuleRegulators),ngenesets))
# },error=function(e){message(paste("an error occured for Module", ModuleNr))})
}
suppressWarnings(suppressMessages(file.remove(paste0(full_path,"/AMARETTOhtmls/modules/_site.yml"))))
file_remove<-suppressWarnings(suppressMessages(file.remove(paste0(full_path,"/AMARETTOhtmls/modules/module",c(1:NrModules),"_files"))))
parallel::stopCluster(cluster)
cat("All module htmls are created.\n")
ModuleOverviewTable <- data.frame(matrix(unlist(ModuleOverviewTable), byrow=TRUE, ncol=4), stringsAsFactors=FALSE)
colnames(ModuleOverviewTable)<-c("ModuleNr","NrTarGenes","NrRegGenes","SignGS")
if (!is.null(CNV_matrix)){
nCNV = ncol(CNV_matrix)
} else {nCNV = NA}
if (!is.null(MET_matrix)){
nMET = ncol(MET_matrix)
} else {nMET = NA}
nExp = ncol(AMARETTOresults$RegulatoryProgramData)
nGenes = length(AMARETTOresults$AllGenes)
nMod = AMARETTOresults$NrModules
options('DT.warn.size'=FALSE) # avoid showing datatable size-related warnings.
filename_table <- "overview_modules"
buttons_list <- list(list(extend ='csv',filename=filename_table), list(extend ='excel',filename=filename_table), list(extend = 'pdf', pageSize = 'A4', orientation = 'landscape',filename=filename_table),list(extend ='print'), list(extend ='colvis'))
dt_overview<-DT::datatable(ModuleOverviewTable %>%
dplyr::mutate(ModuleNr=paste0('<a href="./modules/module',ModuleNr,'.html">Module ',ModuleNr,'</a>')),
class = 'display', filter = 'top', extensions = c('Buttons','KeyTable'), rownames = FALSE, colnames =c("Module","# Target Genes", "# Driver Genes", "# Gene Sets"),
options = list(pageLength = 10, lengthMenu = c(5, 10, 20, 50, 100, 200), keys = TRUE, dom = 'Blfrtip',buttons = buttons_list,columnDefs = list(list(className = 'dt-head-center', targets = "_all"),list(className = 'text-left', targets = "_all"))),
escape = FALSE)
all_targets<-tibble::rownames_to_column(data.frame(AMARETTOresults$ModuleMembership),"Genes") %>%
dplyr::rename(Module="ModuleNr") %>%
dplyr::mutate(value=0) %>%
dplyr::mutate(Type="Target") %>%
dplyr::select(Genes,Module,value,Type)
all_regulators <- reshape2::melt(tibble::rownames_to_column(as.data.frame(AMARETTOresults$RegulatoryPrograms),"Module"),id.vars = "Module") %>%
dplyr::filter(value!=0) %>% dplyr::mutate(Module=sub("Module_","",Module),Type="Driver") %>%
dplyr::rename(Genes='variable') %>%
dplyr::select(Genes,Module,value,Type)
all_genes <- rbind(all_targets,all_regulators) %>%
dplyr::arrange(Genes) %>%
dplyr::mutate(Genes=paste0('<a href="https://www.genecards.org/cgi-bin/carddisp.pl?gene=',Genes,'">',Genes,'</a>')) %>%
dplyr::mutate(Module=paste0('<a href="./modules/module',Module,'.html">Module ',Module,'</a>'))
all_genes <- all_genes %>%
dplyr::mutate(Color=dplyr::case_when(
is.na(as.numeric(value))~"",
as.numeric(value)>0~"darkred",
as.numeric(value)<0~"darkblue",
TRUE~"darkgreen")) %>%
dplyr::mutate(Type=paste0('<font color=',Color,'>',Type,'</font>')) %>%
dplyr::select(-Color,-value)
filename_table <- "genes_to_modules"
buttons_list <- list(list(extend ='csv',filename=filename_table), list(extend ='excel',filename=filename_table), list(extend = 'pdf', pageSize = 'A4', orientation = 'landscape',filename=filename_table),list(extend ='print'), list(extend ='colvis'))
dt_genes <- DT::datatable(all_genes,
class = 'display', filter = 'top', extensions = c('Buttons','KeyTable'), rownames = FALSE,colnames =c("Gene","Module","Gene Type"),
options = list(deferRender=TRUE,columnDefs = list(list(className = 'dt-head-center', targets = "_all"), list(className = 'text-left', targets = "_all")), pageLength = 10, lengthMenu = c(5, 10, 20, 50, 100), keys = TRUE, dom = 'Blfrtip',buttons = buttons_list),
escape = FALSE)
#=================================================================================
if (hyper_geo_test_bool){
dt_genesetsall <- create_hgt_datatable(output_hgt = output_hgt, module_table = FALSE)
} else {
dt_genesetsall <- data.frame(Hyper_Geometric_Test="Genesets were not analysed as they were not provided.")
}
#=============================
if (genetic_pert_hyper_geo_test_bool){
dt_genesetsall_genetic_pert <- create_hgt_datatable(output_hgt = genetic_pert_output_hgt, module_table = FALSE)
} else {
dt_genesetsall_genetic_pert <- data.frame(Hyper_Geometric_Test="Genesets were not analysed as they were not provided.")
}
#=============================
if (chem_pert_hyper_geo_test_bool){
dt_genesetsall_chem_pert <- create_hgt_datatable(output_hgt = chem_pert_output_hgt, module_table = FALSE)
} else {
dt_genesetsall_chem_pert <- data.frame(Hyper_Geometric_Test="Genesets were not analysed as they were not provided.")
}
#=============================
#created phenotype table for index page
if (!is.null(phenotype_association_table)){
filename_table <- "phenotypes_all_modules"
buttons_list <- list(list(extend ='csv',filename=filename_table), list(extend ='excel',filename=filename_table), list(extend = 'pdf', pageSize = 'A4', orientation = 'landscape',filename=filename_table),list(extend ='print'), list(extend ='colvis'))
phenotype_association_all<-phenotype_association_table %>% dplyr::mutate(p.value=signif(p.value, digits = 3), q.value=signif(q.value, digits = 3)) %>%
dplyr::mutate(ModuleNr=paste0('<a href="./modules/module',gsub("Module ","",ModuleNr),'.html">',ModuleNr,'</a>'))%>%dplyr::arrange(q.value)%>%mutate(Phenotypes=as.factor(Phenotypes))%>%mutate(Statistical_Test=as.factor(Statistical_Test))
if(is.null(imaging_phenotypes_keywords)){
all_phenotype_association_table_molecular_clinical<-phenotype_association_all
all_phenotype_association_table_imaging<-data.frame(Phenotypes="There is no imaging phenotype.")
dt_phenotype_association_img_all<-DT::datatable(data.frame(Phenotype = "There is no imaging phenotype association."))
}
else{
imaging_phenotypes<-paste(imaging_phenotypes_keywords,collapse = "|")
all_phenotype_association_table_molecular_clinical<-phenotype_association_all%>%dplyr::filter(!grepl(imaging_phenotypes,Phenotypes,ignore.case = TRUE))
all_phenotype_association_table_imaging<-phenotype_association_all%>%dplyr::filter(grepl(imaging_phenotypes,Phenotypes,ignore.case = TRUE))
dt_phenotype_association_img_all <- DT::datatable(all_phenotype_association_table_imaging,
class='display', filter = 'top', extensions = c('Buttons','KeyTable'), rownames = FALSE,
options = list(pageLength = 10, lengthMenu = c(5, 10, 20, 50, 100), keys = TRUE, dom = 'Blfrtip',buttons = buttons_list),
colnames=c("Phenotype","Statistics Test","P-value","FDR Q-value","Descriptive Statistics"),escape = FALSE) %>%
DT::formatSignif(c('p.value','q.value'), 2)
}
dt_phenotype_association_mc_all<- DT::datatable(all_phenotype_association_table_molecular_clinical, class='display',filter = 'top', extensions = c('Buttons','KeyTable'),rownames = FALSE,
options = list(pageLength = 10, lengthMenu = c(5, 10, 20, 50, 100), keys = TRUE, dom = 'Blfrtip',buttons = buttons_list, columnDefs = list(list(className = 'dt-head-center', targets = "_all"),list(className = 'text-left', targets = "_all"))),colnames=c("Module","Phenotype","Statistics Test","P-value","FDR Q-value","Descriptive Statistics"),
escape = FALSE) %>% DT::formatSignif(c('p.value','q.value'),2)
}
else{
dt_phenotype_association_mc_all <- data.frame(Phenotype_Association="Phenotype association resuls were not provided.")
dt_phenotype_association_img_all<-data.frame(Phenotypes="There is no imaging phenotype.")
}
#Render index page
rmarkdown::render(system.file("templates/TemplateIndexPage.Rmd",package="AMARETTO"), output_dir=paste0(full_path,"/AMARETTOhtmls/"),output_file= "index.html", params = list(
nExp = nExp,
nCNV = nCNV,
nMET = nMET,
nGenes = nGenes,
VarPercentage = VarPercentage,
nMod = nMod,
dt_overview = dt_overview),quiet = TRUE)
rmarkdown::render(system.file("templates/TemplateIndexPage_Overview.Rmd",package="AMARETTO"), output_dir=paste0(full_path,"/AMARETTOhtmls/"),output_file= "index_Overview.html", params = list(
dt_overview = dt_overview),quiet = TRUE)
rmarkdown::render(system.file("templates/TemplateIndexPage_AllGenes.Rmd",package="AMARETTO"), output_dir=paste0(full_path,"/AMARETTOhtmls/"),output_file= "index_AllGenes.html", params = list(
dt_genes = dt_genes),quiet = TRUE)
rmarkdown::render(system.file("templates/TemplateIndexPage_GenesetsEnrichment.Rmd",package="AMARETTO"), output_dir=paste0(full_path,"/AMARETTOhtmls/"),output_file= "index_GenesetsEnrichment.html", params = list(
dt_gensesetsall = dt_gensesetsall),quiet = TRUE)
rmarkdown::render(system.file("templates/TemplateIndexPage_GenesetsEnrichment_gp.Rmd",package="AMARETTO"), output_dir=paste0(full_path,"/AMARETTOhtmls/"),output_file= "index_GenesetsEnrichment_gp.html", params = list(
dt_genesetsall_genetic_pert = dt_genesetsall_genetic_pert),quiet = TRUE)
rmarkdown::render(system.file("templates/TemplateIndexPage_GenesetsEnrichment_cp.Rmd",package="AMARETTO"), output_dir=paste0(full_path,"/AMARETTOhtmls/"),output_file= "index_GenesetsEnrichment_cp.html", params = list(
dt_genesetsall_chem_pert = dt_genesetsall_chem_pert),quiet = TRUE)
rmarkdown::render(system.file("templates/TemplateIndexPage_PhenoAssociation.Rmd",package="AMARETTO"), output_dir=paste0(full_path,"/AMARETTOhtmls/"),output_file= "index_PhenoAssociation.html", params = list(
dt_phenotype_association_all = dt_phenotype_association_mc_all),quiet = TRUE)
rmarkdown::render(system.file("templates/TemplateIndexPage_PhenoAssociationImg.Rmd",package="AMARETTO"), output_dir=paste0(full_path,"/AMARETTOhtmls/"),output_file= "index_PhenoAssociationImg.html", params = list(
dt_phenotype_association_all = dt_phenotype_association_img_all),quiet = TRUE)
dir.create(paste0(report_address, "/AMARETTOhtmls/Report_data"), recursive = TRUE, showWarnings = FALSE)
report_data <- list(nExp = nExp,
nCNV = nCNV,
nMET = nMET,
nGenes = nGenes,
VarPercentage = VarPercentage,
nMod = nMod,
ModuleOverviewTable = ModuleOverviewTable,
all_genes = all_genes,
dt_genesetsall = dt_genesetsall,
dt_genesetsall_genetic_pert = dt_genesetsall_genetic_pert,
dt_genesetsall_chem_pert = dt_genesetsall_chem_pert,
dt_phenotype_association_mc_all = dt_phenotype_association_mc_all,
dt_phenotype_association_img_all = dt_phenotype_association_img_all,
AMARETTOinit = AMARETTOinit,
AMARETTOresults = AMARETTOresults)
saveRDS(report_data, file = paste0(report_address, "/AMARETTOhtmls/Report_data/AMARETTOreport_data.rds"))
cat("The full report is created and ready to use.\n")
return(report_data)
}
#' Hyper Geometric Geneset Enrichment Test
#'
#' Calculates the p-values for unranked gene set enrichment based on two gmt
#' files as input and the hyper geometric test.
#' @return A data.frame with one row per (reference geneset, module) pair that
#'   shares at least one gene: Geneset, Testset, Geneset_length, p_value,
#'   n_Overlapping, Overlapping_genes, plus a BH-adjusted p-value in "padj".
#' @param gmtfile The gmt file with reference gene set.
#' @param testgmtfile The gmt file with gene sets to test. In our case, the gmt file of the modules.
#' @param NrCores Number of cores used for parallelization.
#' @param ref.numb.genes The total number of genes tested, standard equal to 45 956 (MSIGDB standard).
#' @importFrom foreach foreach
#' @importFrom parallel makeCluster stopCluster
#' @importFrom doParallel registerDoParallel
#' @keywords internal
HyperGTestGeneEnrichment<-function(gmtfile,testgmtfile,NrCores,ref.numb.genes=45956){
# Bind foreach's infix operators locally so the package need not be attached.
`%dopar%` <- foreach::`%dopar%`
`%do%` <- foreach::`%do%`
test.gmt<-readGMT(testgmtfile) # our gmt_file_output_from Amaretto
gmt.path<-readGMT(gmtfile) # the hallmarks_and_co2...
########################### Parallelizing :
# NOTE(review): parallel::makeCluster() documents only "PSOCK"/"FORK" cluster
# types; type = "SOCK" is snow's spelling -- confirm this works as intended.
# Also, stopCluster() below is not registered with on.exit(), so an error
# inside the loops would leak the worker processes.
cluster <- parallel::makeCluster(c(rep("localhost", NrCores)), type = "SOCK")
doParallel::registerDoParallel(cluster,cores=NrCores)
# Outer loop over the module (test) sets runs sequentially (%do%); the inner
# loop over reference genesets runs on the workers (%dopar%). Rows are
# stacked with rbind at both levels.
resultloop<-foreach(j=1:length(test.gmt), .combine='rbind') %do% {
foreach(i=1:length(gmt.path),.combine='rbind') %dopar% {
# l = reference geneset size, k = overlap with the module,
# m = universe size, n = module size.
l<-length(gmt.path[[i]])
k<-sum(gmt.path[[i]] %in% test.gmt[[j]])
m<-ref.numb.genes
n<-length(test.gmt[[j]])
# Upper-tail hypergeometric probability of observing >= k overlapping genes.
p1<-stats::phyper(k-1,l,m-l,n,lower.tail=FALSE)
if (k>0){
overlapping.genes<-gmt.path[[i]][gmt.path[[i]] %in% test.gmt[[j]]]
overlapping.genes<-paste(overlapping.genes,collapse = ', ')
# resultloop<-rbind(resultloop,c(Geneset=names(gmt.path[i]),Testset=names(test.gmt[j]),p_value=p1,n_Overlapping=k,Overlapping_genes=overlapping.genes))
c(Geneset=names(gmt.path[i]),Testset=names(test.gmt[j]),Geneset_length=l,p_value=p1,n_Overlapping=k,Overlapping_genes=overlapping.genes)
}
# Pairs with no overlap (k == 0) yield NULL and are silently dropped by rbind.
}
}
parallel::stopCluster(cluster)
# The foreach result is a character matrix; restore numeric column types.
resultloop<-as.data.frame(resultloop,stringsAsFactors=FALSE)
resultloop$p_value<-as.numeric(resultloop$p_value)
resultloop$n_Overlapping<-as.numeric((resultloop$n_Overlapping))
resultloop$Geneset_length<-as.numeric(resultloop$Geneset_length)
# Benjamini-Hochberg correction across all retained pairs.
resultloop[,"padj"]<-stats::p.adjust(resultloop[,"p_value"],method='BH')
return(resultloop)
}
#' GmtFromModules
#'
#' Writes one gmt line per AMARETTO module, listing the module's target genes
#' (and, when \code{driverGSEA} is TRUE, its driver genes as well), to
#' "./Modules_genes.gmt" in the current working directory.
#'
#' @param driverGSEA if TRUE , module driver genes will also be added to module target genes for GSEA.
#' @param AMARETTOresults List output from AMARETTO_Run().
#'
#' @return Invisibly, the path of the written gmt file.
#'
#' @importFrom tibble rownames_to_column
#' @importFrom reshape2 melt
#' @importFrom dplyr arrange mutate select rename filter
#' @importFrom utils write.table
#' @keywords internal
GmtFromModules <- function(AMARETTOresults,driverGSEA){
  # Target genes with their module assignment (rownames hold the gene names).
  ModuleMembership <- tibble::rownames_to_column(as.data.frame(AMARETTOresults$ModuleMembership),"GeneNames")
  if(driverGSEA){
    # Reshape the regulator weight matrix to long format and keep the
    # non-zero entries, i.e. the drivers actually selected for each module.
    all_regulators <- reshape2::melt(tibble::rownames_to_column(as.data.frame(AMARETTOresults$RegulatoryPrograms),"Module"), id.vars = "Module") %>%
      dplyr::filter(value!=0) %>% dplyr::select(variable, Module) %>%
      dplyr::mutate(Module = sub("Module_", "", Module)) %>%
      dplyr::rename(GeneNames = "variable")%>%
      dplyr::rename(ModuleNr = "Module")
    ModuleMembership <- rbind(ModuleMembership, all_regulators)
  }
  ModuleMembership <- ModuleMembership %>% dplyr::arrange(GeneNames)
  # One character vector of gene names per module.
  ModuleMembers_list <- split(ModuleMembership$GeneNames,ModuleMembership$ModuleNr)
  names(ModuleMembers_list) <- paste0("Module_",names(ModuleMembers_list))
  gmt_file <- "./Modules_genes.gmt"
  # gmt format: name<TAB>description<TAB>gene1<TAB>gene2...; row.names = TRUE
  # emits the module name a second time, which serves as the description
  # field (readGMT() drops the first two fields accordingly).
  utils::write.table(vapply(names(ModuleMembers_list), function(x) paste(x,paste(ModuleMembers_list[[x]],collapse="\t"),sep="\t"), character(1)), gmt_file, quote = FALSE, row.names = TRUE, col.names = FALSE,sep='\t')
  invisible(gmt_file)
}
#' readGMT
#'
#' Parses a gmt file into a named list of gene sets. Each gmt line is
#' tab-separated: set name, description, then the member genes. The
#' description field is discarded.
#'
#' @param filename Path to the gmt file to read.
#'
#' @return A named list; each element is the character vector of genes
#'   belonging to one set, named by the set identifier.
#' @keywords internal
readGMT <- function(filename){
  records <- strsplit(readLines(filename), "\t")
  # Drop the first two fields (name, description); keep only the genes.
  genesets <- lapply(records, function(fields) fields[-(1:2)])
  names(genesets) <- vapply(records, function(fields) fields[[1]], character(1))
  genesets
}
#' Title plot_run_history
#'
#' Plots the convergence history of an AMARETTO run as two stacked panels:
#' the mean squared error per iteration (with +/- 1 sd error bars) and the
#' number of reassigned target genes per iteration (log2 y-axis), with the
#' total gene count (blue) and the convergence threshold (red) as dashed
#' reference lines.
#'
#' @param AMARETTOinit AMARETTO initialization object; provides the
#'   convergence cutoff and the expression matrix used to derive the
#'   threshold and total gene number.
#' @param AMARETTOResults AMARETTO results object; provides the run history.
#'
#' @import ggplot2
#' @importFrom gridExtra grid.arrange
#' @importFrom stats sd
#' @return plot
#' @export
#'
#' @examples plot_run_history(AMARETTOinit,AMARETTOResults)
plot_run_history <- function(AMARETTOinit,AMARETTOResults){
  means <- unlist(lapply(AMARETTOResults$run_history$error_history, mean))
  stds <- unlist(lapply(AMARETTOResults$run_history$error_history, sd))
  iterationNr <- seq_along(means)
  # The first entry of the reassignment history is the initial state rather
  # than an iteration result, so it is dropped.
  NrReassignGenes <- AMARETTOResults$run_history$NrReassignGenes_history[-1]
  threshold <- AMARETTOinit$Parameters$convergence_cutoff*nrow(AMARETTOinit$MA_matrix_Var)
  TotGenesNr <- nrow(AMARETTOinit$MA_matrix_Var)
  df <- data.frame(iterationNr = iterationNr,
                   means = means,
                   stds = stds,
                   NrReassignGenes = NrReassignGenes,
                   threshold = threshold,
                   TotGenesNr = TotGenesNr,
                   stringsAsFactors = FALSE)
  # qplot() is deprecated since ggplot2 3.4; build the plots explicitly.
  p1 <- ggplot2::ggplot(df, ggplot2::aes(x = iterationNr, y = means)) +
    ggplot2::geom_errorbar(ggplot2::aes(ymin = means - stds, ymax = means + stds), width = 0.25) +
    ggplot2::xlab("Iteration Number") + ggplot2::ylab("Mean Square Error") +
    ggplot2::geom_line() +
    ggplot2::geom_point()
  p2 <- ggplot2::ggplot(df, ggplot2::aes(x = iterationNr, y = NrReassignGenes)) +
    ggplot2::geom_hline(yintercept = TotGenesNr, linetype = "dashed", color = "blue") +
    ggplot2::geom_hline(yintercept = threshold, linetype = "dashed", color = "red") +
    ggplot2::xlab("Iteration Number") +
    ggplot2::ylab("Target Gene Reassignments Number") +
    ggplot2::geom_line() +
    ggplot2::geom_point() +
    ggplot2::scale_y_continuous(trans = 'log2')
  gridExtra::grid.arrange(p1, p2, nrow = 2)
}
#' Title HyperGeoEnrichmentTest
#'
#' Runs the hyper geometric enrichment test of the AMARETTO modules against
#' one or more reference gmt files and annotates the results with MSigDB
#' descriptions and gene set links.
#'
#' @param AMARETTOresults AMARETTO results output
#' @param hyper_geo_reference GMT file(s) with gene sets to compare with.
#' @param driverGSEA if TRUE, module drivers will also be included in the hypergeometric test.
#' @param NrCores Number of cores for parallel processing.
#'
#' @return Hyper-Geometric Enrichment Test table (rows of all references
#'   combined), or 1 when \code{AMARETTOresults} is NULL.
#' @export
#'
#' @examples HyperGeoEnrichmentTest(AMARETTOresults=NULL, hyper_geo_reference, driverGSEA=TRUE, NrCores=4)
HyperGeoEnrichmentTest<-function(AMARETTOresults, hyper_geo_reference, driverGSEA=TRUE, NrCores=4){
  # Preserve the original contract: a missing AMARETTO result returns 1.
  if (is.null(AMARETTOresults)){
    return(1)
  }
  # The module gmt file and the MSigDB annotation are identical for every
  # reference gmt, so build them once instead of once per loop iteration.
  GmtFromModules(AMARETTOresults, driverGSEA)
  utils::data(MsigdbMapping)
  MsigdbMapping<-MsigdbMapping%>%dplyr::mutate(url=paste0('<a href="http://software.broadinstitute.org/gsea/msigdb/cards/',geneset,'">',gsub("_"," ",geneset),'</a>'))
  output_hgt_all<-NULL
  # seq_along() is safe for an empty reference vector (1:length() is not).
  for(i in seq_along(hyper_geo_reference)){
    output_hgt <- HyperGTestGeneEnrichment(hyper_geo_reference[i], "./Modules_genes.gmt", NrCores)
    # Attach the MSigDB description/link; fall back to the raw geneset name
    # for sets that are not in the mapping.
    output_hgt<-output_hgt%>%dplyr::left_join(MsigdbMapping,by=c("Geneset"="geneset"))%>%
      dplyr::mutate(description=ifelse(is.na(description),Geneset,description))%>%
      dplyr::mutate(Geneset=ifelse(is.na(url),Geneset,url))%>%dplyr::rename("Description"="description")%>%dplyr::select(-url)
    cat("The hyper geometric test results are calculated.\n")
    output_hgt_all<-rbind(output_hgt_all,output_hgt)
  }
  return(output_hgt_all)
}
#' Title create_hgt_datatable
#'
#' Builds an interactive DT datatable of GSEA results, either for a single
#' module (module_table = TRUE) or for all modules combined (index page).
#'
#' @param output_hgt GSEA test dataframe from HyperGeoEnrichmentTest function.
#' @param module_table If TRUE, makes the ModuleNr datatable, If FALSE, makes the index page datatable.
#' @param ModuleNr The module number.
#'
#' @return When module_table is TRUE, a list with the datatable
#'   (dt_genesets) and the count of genesets with padj < 0.05 (ngenesets);
#'   otherwise the all-modules datatable.
#' @examples
create_hgt_datatable<-function(output_hgt, module_table, ModuleNr = 1){
if (module_table){
##################################################################
# filter results from module from all datatable with all GSEA results
# NOTE(review): some verbs below (mutate, arrange, select) are unqualified
# and rely on dplyr being attached -- confirm, or qualify them like the rest.
output_hgt_filter <- output_hgt %>% dplyr::filter(Testset==paste0("Module_",as.character(ModuleNr))) %>% dplyr::arrange(padj)
# Add the fraction of the reference geneset covered by the overlap.
output_hgt_filter <- output_hgt_filter %>% dplyr::mutate(overlap_perc=n_Overlapping/Geneset_length) %>%
mutate(overlap_perc=signif(overlap_perc, digits = 3)) %>%
dplyr::select(Geneset,Description,Geneset_length, n_Overlapping, Overlapping_genes, overlap_perc, p_value,padj) %>%
arrange(padj) %>%
mutate(Geneset_length=as.integer(Geneset_length), n_Overlapping=as.integer(n_Overlapping))
# Export buttons share the same base filename for csv/excel/pdf.
filename_table <- paste0("gsea_module",ModuleNr)
buttons_list <- list(list(extend ='csv',filename=filename_table), list(extend ='excel',filename=filename_table), list(extend = 'pdf', pageSize = 'A4', orientation = 'landscape',filename=filename_table),list(extend ='print'), list(extend ='colvis'))
#create interactive tables
# Silence DT's large-object warning for big result tables.
options('DT.warn.size'=FALSE)
dt_genesets <- DT::datatable(output_hgt_filter,
#dplyr::mutate(Geneset=paste0('<a href="http://software.broadinstitute.org/gsea/msigdb/cards/',Geneset,'">',gsub("_"," ",Geneset),'</a>')),
class = 'display', filter = 'top', extensions = c('Buttons','KeyTable'), rownames = FALSE,
options = list(pageLength = 10, lengthMenu = c(5, 10, 20, 50, 100), keys = TRUE, dom = 'Blfrtip',buttons = buttons_list,columnDefs = list(list(className = 'dt-head-center', targets = "_all"),list(className = 'text-left', targets = "_all"))),
colnames=c("Gene Set Name", "Gene Set Description", "# Genes in Gene Set", "# Genes in Overlap", "Genes in Overlap", "% Genes in overlap", "P-value", "FDR Q-value"), escape = FALSE) %>%
DT::formatSignif(c('p_value','padj','overlap_perc'),2) %>%
DT::formatStyle('overlap_perc', background = DT::styleColorBar(c(0,1), 'lightblue'), backgroundSize = '98% 88%', backgroundRepeat = 'no-repeat', backgroundPosition = 'center') %>%
DT::formatStyle(columns = c(5), fontSize = '60%')
# Number of genesets significant at FDR 5%, reported alongside the table.
ngenesets <- nrow(output_hgt_filter %>% dplyr::filter(padj<0.05))
return(list(dt_genesets=dt_genesets,ngenesets=ngenesets))
}
##################################################################
else{
# Index-page variant: link each module to its html page and keep only
# pairs with more than two overlapping genes.
genesetsall<-output_hgt %>%
dplyr::mutate(Testset=paste0('<a href="./modules/module',sub("Module_","",Testset),'.html">',paste0(Testset,paste0(rep("&nbsp;",14),collapse = "")),'</a>')) %>%
dplyr::mutate(Modules=gsub("_"," ",Testset))%>%dplyr::mutate(overlap_perc=n_Overlapping/Geneset_length) %>%
dplyr::mutate(overlap_perc=signif(overlap_perc, digits = 3))
genesetsall<-genesetsall %>%
select(Modules, Geneset, Description, Geneset_length, n_Overlapping, Overlapping_genes, overlap_perc, p_value, padj) %>%
dplyr::arrange(padj) %>%
dplyr::filter(n_Overlapping>2) %>%
dplyr::mutate(Geneset_length=as.integer(Geneset_length),n_Overlapping=as.integer(n_Overlapping))
genesetsall<-as.matrix(genesetsall)
filename_table <- "gsea_all_modules"
buttons_list <- list(list(extend ='csv',filename=filename_table), list(extend ='excel',filename=filename_table), list(extend = 'pdf', pageSize = 'A4', orientation = 'landscape',filename=filename_table),list(extend ='print'), list(extend ='colvis'))
dt_genesetsall<-DT::datatable(genesetsall,class = 'display',filter = 'top', extensions = c('Buttons'), rownames = FALSE,
options = list(deferRender=TRUE,paging =TRUE, pageLength = 10, lengthMenu = c(5, 10, 20, 50, 100), keys = TRUE, dom = 'Blfrtip',buttons = buttons_list,columnDefs = list(list(className = 'dt-head-center', targets = "_all"),list(className = 'text-left', targets = "_all"))),
colnames=c("Module","Gene Set Name","Gene Set Description","# Genes in Gene Set","# Genes in Overlap","Genes in Overlap","% Genes in overlap","P-value","FDR Q-value"),
escape = FALSE) %>%
DT::formatSignif(c('p_value','padj','overlap_perc'),2) %>%
DT::formatStyle('overlap_perc',background = DT::styleColorBar(c(0,1), 'lightblue'),backgroundSize = '98% 88%',backgroundRepeat = 'no-repeat', backgroundPosition = 'center') %>%
DT::formatStyle(columns = c(6), fontSize = '60%')
return(dt_genesetsall)
}
}
|
#goal: test mice
# Exploratory script for multiple imputation with the mice package:
# built-in nhanes data, a small MCAR simulation, the airquality data,
# and a simulated missingness mechanism.
library(mice)
library(VIM)
# Inspect the missing-data structure of the nhanes example data.
head(nhanes)
md.pattern(nhanes)
p <- md.pairs(nhanes)
p
# Parallel boxplots of observed vs. missing groups (VIM).
pbox(nhanes,pos=1,int=F,cex=0.7)
# Impute with mice defaults and look at the imputed cholesterol values.
imp <- mice(nhanes)
imp$imp$chl
head(complete(imp))
head(complete(imp,2))
# Complete-case fit for comparison with the pooled MI fit below.
summary(lm(chl~age+hyp,data=nhanes))
confint(lm(chl~age+hyp,data=nhanes))
# Fit the model on each imputed data set and pool with Rubin's rules.
fit <- with(imp,lm(chl~age+hyp))
summary(pool(fit))
# --- Simulation: two correlated predictors, 20% of each set missing ---
n <- 100
x1 <- rnorm(n)
x2 <- 0.5*x1+rnorm(n)
cor(x1,x2)
y <- 1*x1+0.5*x2+rnorm(n)
summary(lm(y~x1+x2))
# Knock out 20% of each predictor at random (MCAR).
idx1 <- sample(c(1:n),0.2*n)
idx2 <- sample(c(1:n),0.2*n)
x1.new <- x1
x1.new[idx1] <- NA
x2.new <- x2
x2.new[idx2] <- NA
data.new <- data.frame(y,x1.new,x2.new)
# lm() drops incomplete rows, so this is a complete-case analysis.
model2 <- lm(y~x1.new+x2.new)
summary(model2)
md.pattern(data.new)
md.pairs(data.new)
# Multiple imputation with m = 10 imputations, then pooled regression.
imp <- mice(data.new,m=10)
head(complete(imp))
fit <- with(imp,lm(y~x1.new+x2.new))
summary(pool(fit))
# --- airquality example: mean imputation vs. default mice ---
library(mice)
imp <- mice(airquality,method="mean",m=1,maxit=1)
fit <- lm(Ozone~Solar.R,data=airquality)
# Predict Ozone for the incomplete cases (ic) only.
pred <- predict(fit,newdata=ic(airquality))
imp <- mice(airquality,seed=1,print=FALSE)
fit <- with(imp,lm(Ozone~Wind+Temp+Solar.R))
tab <- summary(pool(fit))
tab
# --- Simulated missingness mechanism on correlated bivariate normals ---
logit_trans <- function(x){exp(x)/(1+exp(x))}
library(MASS)
n <- 1000
y <- mvrnorm(n,mu=c(0,0),Sigma= matrix(c(1,0.5,0.5,1),2,2))
y1 <- y[,1]
y2 <- y[,2]
alpha1 <- 0
alpha2 <- 1
alpha3 <- 0
# Missingness probability built from logistic transforms of y1 and y2
# (with alpha3 = 0 only y1 contributes here).
p <- alpha1+logit_trans(y1)*alpha2+logit_trans(y2)*alpha3
r <- 1- rbinom(n,1,p)
idx <- which(r==1)
y1_new <- y1
# NOTE(review): y2_new keeps only the cases with r == 1, so y1_new and
# y2_new have different lengths -- confirm that is intended.
y2_new <- y2[idx]
boxplot(y1,y2)
boxplot(y1_new,y2_new)
| /stractch/test_mice.R | no_license | andrewhaoyu/breast_cancer_data_analysis | R | false | false | 1,438 | r | #goal: test mice
# Exploratory script for multiple imputation with the mice package
# (dataset-dump copy of the test_mice script).
library(mice)
library(VIM)
# Missing-data structure of the nhanes example data.
head(nhanes)
md.pattern(nhanes)
p <- md.pairs(nhanes)
p
pbox(nhanes,pos=1,int=F,cex=0.7)
# Default mice imputation and pooled regression (Rubin's rules).
imp <- mice(nhanes)
imp$imp$chl
head(complete(imp))
head(complete(imp,2))
summary(lm(chl~age+hyp,data=nhanes))
confint(lm(chl~age+hyp,data=nhanes))
fit <- with(imp,lm(chl~age+hyp))
summary(pool(fit))
# Simulation: correlated predictors with 20% MCAR missingness each.
n <- 100
x1 <- rnorm(n)
x2 <- 0.5*x1+rnorm(n)
cor(x1,x2)
y <- 1*x1+0.5*x2+rnorm(n)
summary(lm(y~x1+x2))
idx1 <- sample(c(1:n),0.2*n)
idx2 <- sample(c(1:n),0.2*n)
x1.new <- x1
x1.new[idx1] <- NA
x2.new <- x2
x2.new[idx2] <- NA
data.new <- data.frame(y,x1.new,x2.new)
model2 <- lm(y~x1.new+x2.new)
summary(model2)
md.pattern(data.new)
md.pairs(data.new)
imp <- mice(data.new,m=10)
head(complete(imp))
fit <- with(imp,lm(y~x1.new+x2.new))
summary(pool(fit))
# airquality: mean imputation vs. default mice.
library(mice)
imp <- mice(airquality,method="mean",m=1,maxit=1)
fit <- lm(Ozone~Solar.R,data=airquality)
pred <- predict(fit,newdata=ic(airquality))
imp <- mice(airquality,seed=1,print=FALSE)
fit <- with(imp,lm(Ozone~Wind+Temp+Solar.R))
tab <- summary(pool(fit))
tab
# Simulated missingness mechanism on correlated bivariate normals.
logit_trans <- function(x){exp(x)/(1+exp(x))}
library(MASS)
n <- 1000
y <- mvrnorm(n,mu=c(0,0),Sigma= matrix(c(1,0.5,0.5,1),2,2))
y1 <- y[,1]
y2 <- y[,2]
alpha1 <- 0
alpha2 <- 1
alpha3 <- 0
p <- alpha1+logit_trans(y1)*alpha2+logit_trans(y2)*alpha3
r <- 1- rbinom(n,1,p)
idx <- which(r==1)
y1_new <- y1
# NOTE(review): y2_new is subset to observed cases, so lengths differ.
y2_new <- y2[idx]
boxplot(y1,y2)
boxplot(y1_new,y2_new)
|
# k-means clustering of blood-donation centers by geographic location,
# with an elbow plot to pick k and a ggplot of the resulting clusters.
library(plyr)
library(ggplot2)
library(cluster)
library(lattice)
library(graphics)
library(grid)
library(gridExtra)
# Interactively choose the donor csv (blood-group counts + Latitude/Longitude).
gd=read.csv(file.choose(),header = T,sep=",")
head(gd)
names(gd)
# NOTE(review): "B.positive" and "O.positive" appear twice in this selection
# and "B.negative"/"O.negative" are absent -- confirm the intended columns.
pd=as.matrix(gd[,c("Donors","A.positive","B.positive","O.positive","A.negative","AB.negative","B.positive","O.positive","AB.positive","Latitude","Longitude")])
# Cluster on the geographic coordinates only (columns 10-11: Latitude, Longitude).
od=pd[,10:11]
od[1:10,]
# Elbow plot: total within-cluster sum of squares for k = 1..15.
wss<-numeric(15)
for(k in 1:15)
wss[k]<-sum(kmeans(od,centers = k,nstart = 25)$withinss)
plot(1:15,wss,type = "b",xlab="number of clusters",ylab = "wss")
# Final model with k = 4 clusters.
km=kmeans(od,4,nstart = 25)
km
c(wss[3],sum(km$withinss))
df=as.data.frame(pd[,10:11])
df
df$cluster=factor(km$cluster)
centers=as.data.frame(km$centers)
# Scatter of centers colored by cluster, with cluster centroids overlaid.
plot1=ggplot(data=df,aes(x=Latitude,y=Longitude,color=cluster))+geom_point()+geom_point(data=centers,aes(x=Latitude,y=Longitude,color=as.factor(c(1,2,3,4))),size=10,alpha=.3)
plot1
print("The no of donation centers in cluster 1 is")
(km$size[1])
print("The no of donation centers in cluster 2 is")
(km$size[2])
print("The no of donation centers in cluster 3 is")
(km$size[3])
print("The no of donation centers in cluster 4 is")
(km$size[4])
# Export the 4 cluster centroids as candidate camp locations.
write.csv(km$centers,file="F:/Resultant Donor Camp Centers.csv")
| /Donor Analysis using R/module2.r | no_license | Vigneshwar-Ram/R | R | false | false | 1,262 | r | library(plyr)
library(ggplot2)
library(cluster)
library(lattice)
library(graphics)
library(grid)
library(gridExtra)
gd=read.csv(file.choose(),header = T,sep=",")
head(gd)
names(gd)
pd=as.matrix(gd[,c("Donors","A.positive","B.positive","O.positive","A.negative","AB.negative","B.positive","O.positive","AB.positive","Latitude","Longitude")])
od=pd[,10:11]
od[1:10,]
wss<-numeric(15)
for(k in 1:15)
wss[k]<-sum(kmeans(od,centers = k,nstart = 25)$withinss)
plot(1:15,wss,type = "b",xlab="number of clusters",ylab = "wss")
km=kmeans(od,4,nstart = 25)
km
c(wss[3],sum(km$withinss))
df=as.data.frame(pd[,10:11])
df
df$cluster=factor(km$cluster)
centers=as.data.frame(km$centers)
plot1=ggplot(data=df,aes(x=Latitude,y=Longitude,color=cluster))+geom_point()+geom_point(data=centers,aes(x=Latitude,y=Longitude,color=as.factor(c(1,2,3,4))),size=10,alpha=.3)
plot1
print("The no of donation centers in cluster 1 is")
(km$size[1])
print("The no of donation centers in cluster 2 is")
(km$size[2])
print("The no of donation centers in cluster 3 is")
(km$size[3])
print("The no of donation centers in cluster 4 is")
(km$size[4])
write.csv(km$centers,file="F:/Resultant Donor Camp Centers.csv")
|
library(potts)
set.seed(42)
ncolor <- as.integer(4)
alpha <- rnorm(ncolor) * 0.01
beta <- log(1 + sqrt(ncolor))
theta <- c(alpha, beta)
nrow <- 25
ncol <- 20
x <- matrix(1, nrow = nrow, ncol = ncol)
foo <- packPotts(x, ncolor)
outfun <- function(tt) {
qux <- outer(tt, tt)
c(tt, qux[lower.tri(qux, diag = TRUE)])
}
outfun(c(485, 2, 9, 4, 954))
out <- potts(foo, theta, nbatch = 5, blen = 3, nspac = 2, debug = TRUE,
outfun = outfun)
names(out)
niter <- out$nbatch * out$blen * out$nspac
.Random.seed <- out$initial.seed
out.too <- potts(foo, theta, nbatch = niter)
tt <- out.too$batch
ttaug <- t(apply(tt, 1, outfun))
identical(tt, ttaug[ , 1:ncol(tt)])
nout <- ncol(ttaug)
ncol(out$batch) == nout
##### check batch means #####
foo <- ttaug[seq(1, niter) %% out$nspac == 0, ]
foo <- array(as.vector(foo), c(out$blen, out$nbatch, nout))
foo <- apply(foo, c(2, 3), mean)
identical(foo, out$batch)
| /package/potts/tests/outfun.R | no_license | cjgeyer/potts | R | false | false | 951 | r |
# Test script for the potts package (dataset-dump copy of the outfun test).
library(potts)
set.seed(42)
# 4-color Potts model on a 25 x 20 lattice.
ncolor <- as.integer(4)
alpha <- rnorm(ncolor) * 0.01
beta <- log(1 + sqrt(ncolor))
theta <- c(alpha, beta)
nrow <- 25
ncol <- 20
x <- matrix(1, nrow = nrow, ncol = ncol)
foo <- packPotts(x, ncolor)
# Output function: statistics plus lower triangle of their outer product.
outfun <- function(tt) {
qux <- outer(tt, tt)
c(tt, qux[lower.tri(qux, diag = TRUE)])
}
outfun(c(485, 2, 9, 4, 954))
out <- potts(foo, theta, nbatch = 5, blen = 3, nspac = 2, debug = TRUE,
outfun = outfun)
names(out)
niter <- out$nbatch * out$blen * out$nspac
# Replay from the saved seed and verify the augmented output and batch means.
.Random.seed <- out$initial.seed
out.too <- potts(foo, theta, nbatch = niter)
tt <- out.too$batch
ttaug <- t(apply(tt, 1, outfun))
identical(tt, ttaug[ , 1:ncol(tt)])
nout <- ncol(ttaug)
ncol(out$batch) == nout
##### check batch means #####
foo <- ttaug[seq(1, niter) %% out$nspac == 0, ]
foo <- array(as.vector(foo), c(out$blen, out$nbatch, nout))
foo <- apply(foo, c(2, 3), mean)
identical(foo, out$batch)
|
#################################
##### PROVA CON 1 LIVELLO ######
#################################
# Mass-univariate ERP analysis: fit an ANOVA per (timepoint, electrode)
# cell using tidyr::nest + purrr::map. Part 1: one within-subject factor.
# NOTE(review): rm(list=ls()) and the absolute user paths make this script
# machine-specific and destructive to the workspace.
rm(list=ls())
load("/Users/giorgioarcara/Documents/Lavori Unipd/Progetto Mass Count/ERP qualche/Exp numerosity 2 ERP analysis/ExpNumerosity R analysis/R Data/Exp2Second.RData")
source("/Users/giorgioarcara/Documents/R_code_Giorgio/unsorted_code/erplist2long.R")
library(erpR)
data(ERPsets)
# Long-format amplitudes for word and nonword conditions, then stacked.
datall_word = erplist2long("Exp1_word_subj", 1:20, erplist = ERPsets, startmsec = -200, endmsec=1500, name.dep="Ampl")
datall_word$type="word"
datall_nonword = erplist2long("Exp1_nonword_subj", 1:20, erplist = ERPsets, startmsec = -200, endmsec=1500, name.dep="Ampl")
datall_nonword$type="nonword"
datall=rbind(datall_word, datall_nonword)
#https://github.com/craddm/EEG_Workshop/blob/master/exercises/Exercise_4_mass_univariate.Rmd
# nest the dataset by timepoints
library(tidyverse)
library(broom)
# One nested data frame per (timepoint, electrode) combination.
time_el_nest = nest(datall, -timepoints, -electrode)
# NOTE(review): tic()/toc() come from tictoc, which is only loaded in the
# second section below -- this call fails unless tictoc is already attached.
tic()
# Repeated-measures ANOVA (type within Subject) fitted in every cell.
time_el_nest <- mutate(time_el_nest,
stats = map(data, ~aov(Ampl ~ type+Error(Subject/type),
data = .x)))
toc()
# NOTE: approximately the same time of apply.
#################################
##### PROVA CON 2 LIVELLI ######
#################################
# Part 2: two crossed within-subject factors (type x singpl).
load("/Users/giorgioarcara/Documents/Lavori Unipd/Progetto Mass Count/ERP qualche/Exp numerosity 2 ERP analysis/ExpNumerosity R analysis/R Data/Exp2Second.RData")
library(erpR)
library(tictoc)
library(tidyverse)
library(broom)
source("/Users/giorgioarcara/Documents/R_code_Giorgio/unsorted_code/erplist2long.R")
source("/Users/giorgioarcara/Documents/R_code_Giorgio/unsorted_code/erplist2long.R")
# Subject 5 is excluded.
Subjects= c(1:4, 6:27)
my_startmsec = -500
my_endmsec = 1500
# Four condition cells: {alcuni, un} x {sg, pl}.
dat_m_alcuni_sg = erplist2long("m_alcuni_sg_", Subjects, erplist = Exp2Second, startmsec = my_startmsec, endmsec=my_endmsec, name.dep="Ampl")
dat_m_alcuni_sg$type="alcuni"
dat_m_alcuni_sg$singpl="sg"
dat_m_alcuni_pl = erplist2long("m_alcuni_pl_", Subjects, erplist = Exp2Second, startmsec = my_startmsec, endmsec=my_endmsec, name.dep="Ampl")
dat_m_alcuni_pl$type="alcuni"
dat_m_alcuni_pl$singpl="pl"
dat_m_un_sg = erplist2long("m_un_sg_", Subjects, erplist = Exp2Second, startmsec = my_startmsec, endmsec=my_endmsec, name.dep="Ampl")
dat_m_un_sg$type="un"
dat_m_un_sg$singpl="sg"
dat_m_un_pl = erplist2long("m_un_pl_", Subjects, erplist = Exp2Second, startmsec = my_startmsec, endmsec=my_endmsec, name.dep="Ampl")
dat_m_un_pl$type="un"
dat_m_un_pl$singpl="pl"
datall = rbind(dat_m_alcuni_sg, dat_m_alcuni_pl, dat_m_un_sg, dat_m_un_pl)
datall$type=factor(datall$type)
datall$singpl=factor(datall$singpl)
time_el_nest = nest(datall, -timepoints, -electrode)
tic()
# Full factorial within-subject ANOVA per cell; keep the summary objects.
time_el_nest <- mutate(time_el_nest,
stats = map(data, ~summary(aov(Ampl ~ type*singpl+Error(Subject/(type*singpl)),
data = .x))))
toc()
####################
### TO UNLIST ######
####################
# Flatten each summary and pull the p-value of the type effect by name.
time_el_nest <- mutate (time_el_nest, res = map(stats, unlist))
pvals = map_dbl(time_el_nest$res, "Error: Subject:type.Pr(>F)1")
| /unsorted_code/Prove_Mass.ANOVA.test_tidyverse.R | permissive | giorgioarcara/R-code-Misc | R | false | false | 3,135 | r | #################################
##### PROVA CON 1 LIVELLO ######
#################################
# Mass-univariate ANOVA experiment: fit one aov() per timepoint x electrode
# cell of an ERP data set, first with a single within-subject factor, then
# (second section) with a 2x2 within-subject design.
# NOTE(review): rm(list = ls()) wipes the workspace of whoever sources this
# file -- kept because this is a standalone analysis script.
rm(list=ls())
load("/Users/giorgioarcara/Documents/Lavori Unipd/Progetto Mass Count/ERP qualche/Exp numerosity 2 ERP analysis/ExpNumerosity R analysis/R Data/Exp2Second.RData")
source("/Users/giorgioarcara/Documents/R_code_Giorgio/unsorted_code/erplist2long.R")
library(erpR)
# FIX: tic()/toc() are called in this section but tictoc was only loaded in
# the second section below, so this section errored when run top-to-bottom.
library(tictoc)
library(tidyverse)
library(broom)
data(ERPsets)
# Long-format data for the two conditions of the erpR example data set.
datall_word = erplist2long("Exp1_word_subj", 1:20, erplist = ERPsets, startmsec = -200, endmsec=1500, name.dep="Ampl")
datall_word$type="word"
datall_nonword = erplist2long("Exp1_nonword_subj", 1:20, erplist = ERPsets, startmsec = -200, endmsec=1500, name.dep="Ampl")
datall_nonword$type="nonword"
datall=rbind(datall_word, datall_nonword)
#https://github.com/craddm/EEG_Workshop/blob/master/exercises/Exercise_4_mass_univariate.Rmd
# nest the dataset by timepoints and electrode
# (old tidyr "nest(-col)" selection syntax, deprecated in tidyr >= 1.0).
time_el_nest = nest(datall, -timepoints, -electrode)
tic()
# One repeated-measures aov() per timepoint x electrode cell.
time_el_nest <- mutate(time_el_nest,
                       stats = map(data, ~aov(Ampl ~ type+Error(Subject/type),
                                              data = .x)))
toc()
# NOTE: approximately the same time of apply.
#################################
##### PROVA CON 2 LIVELLI ######
#################################
# Second experiment: 2x2 within-subject design (type x singpl) on the
# Exp2Second data set; subject 5 is excluded from the subject list.
load("/Users/giorgioarcara/Documents/Lavori Unipd/Progetto Mass Count/ERP qualche/Exp numerosity 2 ERP analysis/ExpNumerosity R analysis/R Data/Exp2Second.RData")
library(erpR)
library(tictoc)
library(tidyverse)
library(broom)
# FIX: the same file was source()d twice in a row; the duplicate call was removed.
source("/Users/giorgioarcara/Documents/R_code_Giorgio/unsorted_code/erplist2long.R")
Subjects= c(1:4, 6:27)
my_startmsec = -500
my_endmsec = 1500
# One long-format data frame per condition, tagged with the design labels.
dat_m_alcuni_sg = erplist2long("m_alcuni_sg_", Subjects, erplist = Exp2Second, startmsec = my_startmsec, endmsec=my_endmsec, name.dep="Ampl")
dat_m_alcuni_sg$type="alcuni"
dat_m_alcuni_sg$singpl="sg"
dat_m_alcuni_pl = erplist2long("m_alcuni_pl_", Subjects, erplist = Exp2Second, startmsec = my_startmsec, endmsec=my_endmsec, name.dep="Ampl")
dat_m_alcuni_pl$type="alcuni"
dat_m_alcuni_pl$singpl="pl"
dat_m_un_sg = erplist2long("m_un_sg_", Subjects, erplist = Exp2Second, startmsec = my_startmsec, endmsec=my_endmsec, name.dep="Ampl")
dat_m_un_sg$type="un"
dat_m_un_sg$singpl="sg"
dat_m_un_pl = erplist2long("m_un_pl_", Subjects, erplist = Exp2Second, startmsec = my_startmsec, endmsec=my_endmsec, name.dep="Ampl")
dat_m_un_pl$type="un"
dat_m_un_pl$singpl="pl"
# Stack conditions and make the design variables factors.
datall = rbind(dat_m_alcuni_sg, dat_m_alcuni_pl, dat_m_un_sg, dat_m_un_pl)
datall$type=factor(datall$type)
datall$singpl=factor(datall$singpl)
time_el_nest = nest(datall, -timepoints, -electrode)
tic()
time_el_nest <- mutate(time_el_nest,
                       stats = map(data, ~summary(aov(Ampl ~ type*singpl+Error(Subject/(type*singpl)),
                                                      data = .x))))
toc()
####################
### TO UNLIST ######
####################
# Flatten each aov summary so single values can be pulled out by name.
time_el_nest <- mutate(time_el_nest, res = map(stats, unlist))
# One p-value per timepoint x electrode cell; the component name comes from
# the unlisted summary -- TODO confirm it matches the intended stratum/effect.
pvals = map_dbl(time_el_nest$res, "Error: Subject:type.Pr(>F)1")
|
# wk5, Ex2
# Exercises on readr parsing edge cases: each call prints the parsed tibble
# together with readr's parsing warnings.
library(tidyverse)
# Header declares 2 columns but the data rows have 3 fields each.
read_csv("a,b\n1,2,3\n4,5,6")
# Ragged rows against a 3-column header (one row too short, one too long).
read_csv("a,b,c\n1,2\n1,2,3,4")
read_csv("a,b\n\"1") # The open quote \" is dropped because there is no paired close quote. There is only one value in the second row, so NA is coerced in the second column.
# A data row repeats the header strings, forcing both columns to character.
read_csv("a,b\n1,2\na,b")
# ";" is not read_csv's delimiter (read_csv2 handles semicolon-separated files).
read_csv("a;b\n1;3")
| /Tutorial - Lab/week5_ex_solutions/import_data_wk5_ex2_solution.R | no_license | tuananh8497/IDS_FOLDER | R | false | false | 313 | r | # wk5, Ex2
library(tidyverse)
# readr parsing edge cases; each call emits parsing warnings worth inspecting.
# Too many fields for the 2-column header.
read_csv("a,b\n1,2,3\n4,5,6")
# Ragged rows against a 3-column header.
read_csv("a,b,c\n1,2\n1,2,3,4")
read_csv("a,b\n\"1") # The open quote \" is dropped because there is no paired close quote. There is only one value in the second row, so NA is coerced in the second column.
# Header strings repeated as data force both columns to character.
read_csv("a,b\n1,2\na,b")
# ";" is not read_csv's delimiter (see read_csv2 for semicolon-separated input).
read_csv("a;b\n1;3")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/IMPACT.R
\name{IMPACT.get_annotations}
\alias{IMPACT.get_annotations}
\title{Download \emph{IMPACT} annotations}
\usage{
IMPACT.get_annotations(
baseURL = "https://github.com/immunogenomics/IMPACT/raw/master/IMPACT707/Annotations",
chrom = NULL,
subset_DT = NULL,
nThread = 4,
all_snps_in_range = F,
verbose = T
)
}
\description{
Includes the raw annotation itself,
as well as per-SNP \emph{IMPACT} scores for each annotation.
}
\details{
Unfortunately, you have to download each entire chromosome file at once,
because the files aren't Tabix indexed. To minimize the memory load,
this function only keeps the portion of the \emph{IMPACT} file that overlaps with the
coordinates in \code{subset_DT}.
}
\examples{
data("BST1")
annot_melt <- IMPACT.get_annotations(subset_DT=BST1)
}
\seealso{
Other IMPACT:
\code{\link{IMPACT.get_annotation_key}()},
\code{\link{IMPACT.get_top_annotations}()},
\code{\link{IMPACT.iterate_get_annotations}()},
\code{\link{IMPACT.postprocess_annotations}()},
\code{\link{IMPACT.snp_group_boxplot}()},
\code{\link{IMPACT_annotation_key}},
\code{\link{IMPACT_heatmap}()},
\code{\link{prepare_mat_meta}()}
}
\concept{IMPACT}
\keyword{internal}
| /man/IMPACT.get_annotations.Rd | permissive | maegsul/echolocatoR | R | false | true | 1,258 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/IMPACT.R
\name{IMPACT.get_annotations}
\alias{IMPACT.get_annotations}
\title{Download \emph{IMPACT} annotations}
\usage{
IMPACT.get_annotations(
baseURL = "https://github.com/immunogenomics/IMPACT/raw/master/IMPACT707/Annotations",
chrom = NULL,
subset_DT = NULL,
nThread = 4,
all_snps_in_range = F,
verbose = T
)
}
\description{
Includes the raw annotation itself,
as well as per-SNP \emph{IMPACT} scores for each annotation.
}
\details{
Unfortunately, you have to download each entire chromosome file at once,
because the files aren't Tabix indexed. To minimize the memory load,
this function only keeps the portion of the \emph{IMPACT} file that overlaps with the
coordinates in \code{subset_DT}.
}
\examples{
data("BST1")
annot_melt <- IMPACT.get_annotations(subset_DT=BST1)
}
\seealso{
Other IMPACT:
\code{\link{IMPACT.get_annotation_key}()},
\code{\link{IMPACT.get_top_annotations}()},
\code{\link{IMPACT.iterate_get_annotations}()},
\code{\link{IMPACT.postprocess_annotations}()},
\code{\link{IMPACT.snp_group_boxplot}()},
\code{\link{IMPACT_annotation_key}},
\code{\link{IMPACT_heatmap}()},
\code{\link{prepare_mat_meta}()}
}
\concept{IMPACT}
\keyword{internal}
|
# Shared fixture: build a Spark connection and a small document->sentence->token
# pipeline once, then export the pieces for every test_that() block below.
setup({
sc <- testthat_spark_connection()
text_tbl <- testthat_tbl("test_text")
# These lines should set a pipeline that will ultimately create the columns needed for testing the annotator
assembler <- nlp_document_assembler(sc, input_col = "text", output_col = "document")
sentdetect <- nlp_sentence_detector(sc, input_cols = c("document"), output_col = "sentence")
tokenizer <- nlp_tokenizer(sc, input_cols = c("sentence"), output_col = "token")
pipeline <- ml_pipeline(assembler, sentdetect, tokenizer)
test_data <- ml_fit_and_transform(pipeline, text_tbl)
# Export the fixtures so the tests can see them.
assign("sc", sc, envir = parent.frame())
assign("pipeline", pipeline, envir = parent.frame())
assign("test_data", test_data, envir = parent.frame())
})
# Drop the fixtures after the file has run.
# NOTE(review): setup() assigns into parent.frame() while teardown removes from
# .GlobalEnv -- confirm these resolve to the same environment in all runners.
teardown({
rm(sc, envir = .GlobalEnv)
rm(pipeline, envir = .GlobalEnv)
rm(test_data, envir = .GlobalEnv)
})
# test_that("word_embeddings param setting", {
# test_args <- list(
# input_cols = c("string1", "string2"),
# output_col = "string1",
# storage_path = "/tmp/embeddings",
# storage_path_format = "TEXT",
# dimension = 300,
# storage_ref = "string1",
# lazy_annotator = FALSE,
# read_cache_size = 1000,
# write_buffer_size = 1000,
# include_storage = FALSE,
# case_sensitive = TRUE
# )
#
# test_param_setting(sc, nlp_word_embeddings, test_args)
# })
# Constructing the annotator directly against the Spark connection: fitting and
# transforming the fixture data must add the "word_embeddings" output column.
test_that("nlp_word_embeddings spark_connection", {
test_annotator <- nlp_word_embeddings(sc, input_cols = c("document", "token"), output_col = "word_embeddings",
storage_path = here::here("tests", "testthat", "data", "random_embeddings_dim4.txt"),
storage_path_format = "TEXT", dimension = 4)
fit_model <- ml_fit(test_annotator, test_data)
transformed_data <- ml_transform(fit_model, test_data)
expect_true("word_embeddings" %in% colnames(transformed_data))
})
# Same annotator appended to the shared ml_pipeline: fit+transform in one step.
test_that("nlp_word_embeddings ml_pipeline", {
test_annotator <- nlp_word_embeddings(pipeline, input_cols = c("document", "token"), output_col = "word_embeddings",
storage_path = here::here("tests", "testthat", "data", "random_embeddings_dim4.txt"),
storage_path_format = "TEXT", dimension = 4)
transformed_data <- ml_fit_and_transform(test_annotator, test_data)
expect_true("word_embeddings" %in% colnames(transformed_data))
})
# Passing a tbl_spark directly returns an already-fitted model.
test_that("nlp_word_embeddings tbl_spark", {
fit_model <- nlp_word_embeddings(test_data, input_cols = c("document", "token"), output_col = "word_embeddings",
storage_path = here::here("tests", "testthat", "data", "random_embeddings_dim4.txt"),
storage_path_format = "TEXT", dimension = 4)
transformed_data <- ml_transform(fit_model, test_data)
expect_true("word_embeddings" %in% colnames(transformed_data))
})
# Pretrained-embeddings variant staged on the shared pipeline
# (downloads a model, so this test presumably needs network access).
test_that("nlp_word_embeddings pretrained model", {
model <- nlp_word_embeddings_pretrained(sc, input_cols = c("document", "token"), output_col = "word_embeddings")
pipeline <- ml_add_stage(pipeline, model)
transformed_data <- ml_fit_and_transform(pipeline, test_data)
expect_true("word_embeddings" %in% colnames(transformed_data))
})
# Build a word-embeddings model stage directly inside a fresh pipeline
# (no separate fit step for the annotator itself).
test_that("nlp_word_embeddings_model", {
assembler <- nlp_document_assembler(sc, input_col = "text", output_col = "document")
sentdetect <- nlp_sentence_detector(sc, input_cols = c("document"), output_col = "sentence")
tokenizer <- nlp_tokenizer(sc, input_cols = c("sentence"), output_col = "token")
embeddings_path <- here::here("tests", "testthat", "data", "random_embeddings_dim4.txt")
#embeddings_helper <- nlp_load_embeddings(sc, path = embeddings_path, format = "TEXT", dims = 4, reference = "embeddings_ref")
embeddings <- nlp_word_embeddings_model(sc, input_cols = c("sentence", "token"), output_col = "embeddings",
dimension = 4)
emb_pipeline <- ml_pipeline(assembler, sentdetect, tokenizer, embeddings)
transformed_data <- ml_fit_and_transform(emb_pipeline, test_data)
expect_true("embeddings" %in% colnames(transformed_data))
})
| /tests/testthat/testthat-word-embeddings.R | permissive | davigood1/sparknlp | R | false | false | 4,153 | r | setup({
# Interior of setup(): build the Spark connection and the shared pipeline,
# then export the fixtures for the tests below.
sc <- testthat_spark_connection()
text_tbl <- testthat_tbl("test_text")
# These lines should set a pipeline that will ultimately create the columns needed for testing the annotator
assembler <- nlp_document_assembler(sc, input_col = "text", output_col = "document")
sentdetect <- nlp_sentence_detector(sc, input_cols = c("document"), output_col = "sentence")
tokenizer <- nlp_tokenizer(sc, input_cols = c("sentence"), output_col = "token")
pipeline <- ml_pipeline(assembler, sentdetect, tokenizer)
test_data <- ml_fit_and_transform(pipeline, text_tbl)
# Export the fixtures so the tests can see them.
assign("sc", sc, envir = parent.frame())
assign("pipeline", pipeline, envir = parent.frame())
assign("test_data", test_data, envir = parent.frame())
})
# Drop the fixtures after the file has run.
# NOTE(review): setup() assigns into parent.frame() while teardown removes from
# .GlobalEnv -- confirm these resolve to the same environment in all runners.
teardown({
rm(sc, envir = .GlobalEnv)
rm(pipeline, envir = .GlobalEnv)
rm(test_data, envir = .GlobalEnv)
})
# test_that("word_embeddings param setting", {
# test_args <- list(
# input_cols = c("string1", "string2"),
# output_col = "string1",
# storage_path = "/tmp/embeddings",
# storage_path_format = "TEXT",
# dimension = 300,
# storage_ref = "string1",
# lazy_annotator = FALSE,
# read_cache_size = 1000,
# write_buffer_size = 1000,
# include_storage = FALSE,
# case_sensitive = TRUE
# )
#
# test_param_setting(sc, nlp_word_embeddings, test_args)
# })
test_that("nlp_word_embeddings spark_connection", {
  # Build the annotator straight from the Spark connection, fit it, and check
  # that transforming the fixture data yields the embeddings output column.
  annotator <- nlp_word_embeddings(
    sc,
    input_cols = c("document", "token"),
    output_col = "word_embeddings",
    storage_path = here::here("tests", "testthat", "data", "random_embeddings_dim4.txt"),
    storage_path_format = "TEXT",
    dimension = 4
  )
  fitted_model <- ml_fit(annotator, test_data)
  result <- ml_transform(fitted_model, test_data)
  expect_true("word_embeddings" %in% colnames(result))
})
test_that("nlp_word_embeddings ml_pipeline", {
  # Append the annotator to the shared pipeline and run fit+transform in one go.
  staged <- nlp_word_embeddings(
    pipeline,
    input_cols = c("document", "token"),
    output_col = "word_embeddings",
    storage_path = here::here("tests", "testthat", "data", "random_embeddings_dim4.txt"),
    storage_path_format = "TEXT",
    dimension = 4
  )
  result <- ml_fit_and_transform(staged, test_data)
  expect_true("word_embeddings" %in% colnames(result))
})
test_that("nlp_word_embeddings tbl_spark", {
  # Calling the constructor on a tbl_spark returns an already-fitted model.
  fitted_model <- nlp_word_embeddings(
    test_data,
    input_cols = c("document", "token"),
    output_col = "word_embeddings",
    storage_path = here::here("tests", "testthat", "data", "random_embeddings_dim4.txt"),
    storage_path_format = "TEXT",
    dimension = 4
  )
  result <- ml_transform(fitted_model, test_data)
  expect_true("word_embeddings" %in% colnames(result))
})
test_that("nlp_word_embeddings pretrained model", {
  # Stage the pretrained embeddings model on the shared pipeline and check the
  # output column (downloads a model, so presumably requires network access).
  pretrained <- nlp_word_embeddings_pretrained(sc, input_cols = c("document", "token"), output_col = "word_embeddings")
  extended_pipeline <- ml_add_stage(pipeline, pretrained)
  result <- ml_fit_and_transform(extended_pipeline, test_data)
  expect_true("word_embeddings" %in% colnames(result))
})
test_that("nlp_word_embeddings_model", {
  # Assemble a fresh pipeline whose final stage is a word-embeddings model
  # constructed directly (no separate fit step for the annotator itself).
  doc_stage <- nlp_document_assembler(sc, input_col = "text", output_col = "document")
  sent_stage <- nlp_sentence_detector(sc, input_cols = c("document"), output_col = "sentence")
  tok_stage <- nlp_tokenizer(sc, input_cols = c("sentence"), output_col = "token")
  emb_path <- here::here("tests", "testthat", "data", "random_embeddings_dim4.txt")
  #embeddings_helper <- nlp_load_embeddings(sc, path = emb_path, format = "TEXT", dims = 4, reference = "embeddings_ref")
  emb_stage <- nlp_word_embeddings_model(sc, input_cols = c("sentence", "token"), output_col = "embeddings", dimension = 4)
  full_pipeline <- ml_pipeline(doc_stage, sent_stage, tok_stage, emb_stage)
  result <- ml_fit_and_transform(full_pipeline, test_data)
  expect_true("embeddings" %in% colnames(result))
})
|
\name{data.4.toy.network}
\alias{data.4.toy.network}
\docType{data}
\title{List containing the results from the application of the function \code{data.generation} for a toy dataset based on the \code{breastCancerVDX} dataset}
\description{
This dataset contains a list with the results from the application of the function \code{data.generation} for a toy dataset based on the \code{breastCancerVDX} dataset. It is based on the Molecular Functions ontology from GO by the MSigBD. Only MF with at least 5 genes were considered. See the package Vignette for a detailed usage.}
\usage{data(data.4.toy.network)
}
\format{
A list containing the information in the following way:
\itemize{
\item \code{gene.data} A matrix with gene expression data. Rows are genes (HUGO ids) and columns are samples.
\item \code{set.data} A matrix with expression data for the gene sets. Rows are gene sets and columns are samples.
\item \code{affy.loc} A vector that contains the row numbers corresponding to the genes that are reported in the annotation system chosen
\item \code{gene2set.mat} A matrix where each row represents a gene and each column contains the gene set to which this gene is reported based on the annotation system chosen.
\item \code{set2gene.mat} A matrix where each row represents a gene set and each column contains the genes reported to be in the set based on the annotation system chosen.
\item \code{Set.obj} A vector with the gene sets set as objective nodes in the network.
\item \code{Set.src} A vector with the gene sets set as source nodes in the network.
\item \code{G.obj} A vector with the genes set as objective nodes in the network.
\item \code{G.src} A vector with the genes set as source nodes in the network.
}
}
\details{
This data set is calculated for the particular case where it is of interest to obtain the Functional Network for a toy dataset based on the \code{breastCancerVDX} dataset and the annotation system chosen is the MF from GO. The phenotype chosen is the set of samples with Estrogen receptor negative status (ER-).
}
%\itemize{
%}
%\source{
%url{http://www.broadinstitute.org/gsea/msigdb/collections.jsp#C5}
%}
\references{
Quiroz-Zarate A, Haibe-Kains, B and Quackenbush J (2013). "Manuscript in preparation".
}
\author{
A. Quiroz-Zarate.
\email{aquiroz@jimmy.harvard.edu}
}
\examples{
# See Package Vignette for a detailed example of the usage of this dataset
# vignette("FunctionalNetworks")
}
\keyword{gmt file}
\keyword{Network}
\keyword{Gene expression data} | /man/data.4.toy.network.Rd | no_license | cran/FunctionalNetworks | R | false | false | 2,539 | rd | \name{data.4.toy.network}
\alias{data.4.toy.network}
\docType{data}
\title{List containing the results from the application of the function \code{data.generation} for a toy dataset based on the \code{breastCancerVDX} dataset}
\description{
This dataset contains a list with the results from the application of the function \code{data.generation} for a toy dataset based on the \code{breastCancerVDX} dataset. It is based on the Molecular Functions ontology from GO by the MSigBD. Only MF with at least 5 genes were considered. See the package Vignette for a detailed usage.}
\usage{data(data.4.toy.network)
}
\format{
A list containing the information in the following way:
\itemize{
\item \code{gene.data} A matrix with gene expression data. Rows are genes (HUGO ids) and columns are samples.
\item \code{set.data} A matrix with expression data for the gene sets. Rows are gene sets and columns are samples.
\item \code{affy.loc} A vector that contains the row numbers corresponding to the genes that are reported in the annotation system chosen
\item \code{gene2set.mat} A matrix where each row represents a gene and each column contains the gene set to which this gene is reported based on the annotation system chosen.
\item \code{set2gene.mat} A matrix where each row represents a gene set and each column contains the genes reported to be in the set based on the annotation system chosen.
\item \code{Set.obj} A vector with the gene sets set as objective nodes in the network.
\item \code{Set.src} A vector with the gene sets set as source nodes in the network.
\item \code{G.obj} A vector with the genes set as objective nodes in the network.
\item \code{G.src} A vector with the genes set as source nodes in the network.
}
}
\details{
This data set is calculated for the particular case were it is of interest to obtain the Functional Network for a toy dataset based on the \code{breastCancerVDX} dataset and the annotation system chosen is the MF from GO. The phenotype chose are the samples with Estrogen receptor negative (ER-).
}
%\itemize{
%}
%\source{
%url{http://www.broadinstitute.org/gsea/msigdb/collections.jsp#C5}
%}
\references{
Quiroz-Zarate A, Haibe-Kains, B and Quackenbush J (2013). "Manuscript in preparation".
}
\author{
A. Quiroz-Zarate.
\email{aquiroz@jimmy.harvard.edu}
}
\examples{
# See Package Vignette for a detailed example of the usage of this dataset
# vigenette("FunctionalNetworks")
}
\keyword{gmt file}
\keyword{Network}
\keyword{Gene expression data} |
#### Plant macrofossils ####
# Tower Lake as a test core
# Pulls all datasets attached to Neotoma dataset 9738 (three macrofossil and
# three pollen records below) and binds each record's chronology metadata to
# its count matrix.
library("neotoma")
library("vegan")
datasets <- get_dataset(9738)
core_data <- get_download(datasets)
# Record 1 (macrofossils).
# NOTE(review): match(rownames(counts), chron$sample.id) indexes the count rows
# by their positions in sample.id; if the two are not already in the same order
# this may not realign them as intended -- confirm against neotoma's output.
i=1
macro1chron <- core_data[[i]]$sample.meta
counts <- core_data[[i]]$counts
macro1counts <- counts[match(rownames(counts), macro1chron$sample.id), ]
macro1all <- cbind(macro1chron, macro1counts)
# Record 2 (pollen); same extraction pattern repeated for records 2-6.
i=2
pollen1chron <- core_data[[i]]$sample.meta
counts <- core_data[[i]]$counts
pollen1counts <- counts[match(rownames(counts), pollen1chron$sample.id), ]
pollen1all <- cbind(pollen1chron, pollen1counts)
i=3
macro2chron <- core_data[[i]]$sample.meta
counts <- core_data[[i]]$counts
macro2counts <- counts[match(rownames(counts), macro2chron$sample.id), ]
macro2all <- cbind(macro2chron, macro2counts)
i=4
pollen2chron <- core_data[[i]]$sample.meta
counts <- core_data[[i]]$counts
pollen2counts <- counts[match(rownames(counts), pollen2chron$sample.id), ]
pollen2all <- cbind(pollen2chron, pollen2counts)
i=5
macro3chron <- core_data[[i]]$sample.meta
counts <- core_data[[i]]$counts
macro3counts <- counts[match(rownames(counts), macro3chron$sample.id), ]
macro3all <- cbind(macro3chron, macro3counts)
i=6
pollen3chron <- core_data[[i]]$sample.meta
counts <- core_data[[i]]$counts
pollen3counts <- counts[match(rownames(counts), pollen3chron$sample.id), ]
pollen3all <- cbind(pollen3chron, pollen3counts)
# check datasets
# Print each record's age span to see how the six datasets overlap in time.
range(macro1chron$age)
range(pollen1chron$age)
range(macro2chron$age)
range(pollen2chron$age)
range(macro3chron$age)
range(pollen3chron$age)
# macro1
# pollen1
# macro2
# pollen2
# macro3
# pollen3
#
# order is: 2, 3, 1, from youngest to oldest
# track changes in diversity through time. keep track of the number of additions, number of extirpations, and standing richness, for each time step
# From Brown et al. 2001: "We measured colonization by counting the number of
# taxa present in one time step that were not present in the previous time
# step. We determined extinction by counting the number of taxa that were
# present in one time step that were absent in the subsequent time step."
# --> This will be referred to in this code as actual colonization and extinctions
# --> macro1div$adds, macro1div$exts
# "Therefore, we plotted the actual time series of taxonomic richness
# --> e.g., macro1div$richness)
# as well as the cumulative colonizations (the number of species that would have
# accumulated if no extinctions had offset the observed colonizations), and the
# cumulative extinctions (the number of species that would have remained if no
# colonizations had offset the observed ex- tinctions). In calculating the
# cumulative colonization and extinction curves, only a single event of
# colonization or extinction, respectively, was permitted for each species. It
# frequently happened in the real data sets that a taxon went extinct and
# subsequently recolonized or vice versa, but in these analyses we counted only
# the first colonization or the first extinction event for each taxon."
# --> this will be referred to as cumulative colonizations and extinctions
# calculate actual colonization and extinction, as well as standing richness ####
# One row per sample (labelled by depth): taxa gained ('adds'), taxa lost
# ('exts'), and standing richness at that sample.
macro1div <- matrix(ncol=3, nrow=nrow(macro1all))
colnames(macro1div) <- c("adds", "exts", "richness")
rownames(macro1div)<- macro1all$depth
macro1div <- as.data.frame(macro1div)
# Convert counts to presence/absence (vegan::decostand); richness = row sums.
macro1pa <- decostand(macro1counts, method="pa")
macro1div[, 'richness'] <- rowSums(macro1pa)
# The first sample has no predecessor, so its adds/exts are defined as 0.
j <- 1
macro1div[j, 'adds'] <- macro1div[j, 'exts'] <- 0
# Compare each sample with the previous row. NOTE(review): with
# balance = row[j-1] - row[j], the sign's meaning depends on whether rows run
# old->young or young->old in this core -- confirm the row order. Both loops
# below also assume at least 2 samples (2:n and (n-1):1 misbehave for n < 2).
for (j in 2:nrow(macro1pa)){
balance <- macro1pa[j-1,] - macro1pa[j,]
#additions (time runs forward)
if (length(which(balance == 1))>0){
macro1div[j, 'adds'] <- length(which(balance == 1))
}else{
macro1div[j, 'adds'] <- 0
}
if (length(which(balance == -1))>0){
macro1div[j, 'exts'] <- length(which(balance == -1))
}else{
macro1div[j, 'exts'] <- 0
}
}
# Accumulate the running colonization/extinction curves backwards through the
# rows, anchored at the last sample's standing richness.
actualcol <- vector(mode="numeric", length=nrow(macro1div))
actualext <- vector(mode="numeric", length=nrow(macro1div))
startingpoint <- macro1div$richness[length(macro1div$richness)]
actualcol[nrow(macro1div)]<- startingpoint
actualext[nrow(macro1div)]<- startingpoint
for (i in (nrow(macro1div)-1):1){
actualcol[i] <- actualcol[i+1] + macro1div[i+1,'adds']
actualext[i] <- actualext[i+1] - macro1div[i+1,'exts']
}
# plot actual numbers
# Richness (black) plus the two curves on a reversed (10 ka -> present) age axis.
plot(macro1div$richness ~ macro1chron$age, xlim=c(10000, 0), ylim=c(-190, 190), pch=16, type="b", col="black")
points(actualcol ~ macro1chron$age, pch=15, type="b", col="red")
points(actualext ~ macro1chron$age, pch=17, type="b", col="blue")
# calculate cumulative colonizations and extinctions ####
# Cumulative curves sensu Brown et al. 2001: only a single colonization and a
# single extinction event is counted per taxon (see the comment block above).
# +1 = extinctions (e.g., 1 (present) - 0 (absent))
# -1 = colonization (e.g., 0 (absent) - 1 (present))
macro1pacumul <- matrix(data = 0, nrow = nrow(macro1pa), ncol = ncol(macro1pa))
# Transition matrix between consecutive samples.
# FIX (clarity only): the original `1:nrow(macro1pa)-1` parses as
# `(1:n) - 1` = 0:(n-1); the 0 subscript is silently dropped, so it happened
# to select rows 1:(n-1) anyway. Parenthesized to say what it does.
macro1cum_temp <- macro1pa[2:nrow(macro1pa), ] - macro1pa[1:(nrow(macro1pa) - 1), ]
for (j in seq_len(ncol(macro1cum_temp))) {
  exts <- which(macro1cum_temp[, j] == 1)
  colons <- which(macro1cum_temp[, j] == -1)
  # FIX: guard against taxa with no extinction (or no colonization) event in
  # the record: max(integer(0)) is -Inf (with a warning), and assigning at a
  # non-finite subscript is an error. Such taxa simply contribute no event.
  if (length(colons) > 0) {
    firstcol <- max(colons)
    macro1pacumul[firstcol, j] <- -1
  }
  if (length(exts) > 0) {
    firstext <- max(exts)
    macro1pacumul[firstext, j] <- 1
  }
}
# Per-sample counts of the single colonization/extinction events.
macro1cumul <- matrix(data = 0, ncol = 2, nrow = nrow(macro1all))
colnames(macro1cumul) <- c("cumuladds", "cumulexts")
macro1cumul <- as.data.frame(macro1cumul)
for (j in seq_len(nrow(macro1pacumul))) {
  macro1cumul[j, "cumulexts"] <- length(which(macro1pacumul[j, ] == 1))
  macro1cumul[j, "cumuladds"] <- length(which(macro1pacumul[j, ] == -1))
}
# Accumulate the curves backwards through the rows, anchored at the last
# sample's standing richness.
# NOTE(review): the actual-curve loop above offsets with macro1div[i+1, ],
# while this one uses macro1cumul[i, ]; this off-by-one mismatch is a likely
# cause of the "doesn't add up" note below -- confirm the intended alignment
# before changing it. The loop also assumes at least 2 samples.
colonizations <- vector(mode = "numeric", length = nrow(macro1cumul))
extinctions <- vector(mode = "numeric", length = nrow(macro1cumul))
startingpoint <- macro1div$richness[length(macro1div$richness)]
colonizations[nrow(macro1cumul)] <- startingpoint
extinctions[nrow(macro1cumul)] <- startingpoint
for (i in (nrow(macro1cumul) - 1):1) {
  colonizations[i] <- colonizations[i + 1] + macro1cumul[i, "cumuladds"]
  extinctions[i] <- extinctions[i + 1] - macro1cumul[i, "cumulexts"]
}
# plot cumulative numbers
plot(macro1div$richness ~ macro1chron$age, xlim = c(10000, 0), ylim = c(-15, 30), pch = 16, type = "b", col = "black")
points(colonizations ~ macro1chron$age, pch = 15, type = "b", col = "red")
points(extinctions ~ macro1chron$age, pch = 17, type = "b", col = "blue")
# Side-by-side comparison of the actual and cumulative curves.
par(mfrow = c(2, 1))
# plot actual numbers
plot(macro1div$richness ~ macro1chron$age, xlim = c(10000, 0), ylim = c(-190, 190),
     pch = 16, type = "b", col = "black", main = "Actual col and ext")
points(actualcol ~ macro1chron$age, pch = 15, type = "b", col = "red")
points(actualext ~ macro1chron$age, pch = 17, type = "b", col = "blue")
# plot cumulative numbers
plot(macro1div$richness ~ macro1chron$age, xlim = c(10000, 0), ylim = c(-15, 30),
     pch = 16, type = "b", col = "black", main = "Cumulative col and ext")
points(colonizations ~ macro1chron$age, pch = 15, type = "b", col = "red")
points(extinctions ~ macro1chron$age, pch = 17, type = "b", col = "blue")
#### JESSICA NOTES: START HERE #### OK, this doesn't seem to add up. Perhaps for
#the cumulative plot, I need to re-do standing richness, with the assumptions
#stated (e.g., that only first col and first ext matter?)
| /code/PlantMacros.R | no_license | jessicablois/DivThruTime | R | false | false | 7,292 | r | #### Plant macrofossils ####
# Tower Lake as a test core
# Pulls all datasets attached to Neotoma dataset 9738 (three macrofossil and
# three pollen records below) and binds each record's chronology metadata to
# its count matrix.
library("neotoma")
library("vegan")
datasets <- get_dataset(9738)
core_data <- get_download(datasets)
# Record 1 (macrofossils).
# NOTE(review): match(rownames(counts), chron$sample.id) indexes the count rows
# by their positions in sample.id; if the two are not already in the same order
# this may not realign them as intended -- confirm against neotoma's output.
i=1
macro1chron <- core_data[[i]]$sample.meta
counts <- core_data[[i]]$counts
macro1counts <- counts[match(rownames(counts), macro1chron$sample.id), ]
macro1all <- cbind(macro1chron, macro1counts)
# Record 2 (pollen); same extraction pattern repeated for records 2-6.
i=2
pollen1chron <- core_data[[i]]$sample.meta
counts <- core_data[[i]]$counts
pollen1counts <- counts[match(rownames(counts), pollen1chron$sample.id), ]
pollen1all <- cbind(pollen1chron, pollen1counts)
i=3
macro2chron <- core_data[[i]]$sample.meta
counts <- core_data[[i]]$counts
macro2counts <- counts[match(rownames(counts), macro2chron$sample.id), ]
macro2all <- cbind(macro2chron, macro2counts)
i=4
pollen2chron <- core_data[[i]]$sample.meta
counts <- core_data[[i]]$counts
pollen2counts <- counts[match(rownames(counts), pollen2chron$sample.id), ]
pollen2all <- cbind(pollen2chron, pollen2counts)
i=5
macro3chron <- core_data[[i]]$sample.meta
counts <- core_data[[i]]$counts
macro3counts <- counts[match(rownames(counts), macro3chron$sample.id), ]
macro3all <- cbind(macro3chron, macro3counts)
i=6
pollen3chron <- core_data[[i]]$sample.meta
counts <- core_data[[i]]$counts
pollen3counts <- counts[match(rownames(counts), pollen3chron$sample.id), ]
pollen3all <- cbind(pollen3chron, pollen3counts)
# check datasets
# Print each record's age span to see how the six datasets overlap in time.
range(macro1chron$age)
range(pollen1chron$age)
range(macro2chron$age)
range(pollen2chron$age)
range(macro3chron$age)
range(pollen3chron$age)
# macro1
# pollen1
# macro2
# pollen2
# macro3
# pollen3
#
# order is: 2, 3, 1, from youngest to oldest
# track changes in diversity through time. keep track of the number of additions, number of extirpations, and standing richness, for each time step
# From Brown et al. 2001: "We measured colonization by counting the number of
# taxa present in one time step that were not present in the previous time
# step. We determined extinction by counting the number of taxa that were
# present in one time step that were absent in the subsequent time step."
# --> This will be referred to in this code as actual colonization and extinctions
# --> macro1div$adds, macro1div$exts
# "Therefore, we plotted the actual time series of taxonomic richness
# --> e.g., macro1div$richness)
# as well as the cumulative colonizations (the number of species that would have
# accumulated if no extinctions had offset the observed colonizations), and the
# cumulative extinctions (the number of species that would have remained if no
# colonizations had offset the observed ex- tinctions). In calculating the
# cumulative colonization and extinction curves, only a single event of
# colonization or extinction, respectively, was permitted for each species. It
# frequently happened in the real data sets that a taxon went extinct and
# subsequently recolonized or vice versa, but in these analyses we counted only
# the first colonization or the first extinction event for each taxon."
# --> this will be referred to as cumulative colonizations and extinctions
# calculate actual colonization and extinction, as well as standing richness ####
# One row per sample (labelled by depth): taxa gained ('adds'), taxa lost
# ('exts'), and standing richness at that sample.
macro1div <- matrix(ncol=3, nrow=nrow(macro1all))
colnames(macro1div) <- c("adds", "exts", "richness")
rownames(macro1div)<- macro1all$depth
macro1div <- as.data.frame(macro1div)
# Convert counts to presence/absence (vegan::decostand); richness = row sums.
macro1pa <- decostand(macro1counts, method="pa")
macro1div[, 'richness'] <- rowSums(macro1pa)
# The first sample has no predecessor, so its adds/exts are defined as 0.
j <- 1
macro1div[j, 'adds'] <- macro1div[j, 'exts'] <- 0
# Compare each sample with the previous row. NOTE(review): with
# balance = row[j-1] - row[j], the sign's meaning depends on whether rows run
# old->young or young->old in this core -- confirm the row order. Both loops
# below also assume at least 2 samples (2:n and (n-1):1 misbehave for n < 2).
for (j in 2:nrow(macro1pa)){
balance <- macro1pa[j-1,] - macro1pa[j,]
#additions (time runs forward)
if (length(which(balance == 1))>0){
macro1div[j, 'adds'] <- length(which(balance == 1))
}else{
macro1div[j, 'adds'] <- 0
}
if (length(which(balance == -1))>0){
macro1div[j, 'exts'] <- length(which(balance == -1))
}else{
macro1div[j, 'exts'] <- 0
}
}
# Accumulate the running colonization/extinction curves backwards through the
# rows, anchored at the last sample's standing richness.
actualcol <- vector(mode="numeric", length=nrow(macro1div))
actualext <- vector(mode="numeric", length=nrow(macro1div))
startingpoint <- macro1div$richness[length(macro1div$richness)]
actualcol[nrow(macro1div)]<- startingpoint
actualext[nrow(macro1div)]<- startingpoint
for (i in (nrow(macro1div)-1):1){
actualcol[i] <- actualcol[i+1] + macro1div[i+1,'adds']
actualext[i] <- actualext[i+1] - macro1div[i+1,'exts']
}
# plot actual numbers
# Richness (black) plus the two curves on a reversed (10 ka -> present) age axis.
plot(macro1div$richness ~ macro1chron$age, xlim=c(10000, 0), ylim=c(-190, 190), pch=16, type="b", col="black")
points(actualcol ~ macro1chron$age, pch=15, type="b", col="red")
points(actualext ~ macro1chron$age, pch=17, type="b", col="blue")
# calculate cumulative colonizations and extinctions ####
# Cumulative curves sensu Brown et al. 2001: only a single colonization and a
# single extinction event is counted per taxon (see the comment block above).
# +1 = extinctions (e.g., 1 (present) - 0 (absent))
# -1 = colonization (e.g., 0 (absent) - 1 (present))
macro1pacumul <- matrix(data = 0, nrow = nrow(macro1pa), ncol = ncol(macro1pa))
# Transition matrix between consecutive samples.
# FIX (clarity only): the original `1:nrow(macro1pa)-1` parses as
# `(1:n) - 1` = 0:(n-1); the 0 subscript is silently dropped, so it happened
# to select rows 1:(n-1) anyway. Parenthesized to say what it does.
macro1cum_temp <- macro1pa[2:nrow(macro1pa), ] - macro1pa[1:(nrow(macro1pa) - 1), ]
for (j in seq_len(ncol(macro1cum_temp))) {
  exts <- which(macro1cum_temp[, j] == 1)
  colons <- which(macro1cum_temp[, j] == -1)
  # FIX: guard against taxa with no extinction (or no colonization) event in
  # the record: max(integer(0)) is -Inf (with a warning), and assigning at a
  # non-finite subscript is an error. Such taxa simply contribute no event.
  if (length(colons) > 0) {
    firstcol <- max(colons)
    macro1pacumul[firstcol, j] <- -1
  }
  if (length(exts) > 0) {
    firstext <- max(exts)
    macro1pacumul[firstext, j] <- 1
  }
}
# Per-sample counts of the single colonization/extinction events.
macro1cumul <- matrix(data = 0, ncol = 2, nrow = nrow(macro1all))
colnames(macro1cumul) <- c("cumuladds", "cumulexts")
macro1cumul <- as.data.frame(macro1cumul)
for (j in seq_len(nrow(macro1pacumul))) {
  macro1cumul[j, "cumulexts"] <- length(which(macro1pacumul[j, ] == 1))
  macro1cumul[j, "cumuladds"] <- length(which(macro1pacumul[j, ] == -1))
}
# Accumulate the curves backwards through the rows, anchored at the last
# sample's standing richness.
# NOTE(review): the actual-curve loop above offsets with macro1div[i+1, ],
# while this one uses macro1cumul[i, ]; this off-by-one mismatch is a likely
# cause of the "doesn't add up" note below -- confirm the intended alignment
# before changing it. The loop also assumes at least 2 samples.
colonizations <- vector(mode = "numeric", length = nrow(macro1cumul))
extinctions <- vector(mode = "numeric", length = nrow(macro1cumul))
startingpoint <- macro1div$richness[length(macro1div$richness)]
colonizations[nrow(macro1cumul)] <- startingpoint
extinctions[nrow(macro1cumul)] <- startingpoint
for (i in (nrow(macro1cumul) - 1):1) {
  colonizations[i] <- colonizations[i + 1] + macro1cumul[i, "cumuladds"]
  extinctions[i] <- extinctions[i + 1] - macro1cumul[i, "cumulexts"]
}
# plot cumulative numbers
plot(macro1div$richness ~ macro1chron$age, xlim = c(10000, 0), ylim = c(-15, 30), pch = 16, type = "b", col = "black")
points(colonizations ~ macro1chron$age, pch = 15, type = "b", col = "red")
points(extinctions ~ macro1chron$age, pch = 17, type = "b", col = "blue")
# Side-by-side comparison of the actual and cumulative curves.
par(mfrow = c(2, 1))
# plot actual numbers
plot(macro1div$richness ~ macro1chron$age, xlim = c(10000, 0), ylim = c(-190, 190),
     pch = 16, type = "b", col = "black", main = "Actual col and ext")
points(actualcol ~ macro1chron$age, pch = 15, type = "b", col = "red")
points(actualext ~ macro1chron$age, pch = 17, type = "b", col = "blue")
# plot cumulative numbers
plot(macro1div$richness ~ macro1chron$age, xlim = c(10000, 0), ylim = c(-15, 30),
     pch = 16, type = "b", col = "black", main = "Cumulative col and ext")
points(colonizations ~ macro1chron$age, pch = 15, type = "b", col = "red")
points(extinctions ~ macro1chron$age, pch = 17, type = "b", col = "blue")
#### JESSICA NOTES: START HERE #### OK, this doesn't seem to add up. Perhaps for
#the cumulative plot, I need to re-do standing richness, with the assumptions
#stated (e.g., that only first col and first ext matter?)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GSproc.R
\name{GSproc}
\alias{GSproc}
\title{Gram-Schmidt procedure}
\usage{
GSproc(polyK, ivec, weight = NULL)
}
\arguments{
\item{polyK}{One list including \code{$Y} and \code{$phy} with:
\code{$Y} a matrix for which the ith column will be used
to add one orthogonal vector to the (i-1)th vectors of the
current orthogonal base;
and \code{$phy} such that the current orthogonal base is
given by the first (i-1) columns of matrix \code{polyK$phy}.}
\item{ivec}{Defines i, the current vector of \code{polyK$Y} and
the current orthogonal base of \code{pParam$phy}.}
\item{weight}{The weighting vector.}
}
\value{
\code{uNew} The model parameterization, that is:
The residual orthogonal vector that can be included into
the current orthogonal base. If the current base is empty,
\code{uNew} is equal to the input vector of \code{$Y};
if the base is complete, \code{uNew} equals 0.
}
\description{
Computes regressors coefficients
using the Gram-Schmidt procedure.
}
\author{
Sylvain Mangiarotti
}
| /man/GSproc.Rd | no_license | cran/GPoM | R | false | true | 1,074 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GSproc.R
\name{GSproc}
\alias{GSproc}
\title{Gram-Schmidt procedure}
\usage{
GSproc(polyK, ivec, weight = NULL)
}
\arguments{
\item{polyK}{One list including \code{$Y} and \code{$phy} with:
\code{$Y} a matrix for which the ith column will be used
to add one orthogonal vector to the (i-1)th vectors of the
current orthogonal base;
and \code{$phy} such that the current orthogonal base is
given by the first (i-1) columns of matrix \code{polyK$phy}.}
\item{ivec}{Defines i, the current vector of \code{polyK$Y} and
the current orthogonal base of \code{pParam$phy}.}
\item{weight}{The weighting vector.}
}
\value{
\code{uNew} The model parameterization, that is:
The residual orthogonal vector that can be included into
the current orthogonal base. If the current base is empty,
\code{uNew} is equal to the input vector of \code{$Y};
if the base is complete, \code{uNew} equals 0.
}
\description{
Computes regressors coefficients
using the Gram-Schmidt procedure.
}
\author{
Sylvain Mangiarotti
}
|
#' Compare the vote positions of two members of Congress.
#'
#' @template nytcgkey
#' @param memberid_1,memberid_2 The member's unique ID number (alphanumeric). To find a
#'    member's ID number, get the list of members for the appropriate House
#'    or Senate. You can also use the Biographical Directory of the United
#'    States Congress to get a member's ID. In search results, each member's
#'    name is linked to a record by index ID (e.g.,
#'    http://bioguide.congress.gov/scripts/biodisplay.pl?index=C001041).
#'    Use the index ID as member-id in your request.
#' @param congress_no The number of the Congress during which the members served.
#' @param chamber One of 'house' or 'senate'.
#' @param ... Further arguments passed on to the underlying HTTP request.
#' @return List with elements status, copyright, meta (always NULL), and data,
#'    a data frame comparing the vote positions of the two members.
#' @export
#' @examples \dontrun{
#' cg_membervotecompare('S001181', 'A000368', 112, 'senate')
#' }
cg_membervotecompare <- function(memberid_1 = NULL, memberid_2 = NULL,
  congress_no = NULL, chamber = NULL, key = NULL, ...) {
  # Endpoint: members/{id1}/votes/{id2}/{congress}/{chamber}.json
  # (paste0 replaces the original nested paste(paste0(...), sep = '')).
  url2 <- paste0(cg_base(), "members/", memberid_1, '/votes/',
    memberid_2, '/', congress_no, '/', chamber, '.json')
  args <- list('api-key' = check_key(key, "NYTIMES_CG_KEY"))
  res <- rtimes_GET(url2, args, ...)
  df <- data.frame(res$results[[1]], stringsAsFactors = FALSE)
  list(status = res$status, copyright = res$copyright, meta = NULL, data = df)
}
| /R/cg_membervotecompare.R | permissive | MissIslander/rtimes | R | false | false | 1,414 | r | #' Get a list of members who have left the Senate or House or have announced plans to do so.
#'
#' @template nytcgkey
#' @param memberid_1,memberid_2 The member's unique ID number (alphanumeric). To find a
#' member's ID number, get the list of members for the appropriate House
#' or Senate. You can also use the Biographical Directory of the United
#' States Congress to get a member's ID. In search results, each member's
#' name is linked to a record by index ID (e.g.,
#' http://bioguide.congress.gov/scripts/biodisplay.pl?index=C001041).
#' Use the index ID as member-id in your request.
#' @param congress_no The number of the Congress during which the members served.
#' @param chamber One of 'house' or 'senate.
#' @return List comparing the vote positions of the two specified members.
#' @export
#' @examples \dontrun{
#' cg_membervotecompare('S001181', 'A000368', 112, 'senate')
#' }
# Compare the vote positions of two members of Congress via the NYT
# Congress API. Returns a list(status, copyright, meta = NULL, data).
cg_membervotecompare <- function(memberid_1 = NULL, memberid_2 = NULL,
  congress_no = NULL, chamber = NULL, key = NULL, ...) {
  # Endpoint: members/{id1}/votes/{id2}/{congress}/{chamber}.json
  # (paste0 replaces the original nested paste(paste0(...), sep = '')).
  url2 <- paste0(cg_base(), "members/", memberid_1, '/votes/',
    memberid_2, '/', congress_no, '/', chamber, '.json')
  args <- list('api-key' = check_key(key, "NYTIMES_CG_KEY"))
  res <- rtimes_GET(url2, args, ...)
  df <- data.frame(res$results[[1]], stringsAsFactors = FALSE)
  list(status = res$status, copyright = res$copyright, meta = NULL, data = df)
}
|
## makeCacheMatrix wraps a matrix `x` together with a cache for its inverse.
## It returns a list of accessor functions; the cached inverse is reset
## whenever the stored matrix changes. Access the matrix with obj$get().
makeCacheMatrix <- function(x = matrix()) {
  m <- NULL                    # cached inverse; NULL until computed
  set <- function(y) {         # replace the stored matrix
    x <<- y
    m <<- NULL                 # invalidate the cached inverse
  }
  get <- function() {          # return the stored matrix
    x
  }
  setinv <- function(inv) {    # called by cacheSolve() to cache the inverse
    m <<- inv
  }
  getinv <- function() {       # return the cached inverse (or NULL)
    m
  }
  # BUG FIX: `set` was defined but omitted from the returned list, making it
  # unreachable from outside; expose it (backward-compatible addition).
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## cacheSolve returns the inverse of the matrix wrapped by `x` (an object
## produced by makeCacheMatrix above). The inverse is computed at most once:
## later calls with an unchanged matrix are served from the cache.
cacheSolve <- function(x) {
  cached <- x$getinv()         # previously computed inverse, if any
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)             # early exit: cache hit
  }
  inv <- solve(x$get())        # cache miss: invert the stored matrix
  x$setinv(inv)                # remember it for subsequent calls
  inv
}
#####################################################################################################
## Smoke test: check that the functions above return a correct inverted matrix
# invertible matrix example
n <- makeCacheMatrix(matrix(c(1,1,1,3,4,3,3,3,4),3,3))
n$get() # prints the stored matrix
cacheSolve(n) # first call: computes and caches the inverse
# > inverted matrix
cacheSolve(n) # second call: served from the cache
# > "getting cached data" and the inverted matrix
# test if the inverted matrix is correct, M * M-1 = I. Should result in the identity matrix
n$get() %*% n$getinv()
# or using this function, which returns TRUE if M * M-1 = I
## Verify that a makeCacheMatrix object holds a correct inverse: returns TRUE
## when mx$get() %*% mx$getinv() equals the identity matrix (within
## all.equal's numeric tolerance); otherwise returns a character description
## of the differences.
CheckInv <- function(mx){
  mdim <- dim(mx$get())[1]
  # diag(n) builds the n x n identity matrix directly, replacing the
  # original hand-rolled double loop (same result, far less code).
  Identity <- diag(mdim)
  all.equal(mx$get() %*% mx$getinv(), Identity)
}
CheckInv(n)
| /cachematrix.R | no_license | jmatte/ProgrammingAssignment2 | R | false | false | 3,354 | r | ## makeCacheMatrix is a function that stores a matrix as an imput.
## The result of this function is a list of 3 elements, which can store inverted matrix
## if this have been calculated. To acces the matrix use "object"$get()
makeCacheMatrix <- function(x = matrix()) { # input x is a matrix
m <- NULL # m is the inverse of the matrix. Reset each time
# makeCacheMatrix is called
set <- function(y) { # modification of an stored matrix
x <<- y
m <<- NULL # m is rest so the inverse matrix can be calculated
}
get <- function() { # store the original matrix
x
}
setinv <- function(inv) { # this is called by cacheSolve() the first time
m <<- inv # store the inversed matric using superassignment
}
getinv <- function() { # store the cached inversed matrix
m
}
list(get = get, setinv = setinv, getinv = getinv)
}
## cacheSolve returns the inverse of the matrix wrapped by `x` (an object
## produced by makeCacheMatrix above). The inverse is computed at most once:
## later calls with an unchanged matrix are served from the cache.
cacheSolve <- function(x) {
  cached <- x$getinv()         # previously computed inverse, if any
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)             # early exit: cache hit
  }
  inv <- solve(x$get())        # cache miss: invert the stored matrix
  x$setinv(inv)                # remember it for subsequent calls
  inv
}
#####################################################################################################
## Smoke test: check that the functions above return a correct inverted matrix
# invertible matrix example
n <- makeCacheMatrix(matrix(c(1,1,1,3,4,3,3,3,4),3,3))
n$get() # prints the stored matrix
cacheSolve(n) # first call: computes and caches the inverse
# > inverted matrix
cacheSolve(n) # second call: served from the cache
# > "getting cached data" and the inverted matrix
# test if the inverted matrix is correct, M * M-1 = I. Should result in the identity matrix
n$get() %*% n$getinv()
# or using this function, which returns TRUE if M * M-1 = I
## Verify that a makeCacheMatrix object holds a correct inverse: returns TRUE
## when mx$get() %*% mx$getinv() equals the identity matrix (within
## all.equal's numeric tolerance); otherwise returns a character description
## of the differences.
CheckInv <- function(mx){
  mdim <- dim(mx$get())[1]
  # diag(n) builds the n x n identity matrix directly, replacing the
  # original hand-rolled double loop (same result, far less code).
  Identity <- diag(mdim)
  all.equal(mx$get() %*% mx$getinv(), Identity)
}
CheckInv(n)
|
#
# vim:set ff=unix expandtab ts=2 sw=2:
# Return the list of method signatures defined for generic `g` in `where`,
# with each signature's classes named after the generic's formal arguments.
# Returns an empty list when no methods are found.
sigsList <- function(g, where) {
  found <- findMethods(g, where)
  sigs <- found@signatures
  argnames <- found@arguments
  if (length(sigs) > 0) {
    # Trim the argument names to the signature length, then label every
    # signature's classes with those names.
    length(argnames) <- length(sigs[[1]])
    sigs <- lapply(sigs, function(s) {
      names(s) <- argnames
      s
    })
  }
  sigs
}
| /pkg/R/sigsList.R | no_license | mamueller/linkeddocs | R | false | false | 358 | r | #
# vim:set ff=unix expandtab ts=2 sw=2:
# Return the list of method signatures defined for generic `g` in `where`,
# with each signature's classes named after the generic's formal arguments.
# Returns an empty list when no methods are found.
sigsList <- function(g, where) {
  found <- findMethods(g, where)
  sigs <- found@signatures
  argnames <- found@arguments
  if (length(sigs) > 0) {
    # Trim the argument names to the signature length, then label every
    # signature's classes with those names.
    length(argnames) <- length(sigs[[1]])
    sigs <- lapply(sigs, function(s) {
      names(s) <- argnames
      s
    })
  }
  sigs
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_seabird.R
\name{read_seabird}
\alias{read_seabird}
\title{Read in Seabird .cnv files that have common parameters}
\usage{
read_seabird(autoname = TRUE, recursive = FALSE)
}
\arguments{
\item{autoname}{Should the function attempt to extract the names of the variables from raw data or should they be left blank and specified by the user using colnames()? Defaults to TRUE}
\item{recursive}{Should the function look into sub-folders of the current folder?}
}
\value{
A single data frame containing all the cast data and a label column indicating the filename of the .cnv file
}
\description{
This function is meant to import raw cnv files into an R data frame.
}
| /man/read_seabird.Rd | no_license | boshek/seabirdR | R | false | true | 745 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_seabird.R
\name{read_seabird}
\alias{read_seabird}
\title{Read in Seabird .cnv files that have common parameters}
\usage{
read_seabird(autoname = TRUE, recursive = FALSE)
}
\arguments{
\item{autoname}{Should the function attempt to extract the names of the variables from raw data or should they be left blank and specified by the user using colnames()? Defaults to TRUE}
\item{recursive}{Should the function look into sub-folders of the current folder?}
}
\value{
A single dataframe containing all the cast dale and a label column indicating the filename of the .cnv file
}
\description{
This function is meant to import raw cnv files into an R data frame.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shinySpaniel.R
\name{runShinySpaniel}
\alias{runShinySpaniel}
\title{RunShinySpaniel}
\usage{
runShinySpaniel()
}
\value{
Runs a Shiny App
}
\description{
A function to visualise Spatial Transcriptomics. It requires a preprocessed
Seurat Object or a SingleCellExperiment object as well as a rasterised image
saved as an .rds object. There are 4 plots available in the app showing:
a) the number of genes detected per spot,
b) the number of reads detected per spot,
c) clustering results,
d) the gene expression of a selected gene.
To view the clustering results the columns of the meta.data or colData
containing clustering results must be prefixed with cluster_ . This can be
done by using the markClusterCol() function included in Spaniel.
}
\examples{
## mark the columns of metadata/colData that contain clustering
## information see ?markClusterCol for more details#'
sObj <- readRDS(file.path(system.file(package = "Spaniel"),
"extdata/SeuratData.rds"))
sObj <- markClusterCol(sObj, "res")
### parse background image
imgFile <- file.path(system.file(package = "Spaniel"),
"HE_Rep1_resized.jpg")
img <- parseImage(imgFile)
## run shinySpaniel (upload data.rds and image.rds in the shiny app)
## Not Run:
# runShinySpaniel()
}
| /man/runShinySpaniel.Rd | permissive | stephenwilliams22/Spaniel | R | false | true | 1,354 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shinySpaniel.R
\name{runShinySpaniel}
\alias{runShinySpaniel}
\title{RunShinySpaniel}
\usage{
runShinySpaniel()
}
\value{
Runs a Shiny App
}
\description{
A function to visualise Spatial Transcriptomics. It requires a preprocessed
Seurat Object or a SingleCellExperiment object as well as a rasterised image
saved as an .rds object. There are 4 plots available in the app showing:
a) the number of genes detected per spot,
b) the number of reads detected per spot,
c) clustering results,
d) the gene expression of a selected gene.
To view the clustering results the columns of the meta.data or colData
containing clustering results must be prefixed with cluster_ . This can be
done by using the markClusterCol() function included in Spaniel.
}
\examples{
## mark the columns of metadata/colData that contain clustering
## information see ?markClusterCol for more details#'
sObj <- readRDS(file.path(system.file(package = "Spaniel"),
"extdata/SeuratData.rds"))
sObj <- markClusterCol(sObj, "res")
### parse background image
imgFile <- file.path(system.file(package = "Spaniel"),
"HE_Rep1_resized.jpg")
img <- parseImage(imgFile)
## run shinySpaniel (upload data.rds and image.rds in the shiny app)
## Not Run:
# runShinySpaniel()
}
|
#### This code is to categorize the GAM predictions for Atlantic Sturgeon occurrence using DINEOF outputs.
# Package load. NOTE(review): require() returns FALSE instead of erroring
# when a package is missing; library() would fail fast here.
require(magick)
require(RColorBrewer)
require(ncdf4)
require(raster)
require(lubridate)
require(matlab)
require(fields)
# Machine-specific input/output paths.
working_path<-'/home/mshatley/scripts/sturgeon/'
output_dir <- '/data/Aqua/sturgeon/'
last <- function(x) { return( x[length(x)] ) }
# Plot categorization thresholds/colours. NOTE(review): breakpoints and
# colors are not used anywhere in the code visible here -- confirm
# downstream use before removing.
breakpoints<- c(0,0.01, 0.05,1)
colors <- c("green4", "gold", "red")
prob <- 0.95 # quantile used to characterize each depth/region zone below
# NOTE(review): setwd() mutates global state for the whole session.
setwd(working_path)
load("v2_ts_dxdoy_temp_443.Rdata") ### base model as of 6/12/2017 (provides GAM)
load("DINeof_stuff.Rdata") # presumably provides Depthr etc. -- TODO confirm
#setwd("/home/mwbreece/realtime/future")
#f<- nc_open("http://128.175.28.250:8080/thredds/dodsC/Aqua1DayFilled.nc")
# Open the DINEOF gap-filled THREDDS dataset and pull the latest time step.
f<- nc_open("http://basin.ceoe.udel.edu/thredds/dodsC/gapfilled_1day_aqua.nc")
tm<- ncvar_get(f, "time")
lon <- ncvar_get(f, "lon") ### loading the longitude from the THREDDS server
#lon<-ncvar_get(f, 'longitude')
lat <- ncvar_get(f, "lat") ### loading the latitude from the THREDDS server
#lat<-ncvar_get(f, 'latitude')
fore_time<- ncvar_get(f, "forecast_time")
# Read only the newest 4-slice forecast block (current day + 3 days ahead).
fore_sst<- ncvar_get(f,"forecast_sst", start = c(1, 1, 1,length(tm)), count = c(length(lon), length(lat),4,1))
fore_443 <- ncvar_get(f, "forecast_a_443_qaa", start = c(1, 1, 1,length(tm)), count = c(length(lon), length(lat),4,1))
data_status <- ncvar_get(f, 'data_status', start = c(length(tm)), count = c(1))
nc_close(f)
# Dates for the current day and the 1/2/3-day forecasts.
# NOTE(review): last(tm + k) shifts every element before taking the last
# one; equivalent to last(tm) + k but less direct.
cur_tm <- as.Date(last(tm), origin = "1970-01-01")
fu1_tm <- as.Date(last(tm+1), origin = "1970-01-01")
fu2_tm <- as.Date(last(tm+2), origin = "1970-01-01")
fu3_tm <- as.Date(last(tm+3), origin = "1970-01-01")
###gapfilled #################
# --- Nowcast ("gap-filled") prediction for the most recent day -------------
# Build the GAM's predictor stack (sst, a_443_qaa, Depth, day-of-year) from
# forecast slice 1, predict occurrence probability, then characterize each
# depth-band x region zone by its `prob` (95th) percentile prediction.
DofY <-yday(cur_tm)
yd<- matrix(DofY, length(lon), length(lat))
ydr<- flip(raster(t(yd)),2)
sstr<- flip(raster(t(fore_sst[,,1])),2)
a_443_qaar<- flip(raster(t(fore_443[,,1])),2)
env_stack<- stack(sstr, a_443_qaar, Depthr, ydr) #sturgeon
names(env_stack)<- c("sst", "a_443_qaa", "Depth", "yd") #sturgeon
#setwd("/home/mwbreece/GAMM/plot/img")
# NOTE(review): overwrite=T / na.rm=T use the reassignable alias T; prefer TRUE.
p <- predict(env_stack, GAM$gam, filename="file4.img", type="response", fun=predict, overwrite=T, na.rm=T)
gf_p<-fliplr(t(as.matrix(p))) ##gap filled predictions
p[] <- replace(p[], p[]>0.1, 0.1) # cap probabilities at 0.1
#######categories
#setwd("/home/mwbreece/GAMM/BAM")
load("raster_masks.Rdata") # d.* zone masks (depth band x estuary region)
load("lon_lat_nefop.Rdata") # sub_lon / sub_lat of the larger NEFOP grid
load("nefop_grid.Rdata") # NEFOP-extent grid template
# Embed the local prediction grid into the larger NEFOP grid.
ind1<-which(sub_lon == min(lon))
ind2<- which(sub_lon == max(lon))
ind3<- which(sub_lat == min(lat))
ind4<- which(sub_lat == max(lat))
nefop_grid[ind1:ind2, ind3:ind4]<- fliplr(t(as.matrix(p)))
p<- flip(raster(t(nefop_grid)),2)
# Clip the predictions to each of the 20 zones (4 depth bands x 5 regions).
p.lt5_up_mask <- raster::mask(p, d.lt5_up_mask)
p.5.10_up_mask <- raster::mask(p, d.5.10_up_mask)
p.10.15_up_mask <- raster::mask(p, d.10.15_up_mask)
p.gt15_up_mask <- raster::mask(p, d.gt15_up_mask)
p.lt5_mid_mask <- raster::mask(p, d.lt5_mid_mask)
p.5.10_mid_mask <- raster::mask(p, d.5.10_mid_mask)
p.10.15_mid_mask <- raster::mask(p, d.10.15_mid_mask)
p.gt15_mid_mask <- raster::mask(p, d.gt15_mid_mask)
p.lt5_low_mask <- raster::mask(p, d.lt5_low_mask)
p.5.10_low_mask <- raster::mask(p, d.5.10_low_mask)
p.10.15_low_mask <- raster::mask(p, d.10.15_low_mask)
p.gt15_low_mask <- raster::mask(p, d.gt15_low_mask)
p.lt5_river_mask <- raster::mask(p, d.lt5_river_mask)
p.5.10_river_mask <- raster::mask(p, d.5.10_river_mask)
p.10.15_river_mask <- raster::mask(p, d.10.15_river_mask)
p.gt15_river_mask <- raster::mask(p, d.gt15_river_mask)
p.lt5_ocean_mask <- raster::mask(p, d.lt5_ocean_mask)
p.5.10_ocean_mask <- raster::mask(p, d.5.10_ocean_mask)
p.10.15_ocean_mask <- raster::mask(p, d.10.15_ocean_mask)
p.gt15_ocean_mask <- raster::mask(p, d.gt15_ocean_mask)
# Set each zone (cells where the depth mask is <= 0 -- presumably all
# in-zone cells since depths appear to be stored non-positive; TODO confirm)
# to that zone's `prob` quantile of predictions: one value per zone.
p.cat_lt5_river<- replace(d.lt5_river_mask, d.lt5_river_mask <= 0, quantile(p.lt5_river_mask, probs = prob))
p.cat_5.10_river<- replace(d.5.10_river_mask, d.5.10_river_mask <= 0, quantile(p.5.10_river_mask, probs = prob))
p.cat_10.15_river<- replace(d.10.15_river_mask, d.10.15_river_mask <= 0, quantile(p.10.15_river_mask, probs = prob))
p.cat_gt15_river<- replace(d.gt15_river_mask, d.gt15_river_mask <= 0, quantile(p.gt15_river_mask, probs = prob))
p.cat_lt5_up<- replace(d.lt5_up_mask, d.lt5_up_mask <= 0, quantile(p.lt5_up_mask, probs = prob))
p.cat_5.10_up<- replace(d.5.10_up_mask, d.5.10_up_mask <= 0, quantile(p.5.10_up_mask, probs = prob))
p.cat_10.15_up<- replace(d.10.15_up_mask, d.10.15_up_mask <= 0, quantile(p.10.15_up_mask, probs = prob))
p.cat_gt15_up<- replace(d.gt15_up_mask, d.gt15_up_mask <= 0, quantile(p.gt15_up_mask, probs = prob))
p.cat_lt5_mid<- replace(d.lt5_mid_mask, d.lt5_mid_mask <= 0, quantile(p.lt5_mid_mask, probs = prob))
p.cat_5.10_mid<- replace(d.5.10_mid_mask, d.5.10_mid_mask <= 0, quantile(p.5.10_mid_mask, probs = prob))
p.cat_10.15_mid<- replace(d.10.15_mid_mask, d.10.15_mid_mask <= 0, quantile(p.10.15_mid_mask, probs = prob))
p.cat_gt15_mid<- replace(d.gt15_mid_mask, d.gt15_mid_mask <= 0, quantile(p.gt15_mid_mask, probs = prob))
p.cat_lt5_low<- replace(d.lt5_low_mask, d.lt5_low_mask <= 0, quantile(p.lt5_low_mask, probs = prob))
p.cat_5.10_low<- replace(d.5.10_low_mask, d.5.10_low_mask <= 0, quantile(p.5.10_low_mask, probs = prob))
p.cat_10.15_low<- replace(d.10.15_low_mask, d.10.15_low_mask <= 0, quantile(p.10.15_low_mask, probs = prob))
p.cat_gt15_low<- replace(d.gt15_low_mask, d.gt15_low_mask <= 0, quantile(p.gt15_low_mask, probs = prob))
p.cat_lt5_ocean<- replace(d.lt5_ocean_mask, d.lt5_ocean_mask <= 0, quantile(p.lt5_ocean_mask, probs = prob))
p.cat_5.10_ocean<- replace(d.5.10_ocean_mask, d.5.10_ocean_mask <= 0, quantile(p.5.10_ocean_mask, probs = prob))
p.cat_10.15_ocean<- replace(d.10.15_ocean_mask, d.10.15_ocean_mask <= 0, quantile(p.10.15_ocean_mask, probs = prob))
p.cat_gt15_ocean<- replace(d.gt15_ocean_mask, d.gt15_ocean_mask <= 0, quantile(p.gt15_ocean_mask, probs = prob))
# Mosaic the 20 per-zone rasters back into one categorized surface.
test<- merge(p.cat_lt5_river,p.cat_5.10_river,p.cat_10.15_river, p.cat_gt15_river, p.cat_lt5_up, p.cat_5.10_up, p.cat_10.15_up, p.cat_gt15_up, p.cat_lt5_mid, p.cat_5.10_mid, p.cat_10.15_mid, p.cat_gt15_mid, p.cat_lt5_low,p.cat_5.10_low, p.cat_10.15_low, p.cat_gt15_low, p.cat_lt5_ocean, p.cat_5.10_ocean, p.cat_10.15_ocean, p.cat_gt15_ocean)
gapfilled <- fliplr(t(as.matrix(test)))[ind1:ind2, ind3:ind4] # nowcast zone map
#######################future 1 day#################################
# Same pipeline as the gapfilled stanza above, but for forecast slice 2
# (one day ahead). NOTE(review): this stanza repeats the previous one almost
# verbatim -- a candidate for extraction into a function(slice, date).
DofY <-yday(fu1_tm)
yd<- matrix(DofY, length(lon), length(lat))
ydr<- flip(raster(t(yd)),2)
sstr<- flip(raster(t(fore_sst[,,2])),2)
a_443_qaar<- flip(raster(t(fore_443[,,2])),2)
env_stack<- stack(sstr, a_443_qaar, Depthr, ydr) #sturgeon
names(env_stack)<- c("sst", "a_443_qaa", "Depth", "yd") #sturgeon
#setwd("/home/mwbreece/GAMM/plot/img")
p <- predict(env_stack, GAM$gam, filename="file4.img", type="response", fun=predict, overwrite=T, na.rm=T)
fu1_p<-fliplr(t(as.matrix(p))) ##predictions for 1 day in future
p[] <- replace(p[], p[]>0.1, 0.1) # cap probabilities at 0.1
#######categories 1day future
#setwd("/home/mwbreece/GAMM/BAM")
#already loaded don't need to do again...
#load("raster_masks.Rdata")
#load("lon_lat_nefop.Rdata")
#load("nefop_grid.Rdata")
ind1<-which(sub_lon == min(lon))
ind2<- which(sub_lon == max(lon))
ind3<- which(sub_lat == min(lat))
ind4<- which(sub_lat == max(lat))
nefop_grid[ind1:ind2, ind3:ind4]<- fliplr(t(as.matrix(p)))
p<- flip(raster(t(nefop_grid)),2)
# Clip to the 20 depth-band x region zones.
p.lt5_up_mask <- raster::mask(p, d.lt5_up_mask)
p.5.10_up_mask <- raster::mask(p, d.5.10_up_mask)
p.10.15_up_mask <- raster::mask(p, d.10.15_up_mask)
p.gt15_up_mask <- raster::mask(p, d.gt15_up_mask)
p.lt5_mid_mask <- raster::mask(p, d.lt5_mid_mask)
p.5.10_mid_mask <- raster::mask(p, d.5.10_mid_mask)
p.10.15_mid_mask <- raster::mask(p, d.10.15_mid_mask)
p.gt15_mid_mask <- raster::mask(p, d.gt15_mid_mask)
p.lt5_low_mask <- raster::mask(p, d.lt5_low_mask)
p.5.10_low_mask <- raster::mask(p, d.5.10_low_mask)
p.10.15_low_mask <- raster::mask(p, d.10.15_low_mask)
p.gt15_low_mask <- raster::mask(p, d.gt15_low_mask)
p.lt5_river_mask <- raster::mask(p, d.lt5_river_mask)
p.5.10_river_mask <- raster::mask(p, d.5.10_river_mask)
p.10.15_river_mask <- raster::mask(p, d.10.15_river_mask)
p.gt15_river_mask <- raster::mask(p, d.gt15_river_mask)
p.lt5_ocean_mask <- raster::mask(p, d.lt5_ocean_mask)
p.5.10_ocean_mask <- raster::mask(p, d.5.10_ocean_mask)
p.10.15_ocean_mask <- raster::mask(p, d.10.15_ocean_mask)
p.gt15_ocean_mask <- raster::mask(p, d.gt15_ocean_mask)
# One characteristic (95th-percentile) value per zone.
p.cat_lt5_river<- replace(d.lt5_river_mask, d.lt5_river_mask <= 0, quantile(p.lt5_river_mask, probs = prob))
p.cat_5.10_river<- replace(d.5.10_river_mask, d.5.10_river_mask <= 0, quantile(p.5.10_river_mask, probs = prob))
p.cat_10.15_river<- replace(d.10.15_river_mask, d.10.15_river_mask <= 0, quantile(p.10.15_river_mask, probs = prob))
p.cat_gt15_river<- replace(d.gt15_river_mask, d.gt15_river_mask <= 0, quantile(p.gt15_river_mask, probs = prob))
p.cat_lt5_up<- replace(d.lt5_up_mask, d.lt5_up_mask <= 0, quantile(p.lt5_up_mask, probs = prob))
p.cat_5.10_up<- replace(d.5.10_up_mask, d.5.10_up_mask <= 0, quantile(p.5.10_up_mask, probs = prob))
p.cat_10.15_up<- replace(d.10.15_up_mask, d.10.15_up_mask <= 0, quantile(p.10.15_up_mask, probs = prob))
p.cat_gt15_up<- replace(d.gt15_up_mask, d.gt15_up_mask <= 0, quantile(p.gt15_up_mask, probs = prob))
p.cat_lt5_mid<- replace(d.lt5_mid_mask, d.lt5_mid_mask <= 0, quantile(p.lt5_mid_mask, probs = prob))
p.cat_5.10_mid<- replace(d.5.10_mid_mask, d.5.10_mid_mask <= 0, quantile(p.5.10_mid_mask, probs = prob))
p.cat_10.15_mid<- replace(d.10.15_mid_mask, d.10.15_mid_mask <= 0, quantile(p.10.15_mid_mask, probs = prob))
p.cat_gt15_mid<- replace(d.gt15_mid_mask, d.gt15_mid_mask <= 0, quantile(p.gt15_mid_mask, probs = prob))
p.cat_lt5_low<- replace(d.lt5_low_mask, d.lt5_low_mask <= 0, quantile(p.lt5_low_mask, probs = prob))
p.cat_5.10_low<- replace(d.5.10_low_mask, d.5.10_low_mask <= 0, quantile(p.5.10_low_mask, probs = prob))
p.cat_10.15_low<- replace(d.10.15_low_mask, d.10.15_low_mask <= 0, quantile(p.10.15_low_mask, probs = prob))
p.cat_gt15_low<- replace(d.gt15_low_mask, d.gt15_low_mask <= 0, quantile(p.gt15_low_mask, probs = prob))
p.cat_lt5_ocean<- replace(d.lt5_ocean_mask, d.lt5_ocean_mask <= 0, quantile(p.lt5_ocean_mask, probs = prob))
p.cat_5.10_ocean<- replace(d.5.10_ocean_mask, d.5.10_ocean_mask <= 0, quantile(p.5.10_ocean_mask, probs = prob))
p.cat_10.15_ocean<- replace(d.10.15_ocean_mask, d.10.15_ocean_mask <= 0, quantile(p.10.15_ocean_mask, probs = prob))
p.cat_gt15_ocean<- replace(d.gt15_ocean_mask, d.gt15_ocean_mask <= 0, quantile(p.gt15_ocean_mask, probs = prob))
# Mosaic the per-zone rasters into the 1-day-ahead zone map.
test<- merge(p.cat_lt5_river,p.cat_5.10_river,p.cat_10.15_river, p.cat_gt15_river, p.cat_lt5_up, p.cat_5.10_up, p.cat_10.15_up, p.cat_gt15_up, p.cat_lt5_mid, p.cat_5.10_mid, p.cat_10.15_mid, p.cat_gt15_mid, p.cat_lt5_low,p.cat_5.10_low, p.cat_10.15_low, p.cat_gt15_low, p.cat_lt5_ocean, p.cat_5.10_ocean, p.cat_10.15_ocean, p.cat_gt15_ocean)
fu1_day<-fliplr(t(as.matrix(test)))[ind1:ind2, ind3:ind4]
#####future 2 days
# Same pipeline as the stanzas above, but for forecast slice 3 (two days
# ahead). NOTE(review): third near-verbatim repetition of the same code.
DofY <-yday(fu2_tm)
yd<- matrix(DofY, length(lon), length(lat))
ydr<- flip(raster(t(yd)),2)
sstr<- flip(raster(t(fore_sst[,,3])),2)
a_443_qaar<- flip(raster(t(fore_443[,,3])),2)
env_stack<- stack(sstr, a_443_qaar, Depthr, ydr) #sturgeon
names(env_stack)<- c("sst", "a_443_qaa", "Depth", "yd") #sturgeon
#setwd("/home/mwbreece/GAMM/plot/img")
p <- predict(env_stack, GAM$gam, filename="file4.img", type="response", fun=predict, overwrite=T, na.rm=T)
fu2_p<-fliplr(t(as.matrix(p))) ##predictions for 2 days in future
p[] <- replace(p[], p[]>0.1, 0.1) # cap probabilities at 0.1
#######categories 2day future
#setwd("/home/mwbreece/GAMM/BAM")
#load("raster_masks.Rdata")
#load("lon_lat_nefop.Rdata")
#load("nefop_grid.Rdata")
ind1<-which(sub_lon == min(lon))
ind2<- which(sub_lon == max(lon))
ind3<- which(sub_lat == min(lat))
ind4<- which(sub_lat == max(lat))
nefop_grid[ind1:ind2, ind3:ind4]<- fliplr(t(as.matrix(p)))
p<- flip(raster(t(nefop_grid)),2)
# Clip to the 20 depth-band x region zones.
p.lt5_up_mask <- raster::mask(p, d.lt5_up_mask)
p.5.10_up_mask <- raster::mask(p, d.5.10_up_mask)
p.10.15_up_mask <- raster::mask(p, d.10.15_up_mask)
p.gt15_up_mask <- raster::mask(p, d.gt15_up_mask)
p.lt5_mid_mask <- raster::mask(p, d.lt5_mid_mask)
p.5.10_mid_mask <- raster::mask(p, d.5.10_mid_mask)
p.10.15_mid_mask <- raster::mask(p, d.10.15_mid_mask)
p.gt15_mid_mask <- raster::mask(p, d.gt15_mid_mask)
p.lt5_low_mask <- raster::mask(p, d.lt5_low_mask)
p.5.10_low_mask <- raster::mask(p, d.5.10_low_mask)
p.10.15_low_mask <- raster::mask(p, d.10.15_low_mask)
p.gt15_low_mask <- raster::mask(p, d.gt15_low_mask)
p.lt5_river_mask <- raster::mask(p, d.lt5_river_mask)
p.5.10_river_mask <- raster::mask(p, d.5.10_river_mask)
p.10.15_river_mask <- raster::mask(p, d.10.15_river_mask)
p.gt15_river_mask <- raster::mask(p, d.gt15_river_mask)
p.lt5_ocean_mask <- raster::mask(p, d.lt5_ocean_mask)
p.5.10_ocean_mask <- raster::mask(p, d.5.10_ocean_mask)
p.10.15_ocean_mask <- raster::mask(p, d.10.15_ocean_mask)
p.gt15_ocean_mask <- raster::mask(p, d.gt15_ocean_mask)
# One characteristic (95th-percentile) value per zone.
p.cat_lt5_river<- replace(d.lt5_river_mask, d.lt5_river_mask <= 0, quantile(p.lt5_river_mask, probs = prob))
p.cat_5.10_river<- replace(d.5.10_river_mask, d.5.10_river_mask <= 0, quantile(p.5.10_river_mask, probs = prob))
p.cat_10.15_river<- replace(d.10.15_river_mask, d.10.15_river_mask <= 0, quantile(p.10.15_river_mask, probs = prob))
p.cat_gt15_river<- replace(d.gt15_river_mask, d.gt15_river_mask <= 0, quantile(p.gt15_river_mask, probs = prob))
p.cat_lt5_up<- replace(d.lt5_up_mask, d.lt5_up_mask <= 0, quantile(p.lt5_up_mask, probs = prob))
p.cat_5.10_up<- replace(d.5.10_up_mask, d.5.10_up_mask <= 0, quantile(p.5.10_up_mask, probs = prob))
p.cat_10.15_up<- replace(d.10.15_up_mask, d.10.15_up_mask <= 0, quantile(p.10.15_up_mask, probs = prob))
p.cat_gt15_up<- replace(d.gt15_up_mask, d.gt15_up_mask <= 0, quantile(p.gt15_up_mask, probs = prob))
p.cat_lt5_mid<- replace(d.lt5_mid_mask, d.lt5_mid_mask <= 0, quantile(p.lt5_mid_mask, probs = prob))
p.cat_5.10_mid<- replace(d.5.10_mid_mask, d.5.10_mid_mask <= 0, quantile(p.5.10_mid_mask, probs = prob))
p.cat_10.15_mid<- replace(d.10.15_mid_mask, d.10.15_mid_mask <= 0, quantile(p.10.15_mid_mask, probs = prob))
p.cat_gt15_mid<- replace(d.gt15_mid_mask, d.gt15_mid_mask <= 0, quantile(p.gt15_mid_mask, probs = prob))
p.cat_lt5_low<- replace(d.lt5_low_mask, d.lt5_low_mask <= 0, quantile(p.lt5_low_mask, probs = prob))
p.cat_5.10_low<- replace(d.5.10_low_mask, d.5.10_low_mask <= 0, quantile(p.5.10_low_mask, probs = prob))
p.cat_10.15_low<- replace(d.10.15_low_mask, d.10.15_low_mask <= 0, quantile(p.10.15_low_mask, probs = prob))
p.cat_gt15_low<- replace(d.gt15_low_mask, d.gt15_low_mask <= 0, quantile(p.gt15_low_mask, probs = prob))
p.cat_lt5_ocean<- replace(d.lt5_ocean_mask, d.lt5_ocean_mask <= 0, quantile(p.lt5_ocean_mask, probs = prob))
p.cat_5.10_ocean<- replace(d.5.10_ocean_mask, d.5.10_ocean_mask <= 0, quantile(p.5.10_ocean_mask, probs = prob))
p.cat_10.15_ocean<- replace(d.10.15_ocean_mask, d.10.15_ocean_mask <= 0, quantile(p.10.15_ocean_mask, probs = prob))
p.cat_gt15_ocean<- replace(d.gt15_ocean_mask, d.gt15_ocean_mask <= 0, quantile(p.gt15_ocean_mask, probs = prob))
# Mosaic the per-zone rasters into the 2-day-ahead zone map.
test<- merge(p.cat_lt5_river,p.cat_5.10_river,p.cat_10.15_river, p.cat_gt15_river, p.cat_lt5_up, p.cat_5.10_up, p.cat_10.15_up, p.cat_gt15_up, p.cat_lt5_mid, p.cat_5.10_mid, p.cat_10.15_mid, p.cat_gt15_mid, p.cat_lt5_low,p.cat_5.10_low, p.cat_10.15_low, p.cat_gt15_low, p.cat_lt5_ocean, p.cat_5.10_ocean, p.cat_10.15_ocean, p.cat_gt15_ocean)
fu2_day<- fliplr(t(as.matrix(test)))[ind1:ind2, ind3:ind4]
################## future 3 day #################
DofY <-yday(fu3_tm)
yd<- matrix(DofY, length(lon), length(lat))
ydr<- flip(raster(t(yd)),2)
sstr<- flip(raster(t(fore_sst[,,4])),2)
a_443_qaar<- flip(raster(t(fore_443[,,4])),2)
env_stack<- stack(sstr, a_443_qaar, Depthr, ydr) #sturgeon
names(env_stack)<- c("sst", "a_443_qaa", "Depth", "yd") #sturgeon
#setwd("/home/mwbreece/GAMM/plot/img")
p <- predict(env_stack, GAM$gam, filename="file4.img", type="response", fun=predict, overwrite=T, na.rm=T)
fu3_p<-fliplr(t(as.matrix(p))) ##predictions for 3 days in future
p[] <- replace(p[], p[]>0.1, 0.1)
#######categories 3day future
#setwd("/home/mwbreece/GAMM/BAM")
#load("raster_masks.Rdata")
#load("lon_lat_nefop.Rdata")
#load("nefop_grid.Rdata")
ind1<-which(sub_lon == min(lon))
ind2<- which(sub_lon == max(lon))
ind3<- which(sub_lat == min(lat))
ind4<- which(sub_lat == max(lat))
nefop_grid[ind1:ind2, ind3:ind4]<- fliplr(t(as.matrix(p)))
p<- flip(raster(t(nefop_grid)),2)
p.lt5_up_mask <- raster::mask(p, d.lt5_up_mask)
p.5.10_up_mask <- raster::mask(p, d.5.10_up_mask)
p.10.15_up_mask <- raster::mask(p, d.10.15_up_mask)
p.gt15_up_mask <- raster::mask(p, d.gt15_up_mask)
p.lt5_mid_mask <- raster::mask(p, d.lt5_mid_mask)
p.5.10_mid_mask <- raster::mask(p, d.5.10_mid_mask)
p.10.15_mid_mask <- raster::mask(p, d.10.15_mid_mask)
p.gt15_mid_mask <- raster::mask(p, d.gt15_mid_mask)
p.lt5_low_mask <- raster::mask(p, d.lt5_low_mask)
p.5.10_low_mask <- raster::mask(p, d.5.10_low_mask)
p.10.15_low_mask <- raster::mask(p, d.10.15_low_mask)
p.gt15_low_mask <- raster::mask(p, d.gt15_low_mask)
p.lt5_river_mask <- raster::mask(p, d.lt5_river_mask)
p.5.10_river_mask <- raster::mask(p, d.5.10_river_mask)
p.10.15_river_mask <- raster::mask(p, d.10.15_river_mask)
p.gt15_river_mask <- raster::mask(p, d.gt15_river_mask)
p.lt5_ocean_mask <- raster::mask(p, d.lt5_ocean_mask)
p.5.10_ocean_mask <- raster::mask(p, d.5.10_ocean_mask)
p.10.15_ocean_mask <- raster::mask(p, d.10.15_ocean_mask)
p.gt15_ocean_mask <- raster::mask(p, d.gt15_ocean_mask)
p.cat_lt5_river<- replace(d.lt5_river_mask, d.lt5_river_mask <= 0, quantile(p.lt5_river_mask, probs = prob))
p.cat_5.10_river<- replace(d.5.10_river_mask, d.5.10_river_mask <= 0, quantile(p.5.10_river_mask, probs = prob))
p.cat_10.15_river<- replace(d.10.15_river_mask, d.10.15_river_mask <= 0, quantile(p.10.15_river_mask, probs = prob))
p.cat_gt15_river<- replace(d.gt15_river_mask, d.gt15_river_mask <= 0, quantile(p.gt15_river_mask, probs = prob))
p.cat_lt5_up<- replace(d.lt5_up_mask, d.lt5_up_mask <= 0, quantile(p.lt5_up_mask, probs = prob))
p.cat_5.10_up<- replace(d.5.10_up_mask, d.5.10_up_mask <= 0, quantile(p.5.10_up_mask, probs = prob))
p.cat_10.15_up<- replace(d.10.15_up_mask, d.10.15_up_mask <= 0, quantile(p.10.15_up_mask, probs = prob))
p.cat_gt15_up<- replace(d.gt15_up_mask, d.gt15_up_mask <= 0, quantile(p.gt15_up_mask, probs = prob))
p.cat_lt5_mid<- replace(d.lt5_mid_mask, d.lt5_mid_mask <= 0, quantile(p.lt5_mid_mask, probs = prob))
p.cat_5.10_mid<- replace(d.5.10_mid_mask, d.5.10_mid_mask <= 0, quantile(p.5.10_mid_mask, probs = prob))
p.cat_10.15_mid<- replace(d.10.15_mid_mask, d.10.15_mid_mask <= 0, quantile(p.10.15_mid_mask, probs = prob))
p.cat_gt15_mid<- replace(d.gt15_mid_mask, d.gt15_mid_mask <= 0, quantile(p.gt15_mid_mask, probs = prob))
p.cat_lt5_low<- replace(d.lt5_low_mask, d.lt5_low_mask <= 0, quantile(p.lt5_low_mask, probs = prob))
p.cat_5.10_low<- replace(d.5.10_low_mask, d.5.10_low_mask <= 0, quantile(p.5.10_low_mask, probs = prob))
p.cat_10.15_low<- replace(d.10.15_low_mask, d.10.15_low_mask <= 0, quantile(p.10.15_low_mask, probs = prob))
p.cat_gt15_low<- replace(d.gt15_low_mask, d.gt15_low_mask <= 0, quantile(p.gt15_low_mask, probs = prob))
p.cat_lt5_ocean<- replace(d.lt5_ocean_mask, d.lt5_ocean_mask <= 0, quantile(p.lt5_ocean_mask, probs = prob))
p.cat_5.10_ocean<- replace(d.5.10_ocean_mask, d.5.10_ocean_mask <= 0, quantile(p.5.10_ocean_mask, probs = prob))
p.cat_10.15_ocean<- replace(d.10.15_ocean_mask, d.10.15_ocean_mask <= 0, quantile(p.10.15_ocean_mask, probs = prob))
p.cat_gt15_ocean<- replace(d.gt15_ocean_mask, d.gt15_ocean_mask <= 0, quantile(p.gt15_ocean_mask, probs = prob))
test<- merge(p.cat_lt5_river,p.cat_5.10_river,p.cat_10.15_river, p.cat_gt15_river, p.cat_lt5_up, p.cat_5.10_up, p.cat_10.15_up, p.cat_gt15_up, p.cat_lt5_mid, p.cat_5.10_mid, p.cat_10.15_mid, p.cat_gt15_mid, p.cat_lt5_low,p.cat_5.10_low, p.cat_10.15_low, p.cat_gt15_low, p.cat_lt5_ocean, p.cat_5.10_ocean, p.cat_10.15_ocean, p.cat_gt15_ocean)
fu3_day<- fliplr(t(as.matrix(test)))[ind1:ind2, ind3:ind4]
#layer 1 is gapfilled
#layer 2 is fu1_day
#layer 3 is fu2_day
#layer 4 is fu3_day
#day is last(tm)
#longitude is sub_lon
#latitude is sub_lat
########
## fixed to here so that lat and lon are the same as the original netcdf
tm_dim <- ncdim_def("time", "Days since 1970-01-01 00:00:00", c(as.integer(last(tm))), unlim = TRUE)
mdltm_dim <- ncdim_def("forecast_time", "Days since 1970-01-01 00:00:00", c(last(tm), last(tm)+1, last(tm)+2, last(tm)+3))
#cat_lon_dim <- ncdim_def("categories_lon", "degrees_east", lon)
#cat_lat_dim <- ncdim_def("categories_lat", "degrees_north", lat)
lon_dim <- ncdim_def("lon", "degrees_east", lon)
lat_dim <- ncdim_def("lat", "degrees_north", lat)
var_defs = list()
var_defs[['sturgeon_predictions']] <- ncvar_def('Sturgeon_Predictions', 'Probability of Sturgeon Presence', list(lon_dim, lat_dim, mdltm_dim, tm_dim), -999, longname = 'Sturgeon Model Predictions', prec="single", compression=5)
var_defs[['sturgeon_categories']] <- ncvar_def('Sturgeon_Categories', 'Sturgeon Categories', list(lon_dim, lat_dim, mdltm_dim, tm_dim), -999, longname = 'Sturgeon Categories', prec="single", compression=5)
var_defs[['data_status']] <- ncvar_def(name='data_status', units='NA', list(tm_dim), -999, longname = 'Data Status Flag', prec="integer", compression=5)
print("start nc file")
newnc <- nc_create(sprintf('%s/model_predictions.%s.nc4',output_dir, cur_tm), var_defs, force_v4=TRUE)
ncvar_put(newnc, var_defs[['sturgeon_predictions']], replace(gf_p, is.na(gf_p), -999), start=c(1,1,1, 1), count=c(lon_dim$len, lat_dim$len, 1, 1))
ncvar_put(newnc, var_defs[['sturgeon_predictions']], replace(fu1_p, is.na(fu1_p), -999), start=c(1,1,2, 1), count=c(lon_dim$len, lat_dim$len, 1, 1))
ncvar_put(newnc, var_defs[['sturgeon_predictions']], replace(fu2_p, is.na(fu2_p), -999), start=c(1,1,3, 1), count=c(lon_dim$len, lat_dim$len, 1, 1))
ncvar_put(newnc, var_defs[['sturgeon_predictions']], replace(fu3_p, is.na(fu3_p), -999), start=c(1,1,4, 1), count=c(lon_dim$len, lat_dim$len, 1, 1))
ncvar_put(newnc, var_defs[['sturgeon_categories']], replace(gapfilled, is.na(gapfilled), -999), start=c(1,1,1, 1), count=c(lon_dim$len, lat_dim$len, 1, 1))
ncvar_put(newnc, var_defs[['sturgeon_categories']], replace(fu1_day, is.na(fu1_day), -999), start=c(1,1,2, 1), count=c(lon_dim$len, lat_dim$len, 1, 1))
ncvar_put(newnc, var_defs[['sturgeon_categories']], replace(fu2_day, is.na(fu2_day), -999), start=c(1,1,3, 1), count=c(lon_dim$len, lat_dim$len, 1, 1))
ncvar_put(newnc, var_defs[['sturgeon_categories']], replace(fu3_day, is.na(fu3_day), -999), start=c(1,1,4, 1), count=c(lon_dim$len, lat_dim$len, 1, 1))
ncatt_put(newnc, 'data_status', 'Forecast_Status_Flag', '1 indicates the data may be degraded due to a lack of satellite coverage in the 3 days leading up to the forecast. Caution should be used')
ncvar_put(newnc, var_defs[['data_status']], data_status)
nc_close(newnc)
| /polar/aqua/sturgeon_modeling/Stur_cats.R | no_license | jsimkins2/UD_SRS | R | false | false | 22,987 | r |
#### Categorize the GAM predictions for Atlantic Sturgeon occurrence using DINEOF outputs.
#### Refactored: the per-forecast-day prediction/categorization pipeline (previously
#### copy-pasted four times, once per day) now lives in predict_day(), called in a loop.
require(magick)
require(RColorBrewer)
require(ncdf4)
require(raster)
require(lubridate)
require(matlab)
require(fields)

working_path <- '/home/mshatley/scripts/sturgeon/'
output_dir <- '/data/Aqua/sturgeon/'

# Last element of a vector.
last <- function(x) { return( x[length(x)] ) }

# NOTE(review): breakpoints/colors are not used in this script — presumably kept
# for a companion plotting script; confirm before removing.
breakpoints <- c(0, 0.01, 0.05, 1)
colors <- c("green4", "gold", "red")
prob <- 0.95  # quantile of masked predictions used as the category value per region/depth

setwd(working_path)
load("v2_ts_dxdoy_temp_443.Rdata")  # base model (GAM) as of 6/12/2017
load("DINeof_stuff.Rdata")          # provides Depthr, among others

# Pull the latest day's forecast slices from the THREDDS server.
f <- nc_open("http://basin.ceoe.udel.edu/thredds/dodsC/gapfilled_1day_aqua.nc")
tm <- ncvar_get(f, "time")
lon <- ncvar_get(f, "lon")  # longitude from the THREDDS server
lat <- ncvar_get(f, "lat")  # latitude from the THREDDS server
fore_time <- ncvar_get(f, "forecast_time")
fore_sst <- ncvar_get(f, "forecast_sst", start = c(1, 1, 1, length(tm)), count = c(length(lon), length(lat), 4, 1))
fore_443 <- ncvar_get(f, "forecast_a_443_qaa", start = c(1, 1, 1, length(tm)), count = c(length(lon), length(lat), 4, 1))
data_status <- ncvar_get(f, 'data_status', start = c(length(tm)), count = c(1))
nc_close(f)

cur_tm <- as.Date(last(tm), origin = "1970-01-01")

# Masks and NEFOP grid used for categorization.
load("raster_masks.Rdata")   # d.<depth>_<region>_mask rasters
load("lon_lat_nefop.Rdata")  # sub_lon, sub_lat
load("nefop_grid.Rdata")     # nefop_grid

# Indices of the satellite grid inside the larger NEFOP grid (identical for all
# four forecast days, so compute them once).
ind1 <- which(sub_lon == min(lon))
ind2 <- which(sub_lon == max(lon))
ind3 <- which(sub_lat == min(lat))
ind4 <- which(sub_lat == max(lat))

# Mask naming convention (d.<depth>_<region>_mask). Loop order below reproduces
# the original merge order: region-major, depth-minor.
depth_bins <- c("lt5", "5.10", "10.15", "gt15")
regions <- c("river", "up", "mid", "low", "ocean")

#' Run the GAM for one forecast slice and categorize the result.
#'
#' @param day_date Date the forecast is valid for (drives the day-of-year predictor).
#' @param slice Index (1-4) into the forecast_sst / forecast_a_443_qaa arrays.
#' @return list(p = raw prediction matrix, cat = categorized matrix), both
#'   oriented/cropped to the satellite grid.
predict_day <- function(day_date, slice) {
  yd <- matrix(yday(day_date), length(lon), length(lat))
  ydr <- flip(raster(t(yd)), 2)
  sstr <- flip(raster(t(fore_sst[, , slice])), 2)
  a_443_qaar <- flip(raster(t(fore_443[, , slice])), 2)
  env_stack <- stack(sstr, a_443_qaar, Depthr, ydr)           # sturgeon predictors
  names(env_stack) <- c("sst", "a_443_qaa", "Depth", "yd")
  p <- predict(env_stack, GAM$gam, filename = "file4.img", type = "response", fun = predict, overwrite = T, na.rm = T)
  raw_pred <- fliplr(t(as.matrix(p)))  # raw probabilities, satellite-grid orientation
  p[] <- replace(p[], p[] > 0.1, 0.1)  # cap probabilities at 0.1 before categorizing
  # Embed the capped predictions into the full NEFOP grid.
  grid <- nefop_grid
  grid[ind1:ind2, ind3:ind4] <- fliplr(t(as.matrix(p)))
  p <- flip(raster(t(grid)), 2)
  # For each region/depth mask: fill the mask area with the `prob` quantile of
  # the masked predictions, then merge all pieces into one categorized raster.
  cat_pieces <- list()
  for (region in regions) {
    for (depth in depth_bins) {
      d_mask <- get(sprintf("d.%s_%s_mask", depth, region))
      p_mask <- raster::mask(p, d_mask)
      cat_pieces[[length(cat_pieces) + 1]] <-
        replace(d_mask, d_mask <= 0, quantile(p_mask, probs = prob))
    }
  }
  merged <- do.call(raster::merge, cat_pieces)
  list(p = raw_pred, cat = fliplr(t(as.matrix(merged)))[ind1:ind2, ind3:ind4])
}

# Slice 1 = gap-filled current day; slices 2-4 = 1, 2, 3 days into the future.
days <- vector("list", 4)
for (i in seq_len(4)) {
  days[[i]] <- predict_day(as.Date(last(tm) + (i - 1), origin = "1970-01-01"), i)
}

######## Write predictions and categories to NetCDF ########
# layer 1 is gapfilled; layers 2-4 are 1, 2, 3 days in the future.
tm_dim <- ncdim_def("time", "Days since 1970-01-01 00:00:00", c(as.integer(last(tm))), unlim = TRUE)
mdltm_dim <- ncdim_def("forecast_time", "Days since 1970-01-01 00:00:00", c(last(tm), last(tm)+1, last(tm)+2, last(tm)+3))
lon_dim <- ncdim_def("lon", "degrees_east", lon)
lat_dim <- ncdim_def("lat", "degrees_north", lat)
var_defs = list()
var_defs[['sturgeon_predictions']] <- ncvar_def('Sturgeon_Predictions', 'Probability of Sturgeon Presence', list(lon_dim, lat_dim, mdltm_dim, tm_dim), -999, longname = 'Sturgeon Model Predictions', prec="single", compression=5)
var_defs[['sturgeon_categories']] <- ncvar_def('Sturgeon_Categories', 'Sturgeon Categories', list(lon_dim, lat_dim, mdltm_dim, tm_dim), -999, longname = 'Sturgeon Categories', prec="single", compression=5)
var_defs[['data_status']] <- ncvar_def(name='data_status', units='NA', list(tm_dim), -999, longname = 'Data Status Flag', prec="integer", compression=5)
print("start nc file")
newnc <- nc_create(sprintf('%s/model_predictions.%s.nc4', output_dir, cur_tm), var_defs, force_v4 = TRUE)
for (i in seq_len(4)) {
  pr <- days[[i]]$p
  ct <- days[[i]]$cat
  ncvar_put(newnc, var_defs[['sturgeon_predictions']], replace(pr, is.na(pr), -999), start = c(1, 1, i, 1), count = c(lon_dim$len, lat_dim$len, 1, 1))
  ncvar_put(newnc, var_defs[['sturgeon_categories']], replace(ct, is.na(ct), -999), start = c(1, 1, i, 1), count = c(lon_dim$len, lat_dim$len, 1, 1))
}
ncatt_put(newnc, 'data_status', 'Forecast_Status_Flag', '1 indicates the data may be degraded due to a lack of satellite coverage in the 3 days leading up to the forecast. Caution should be used')
ncvar_put(newnc, var_defs[['data_status']], data_status)
nc_close(newnc)
|
\name{fire}
\alias{fire}
\docType{data}
\title{
Fire data from Yellowstone National Park
}
\description{
Fires from 1988 constituted the largest conflagration in the history of Yellowstone National Park. This dataframe lists burned areas for ten Yellowstone stream catchments (Robinson et al. 1994).
}
\usage{data(fire)}
\format{
A data frame with 10 observations on the following 2 variables.
\describe{
    \item{\code{fire}}{Burn area, in hectares\eqn{^2}.}
\item{\code{stream}}{A factor with levels \code{Blacktail} \code{Cache} \code{EF.Blacktail} \code{Fairy} \code{Hellroaring} \code{Iron.Springs} \code{Pebble} \code{Rose} \code{SF.Cache} \code{Twin}}
}
}
\source{
Robinson, C. T., Minshall, G. W., and S. R. Rushforth (1994) The effects of the 1988
wildfires on diatom assemblages in the streams of Yellowstone National Park. Technical Report NPS/NRYELL/NRTR-93/XX.
}
\keyword{datasets}
| /man/fire.rd | no_license | cran/asbio | R | false | false | 946 | rd | \name{fire}
\alias{fire}
\docType{data}
\title{
Fire data from Yellowstone National Park
}
\description{
Fires from 1988 constituted the largest conflagration in the history of Yellowstone National Park. This dataframe lists burned areas for ten Yellowstone stream catchments (Robinson et al. 1994).
}
\usage{data(fire)}
\format{
A data frame with 10 observations on the following 2 variables.
\describe{
    \item{\code{fire}}{Burn area, in hectares\eqn{^2}.}
\item{\code{stream}}{A factor with levels \code{Blacktail} \code{Cache} \code{EF.Blacktail} \code{Fairy} \code{Hellroaring} \code{Iron.Springs} \code{Pebble} \code{Rose} \code{SF.Cache} \code{Twin}}
}
}
\source{
Robinson, C. T., Minshall, G. W., and S. R. Rushforth (1994) The effects of the 1988
wildfires on diatom assemblages in the streams of Yellowstone National Park. Technical Report NPS/NRYELL/NRTR-93/XX.
}
\keyword{datasets}
|
.onLoad <- function(libname, pkgname){
  # When the package is loaded via pkgload/devtools (e.g. devtools::load_all()),
  # force re-initialisation of yacas against the in-development install path so
  # a fresh instance is created. No-op for a normal library() load.
  if (!requireNamespace("pkgload", quietly = TRUE)) {
    return(invisible(NULL))
  }
  if (!pkgload::is_dev_package("Ryacas")) {
    return(invisible(NULL))
  }
  # pkgload overwrites system.file, so this resolves inside the dev tree.
  path <- system.file(package = "Ryacas", "yacas")
  yacas_init_force(path)
}
| /R/zzz.R | no_license | alexewd/ryacas | R | false | false | 577 | r | .onLoad <- function(libname, pkgname){
  # Duplicate copy of the .onLoad body (the function header is fused into the
  # preceding dataset-metadata line). Only acts when Ryacas was loaded via
  # pkgload/devtools.
  if (requireNamespace("pkgload", quietly = TRUE)) {
    if (pkgload::is_dev_package("Ryacas")) {
      # Package was loaded using pkgload/devtools
      #path <- pkgload:::shim_system.file(package = "Ryacas", "yacas")
      # pkgload overrides system.file, so this resolves inside the source tree
      path <- system.file(package = "Ryacas", "yacas")
      # Force initialise so that a new instance is created.
      # Especially useful during development with e.g. devtools::load_all().
      yacas_init_force(path)
    }
  }
}
|
# NOTE: edit this variable to point to the right path for the data file
path <- "~/Downloads/household_power_consumption.txt"
data_file <- file(path)
# read in the first line manually to get the headers...
headers <- unlist(strsplit(readLines(data_file, n=1), split = ";"))
close(data_file)
# ...because we skip several rows to only load the two days of data we really want
household_data <-
read.table(path, sep = ";", col.names = headers, skip = 66638,
nrows = 2880, na.strings = c("?"))
# Now write a histogram to disk as a PNG image file
png("plot1.png")
hist(household_data$Global_active_power, col = "red",
main = "Global Active Power",
xlab = "Global Active Power (kilowatts)")
dev.off()
| /plot1.R | no_license | alexvollmer/ExData_Plotting1 | R | false | false | 727 | r | # NOTE: edit this variable to point to the right path for the data file
# Plot 1: histogram of Global Active Power from the household power data set.
# NOTE: edit this variable to point to the right path for the data file
path <- "~/Downloads/household_power_consumption.txt"

# Pull the header row by hand: the bulk read below skips most of the file and
# would otherwise lose the column names.
con <- file(path)
headers <- strsplit(readLines(con, n = 1L), ";", fixed = TRUE)[[1]]
close(con)

# skip/nrows select the 2880 rows of interest; "?" marks missing readings.
household_data <-
  read.table(file = path, sep = ";", col.names = headers, skip = 66638,
             nrows = 2880, na.strings = c("?"))

# Render the histogram straight to a PNG device.
png("plot1.png")
hist(household_data$Global_active_power, col = "red",
     main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)")
dev.off()
|
# This code converts the MAIAC aqua hdf files first into tiff files and finaly into csv file
rm(list=ls())
library(raster)
library(rgdal)
library(rgeos)
library(gdalUtils)
library(plyr)
library(leaflet)
library(magrittr)
library(sp)
# World
# world = getMap(resolution = "high")
# pol = world[!is.na(world$NAME) & world$NAME == "Switzerland", ]
# pol = spTransform(pol, "+proj=utm +zone=32 +ellps=WGS84 +datum=WGS84 +units=m +no_defs")
# pol = gBuffer(pol, width = 10000)
# pol = spTransform(pol, "+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0")
hdf_to_tif <- function(path, n) {
  # Extract subdataset `n` from the HDF file at `path`, write it out as a
  # GeoTIFF next to the source file (suffix "_<n>.tif"), and return it as a
  # raster object.
  #
  # path: path to a .hdf file readable by gdalUtils::get_subdatasets().
  # n:    index of the subdataset to extract.
  sds <- get_subdatasets(path)
  # Re-assemble the subdataset identifier with the file-path component
  # double-quoted so gdal_translate tolerates special characters in the path.
  # NOTE(review): assumes the identifier has exactly 6 colon-separated parts.
  x <- strsplit(sds[n], ":")[[1]]
  x <- paste0(x[1], ":", x[2], ':"', x[3], '":', x[4], ":", x[5], ":", x[6])
  # Compute the output file name once (the original repeated this gsub twice).
  out_path <- gsub(".hdf", paste0("_", n, ".tif"), path, fixed = TRUE)
  system(
    paste0(
      "gdal_translate -of GTiff ",
      "\"",
      x,
      "\" ",
      out_path
    )
  )
  raster(out_path) # Convert the freshly written GeoTIFF to a raster
}
# --- STEP 1: extract MAIAC AQUA AOD from HDF, clip to the project area, and
# --- dump a yearly CSV plus a unique-grid lookup table.
# Project area of interest, reprojected to lon/lat so it can clip the grid.
pol=readOGR(dsn="N:/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Qgis/General/Project_border/Project_aoi","Project_border_latlon")
pol = spTransform(pol, "+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0")
# Input Directories- for new MAIAC data (08.2016)
aod_dir = "N:/Projects/P028.IL.Israel.MAIAC.PM.V2/raw/MAIAC_data_082016"
ref_grid = "N:/Projects/P028.IL.Israel.MAIAC.PM.V2/raw/MAIAC_data_082016/MAIACLatlon.h03v03.hdf"
#Input Directories- for old MAIAC data (2014)
# NOTE: the assignments below intentionally override the "new data" paths
# above; comment out whichever pair is not wanted.
aod_dir = "N:/Projects/P028.IL.Israel.MAIAC.PM.V2/raw/MAIAC_data_082016/old_MAIAC_data"
ref_grid = "N:/Projects/P028.IL.Israel.MAIAC.PM.V2/raw/MAIAC_data_082016/old_MAIAC_data/MAIACLatlon.h06v07.hdf"
###################################################################
# STEP 1 - Reading HDF files
# t = "h01v02"
# Read 'lon' and 'lat' rasters from static grid
# grid = get_subdatasets(ref_grid)
# lon = grid[2] %>% readGDAL %>% raster
# lat = grid[1] %>% readGDAL %>% raster
sds = get_subdatasets(ref_grid) # Read current file
lon = hdf_to_tif(ref_grid, 2)
lat = hdf_to_tif(ref_grid, 1)
# Create 'row' and 'col' rasters (cell indices, used later to join AOD values
# back onto the static lon/lat grid)
row = lon
row[] = rowFromCell(lon, 1:ncell(lon))
col = lon
col[] = colFromCell(lon, 1:ncell(lon))
# Combine to multi-band raster
grid = stack(row, col, lon, lat)
names(grid) = c("row", "col", "lon", "lat")
# Convert to data.frame
grid = as.data.frame(grid)
# Spatial subset: keep only grid cells inside the project polygon
coordinates(grid) = ~ lon + lat
proj4string(grid) = "+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"
grid = grid[pol, ]
#####################
# Write grid table
# leaflet() %>% addTiles %>% addPolygons(data = gConvexHull(grid[sample(1:nrow(grid), 1000), ]))
# writeOGR(grid, ".", "grid_h01v02", driver = "ESRI Shapefile")
#####################
# Convert back to a plain data.frame for the row/col join below
grid = as.data.frame(grid)
for(year in 2011) {
  # Read HDF files list from AOD directory
  setwd(file.path(aod_dir, year))
  files = list.files(pattern = "MAIACAAOT.h06v07.*\\.hdf$", recursive = TRUE) # note that MAIACTAOT is for TERRA data and MAIACAAOT is for AQUA data
  result = list()
  for(f in files) {
    # Read data
    sds = get_subdatasets(f)
    # Extract the AOD and uncertainty layers by subdataset name
    Optical_Depth= hdf_to_tif(f, grep("grid1km:Optical_Depth", sds))
    AOT_Uncertainty = hdf_to_tif(f, grep("grid1km:AOT_Uncertainty", sds))
    # Optical_Depth_Land = sds[grepl("Optical_Depth_Land", sds)] %>% readGDAL %>% raster
    # AOT_Uncertainty = sds[grepl("AOT_Uncertainty", sds)] %>% readGDAL %>% raster
    # AOT_QA = sds[grepl("AOT_QA", sds)] %>% readGDAL %>% raster
    row = Optical_Depth
    row[] = rowFromCell(Optical_Depth, 1:ncell(Optical_Depth))
    col = Optical_Depth
    col[] = colFromCell(Optical_Depth, 1:ncell(Optical_Depth))
    r = stack(row, col, Optical_Depth, AOT_Uncertainty)
    names(r) = c("row", "col", "Optical_Depth", "AOT_Uncertainty")
    r = as.data.frame(r)
    # Join with 'grid' on cell indices; drop cells outside the project area
    r = join(r, grid, c("row", "col"))
    r = r[!is.na(r$lon) & !is.na(r$lat), ]
    # r = r[indices, ]
    # Parse the acquisition date (YYYYDDD) out of the file name
    r$date =
      f %>%
      strsplit("\\.") %>%
      sapply("[", 3) %>%
      substr(1, 7) %>%
      as.Date(format = "%Y%j")
    # Combine results
    result[[f]] = r
  }
  result = do.call(rbind.fill, result)
}
# `year` still holds the last loop value (2011) here, so the file name is the
# one for the final year processed.
write.csv(
  result,
  paste0("N:/Projects/P028.IL.Israel.MAIAC.PM.V2/work/MAIAC_data_082016/old_MAIAC_data/2011/MAIACAAOT_Israel_", year, ".csv"),
  row.names = FALSE
)
aqua=read.csv("N:/Projects/P028.IL.Israel.MAIAC.PM.V2/raw/MAIAC_data_082016/old_MAIAC_data/2011/MAIACAAOT_Israel_2011.csv")
# Build a stable cell id from rounded lon/lat
aqua$aodid<-paste(formatC(round(aqua$lon,3),format='f',3),formatC(round(aqua$lat,3),format='f',3),sep="-")
# FIX: as.data.table()/data.table subsetting below need the data.table
# package, which was never loaded in this script (not in the library block).
library(data.table)
aqua<-as.data.table(aqua)
aqua_grid=aqua[!duplicated(aodid)]
aqua_grid<-as.data.frame(aqua_grid)
setwd("N:/Projects/P028.IL.Israel.MAIAC.PM.V2/raw/MAIAC_data_082016/old_MAIAC_data/2011")
write.csv(aqua_grid,"aqua_unique_grid.csv")
###################################################################
# STEP 2 - Processing
# library(plyr)
#
# aod_dir = "/home/michael/MAIAC_AOD_P024/2011"
# setwd(aod_dir)
#
# file =
# list.files(pattern = paste0("MAIACTAOT_", t, "_.*\\.csv$"))
#
# dat = read.csv(i, stringsAsFactors = FALSE)
#
#
# # dat$row = NULL
# # dat$col = NULL
#
# cols = c(
# "MESA_Study_Site",
# "row",
# "col",
# "lon",
# "lat",
# "date",
# "Optical_Depth_047",
# "AOT_Uncertainty",
# "AOT_QA",
# "file"
# )
# dat = dat[, cols]
#
# y =
# i %>%
# strsplit("_") %>%
# sapply("[", 3) %>%
# strsplit("\\.") %>%
# sapply("[", 1)
#
# write.csv(
# dat,
# paste0("MAIACTAOT_Switzerland_final.csv"),
# row.names = FALSE
# )
#
###################################################################
| /Archive/read_hdf_maiac_aqua_old_data.R | no_license | alexandrashtein/Model-code | R | false | false | 5,980 | r | # This code converts the MAIAC aqua hdf files first into tiff files and finaly into csv file
rm(list=ls())
library(raster)
library(rgdal)
library(rgeos)
library(gdalUtils)
library(plyr)
library(leaflet)
library(magrittr)
library(sp)
# World
# world = getMap(resolution = "high")
# pol = world[!is.na(world$NAME) & world$NAME == "Switzerland", ]
# pol = spTransform(pol, "+proj=utm +zone=32 +ellps=WGS84 +datum=WGS84 +units=m +no_defs")
# pol = gBuffer(pol, width = 10000)
# pol = spTransform(pol, "+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0")
hdf_to_tif <- function(path, n) {
  # Extract subdataset `n` from the HDF file at `path`, write it out as a
  # GeoTIFF next to the source file (suffix "_<n>.tif"), and return it as a
  # raster object.
  #
  # path: path to a .hdf file readable by gdalUtils::get_subdatasets().
  # n:    index of the subdataset to extract.
  sds <- get_subdatasets(path)
  # Re-assemble the subdataset identifier with the file-path component
  # double-quoted so gdal_translate tolerates special characters in the path.
  # NOTE(review): assumes the identifier has exactly 6 colon-separated parts.
  x <- strsplit(sds[n], ":")[[1]]
  x <- paste0(x[1], ":", x[2], ':"', x[3], '":', x[4], ":", x[5], ":", x[6])
  # Compute the output file name once (the original repeated this gsub twice).
  out_path <- gsub(".hdf", paste0("_", n, ".tif"), path, fixed = TRUE)
  system(
    paste0(
      "gdal_translate -of GTiff ",
      "\"",
      x,
      "\" ",
      out_path
    )
  )
  raster(out_path) # Convert the freshly written GeoTIFF to a raster
}
# --- STEP 1: extract MAIAC AQUA AOD from HDF, clip to the project area, and
# --- dump a yearly CSV plus a unique-grid lookup table.
# Project area of interest, reprojected to lon/lat so it can clip the grid.
pol=readOGR(dsn="N:/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Qgis/General/Project_border/Project_aoi","Project_border_latlon")
pol = spTransform(pol, "+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0")
# Input Directories- for new MAIAC data (08.2016)
aod_dir = "N:/Projects/P028.IL.Israel.MAIAC.PM.V2/raw/MAIAC_data_082016"
ref_grid = "N:/Projects/P028.IL.Israel.MAIAC.PM.V2/raw/MAIAC_data_082016/MAIACLatlon.h03v03.hdf"
#Input Directories- for old MAIAC data (2014)
# NOTE: the assignments below intentionally override the "new data" paths
# above; comment out whichever pair is not wanted.
aod_dir = "N:/Projects/P028.IL.Israel.MAIAC.PM.V2/raw/MAIAC_data_082016/old_MAIAC_data"
ref_grid = "N:/Projects/P028.IL.Israel.MAIAC.PM.V2/raw/MAIAC_data_082016/old_MAIAC_data/MAIACLatlon.h06v07.hdf"
###################################################################
# STEP 1 - Reading HDF files
# t = "h01v02"
# Read 'lon' and 'lat' rasters from static grid
# grid = get_subdatasets(ref_grid)
# lon = grid[2] %>% readGDAL %>% raster
# lat = grid[1] %>% readGDAL %>% raster
sds = get_subdatasets(ref_grid) # Read current file
lon = hdf_to_tif(ref_grid, 2)
lat = hdf_to_tif(ref_grid, 1)
# Create 'row' and 'col' rasters (cell indices, used later to join AOD values
# back onto the static lon/lat grid)
row = lon
row[] = rowFromCell(lon, 1:ncell(lon))
col = lon
col[] = colFromCell(lon, 1:ncell(lon))
# Combine to multi-band raster
grid = stack(row, col, lon, lat)
names(grid) = c("row", "col", "lon", "lat")
# Convert to data.frame
grid = as.data.frame(grid)
# Spatial subset: keep only grid cells inside the project polygon
coordinates(grid) = ~ lon + lat
proj4string(grid) = "+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"
grid = grid[pol, ]
#####################
# Write grid table
# leaflet() %>% addTiles %>% addPolygons(data = gConvexHull(grid[sample(1:nrow(grid), 1000), ]))
# writeOGR(grid, ".", "grid_h01v02", driver = "ESRI Shapefile")
#####################
# Convert back to a plain data.frame for the row/col join below
grid = as.data.frame(grid)
for(year in 2011) {
  # Read HDF files list from AOD directory
  setwd(file.path(aod_dir, year))
  files = list.files(pattern = "MAIACAAOT.h06v07.*\\.hdf$", recursive = TRUE) # note that MAIACTAOT is for TERRA data and MAIACAAOT is for AQUA data
  result = list()
  for(f in files) {
    # Read data
    sds = get_subdatasets(f)
    # Extract the AOD and uncertainty layers by subdataset name
    Optical_Depth= hdf_to_tif(f, grep("grid1km:Optical_Depth", sds))
    AOT_Uncertainty = hdf_to_tif(f, grep("grid1km:AOT_Uncertainty", sds))
    # Optical_Depth_Land = sds[grepl("Optical_Depth_Land", sds)] %>% readGDAL %>% raster
    # AOT_Uncertainty = sds[grepl("AOT_Uncertainty", sds)] %>% readGDAL %>% raster
    # AOT_QA = sds[grepl("AOT_QA", sds)] %>% readGDAL %>% raster
    row = Optical_Depth
    row[] = rowFromCell(Optical_Depth, 1:ncell(Optical_Depth))
    col = Optical_Depth
    col[] = colFromCell(Optical_Depth, 1:ncell(Optical_Depth))
    r = stack(row, col, Optical_Depth, AOT_Uncertainty)
    names(r) = c("row", "col", "Optical_Depth", "AOT_Uncertainty")
    r = as.data.frame(r)
    # Join with 'grid' on cell indices; drop cells outside the project area
    r = join(r, grid, c("row", "col"))
    r = r[!is.na(r$lon) & !is.na(r$lat), ]
    # r = r[indices, ]
    # Parse the acquisition date (YYYYDDD) out of the file name
    r$date =
      f %>%
      strsplit("\\.") %>%
      sapply("[", 3) %>%
      substr(1, 7) %>%
      as.Date(format = "%Y%j")
    # Combine results
    result[[f]] = r
  }
  result = do.call(rbind.fill, result)
}
# `year` still holds the last loop value (2011) here, so the file name is the
# one for the final year processed.
write.csv(
  result,
  paste0("N:/Projects/P028.IL.Israel.MAIAC.PM.V2/work/MAIAC_data_082016/old_MAIAC_data/2011/MAIACAAOT_Israel_", year, ".csv"),
  row.names = FALSE
)
aqua=read.csv("N:/Projects/P028.IL.Israel.MAIAC.PM.V2/raw/MAIAC_data_082016/old_MAIAC_data/2011/MAIACAAOT_Israel_2011.csv")
# Build a stable cell id from rounded lon/lat
aqua$aodid<-paste(formatC(round(aqua$lon,3),format='f',3),formatC(round(aqua$lat,3),format='f',3),sep="-")
# FIX: as.data.table()/data.table subsetting below need the data.table
# package, which was never loaded in this script (not in the library block).
library(data.table)
aqua<-as.data.table(aqua)
aqua_grid=aqua[!duplicated(aodid)]
aqua_grid<-as.data.frame(aqua_grid)
setwd("N:/Projects/P028.IL.Israel.MAIAC.PM.V2/raw/MAIAC_data_082016/old_MAIAC_data/2011")
write.csv(aqua_grid,"aqua_unique_grid.csv")
###################################################################
# STEP 2 - Processing
# library(plyr)
#
# aod_dir = "/home/michael/MAIAC_AOD_P024/2011"
# setwd(aod_dir)
#
# file =
# list.files(pattern = paste0("MAIACTAOT_", t, "_.*\\.csv$"))
#
# dat = read.csv(i, stringsAsFactors = FALSE)
#
#
# # dat$row = NULL
# # dat$col = NULL
#
# cols = c(
# "MESA_Study_Site",
# "row",
# "col",
# "lon",
# "lat",
# "date",
# "Optical_Depth_047",
# "AOT_Uncertainty",
# "AOT_QA",
# "file"
# )
# dat = dat[, cols]
#
# y =
# i %>%
# strsplit("_") %>%
# sapply("[", 3) %>%
# strsplit("\\.") %>%
# sapply("[", 1)
#
# write.csv(
# dat,
# paste0("MAIACTAOT_Switzerland_final.csv"),
# row.names = FALSE
# )
#
###################################################################
|
# Read in all the simulation data files (the simulation was run in pieces to
# keep it computationally doable) and stack them into one data frame.
setwd("simresults/")
file_list<-list.files()
# Loop through the files, merge 'em together
simulation.results <- do.call("rbind",lapply(file_list,
                                             FUN=function(files){read.csv(files, header=TRUE)}))
setwd("..")
#Encoding of results (integer codes used by the simulation function):
#All scripted breaks found =1
#extra breaks found = 2
#missing breaks (when more than one break in sim data) =3
#right number of breaks but not all match =4
# total failure to find correct breaks =0
#because our function was throwing an error about results as factors, we encoded
#them numerically as above. But before we do any operations on it we should
#at least convert the outcome integers to factors so we don't accidentally
#do any numeric operations on them.
#note the other columns of the data frame are also basically ordinal/categorical, but
#we want to retain their order for plotting purposes, so we'll just proceed
#with caution there
simulation.results$victory<-as.factor(simulation.results$victory)
# Now we need to take the data produced and summarize it for plotting
library(plyr)
# Count number of times a unique observation (scenario x outcome) was recorded
summarize.results<-count(simulation.results,
                         c("Nyears", "startPop", "noise", "nbreaksin",
                           "startK", "startR", "changeK", "changeR", "victory"))
# Count the number of times a unique scenario was attempted (should be pretty
# uniform but there are sometimes cases where the fit failed) (for a denominator!)
tot.tests<-count(simulation.results,
                 c("Nyears", "startPop", "noise", "nbreaksin",
                   "startK", "startR", "changeK", "changeR"))
# Rename the freq column so we don't have naming issues with a merge
colnames(tot.tests)[colnames(tot.tests) == 'freq']<-'total.tests'
summarize.results<-merge(summarize.results, tot.tests)
# FIX: spell out the full column name. The original used $total, which only
# worked via data.frame partial matching against 'total.tests' -- fragile,
# and would silently return NULL if another 'total*' column were added.
summarize.results$proportion<-summarize.results$freq/summarize.results$total.tests
#all right, let's get plotting!
library(ggplot2)
#choose a color palette
# Sequential yellow-to-red palette; used below as the fill scale for
# as.factor(nbreaksin). The trimmed variants drop leading colours for plots
# where fewer break-count levels appear.
pal<-c("#ffffb2", "#fecc5c", "#fd8d3c", "#e31a1c")
pal.nozero<-c("#fecc5c", "#fd8d3c", "#e31a1c") #for cases where no zero break scenarios are plotted
pal.noone<-c("#fd8d3c", "#e31a1c") # drops the first two palette colours
pal.notwo<-c("#e31a1c") # single colour only
#we need to subset the data by factor we're varying.
###############
# Noise experiment: for each outcome code (1=correct, 2=extra, 3=missing,
# 4=mismatch, 0=fail), subset the summary table at fixed scenario settings,
# plot outcome proportion vs. % noise, then assemble a 5-panel figure (A-E).
#start with successes
# NOTE(review): this "correct" subset filters changeK==25, changeR==25,
# Nyears==20, while every other outcome subset below uses changeK==40,
# changeR==20, Nyears==25 -- confirm the panels are meant to come from the
# same scenario.
noise.experiment.correct<-summarize.results[which(summarize.results$changeK==25 &
                                                    summarize.results$changeR==25 &
                                                    summarize.results$victory==1 &
                                                    summarize.results$Nyears==20),]
noiseplot.correct<-ggplot(noise.experiment.correct, aes(noise, proportion, fill=as.factor(nbreaksin)))+
  scale_fill_manual(values=pal)+
  geom_smooth(method="gam", se=TRUE, formula=y ~ poly(x, 3), span=0.1)+
  geom_point(colour="black", pch=21, size=3)+
  theme_bw(base_size = 12)+
  guides(fill=guide_legend(title="Number\nof breaks"))+
  theme(legend.key=element_blank())+
  xlab("% noise")+
  ylab("proportion of outcomes")+
  xlim(0,90)+ylim(-0.2,1.1)
noiseplot.correct
#next do partial sucesses
# case 2 is extra breaks found
noise.experiment.extra<-summarize.results[which(summarize.results$changeK==40 &
                                                  summarize.results$changeR==20 &
                                                  summarize.results$victory==2 &
                                                  summarize.results$Nyears==25),]
noiseplot.extra<-ggplot(noise.experiment.extra, aes(noise, proportion, fill=as.factor(nbreaksin)))+
  scale_fill_manual(values=pal.nozero)+
  geom_smooth(method="gam", se=TRUE, formula=y ~ poly(x, 3), span=0.1)+
  geom_point(colour="black", pch=21, size=3)+
  theme_bw(base_size = 12)+
  guides(fill=guide_legend(title="Number\nof breaks"))+
  theme(legend.key=element_blank())+
  xlab("% noise")+
  ylab("proportion of outcomes")+
  xlim(0,90)+ylim(-0.2,1.1)
noiseplot.extra
#missing breaks in output
noise.experiment.missing<-summarize.results[which(summarize.results$changeK==40 &
                                                    summarize.results$changeR==20 &
                                                    summarize.results$victory==3 &
                                                    summarize.results$Nyears==25),]
noiseplot.missing<-ggplot(noise.experiment.missing, aes(noise, proportion, fill=as.factor(nbreaksin)))+
  scale_fill_manual(values=pal.notwo)+
  geom_smooth(method="gam", se=TRUE, formula=y ~ poly(x, 3), span=0.1)+
  geom_point(colour="black", pch=21, size=3)+
  theme_bw(base_size = 12)+
  guides(fill=guide_legend(title="Number\nof breaks"))+
  theme(legend.key=element_blank())+
  xlab("% noise")+
  ylab("proportion of outcomes")+
  xlim(0,90)+ylim(-0.2,1.1)
noiseplot.missing
#mismatched breaks in output
noise.experiment.mismatch<-summarize.results[which(summarize.results$changeK==40 &
                                                     summarize.results$changeR==20 &
                                                     summarize.results$victory==4 &
                                                     summarize.results$Nyears==25),]
noiseplot.mismatch<-ggplot(noise.experiment.mismatch, aes(noise, proportion, fill=as.factor(nbreaksin)))+
  scale_fill_manual(values=pal.noone)+
  geom_smooth(method="gam", se=TRUE, formula=y ~ poly(x, 3), span=0.1)+
  geom_point(colour="black", pch=21, size=3)+
  theme_bw(base_size = 12)+
  guides(fill=guide_legend(title="Number\nof breaks"))+
  theme(legend.key=element_blank())+
  xlab("% noise")+
  ylab("proportion of outcomes")+
  xlim(0,90)+ylim(-0.2,1.1)
noiseplot.mismatch
#complete failure to find breaks in output
noise.experiment.fail<-summarize.results[which(summarize.results$changeK==40 &
                                                 summarize.results$changeR==20 &
                                                 summarize.results$victory==0 &
                                                 summarize.results$Nyears==25),]
noiseplot.fail<-ggplot(noise.experiment.fail, aes(noise, proportion, fill=as.factor(nbreaksin)))+
  scale_fill_manual(values=pal)+
  geom_smooth(method="gam", se=TRUE, formula=y ~ poly(x, 3), span=0.1)+
  geom_point(colour="black", pch=21, size=3)+
  theme_bw(base_size = 12)+
  guides(fill=guide_legend(title="Number\nof breaks"))+
  theme(legend.key=element_blank())+
  xlab("% noise")+
  ylab("proportion of outcomes")+
  xlim(0,90)+ylim(-0.2,1.1)
noiseplot.fail
#stack plots together
# Strip legends/axis labels from panels A-D (E keeps the legend), fix aspect
# ratio, and tag each panel with its letter.
library(gridExtra)
library(grid)
noiseplot.correct1<-noiseplot.correct+
  guides(fill=FALSE)+
  ylab(NULL)+
  xlab(NULL)+
  coord_fixed(ratio=80)+
  annotate("text", x=85, y=1.03, label="A", size=5)
noiseplot.extra1<-noiseplot.extra+
  guides(fill=FALSE)+
  ylab(NULL)+
  xlab(NULL)+
  coord_fixed(ratio=80)+
  annotate("text", x=85, y=1.03, label="B", size=5)
noiseplot.missing1<-noiseplot.missing+
  guides(fill=FALSE)+
  ylab(NULL)+
  xlab(NULL)+
  coord_fixed(ratio=80)+
  annotate("text", x=85, y=1.03, label="C", size=5)
noiseplot.mismatch1<-noiseplot.mismatch+
  guides(fill=FALSE)+
  ylab(NULL)+
  xlab(NULL)+
  coord_fixed(ratio=80)+
  annotate("text", x=85, y=1.03, label="D", size=5)
noiseplot.fail1<-noiseplot.fail+
  ylab(NULL)+
  xlab(NULL)+
  coord_fixed(ratio=80)+
  annotate("text", x=85, y=1.03, label="E", size=5)
# Draw to the active device, then repeat to a PDF file.
grid.arrange(arrangeGrob(arrangeGrob(noiseplot.correct1, noiseplot.extra1, ncol=2),
                         arrangeGrob(noiseplot.missing1, noiseplot.mismatch1, ncol=2),
                         arrangeGrob(noiseplot.fail1, ncol=1, widths=0.6), ncol=1,
                         left=textGrob("\n Proportion of outcomes", rot=90,
                                       gp=gpar(fontsize=16, fontface="bold")),
                         sub=textGrob("% noise",
                                      gp=gpar(fontsize=16, fontface="bold"), vjust=-2)))
pdf("figs/noise_simulation_outcomes.pdf", height=10, width=7)
grid.arrange(arrangeGrob(arrangeGrob(noiseplot.correct1, noiseplot.extra1, ncol=2),
                         arrangeGrob(noiseplot.missing1, noiseplot.mismatch1, ncol=2),
                         arrangeGrob(noiseplot.fail1, ncol=1, widths=0.6), ncol=1,
                         left=textGrob("\n Proportion of outcomes", rot=90,
                                       gp=gpar(fontsize=16, fontface="bold")),
                         sub=textGrob("% noise",
                                      gp=gpar(fontsize=16, fontface="bold"), vjust=-2)))
dev.off()
###############
# K experiment: same 5-panel template as the noise experiment above, but
# varying % change in K on the x axis (noise fixed at 5, changeR at 20,
# Nyears at 25).
#start with successes
changeK.experiment.correct<-summarize.results[which(summarize.results$noise==5 &
                                                      summarize.results$changeR==20 &
                                                      summarize.results$victory==1 &
                                                      summarize.results$Nyears==25),]
changeKplot.correct<-ggplot(changeK.experiment.correct, aes(changeK, proportion, fill=as.factor(nbreaksin)))+
  scale_fill_manual(values=pal)+
  geom_smooth(method="gam", se=TRUE, formula=y ~ poly(x, 3), span=0.1)+
  geom_point(colour="black", pch=21, size=3)+
  theme_bw(base_size = 12)+
  guides(fill=guide_legend(title="Number\nof breaks"))+
  theme(legend.key=element_blank())+
  xlab("% change in K")+
  ylab("proportion of outcomes")+
  xlim(0,90)+ylim(-0.2,1.1)
changeKplot.correct
#next do partial sucesses
# case 2 is extra breaks found
changeK.experiment.extra<-summarize.results[which(summarize.results$noise==5 &
                                                    summarize.results$changeR==20 &
                                                    summarize.results$victory==2 &
                                                    summarize.results$Nyears==25),]
changeKplot.extra<-ggplot(changeK.experiment.extra, aes(changeK, proportion, fill=as.factor(nbreaksin)))+
  scale_fill_manual(values=pal.nozero)+
  geom_smooth(method="gam", se=TRUE, formula=y ~ poly(x, 3), span=0.1)+
  geom_point(colour="black", pch=21, size=3)+
  theme_bw(base_size = 12)+
  guides(fill=guide_legend(title="Number\nof breaks"))+
  theme(legend.key=element_blank())+
  xlab("% change in K ")+
  ylab("proportion of outcomes")+
  xlim(0,90)+ylim(-0.2,1.1)
changeKplot.extra
#missing breaks in output
changeK.experiment.missing<-summarize.results[which(summarize.results$noise==5 &
                                                      summarize.results$changeR==20 &
                                                      summarize.results$victory==3 &
                                                      summarize.results$Nyears==25),]
changeKplot.missing<-ggplot(changeK.experiment.missing, aes(changeK, proportion, fill=as.factor(nbreaksin)))+
  scale_fill_manual(values=pal.notwo)+
  geom_smooth(method="gam", se=TRUE, formula=y ~ poly(x, 3), span=0.1)+
  geom_point(colour="black", pch=21, size=3)+
  theme_bw(base_size = 12)+
  guides(fill=guide_legend(title="Number\nof breaks"))+
  theme(legend.key=element_blank())+
  xlab("% change in K ")+
  ylab("proportion of outcomes")+
  xlim(0,90)+ylim(-0.2,1.1)
changeKplot.missing
#mismatched breaks in output
changeK.experiment.mismatch<-summarize.results[which(summarize.results$noise==5 &
                                                       summarize.results$changeR==20 &
                                                       summarize.results$victory==4 &
                                                       summarize.results$Nyears==25),]
changeKplot.mismatch<-ggplot(changeK.experiment.mismatch, aes(changeK, proportion, fill=as.factor(nbreaksin)))+
  scale_fill_manual(values=pal.noone)+
  geom_smooth(method="gam", se=TRUE, formula=y ~ poly(x, 3), span=0.1)+
  geom_point(colour="black", pch=21, size=3)+
  theme_bw(base_size = 12)+
  guides(fill=guide_legend(title="Number\nof breaks"))+
  theme(legend.key=element_blank())+
  xlab("% change in K ")+
  ylab("proportion of outcomes")+
  xlim(0,90)+ylim(-0.2,1.1)
changeKplot.mismatch
#complete failure to find breaks in output
changeK.experiment.fail<-summarize.results[which(summarize.results$noise==5 &
                                                   summarize.results$changeR==20 &
                                                   summarize.results$victory==0 &
                                                   summarize.results$Nyears==25),]
changeKplot.fail<-ggplot(changeK.experiment.fail, aes(changeK, proportion, fill=as.factor(nbreaksin)))+
  scale_fill_manual(values=pal)+
  geom_smooth(method="gam", se=TRUE, formula=y ~ poly(x, 3), span=0.1)+
  geom_point(colour="black", pch=21, size=3)+
  theme_bw(base_size = 12)+
  guides(fill=guide_legend(title="Number\nof breaks"))+
  theme(legend.key=element_blank())+
  xlab("% change in K ")+
  ylab("proportion of outcomes")+
  xlim(0,90)+ylim(-0.2,1.1)
changeKplot.fail
#stack plots together
library(gridExtra)
library(grid)
changeKplot.correct1<-changeKplot.correct+
  guides(fill=FALSE)+
  ylab(NULL)+
  xlab(NULL)+
  coord_fixed(ratio=80)+
  annotate("text", x=85, y=1.03, label="A", size=5)
changeKplot.extra1<-changeKplot.extra+
  guides(fill=FALSE)+
  ylab(NULL)+
  xlab(NULL)+
  coord_fixed(ratio=80)+
  annotate("text", x=85, y=1.03, label="B", size=5)
changeKplot.missing1<-changeKplot.missing+
  guides(fill=FALSE)+
  ylab(NULL)+
  xlab(NULL)+
  coord_fixed(ratio=80)+
  annotate("text", x=85, y=1.03, label="C", size=5)
changeKplot.mismatch1<-changeKplot.mismatch+
  guides(fill=FALSE)+
  ylab(NULL)+
  xlab(NULL)+
  coord_fixed(ratio=80)+
  annotate("text", x=85, y=1.03, label="D", size=5)
changeKplot.fail1<-changeKplot.fail+
  ylab(NULL)+
  xlab(NULL)+
  coord_fixed(ratio=80)+
  annotate("text", x=85, y=1.03, label="E", size=5)
# Draw to the active device, then repeat to a PDF file.
grid.arrange(arrangeGrob(arrangeGrob(changeKplot.correct1, changeKplot.extra1, ncol=2),
                         arrangeGrob(changeKplot.missing1, changeKplot.mismatch1, ncol=2),
                         arrangeGrob(changeKplot.fail1, ncol=1, widths=0.6), ncol=1,
                         left=textGrob("\n Proportion of outcomes", rot=90,
                                       gp=gpar(fontsize=16, fontface="bold")),
                         sub=textGrob("% change in K ",
                                      gp=gpar(fontsize=16, fontface="bold"), vjust=-2)))
pdf("figs/changeK_simulation_outcomes.pdf", height=10, width=7)
grid.arrange(arrangeGrob(arrangeGrob(changeKplot.correct1, changeKplot.extra1, ncol=2),
                         arrangeGrob(changeKplot.missing1, changeKplot.mismatch1, ncol=2),
                         arrangeGrob(changeKplot.fail1, ncol=1, widths=0.6), ncol=1,
                         left=textGrob("\n Proportion of outcomes", rot=90,
                                       gp=gpar(fontsize=16, fontface="bold")),
                         sub=textGrob("% change in K ",
                                      gp=gpar(fontsize=16, fontface="bold"), vjust=-2)))
dev.off()
###############
# r experiment: same 5-panel template as above, varying % change in r on the
# x axis (noise fixed at 5, changeK at 40, Nyears at 25).
#start with successes
changeR.experiment.correct<-summarize.results[which(summarize.results$noise==5 &
                                                      summarize.results$changeK==40 &
                                                      summarize.results$victory==1 &
                                                      summarize.results$Nyears==25),]
changeRplot.correct<-ggplot(changeR.experiment.correct, aes(changeR, proportion, fill=as.factor(nbreaksin)))+
  scale_fill_manual(values=pal)+
  geom_smooth(method="gam", se=TRUE, formula=y ~ poly(x, 3), span=0.1)+
  geom_point(colour="black", pch=21, size=3)+
  theme_bw(base_size = 12)+
  guides(fill=guide_legend(title="Number\nof breaks"))+
  theme(legend.key=element_blank())+
  xlab("% change in r")+
  ylab("proportion of outcomes")+
  xlim(0,90)+ylim(-0.2,1.1)
changeRplot.correct
#next do partial successes
# case 2 is extra breaks found
changeR.experiment.extra<-summarize.results[which(summarize.results$noise==5 &
                                                    summarize.results$changeK==40 &
                                                    summarize.results$victory==2 &
                                                    summarize.results$Nyears==25),]
changeRplot.extra<-ggplot(changeR.experiment.extra, aes(changeR, proportion, fill=as.factor(nbreaksin)))+
  scale_fill_manual(values=pal.nozero)+
  geom_smooth(method="gam", se=TRUE, formula=y ~ poly(x, 3), span=0.1)+
  geom_point(colour="black", pch=21, size=3)+
  theme_bw(base_size = 12)+
  guides(fill=guide_legend(title="Number\nof breaks"))+
  theme(legend.key=element_blank())+
  xlab("% change in r ")+
  ylab("proportion of outcomes")+
  xlim(0,90)+ylim(-0.2,1.1)
changeRplot.extra
#missing breaks in output
changeR.experiment.missing<-summarize.results[which(summarize.results$noise==5 &
                                                      summarize.results$changeK==40 &
                                                      summarize.results$victory==3 &
                                                      summarize.results$Nyears==25),]
changeRplot.missing<-ggplot(changeR.experiment.missing, aes(changeR, proportion, fill=as.factor(nbreaksin)))+
  scale_fill_manual(values=pal.notwo)+
  geom_smooth(method="gam", se=TRUE, formula=y ~ poly(x, 3), span=0.1)+
  geom_point(colour="black", pch=21, size=3)+
  theme_bw(base_size = 12)+
  guides(fill=guide_legend(title="Number\nof breaks"))+
  theme(legend.key=element_blank())+
  xlab("% change in r")+
  ylab("proportion of outcomes")+
  xlim(0,90)+ylim(-0.2,1.1)
changeRplot.missing
#mismatched breaks in output
changeR.experiment.mismatch<-summarize.results[which(summarize.results$noise==5 &
                                                       summarize.results$changeK==40 &
                                                       summarize.results$victory==4 &
                                                       summarize.results$Nyears==25),]
changeRplot.mismatch<-ggplot(changeR.experiment.mismatch, aes(changeR, proportion, fill=as.factor(nbreaksin)))+
  scale_fill_manual(values=pal.noone)+
  geom_smooth(method="gam", se=TRUE, formula=y ~ poly(x, 3), span=0.1)+
  geom_point(colour="black", pch=21, size=3)+
  theme_bw(base_size = 12)+
  guides(fill=guide_legend(title="Number\nof breaks"))+
  theme(legend.key=element_blank())+
  xlab("% change in r ")+
  ylab("proportion of outcomes")+
  xlim(0,90)+ylim(-0.2,1.1)
changeRplot.mismatch
#complete failure to find breaks in output
changeR.experiment.fail<-summarize.results[which(summarize.results$noise==5 &
                                                   summarize.results$changeK==40 &
                                                   summarize.results$victory==0 &
                                                   summarize.results$Nyears==25),]
changeRplot.fail<-ggplot(changeR.experiment.fail, aes(changeR, proportion, fill=as.factor(nbreaksin)))+
  scale_fill_manual(values=pal)+
  geom_smooth(method="gam", se=TRUE, formula=y ~ poly(x, 3), span=0.1)+
  geom_point(colour="black", pch=21, size=3)+
  theme_bw(base_size = 12)+
  guides(fill=guide_legend(title="Number\nof breaks"))+
  theme(legend.key=element_blank())+
  xlab("% change in r ")+
  ylab("proportion of outcomes")+
  xlim(0,90)+ylim(-0.2,1.1)
changeRplot.fail
#stack plots together
library(gridExtra)
library(grid)
changeRplot.correct1<-changeRplot.correct+
  guides(fill=FALSE)+
  ylab(NULL)+
  xlab(NULL)+
  coord_fixed(ratio=80)+
  annotate("text", x=85, y=1.03, label="A", size=5)
changeRplot.extra1<-changeRplot.extra+
  guides(fill=FALSE)+
  ylab(NULL)+
  xlab(NULL)+
  coord_fixed(ratio=80)+
  annotate("text", x=85, y=1.03, label="B", size=5)
changeRplot.missing1<-changeRplot.missing+
  guides(fill=FALSE)+
  ylab(NULL)+
  xlab(NULL)+
  coord_fixed(ratio=80)+
  annotate("text", x=85, y=1.03, label="C", size=5)
changeRplot.mismatch1<-changeRplot.mismatch+
  guides(fill=FALSE)+
  ylab(NULL)+
  xlab(NULL)+
  coord_fixed(ratio=80)+
  annotate("text", x=85, y=1.03, label="D", size=5)
changeRplot.fail1<-changeRplot.fail+
  ylab(NULL)+
  xlab(NULL)+
  coord_fixed(ratio=80)+
  annotate("text", x=85, y=1.03, label="E", size=5)
# Draw to the active device, then repeat to a PDF file.
grid.arrange(arrangeGrob(arrangeGrob(changeRplot.correct1, changeRplot.extra1, ncol=2),
                         arrangeGrob(changeRplot.missing1, changeRplot.mismatch1, ncol=2),
                         arrangeGrob(changeRplot.fail1, ncol=1, widths=0.6), ncol=1,
                         left=textGrob("\n Proportion of outcomes", rot=90,
                                       gp=gpar(fontsize=16, fontface="bold")),
                         sub=textGrob("% change in r ",
                                      gp=gpar(fontsize=16, fontface="bold"), vjust=-2)))
pdf("figs/changeR_simulation_outcomes.pdf", height=10, width=7)
grid.arrange(arrangeGrob(arrangeGrob(changeRplot.correct1, changeRplot.extra1, ncol=2),
                         arrangeGrob(changeRplot.missing1, changeRplot.mismatch1, ncol=2),
                         arrangeGrob(changeRplot.fail1, ncol=1, widths=0.6), ncol=1,
                         left=textGrob("\n Proportion of outcomes", rot=90,
                                       gp=gpar(fontsize=16, fontface="bold")),
                         sub=textGrob("% change in r ",
                                      gp=gpar(fontsize=16, fontface="bold"), vjust=-2)))
dev.off()
###############
# Time series length experiment: same template, varying series length (Nyears)
# on the x axis; note Nyears is deliberately NOT filtered here since it is the
# varying factor. (Section continues beyond this chunk: the panel-assembly
# code follows the last plot below.)
#start with successes
Nyears.experiment.correct<-summarize.results[which(summarize.results$noise==5 &
                                                     summarize.results$changeK==40 & summarize.results$changeR==20 &
                                                     summarize.results$victory==1),]
Nyearsplot.correct<-ggplot(Nyears.experiment.correct, aes(Nyears, proportion, fill=as.factor(nbreaksin)))+
  scale_fill_manual(values=pal)+
  geom_smooth(method="gam", se=TRUE, formula=y ~ poly(x, 3), span=0.1)+
  geom_point(colour="black", pch=21, size=3)+
  theme_bw(base_size = 12)+
  guides(fill=guide_legend(title="Number\nof breaks"))+
  theme(legend.key=element_blank())+
  xlab("Series length")+
  ylab("proportion of outcomes")+
  xlim(24,35)+ylim(-0.2,1.1)
Nyearsplot.correct
#next do partial successes
# case 2 is extra breaks found
Nyears.experiment.extra<-summarize.results[which(summarize.results$noise==5 &
                                                   summarize.results$changeK==40 & summarize.results$changeR==20 &
                                                   summarize.results$victory==2),]
Nyearsplot.extra<-ggplot(Nyears.experiment.extra, aes(Nyears, proportion, fill=as.factor(nbreaksin)))+
  scale_fill_manual(values=pal.nozero)+
  geom_smooth(method="gam", se=TRUE, formula=y ~ poly(x, 3), span=0.1)+
  geom_point(colour="black", pch=21, size=3)+
  theme_bw(base_size = 12)+
  guides(fill=guide_legend(title="Number\nof breaks"))+
  theme(legend.key=element_blank())+
  xlab("Series length ")+
  ylab("proportion of outcomes")+
  xlim(24,35)+ylim(-0.2,1.1)
Nyearsplot.extra
#missing breaks in output
Nyears.experiment.missing<-summarize.results[which(summarize.results$noise==5 &
                                                     summarize.results$changeK==40 & summarize.results$changeR==20 &
                                                     summarize.results$victory==3),]
Nyearsplot.missing<-ggplot(Nyears.experiment.missing, aes(Nyears, proportion, fill=as.factor(nbreaksin)))+
  scale_fill_manual(values=pal.notwo)+
  geom_smooth(method="gam", se=TRUE, formula=y ~ poly(x, 3), span=0.1)+
  geom_point(colour="black", pch=21, size=3)+
  theme_bw(base_size = 12)+
  guides(fill=guide_legend(title="Number\nof breaks"))+
  theme(legend.key=element_blank())+
  xlab("Series length")+
  ylab("proportion of outcomes")+
  xlim(24,35)+ylim(-0.2,1.1)
Nyearsplot.missing
#mismatched breaks in output
Nyears.experiment.mismatch<-summarize.results[which(summarize.results$noise==5 &
                                                      summarize.results$changeK==40 & summarize.results$changeR==20 &
                                                      summarize.results$victory==4),]
Nyearsplot.mismatch<-ggplot(Nyears.experiment.mismatch, aes(Nyears, proportion, fill=as.factor(nbreaksin)))+
  scale_fill_manual(values=pal.noone)+
  geom_smooth(method="gam", se=TRUE, formula=y ~ poly(x, 3), span=0.1)+
  geom_point(colour="black", pch=21, size=3)+
  theme_bw(base_size = 12)+
  guides(fill=guide_legend(title="Number\nof breaks"))+
  theme(legend.key=element_blank())+
  xlab("Series length ")+
  ylab("proportion of outcomes")+
  xlim(24,35)+ylim(-0.2,1.1)
Nyearsplot.mismatch
#complete failure to find breaks in output
Nyears.experiment.fail<-summarize.results[which(summarize.results$noise==5 &
                                                  summarize.results$changeK==40 & summarize.results$changeR==20 &
                                                  summarize.results$victory==0),]
Nyearsplot.fail<-ggplot(Nyears.experiment.fail, aes(Nyears, proportion, fill=as.factor(nbreaksin)))+
  scale_fill_manual(values=pal)+
  geom_smooth(method="gam", se=TRUE, formula=y ~ poly(x, 3), span=0.1)+
  geom_point(colour="black", pch=21, size=3)+
  theme_bw(base_size = 12)+
  guides(fill=guide_legend(title="Number\nof breaks"))+
  theme(legend.key=element_blank())+
  xlab("Series length ")+
  ylab("proportion of outcomes")+
  xlim(24,35)+ylim(-0.2,1.1)
Nyearsplot.fail
#stack plots together
library(gridExtra)
library(grid)
Nyearsplot.correct1<-Nyearsplot.correct+
guides(fill=FALSE)+
ylab(NULL)+
xlab(NULL)+
coord_fixed(ratio=10)+
annotate("text", x=34.2, y=1.03, label="A", size=5)
Nyearsplot.extra1<-Nyearsplot.extra+
guides(fill=FALSE)+
ylab(NULL)+
xlab(NULL)+
coord_fixed(ratio=10)+
annotate("text", x=34.2, y=1.03, label="B", size=5)
Nyearsplot.missing1<-Nyearsplot.missing+
guides(fill=FALSE)+
ylab(NULL)+
xlab(NULL)+
coord_fixed(ratio=10)+
annotate("text", x=34.2, y=1.03, label="C", size=5)
Nyearsplot.mismatch1<-Nyearsplot.mismatch+
guides(fill=FALSE)+
ylab(NULL)+
xlab(NULL)+
coord_fixed(ratio=10)+
annotate("text", x=34.2, y=1.03, label="D", size=5)
Nyearsplot.fail1<-Nyearsplot.fail+
guides(fill=FALSE)+
ylab(NULL)+
xlab(NULL)+
coord_fixed(ratio=10)+
annotate("text", x=34.2, y=1.03, label="E", size=5)
#pull legend out of plot
#Extract the legend ("guide-box") grob from a ggplot so it can be placed
#independently with grid.arrange().
#
#@param a.gplot a ggplot object that contains a legend
#@return the legend gtable grob
g_legend <- function(a.gplot){
tmp <- ggplot_gtable(ggplot_build(a.gplot))
#vapply is type-stable, unlike sapply, which can silently change its
#return type on empty/ragged input
leg <- which(vapply(tmp$grobs, function(x) x$name, character(1)) == "guide-box")
#fail with a clear message instead of an opaque subscript error when the
#plot has no legend (e.g. after guides(fill=FALSE))
if (length(leg) == 0) stop("no legend ('guide-box' grob) found in plot", call. = FALSE)
#take the first match; original [[leg]] errored if several legends matched
legend <- tmp$grobs[[leg[1]]]
return(legend)}
leg<-g_legend(Nyearsplot.correct)
grid.arrange(arrangeGrob(arrangeGrob(Nyearsplot.correct1, Nyearsplot.extra1, ncol=2),
arrangeGrob(Nyearsplot.missing1, Nyearsplot.mismatch1, ncol=2),
arrangeGrob(Nyearsplot.fail1, leg, ncol=2), ncol=1,
left=textGrob("\n Proportion of outcomes", rot=90,
gp=gpar(fontsize=16, fontface="bold")),
sub=textGrob("Series length ",
gp=gpar(fontsize=16, fontface="bold"), vjust=-2)))
pdf("figs/Nyears_simulation_outcomes.pdf", height=10, width=7)
grid.arrange(arrangeGrob(arrangeGrob(Nyearsplot.correct1, Nyearsplot.extra1, ncol=2),
arrangeGrob(Nyearsplot.missing1, Nyearsplot.mismatch1, ncol=2),
arrangeGrob(Nyearsplot.fail1, leg, ncol=2), ncol=1,
left=textGrob("\n Proportion of outcomes", rot=90,
gp=gpar(fontsize=16, fontface="bold")),
sub=textGrob("Series length ",
gp=gpar(fontsize=16, fontface="bold"), vjust=-2)))
dev.off()
###############
# Outcome by incoming data experiment
#now we're looking at the data in the oposite way- so, we have an outcome, what is the
# probability of error of that outcome
summarize.results.breaksout<-count(simulation.results,
c("Nyears", "noise", "nbreaksin","nbreaksout",
"changeK", "changeR"))
#cut out scenarios changing K and r, because this wouldn't be 'known' from this side
breaksout.results<-summarize.results.breaksout[which(summarize.results.breaksout$Nyears==25 &
summarize.results.breaksout$changeK==40 &
summarize.results.breaksout$changeR==20 &
summarize.results.breaksout$nbreaksout<4),]
#compute frequency of results of that type
breaksout.tot.tests<-ddply(breaksout.results,
c("Nyears", "noise", "nbreaksout",
"changeK", "changeR"), summarise,
tot.tests=sum(freq))
#merge in (for denominator)
summarize.results.breaksout.1<-merge(breaksout.results, breaksout.tot.tests)
summarize.results.breaksout.1$proportion<-summarize.results.breaksout.1$freq/summarize.results.breaksout.1$tot.tests
# Scenario- 0 breaks observed
breaksout.results.0<-summarize.results.breaksout.1[which(summarize.results.breaksout.1$nbreaksout==0),]
#plot by outcome
pal<-c("#ffffb2", "#fecc5c", "#fd8d3c", "#e31a1c")
noiseplot.breaksout.0<-ggplot(breaksout.results.0, aes(noise, proportion, fill=as.factor(nbreaksin)))+
scale_fill_manual(values=pal)+
geom_smooth(method="gam", se=TRUE, formula=y ~ poly(x, 3), span=0.1)+
geom_point(colour="black", pch=21, size=3)+
theme_bw(base_size = 12)+
guides(fill=guide_legend(title="Actual number\nof breaks"))+
theme(legend.key=element_blank())+
xlab("% noise")+
ylab("proportion of outcomes")+
xlim(0,90)+ylim(-0.2,1.1)
noiseplot.breaksout.0
# Scenario- 1 breaks observed
breaksout.results.1<-summarize.results.breaksout.1[which(summarize.results.breaksout.1$nbreaksout==1),]
#plot by outcome
pal<-c("#ffffb2", "#fecc5c", "#fd8d3c", "#e31a1c")
noiseplot.breaksout.1<-ggplot(breaksout.results.1, aes(noise, proportion, fill=as.factor(nbreaksin)))+
scale_fill_manual(values=pal)+
geom_smooth(method="gam", se=TRUE, formula=y ~ poly(x, 3), span=0.1)+
geom_point(colour="black", pch=21, size=3)+
theme_bw(base_size = 12)+
guides(fill=guide_legend(title="Actual number\nof breaks"))+
theme(legend.key=element_blank())+
xlab("% noise")+
ylab("proportion of outcomes")+
xlim(0,90)+ylim(-0.2,1.1)
noiseplot.breaksout.1
# Scenario- 2 breaks observed
breaksout.results.2<-summarize.results.breaksout.1[which(summarize.results.breaksout.1$nbreaksout==2),]
#plot by outcome
pal<-c("#ffffb2", "#fecc5c", "#fd8d3c", "#e31a1c")
noiseplot.breaksout.2<-ggplot(breaksout.results.2, aes(noise, proportion, fill=as.factor(nbreaksin)))+
scale_fill_manual(values=pal)+
geom_smooth(method="gam", se=TRUE, formula=y ~ poly(x, 3), span=0.1)+
geom_point(colour="black", pch=21, size=3)+
theme_bw(base_size = 12)+
guides(fill=guide_legend(title="Actual number\nof breaks"))+
theme(legend.key=element_blank())+
xlab("% noise")+
ylab("proportion of outcomes")+
xlim(0,90)+ylim(-0.2,1.1)
noiseplot.breaksout.2
# Scenario- 3 breaks observed
breaksout.results.3<-summarize.results.breaksout.1[which(summarize.results.breaksout.1$nbreaksout==3),]
#plot by outcome
pal<-c("#ffffb2", "#fecc5c", "#fd8d3c", "#e31a1c")
noiseplot.breaksout.3<-ggplot(breaksout.results.3, aes(noise, proportion, fill=as.factor(nbreaksin)))+
scale_fill_manual(values=pal)+
geom_smooth(method="gam", se=TRUE, formula=y ~ poly(x, 3), span=0.1)+
geom_point(colour="black", pch=21, size=3)+
theme_bw(base_size = 12)+
guides(fill=guide_legend(title="Actual number\nof breaks"))+
theme(legend.key=element_blank())+
xlab("% noise")+
ylab("proportion of outcomes")+
xlim(0,90)+ylim(-0.2,1.1)
noiseplot.breaksout.3
#stack plots together
library(gridExtra)
library(grid)
noiseplot.breaksout.01<-noiseplot.breaksout.0+
guides(fill=FALSE)+
ylab(NULL)+
xlab(NULL)+
coord_fixed(ratio=80)+
annotate("text", x=85, y=1.03, label="A", size=5)
noiseplot.breaksout.11<-noiseplot.breaksout.1+
guides(fill=FALSE)+
ylab(NULL)+
xlab(NULL)+
coord_fixed(ratio=80)+
annotate("text", x=85, y=1.03, label="B", size=5)
noiseplot.breaksout.21<-noiseplot.breaksout.2+
guides(fill=FALSE)+
ylab(NULL)+
xlab(NULL)+
coord_fixed(ratio=80)+
annotate("text", x=85, y=1.03, label="C", size=5)
noiseplot.breaksout.31<-noiseplot.breaksout.3+
guides(fill=FALSE)+
ylab(NULL)+
xlab(NULL)+
coord_fixed(ratio=80)+
annotate("text", x=85, y=1.03, label="D", size=5)
#pull legend out of plot
#Extract the legend ("guide-box") grob from a ggplot so it can be placed
#independently with grid.arrange().
g_legend <- function(a.gplot){
tmp <- ggplot_gtable(ggplot_build(a.gplot))
#vapply is type-stable, unlike sapply, which can silently change its
#return type on empty/ragged input
leg <- which(vapply(tmp$grobs, function(x) x$name, character(1)) == "guide-box")
#fail with a clear message instead of an opaque subscript error when the
#plot has no legend
if (length(leg) == 0) stop("no legend ('guide-box' grob) found in plot", call. = FALSE)
legend <- tmp$grobs[[leg[1]]]
return(legend)}
leg<-g_legend(noiseplot.breaksout.3)
#create a blank grob to hold space where the legend would go next to D
#rectGrob() builds the grob without drawing it; the original grid.rect()
#returned the same grob but also drew it immediately on the active device
#as an unwanted side effect
blank <- rectGrob(gp=gpar(col="white"))
grid.arrange(arrangeGrob(arrangeGrob(noiseplot.breaksout.01, noiseplot.breaksout.11, leg, ncol=3, widths=c(30,30,40)),
arrangeGrob(noiseplot.breaksout.21, noiseplot.breaksout.31, blank, ncol=3, widths=c(30,30,40)),
ncol=1,
left=textGrob("\n Proportion of input scenarios", rot=90,
gp=gpar(fontsize=16, fontface="bold")),
sub=textGrob("% noise",
gp=gpar(fontsize=16, fontface="bold"), vjust=-2)))
pdf("figs/Figure2.pdf", height=7, width=7)
grid.arrange(arrangeGrob(arrangeGrob(noiseplot.breaksout.01, noiseplot.breaksout.11, leg, ncol=3, widths=c(35,35,30)),
arrangeGrob(noiseplot.breaksout.21, noiseplot.breaksout.31, blank, ncol=3, widths=c(35,35,30)),
ncol=1,
left=textGrob("\n Proportion of input scenarios", rot=90,
gp=gpar(fontsize=16, fontface="bold")),
sub=textGrob("% noise ",
gp=gpar(fontsize=16), vjust=-6)))
dev.off()
| /plot_simulation_results.R | no_license | 3shmawei/monarch_regime | R | false | false | 34,677 | r |
#read in all the sim data files- this was done in pieces to keep it computationally doable
#get a list of file names:
#NOTE(review): setwd()/setwd("..") mutates global state and is fragile if a
#read fails midway; file.path("simresults", file_list) would avoid it —
#kept as-is to preserve behavior. Also assumes simresults/ contains only
#CSVs (list.files() has no pattern filter) — confirm.
setwd("simresults/")
file_list<-list.files()
#loop through the files, merge 'em together
simulation.results <- do.call("rbind",lapply(file_list,
FUN=function(files){read.csv(files, header=TRUE)}))
setwd("..")
#Encoding results
#All scripted breaks found =1
#extra breaks found = 2
#missing breaks (when more than one break in sim data) =3
#right number of breaks but not all match =4
# total failure to find correct breaks =0
#because our function was throwing an error about results as factors, we encoded
#them numerically as above. But before we do any operations on it we should
#at least convert the outcome integers to factors so we don't accidentally
#do any numeric operations on them.
#note the other columns of the data frame are also basically ordinal/catagorical, but
#we want to retain their order for plotting purposes, so we'll just proceed
#with caution there
simulation.results$victory<-as.factor(simulation.results$victory)
#now we need to take th data produced and summarize it for plotting
library(plyr)
#count number of times a unique observation (scenario x outcome) was recorded
summarize.results<-count(simulation.results,
c("Nyears", "startPop", "noise", "nbreaksin",
"startK", "startR", "changeK", "changeR", "victory"))
#count the number of times a unique scenario was attemped (should be pretty uniform but
# there are someetimes cases where the fit failed) (for a denominator!)
tot.tests<-count(simulation.results,
c("Nyears", "startPop", "noise", "nbreaksin",
"startK", "startR", "changeK", "changeR"))
#rename the freq column so we don't have naming issues with a merge
colnames(tot.tests)[colnames(tot.tests) == 'total.tests']<-'total.tests'
colnames(tot.tests)[colnames(tot.tests) == 'freq']<-'total.tests'
#merge on all shared scenario columns to attach the denominator
summarize.results<-merge(summarize.results, tot.tests)
#proportion of runs of a scenario that ended in each outcome.
#FIX: spell out 'total.tests' — the original '$total' only worked via R's
#silent partial name matching against 'total.tests'
summarize.results$proportion<-summarize.results$freq/summarize.results$total.tests
#all right, let's get plotting!
library(ggplot2)
#choose a color palette (yellow -> red, one color per number of breaks)
pal<-c("#ffffb2", "#fecc5c", "#fd8d3c", "#e31a1c")
pal.nozero<-c("#fecc5c", "#fd8d3c", "#e31a1c") #for cases where no zero break scenarios are plotted
pal.noone<-c("#fd8d3c", "#e31a1c")
pal.notwo<-c("#e31a1c")
#we need to subset the data by factor we're varying.
###############
# Noise experiment
#start with successes (victory==1: all scripted breaks recovered)
#NOTE(review): this block filters changeK==25, changeR==25, Nyears==20,
#while the sibling outcome blocks below use changeK==40, changeR==20,
#Nyears==25 — confirm the inconsistency is intentional
noise.experiment.correct<-summarize.results[which(summarize.results$changeK==25 &
summarize.results$changeR==25 &
summarize.results$victory==1 &
summarize.results$Nyears==20),]
#proportion of outcomes vs noise level, one fill color per number of breaks
#NOTE(review): span= is ignored when method="gam"; it only applies to loess
noiseplot.correct<-ggplot(noise.experiment.correct, aes(noise, proportion, fill=as.factor(nbreaksin)))+
scale_fill_manual(values=pal)+
geom_smooth(method="gam", se=TRUE, formula=y ~ poly(x, 3), span=0.1)+
geom_point(colour="black", pch=21, size=3)+
theme_bw(base_size = 12)+
guides(fill=guide_legend(title="Number\nof breaks"))+
theme(legend.key=element_blank())+
xlab("% noise")+
ylab("proportion of outcomes")+
xlim(0,90)+ylim(-0.2,1.1)
noiseplot.correct
#next do partial successes
# case 2 is extra breaks found
noise.experiment.extra<-summarize.results[which(summarize.results$changeK==40 &
summarize.results$changeR==20 &
summarize.results$victory==2 &
summarize.results$Nyears==25),]
noiseplot.extra<-ggplot(noise.experiment.extra, aes(noise, proportion, fill=as.factor(nbreaksin)))+
scale_fill_manual(values=pal.nozero)+
geom_smooth(method="gam", se=TRUE, formula=y ~ poly(x, 3), span=0.1)+
geom_point(colour="black", pch=21, size=3)+
theme_bw(base_size = 12)+
guides(fill=guide_legend(title="Number\nof breaks"))+
theme(legend.key=element_blank())+
xlab("% noise")+
ylab("proportion of outcomes")+
xlim(0,90)+ylim(-0.2,1.1)
noiseplot.extra
#missing breaks in output
noise.experiment.missing<-summarize.results[which(summarize.results$changeK==40 &
summarize.results$changeR==20 &
summarize.results$victory==3 &
summarize.results$Nyears==25),]
noiseplot.missing<-ggplot(noise.experiment.missing, aes(noise, proportion, fill=as.factor(nbreaksin)))+
scale_fill_manual(values=pal.notwo)+
geom_smooth(method="gam", se=TRUE, formula=y ~ poly(x, 3), span=0.1)+
geom_point(colour="black", pch=21, size=3)+
theme_bw(base_size = 12)+
guides(fill=guide_legend(title="Number\nof breaks"))+
theme(legend.key=element_blank())+
xlab("% noise")+
ylab("proportion of outcomes")+
xlim(0,90)+ylim(-0.2,1.1)
noiseplot.missing
#mismatched breaks in output
noise.experiment.mismatch<-summarize.results[which(summarize.results$changeK==40 &
summarize.results$changeR==20 &
summarize.results$victory==4 &
summarize.results$Nyears==25),]
noiseplot.mismatch<-ggplot(noise.experiment.mismatch, aes(noise, proportion, fill=as.factor(nbreaksin)))+
scale_fill_manual(values=pal.noone)+
geom_smooth(method="gam", se=TRUE, formula=y ~ poly(x, 3), span=0.1)+
geom_point(colour="black", pch=21, size=3)+
theme_bw(base_size = 12)+
guides(fill=guide_legend(title="Number\nof breaks"))+
theme(legend.key=element_blank())+
xlab("% noise")+
ylab("proportion of outcomes")+
xlim(0,90)+ylim(-0.2,1.1)
noiseplot.mismatch
#complete failure to find breaks in output
noise.experiment.fail<-summarize.results[which(summarize.results$changeK==40 &
summarize.results$changeR==20 &
summarize.results$victory==0 &
summarize.results$Nyears==25),]
noiseplot.fail<-ggplot(noise.experiment.fail, aes(noise, proportion, fill=as.factor(nbreaksin)))+
scale_fill_manual(values=pal)+
geom_smooth(method="gam", se=TRUE, formula=y ~ poly(x, 3), span=0.1)+
geom_point(colour="black", pch=21, size=3)+
theme_bw(base_size = 12)+
guides(fill=guide_legend(title="Number\nof breaks"))+
theme(legend.key=element_blank())+
xlab("% noise")+
ylab("proportion of outcomes")+
xlim(0,90)+ylim(-0.2,1.1)
noiseplot.fail
#stack plots together
library(gridExtra)
library(grid)
noiseplot.correct1<-noiseplot.correct+
guides(fill=FALSE)+
ylab(NULL)+
xlab(NULL)+
coord_fixed(ratio=80)+
annotate("text", x=85, y=1.03, label="A", size=5)
noiseplot.extra1<-noiseplot.extra+
guides(fill=FALSE)+
ylab(NULL)+
xlab(NULL)+
coord_fixed(ratio=80)+
annotate("text", x=85, y=1.03, label="B", size=5)
noiseplot.missing1<-noiseplot.missing+
guides(fill=FALSE)+
ylab(NULL)+
xlab(NULL)+
coord_fixed(ratio=80)+
annotate("text", x=85, y=1.03, label="C", size=5)
noiseplot.mismatch1<-noiseplot.mismatch+
guides(fill=FALSE)+
ylab(NULL)+
xlab(NULL)+
coord_fixed(ratio=80)+
annotate("text", x=85, y=1.03, label="D", size=5)
noiseplot.fail1<-noiseplot.fail+
ylab(NULL)+
xlab(NULL)+
coord_fixed(ratio=80)+
annotate("text", x=85, y=1.03, label="E", size=5)
grid.arrange(arrangeGrob(arrangeGrob(noiseplot.correct1, noiseplot.extra1, ncol=2),
arrangeGrob(noiseplot.missing1, noiseplot.mismatch1, ncol=2),
arrangeGrob(noiseplot.fail1, ncol=1, widths=0.6), ncol=1,
left=textGrob("\n Proportion of outcomes", rot=90,
gp=gpar(fontsize=16, fontface="bold")),
sub=textGrob("% noise",
gp=gpar(fontsize=16, fontface="bold"), vjust=-2)))
pdf("figs/noise_simulation_outcomes.pdf", height=10, width=7)
grid.arrange(arrangeGrob(arrangeGrob(noiseplot.correct1, noiseplot.extra1, ncol=2),
arrangeGrob(noiseplot.missing1, noiseplot.mismatch1, ncol=2),
arrangeGrob(noiseplot.fail1, ncol=1, widths=0.6), ncol=1,
left=textGrob("\n Proportion of outcomes", rot=90,
gp=gpar(fontsize=16, fontface="bold")),
sub=textGrob("% noise",
gp=gpar(fontsize=16, fontface="bold"), vjust=-2)))
dev.off()
###############
# K experiment
#start with successes
changeK.experiment.correct<-summarize.results[which(summarize.results$noise==5 &
summarize.results$changeR==20 &
summarize.results$victory==1 &
summarize.results$Nyears==25),]
changeKplot.correct<-ggplot(changeK.experiment.correct, aes(changeK, proportion, fill=as.factor(nbreaksin)))+
scale_fill_manual(values=pal)+
geom_smooth(method="gam", se=TRUE, formula=y ~ poly(x, 3), span=0.1)+
geom_point(colour="black", pch=21, size=3)+
theme_bw(base_size = 12)+
guides(fill=guide_legend(title="Number\nof breaks"))+
theme(legend.key=element_blank())+
xlab("% change in K")+
ylab("proportion of outcomes")+
xlim(0,90)+ylim(-0.2,1.1)
changeKplot.correct
#next do partial sucesses
# case 2 is extra breaks found
changeK.experiment.extra<-summarize.results[which(summarize.results$noise==5 &
summarize.results$changeR==20 &
summarize.results$victory==2 &
summarize.results$Nyears==25),]
changeKplot.extra<-ggplot(changeK.experiment.extra, aes(changeK, proportion, fill=as.factor(nbreaksin)))+
scale_fill_manual(values=pal.nozero)+
geom_smooth(method="gam", se=TRUE, formula=y ~ poly(x, 3), span=0.1)+
geom_point(colour="black", pch=21, size=3)+
theme_bw(base_size = 12)+
guides(fill=guide_legend(title="Number\nof breaks"))+
theme(legend.key=element_blank())+
xlab("% change in K ")+
ylab("proportion of outcomes")+
xlim(0,90)+ylim(-0.2,1.1)
changeKplot.extra
#missing breaks in output
changeK.experiment.missing<-summarize.results[which(summarize.results$noise==5 &
summarize.results$changeR==20 &
summarize.results$victory==3 &
summarize.results$Nyears==25),]
changeKplot.missing<-ggplot(changeK.experiment.missing, aes(changeK, proportion, fill=as.factor(nbreaksin)))+
scale_fill_manual(values=pal.notwo)+
geom_smooth(method="gam", se=TRUE, formula=y ~ poly(x, 3), span=0.1)+
geom_point(colour="black", pch=21, size=3)+
theme_bw(base_size = 12)+
guides(fill=guide_legend(title="Number\nof breaks"))+
theme(legend.key=element_blank())+
xlab("% change in K ")+
ylab("proportion of outcomes")+
xlim(0,90)+ylim(-0.2,1.1)
changeKplot.missing
#mismatched breaks in output
changeK.experiment.mismatch<-summarize.results[which(summarize.results$noise==5 &
summarize.results$changeR==20 &
summarize.results$victory==4 &
summarize.results$Nyears==25),]
changeKplot.mismatch<-ggplot(changeK.experiment.mismatch, aes(changeK, proportion, fill=as.factor(nbreaksin)))+
scale_fill_manual(values=pal.noone)+
geom_smooth(method="gam", se=TRUE, formula=y ~ poly(x, 3), span=0.1)+
geom_point(colour="black", pch=21, size=3)+
theme_bw(base_size = 12)+
guides(fill=guide_legend(title="Number\nof breaks"))+
theme(legend.key=element_blank())+
xlab("% change in K ")+
ylab("proportion of outcomes")+
xlim(0,90)+ylim(-0.2,1.1)
changeKplot.mismatch
#complete failure to find breaks in output
changeK.experiment.fail<-summarize.results[which(summarize.results$noise==5 &
summarize.results$changeR==20 &
summarize.results$victory==0 &
summarize.results$Nyears==25),]
changeKplot.fail<-ggplot(changeK.experiment.fail, aes(changeK, proportion, fill=as.factor(nbreaksin)))+
scale_fill_manual(values=pal)+
geom_smooth(method="gam", se=TRUE, formula=y ~ poly(x, 3), span=0.1)+
geom_point(colour="black", pch=21, size=3)+
theme_bw(base_size = 12)+
guides(fill=guide_legend(title="Number\nof breaks"))+
theme(legend.key=element_blank())+
xlab("% change in K ")+
ylab("proportion of outcomes")+
xlim(0,90)+ylim(-0.2,1.1)
changeKplot.fail
#stack plots together
library(gridExtra)
library(grid)
changeKplot.correct1<-changeKplot.correct+
guides(fill=FALSE)+
ylab(NULL)+
xlab(NULL)+
coord_fixed(ratio=80)+
annotate("text", x=85, y=1.03, label="A", size=5)
changeKplot.extra1<-changeKplot.extra+
guides(fill=FALSE)+
ylab(NULL)+
xlab(NULL)+
coord_fixed(ratio=80)+
annotate("text", x=85, y=1.03, label="B", size=5)
changeKplot.missing1<-changeKplot.missing+
guides(fill=FALSE)+
ylab(NULL)+
xlab(NULL)+
coord_fixed(ratio=80)+
annotate("text", x=85, y=1.03, label="C", size=5)
changeKplot.mismatch1<-changeKplot.mismatch+
guides(fill=FALSE)+
ylab(NULL)+
xlab(NULL)+
coord_fixed(ratio=80)+
annotate("text", x=85, y=1.03, label="D", size=5)
changeKplot.fail1<-changeKplot.fail+
ylab(NULL)+
xlab(NULL)+
coord_fixed(ratio=80)+
annotate("text", x=85, y=1.03, label="E", size=5)
grid.arrange(arrangeGrob(arrangeGrob(changeKplot.correct1, changeKplot.extra1, ncol=2),
arrangeGrob(changeKplot.missing1, changeKplot.mismatch1, ncol=2),
arrangeGrob(changeKplot.fail1, ncol=1, widths=0.6), ncol=1,
left=textGrob("\n Proportion of outcomes", rot=90,
gp=gpar(fontsize=16, fontface="bold")),
sub=textGrob("% change in K ",
gp=gpar(fontsize=16, fontface="bold"), vjust=-2)))
pdf("figs/changeK_simulation_outcomes.pdf", height=10, width=7)
grid.arrange(arrangeGrob(arrangeGrob(changeKplot.correct1, changeKplot.extra1, ncol=2),
arrangeGrob(changeKplot.missing1, changeKplot.mismatch1, ncol=2),
arrangeGrob(changeKplot.fail1, ncol=1, widths=0.6), ncol=1,
left=textGrob("\n Proportion of outcomes", rot=90,
gp=gpar(fontsize=16, fontface="bold")),
sub=textGrob("% change in K ",
gp=gpar(fontsize=16, fontface="bold"), vjust=-2)))
dev.off()
###############
# r experiment
#start with successes
changeR.experiment.correct<-summarize.results[which(summarize.results$noise==5 &
summarize.results$changeK==40 &
summarize.results$victory==1 &
summarize.results$Nyears==25),]
changeRplot.correct<-ggplot(changeR.experiment.correct, aes(changeR, proportion, fill=as.factor(nbreaksin)))+
scale_fill_manual(values=pal)+
geom_smooth(method="gam", se=TRUE, formula=y ~ poly(x, 3), span=0.1)+
geom_point(colour="black", pch=21, size=3)+
theme_bw(base_size = 12)+
guides(fill=guide_legend(title="Number\nof breaks"))+
theme(legend.key=element_blank())+
xlab("% change in r")+
ylab("proportion of outcomes")+
xlim(0,90)+ylim(-0.2,1.1)
changeRplot.correct
#next do partial successes
# case 2 is extra breaks found
changeR.experiment.extra<-summarize.results[which(summarize.results$noise==5 &
summarize.results$changeK==40 &
summarize.results$victory==2 &
summarize.results$Nyears==25),]
changeRplot.extra<-ggplot(changeR.experiment.extra, aes(changeR, proportion, fill=as.factor(nbreaksin)))+
scale_fill_manual(values=pal.nozero)+
geom_smooth(method="gam", se=TRUE, formula=y ~ poly(x, 3), span=0.1)+
geom_point(colour="black", pch=21, size=3)+
theme_bw(base_size = 12)+
guides(fill=guide_legend(title="Number\nof breaks"))+
theme(legend.key=element_blank())+
xlab("% change in r ")+
ylab("proportion of outcomes")+
xlim(0,90)+ylim(-0.2,1.1)
changeRplot.extra
#missing breaks in output
changeR.experiment.missing<-summarize.results[which(summarize.results$noise==5 &
summarize.results$changeK==40 &
summarize.results$victory==3 &
summarize.results$Nyears==25),]
changeRplot.missing<-ggplot(changeR.experiment.missing, aes(changeR, proportion, fill=as.factor(nbreaksin)))+
scale_fill_manual(values=pal.notwo)+
geom_smooth(method="gam", se=TRUE, formula=y ~ poly(x, 3), span=0.1)+
geom_point(colour="black", pch=21, size=3)+
theme_bw(base_size = 12)+
guides(fill=guide_legend(title="Number\nof breaks"))+
theme(legend.key=element_blank())+
xlab("% change in r")+
ylab("proportion of outcomes")+
xlim(0,90)+ylim(-0.2,1.1)
changeRplot.missing
#mismatched breaks in output
changeR.experiment.mismatch<-summarize.results[which(summarize.results$noise==5 &
summarize.results$changeK==40 &
summarize.results$victory==4 &
summarize.results$Nyears==25),]
changeRplot.mismatch<-ggplot(changeR.experiment.mismatch, aes(changeR, proportion, fill=as.factor(nbreaksin)))+
scale_fill_manual(values=pal.noone)+
geom_smooth(method="gam", se=TRUE, formula=y ~ poly(x, 3), span=0.1)+
geom_point(colour="black", pch=21, size=3)+
theme_bw(base_size = 12)+
guides(fill=guide_legend(title="Number\nof breaks"))+
theme(legend.key=element_blank())+
xlab("% change in r ")+
ylab("proportion of outcomes")+
xlim(0,90)+ylim(-0.2,1.1)
changeRplot.mismatch
#complete failure to find breaks in output
changeR.experiment.fail<-summarize.results[which(summarize.results$noise==5 &
summarize.results$changeK==40 &
summarize.results$victory==0 &
summarize.results$Nyears==25),]
changeRplot.fail<-ggplot(changeR.experiment.fail, aes(changeR, proportion, fill=as.factor(nbreaksin)))+
scale_fill_manual(values=pal)+
geom_smooth(method="gam", se=TRUE, formula=y ~ poly(x, 3), span=0.1)+
geom_point(colour="black", pch=21, size=3)+
theme_bw(base_size = 12)+
guides(fill=guide_legend(title="Number\nof breaks"))+
theme(legend.key=element_blank())+
xlab("% change in r ")+
ylab("proportion of outcomes")+
xlim(0,90)+ylim(-0.2,1.1)
changeRplot.fail
#stack plots together
library(gridExtra)
library(grid)
changeRplot.correct1<-changeRplot.correct+
guides(fill=FALSE)+
ylab(NULL)+
xlab(NULL)+
coord_fixed(ratio=80)+
annotate("text", x=85, y=1.03, label="A", size=5)
changeRplot.extra1<-changeRplot.extra+
guides(fill=FALSE)+
ylab(NULL)+
xlab(NULL)+
coord_fixed(ratio=80)+
annotate("text", x=85, y=1.03, label="B", size=5)
changeRplot.missing1<-changeRplot.missing+
guides(fill=FALSE)+
ylab(NULL)+
xlab(NULL)+
coord_fixed(ratio=80)+
annotate("text", x=85, y=1.03, label="C", size=5)
changeRplot.mismatch1<-changeRplot.mismatch+
guides(fill=FALSE)+
ylab(NULL)+
xlab(NULL)+
coord_fixed(ratio=80)+
annotate("text", x=85, y=1.03, label="D", size=5)
changeRplot.fail1<-changeRplot.fail+
ylab(NULL)+
xlab(NULL)+
coord_fixed(ratio=80)+
annotate("text", x=85, y=1.03, label="E", size=5)
grid.arrange(arrangeGrob(arrangeGrob(changeRplot.correct1, changeRplot.extra1, ncol=2),
arrangeGrob(changeRplot.missing1, changeRplot.mismatch1, ncol=2),
arrangeGrob(changeRplot.fail1, ncol=1, widths=0.6), ncol=1,
left=textGrob("\n Proportion of outcomes", rot=90,
gp=gpar(fontsize=16, fontface="bold")),
sub=textGrob("% change in r ",
gp=gpar(fontsize=16, fontface="bold"), vjust=-2)))
pdf("figs/changeR_simulation_outcomes.pdf", height=10, width=7)
grid.arrange(arrangeGrob(arrangeGrob(changeRplot.correct1, changeRplot.extra1, ncol=2),
arrangeGrob(changeRplot.missing1, changeRplot.mismatch1, ncol=2),
arrangeGrob(changeRplot.fail1, ncol=1, widths=0.6), ncol=1,
left=textGrob("\n Proportion of outcomes", rot=90,
gp=gpar(fontsize=16, fontface="bold")),
sub=textGrob("% change in r ",
gp=gpar(fontsize=16, fontface="bold"), vjust=-2)))
dev.off()
###############
# Time series length experiment
#start with successes
Nyears.experiment.correct<-summarize.results[which(summarize.results$noise==5 &
summarize.results$changeK==40 & summarize.results$changeR==20 &
summarize.results$victory==1),]
Nyearsplot.correct<-ggplot(Nyears.experiment.correct, aes(Nyears, proportion, fill=as.factor(nbreaksin)))+
scale_fill_manual(values=pal)+
geom_smooth(method="gam", se=TRUE, formula=y ~ poly(x, 3), span=0.1)+
geom_point(colour="black", pch=21, size=3)+
theme_bw(base_size = 12)+
guides(fill=guide_legend(title="Number\nof breaks"))+
theme(legend.key=element_blank())+
xlab("Series length")+
ylab("proportion of outcomes")+
xlim(24,35)+ylim(-0.2,1.1)
Nyearsplot.correct
#next do partial successes
# case 2 is extra breaks found
Nyears.experiment.extra<-summarize.results[which(summarize.results$noise==5 &
summarize.results$changeK==40 & summarize.results$changeR==20 &
summarize.results$victory==2),]
Nyearsplot.extra<-ggplot(Nyears.experiment.extra, aes(Nyears, proportion, fill=as.factor(nbreaksin)))+
scale_fill_manual(values=pal.nozero)+
geom_smooth(method="gam", se=TRUE, formula=y ~ poly(x, 3), span=0.1)+
geom_point(colour="black", pch=21, size=3)+
theme_bw(base_size = 12)+
guides(fill=guide_legend(title="Number\nof breaks"))+
theme(legend.key=element_blank())+
xlab("Series length ")+
ylab("proportion of outcomes")+
xlim(24,35)+ylim(-0.2,1.1)
Nyearsplot.extra
#missing breaks in output
Nyears.experiment.missing<-summarize.results[which(summarize.results$noise==5 &
summarize.results$changeK==40 & summarize.results$changeR==20 &
summarize.results$victory==3),]
Nyearsplot.missing<-ggplot(Nyears.experiment.missing, aes(Nyears, proportion, fill=as.factor(nbreaksin)))+
scale_fill_manual(values=pal.notwo)+
geom_smooth(method="gam", se=TRUE, formula=y ~ poly(x, 3), span=0.1)+
geom_point(colour="black", pch=21, size=3)+
theme_bw(base_size = 12)+
guides(fill=guide_legend(title="Number\nof breaks"))+
theme(legend.key=element_blank())+
xlab("Series length")+
ylab("proportion of outcomes")+
xlim(24,35)+ylim(-0.2,1.1)
Nyearsplot.missing
#mismatched breaks in output
Nyears.experiment.mismatch<-summarize.results[which(summarize.results$noise==5 &
summarize.results$changeK==40 & summarize.results$changeR==20 &
summarize.results$victory==4),]
Nyearsplot.mismatch<-ggplot(Nyears.experiment.mismatch, aes(Nyears, proportion, fill=as.factor(nbreaksin)))+
scale_fill_manual(values=pal.noone)+
geom_smooth(method="gam", se=TRUE, formula=y ~ poly(x, 3), span=0.1)+
geom_point(colour="black", pch=21, size=3)+
theme_bw(base_size = 12)+
guides(fill=guide_legend(title="Number\nof breaks"))+
theme(legend.key=element_blank())+
xlab("Series length ")+
ylab("proportion of outcomes")+
xlim(24,35)+ylim(-0.2,1.1)
Nyearsplot.mismatch
#complete failure to find breaks in output
Nyears.experiment.fail<-summarize.results[which(summarize.results$noise==5 &
summarize.results$changeK==40 & summarize.results$changeR==20 &
summarize.results$victory==0),]
Nyearsplot.fail<-ggplot(Nyears.experiment.fail, aes(Nyears, proportion, fill=as.factor(nbreaksin)))+
scale_fill_manual(values=pal)+
geom_smooth(method="gam", se=TRUE, formula=y ~ poly(x, 3), span=0.1)+
geom_point(colour="black", pch=21, size=3)+
theme_bw(base_size = 12)+
guides(fill=guide_legend(title="Number\nof breaks"))+
theme(legend.key=element_blank())+
xlab("Series length ")+
ylab("proportion of outcomes")+
xlim(24,35)+ylim(-0.2,1.1)
Nyearsplot.fail
#stack plots together
library(gridExtra)
library(grid)
Nyearsplot.correct1<-Nyearsplot.correct+
guides(fill=FALSE)+
ylab(NULL)+
xlab(NULL)+
coord_fixed(ratio=10)+
annotate("text", x=34.2, y=1.03, label="A", size=5)
Nyearsplot.extra1<-Nyearsplot.extra+
guides(fill=FALSE)+
ylab(NULL)+
xlab(NULL)+
coord_fixed(ratio=10)+
annotate("text", x=34.2, y=1.03, label="B", size=5)
Nyearsplot.missing1<-Nyearsplot.missing+
guides(fill=FALSE)+
ylab(NULL)+
xlab(NULL)+
coord_fixed(ratio=10)+
annotate("text", x=34.2, y=1.03, label="C", size=5)
Nyearsplot.mismatch1<-Nyearsplot.mismatch+
guides(fill=FALSE)+
ylab(NULL)+
xlab(NULL)+
coord_fixed(ratio=10)+
annotate("text", x=34.2, y=1.03, label="D", size=5)
Nyearsplot.fail1<-Nyearsplot.fail+
guides(fill=FALSE)+
ylab(NULL)+
xlab(NULL)+
coord_fixed(ratio=10)+
annotate("text", x=34.2, y=1.03, label="E", size=5)
#pull legend out of plot
#Extract the legend ("guide-box") grob from a ggplot so it can be placed
#independently with grid.arrange(). (Same helper is re-defined earlier in
#the script; definitions are kept in sync.)
g_legend <- function(a.gplot){
tmp <- ggplot_gtable(ggplot_build(a.gplot))
#vapply is type-stable, unlike sapply, which can silently change its
#return type on empty/ragged input
leg <- which(vapply(tmp$grobs, function(x) x$name, character(1)) == "guide-box")
#fail with a clear message instead of an opaque subscript error when the
#plot has no legend
if (length(leg) == 0) stop("no legend ('guide-box' grob) found in plot", call. = FALSE)
legend <- tmp$grobs[[leg[1]]]
return(legend)}
leg<-g_legend(Nyearsplot.correct)
grid.arrange(arrangeGrob(arrangeGrob(Nyearsplot.correct1, Nyearsplot.extra1, ncol=2),
arrangeGrob(Nyearsplot.missing1, Nyearsplot.mismatch1, ncol=2),
arrangeGrob(Nyearsplot.fail1, leg, ncol=2), ncol=1,
left=textGrob("\n Proportion of outcomes", rot=90,
gp=gpar(fontsize=16, fontface="bold")),
sub=textGrob("Series length ",
gp=gpar(fontsize=16, fontface="bold"), vjust=-2)))
pdf("figs/Nyears_simulation_outcomes.pdf", height=10, width=7)
grid.arrange(arrangeGrob(arrangeGrob(Nyearsplot.correct1, Nyearsplot.extra1, ncol=2),
arrangeGrob(Nyearsplot.missing1, Nyearsplot.mismatch1, ncol=2),
arrangeGrob(Nyearsplot.fail1, leg, ncol=2), ncol=1,
left=textGrob("\n Proportion of outcomes", rot=90,
gp=gpar(fontsize=16, fontface="bold")),
sub=textGrob("Series length ",
gp=gpar(fontsize=16, fontface="bold"), vjust=-2)))
dev.off()
###############
# Outcome by incoming data experiment
#now we're looking at the data the opposite way- given an observed outcome
# (number of breaks detected), what is the probability of error?
#tally each (scenario, breaks-detected) combination
summarize.results.breaksout<-count(simulation.results,
c("Nyears", "noise", "nbreaksin","nbreaksout",
"changeK", "changeR"))
#cut out scenarios changing K and r because this wouldn't be 'known' from this side;
#also drop rare cases with 4+ detected breaks
breaksout.results<-summarize.results.breaksout[which(summarize.results.breaksout$Nyears==25 &
summarize.results.breaksout$changeK==40 &
summarize.results.breaksout$changeR==20 &
summarize.results.breaksout$nbreaksout<4),]
#compute total frequency of results of that type (note: grouped WITHOUT
#nbreaksin, so this sums over the true number of breaks)
breaksout.tot.tests<-ddply(breaksout.results,
c("Nyears", "noise", "nbreaksout",
"changeK", "changeR"), summarise,
tot.tests=sum(freq))
#merge in (for denominator)
summarize.results.breaksout.1<-merge(breaksout.results, breaksout.tot.tests)
#proportion of runs with this detected-break count that came from each true-break count
summarize.results.breaksout.1$proportion<-summarize.results.breaksout.1$freq/summarize.results.breaksout.1$tot.tests
# One panel per observed break count (0-3): proportion of true input
# scenarios (fill = actual number of breaks) as a function of noise level.
# The identical ggplot chain was previously copy-pasted four times; it is
# factored into a single helper here.
pal <- c("#ffffb2", "#fecc5c", "#fd8d3c", "#e31a1c")
# Build the proportion-vs-noise plot for one subset of the results.
plot_breaksout <- function(dat) {
  ggplot(dat, aes(noise, proportion, fill = as.factor(nbreaksin))) +
    scale_fill_manual(values = pal) +
    geom_smooth(method = "gam", se = TRUE, formula = y ~ poly(x, 3), span = 0.1) +
    geom_point(colour = "black", pch = 21, size = 3) +
    theme_bw(base_size = 12) +
    guides(fill = guide_legend(title = "Actual number\nof breaks")) +
    theme(legend.key = element_blank()) +
    xlab("% noise") +
    ylab("proportion of outcomes") +
    xlim(0, 90) + ylim(-0.2, 1.1)
}
# Scenario- 0 breaks observed
breaksout.results.0 <- summarize.results.breaksout.1[which(summarize.results.breaksout.1$nbreaksout == 0), ]
noiseplot.breaksout.0 <- plot_breaksout(breaksout.results.0)
noiseplot.breaksout.0
# Scenario- 1 break observed
breaksout.results.1 <- summarize.results.breaksout.1[which(summarize.results.breaksout.1$nbreaksout == 1), ]
noiseplot.breaksout.1 <- plot_breaksout(breaksout.results.1)
noiseplot.breaksout.1
# Scenario- 2 breaks observed
breaksout.results.2 <- summarize.results.breaksout.1[which(summarize.results.breaksout.1$nbreaksout == 2), ]
noiseplot.breaksout.2 <- plot_breaksout(breaksout.results.2)
noiseplot.breaksout.2
# Scenario- 3 breaks observed
breaksout.results.3 <- summarize.results.breaksout.1[which(summarize.results.breaksout.1$nbreaksout == 3), ]
noiseplot.breaksout.3 <- plot_breaksout(breaksout.results.3)
noiseplot.breaksout.3
#stack plots together
library(gridExtra)
library(grid)
# Strip legend/axis titles, fix the aspect ratio and tag each breaksout
# plot with a panel letter for the multi-panel figure. (Previously the
# same 6-step chain was copy-pasted four times.)
tag_noise_panel <- function(p, label) {
  p +
    guides(fill = FALSE) +
    ylab(NULL) +
    xlab(NULL) +
    coord_fixed(ratio = 80) +
    annotate("text", x = 85, y = 1.03, label = label, size = 5)
}
noiseplot.breaksout.01 <- tag_noise_panel(noiseplot.breaksout.0, "A")
noiseplot.breaksout.11 <- tag_noise_panel(noiseplot.breaksout.1, "B")
noiseplot.breaksout.21 <- tag_noise_panel(noiseplot.breaksout.2, "C")
noiseplot.breaksout.31 <- tag_noise_panel(noiseplot.breaksout.3, "D")
#pull legend out of plot
# Extract the legend ("guide-box") grob from a ggplot object.
# vapply keeps the lookup type-stable (sapply can return a list), and the
# explicit length check turns the original opaque "subscript out of bounds"
# error into a clear message when the plot carries no legend.
g_legend <- function(a.gplot){
  tmp <- ggplot_gtable(ggplot_build(a.gplot))
  leg <- which(vapply(tmp$grobs, function(x) x$name, character(1)) == "guide-box")
  if (length(leg) == 0) {
    stop("plot has no legend to extract", call. = FALSE)
  }
  tmp$grobs[[leg[1]]]
}
# Pull the shared legend from one fully-legended plot.
leg<-g_legend(noiseplot.breaksout.3)
#create a blank grob to hold space where the legend would go next to D
# NOTE(review): grid.rect() also *draws* the white rectangle on the current
# device as a side effect (gTree is returned invisibly); rectGrob() would
# build the grob without drawing -- confirm this is intentional.
blank <- grid.rect(gp=gpar(col="white"))
# On-screen 2x3 arrangement: panels A/B + legend on top, C/D + spacer below.
# NOTE(review): 'sub=' is the caption argument of older gridExtra; newer
# versions use 'bottom=' -- confirm against the installed version.
grid.arrange(arrangeGrob(arrangeGrob(noiseplot.breaksout.01, noiseplot.breaksout.11, leg, ncol=3, widths=c(30,30,40)),
arrangeGrob(noiseplot.breaksout.21, noiseplot.breaksout.31, blank, ncol=3, widths=c(30,30,40)),
ncol=1,
left=textGrob("\n Proportion of input scenarios", rot=90,
gp=gpar(fontsize=16, fontface="bold")),
sub=textGrob("% noise",
gp=gpar(fontsize=16, fontface="bold"), vjust=-2)))
# Same figure written to PDF (slightly different column widths/caption style).
pdf("figs/Figure2.pdf", height=7, width=7)
grid.arrange(arrangeGrob(arrangeGrob(noiseplot.breaksout.01, noiseplot.breaksout.11, leg, ncol=3, widths=c(35,35,30)),
arrangeGrob(noiseplot.breaksout.21, noiseplot.breaksout.31, blank, ncol=3, widths=c(35,35,30)),
ncol=1,
left=textGrob("\n Proportion of input scenarios", rot=90,
gp=gpar(fontsize=16, fontface="bold")),
sub=textGrob("% noise ",
gp=gpar(fontsize=16), vjust=-6)))
dev.off()
|
##############################################################################################
# libraries
##############################################################################################
# NOTE(review): base and stats are attached in every R session by default;
# kept only for explicitness. The duplicate library(caret) call has been
# removed (it was attached twice in the original).
library(base)
library(stats)
library(caret)
library(pROC)
library(data.table)
library(splitTools)
library(ggthemes)
library(ggpubr)
library(DescTools)
library(dplyr)
library(purrr)
library(mice)
library(modeest)
| /0 libraries.R | no_license | user0101-dot/Simulation-code | R | false | false | 431 | r | ##############################################################################################
# libraries
##############################################################################################
# NOTE(review): base and stats are attached in every R session by default;
# kept only for explicitness. The duplicate library(caret) call has been
# removed (it was attached twice in the original).
library(base)
library(stats)
library(caret)
library(pROC)
library(data.table)
library(splitTools)
library(ggthemes)
library(ggpubr)
library(DescTools)
library(dplyr)
library(purrr)
library(mice)
library(modeest)
|
#' @title
#'
#' Bayesian multivariate analysis of parametric AFT model with minimum deviance (DIC) among
#' weibull, log normal and log logistic distribution.
#'
#' @description Provides better estimates (which has minimum deviance(DIC) ) for survival data among weibull,
#' log normal and log logistic distribution of parametric AFT model using MCMC for
#' multivariable (maximum 5 at a time) in high dimensional data.
#'
#' @details
#' This function deals covariates (in data) with missing values. Missing value in any column (covariate) is replaced by mean of that particular covariate.
#' AFT model is log-linear regression model for survival time \eqn{ T_{1}},\eqn{ T_{2}},..,\eqn{T_{n}}.
#' i.e., \deqn{log(T_i)= x_i'\beta +\sigma\epsilon_i ;~\epsilon_i \sim F_\epsilon (.)~which~is~iid }
#' i.e., \deqn{T_i \sim AFT(F_\epsilon ,\beta,\tau|x_i)}
#' Where \eqn{ F_\epsilon } is known cdf which is defined on real line.
#' Here, when baseline distribution is extreme value then T follows weibull distribution.
#' To make interpretation of regression coefficients simpler, using extreme value distribution with median 0.
#' So using weibull distribution that leads to AFT model when
#' \deqn{ T \sim Weib(\sqrt{\tau},log(2)\exp(-x'\beta \sqrt{\tau})) }
#'
#' When baseline distribution is normal then T follows log normal distribution.
#' \deqn{ T \sim LN(x'\beta,1/\tau) }
#' When baseline distribution is logistic then T follows log logistic distribution.
#' \deqn{ T \sim Log-Logis(x'\beta,\sqrt{\tau)} }
#'
#' @param m Starting column number of covariates of study from high dimensional entered data.
#' @param n Ending column number of covariates of study from high dimensional entered data.
#' @param STime name of survival time in data.
#' @param Event name of event in data. 0 is for censored and 1 for occurrence of event.
#' @param nc number of MCMC chain.
#' @param ni number of MCMC iteration to update the outcome.
#' @param data High dimensional gene expression data that contains event status, survival time and and set of covariates.
#' @return Data frame is containing posterior estimates mean, sd, credible intervals, n.eff and Rhat for beta's, sigma, tau and deviance of the model for the selected covariates. beta's of regression coefficient of the model. beta[1] is for intercept and others are for covariates (which is/are chosen order as columns in data). 'sigma' is the scale parameter of the distribution. DIC is the estimate of expected predictive error (so lower deviance denotes better estimation).
#' @import R2jags
#'
#' @references Prabhash et al(2016) <doi:10.21307/stattrans-2016-046>
#'
#' @examples
#' ##
#' data(hdata)
#' aftbybmv(10,12,STime="os",Event="death",2,100,hdata)
#' ##
#' @export
#' @author Atanu Bhattacharjee, Gajendra Kumar Vishwakarma and Pragya Kumari
#' @seealso wbysmv, lgnbymv, lgstbymv
#'
aftbybmv <- function(m, n, STime, Event, nc, ni, data) {
  nr <- nrow(data)
  # Standardise the survival-time and event column names used below.
  if (STime != "os") {
    names(data)[names(data) == STime] <- "os"
  }
  if (Event != "death") {
    names(data)[names(data) == Event] <- "death"
  }
  # Covariates of interest: columns m..n of the high-dimensional data.
  d11 <- subset(data, select = c(m:n))
  # Mean-impute missing values column by column. (The original iterated
  # cell-by-cell, recomputing mean() for every single cell.)
  for (j in seq_len(ncol(d11))) {
    miss <- is.na(d11[[j]])
    if (any(miss)) {
      d11[miss, j] <- mean(d11[[j]], na.rm = TRUE)
    }
  }
  # Drop covariates whose column sum is zero. drop = FALSE keeps d11 a
  # data frame even when a single column remains (the original
  # `d11[,-pnt]` silently collapsed it to a vector in that case).
  keep <- vapply(d11, function(col) sum(col) != 0, logical(1))
  d11 <- d11[, keep, drop = FALSE]
  len <- ncol(d11)
  if (len == 0) {
    stop("no usable covariates remain after removing all-zero columns",
         call. = FALSE)
  }
  # Censoring setup for JAGS interval censoring (dinterval):
  #   os     - observed survival time, NA when censored
  #   stcen  - censoring bound (observed time if censored, else max + 100)
  #   cen    - 1 if censored, 0 otherwise
  #   ls / stcen1 - log-scale versions used by the log-logistic model
  d12 <- data.frame(data[, c("death", "os")], d11)
  mx <- max(d12$os) + 100
  surt <- ifelse(d12$death == 1, d12$os, NA)
  stcen <- ifelse(d12$death == 0, d12$os, mx)
  stcen1 <- log(stcen)
  ls <- log(surt)
  d12$os <- surt
  cen <- as.numeric(is.na(surt))
  d12 <- data.frame(d12, stcen, cen, ls, stcen1)
  # At most 5 covariates are modelled at a time.
  if (len > 5) {
    cat("Outcome for first 5 covariates : ")
    vv <- d11[, 1:5, drop = FALSE]
  } else {
    vv <- d11
  }
  vname <- colnames(vv)
  p <- ncol(vv)        # number of covariates actually modelled (1..5)
  X <- as.matrix(vv)
  # The three candidate AFT models share the same linear predictor,
  #   mu[i] = beta[1] + sX[i, ] %*% beta[2:(p+1)]
  # on within-sample standardised covariates; they differ only in the
  # baseline distribution. Using a design matrix + inprod replaces the
  # original five near-identical branches for len = 1..5.
  model.weib <- function() {
    for (j in 1:P) {
      for (i in 1:N) {
        sX[i, j] <- (X[i, j] - mean(X[, j])) / sd(X[, j])
      }
    }
    for (i in 1:N) {
      os[i] ~ dweib(alpha, lambda[i])
      cen[i] ~ dinterval(os[i], stcen[i])
      lambda[i] <- log(2) * exp(-mu[i] * sqrt(tau))
      mu[i] <- beta[1] + inprod(sX[i, ], beta[2:(P + 1)])
    }
    alpha <- sqrt(tau)
    for (k in 1:(P + 1)) {
      beta[k] ~ dnorm(0, 0.000001)
      rm[k] <- exp(beta[k])
      prob[k] <- step(beta[k])
    }
    tau ~ dgamma(0.001, 0.001)
    sigma <- sqrt(1 / tau)
  }
  model.lnorm <- function() {
    for (j in 1:P) {
      for (i in 1:N) {
        sX[i, j] <- (X[i, j] - mean(X[, j])) / sd(X[, j])
      }
    }
    for (i in 1:N) {
      os[i] ~ dlnorm(mu[i], tau)
      cen[i] ~ dinterval(os[i], stcen[i])
      mu[i] <- beta[1] + inprod(sX[i, ], beta[2:(P + 1)])
    }
    for (k in 1:(P + 1)) {
      beta[k] ~ dnorm(0, 0.000001)
      rm[k] <- exp(beta[k])
      prob[k] <- step(beta[k])
    }
    tau ~ dgamma(0.001, 0.001)
    sigma <- sqrt(1 / tau)
  }
  model.llogis <- function() {
    # Fitted on the log time scale (os = log survival time here).
    for (j in 1:P) {
      for (i in 1:N) {
        sX[i, j] <- (X[i, j] - mean(X[, j])) / sd(X[, j])
      }
    }
    for (i in 1:N) {
      os[i] ~ dlogis(mu[i], taustar)
      cen[i] ~ dinterval(os[i], stcen[i])
      mu[i] <- beta[1] + inprod(sX[i, ], beta[2:(P + 1)])
    }
    taustar <- sqrt(tau)
    for (k in 1:(P + 1)) {
      beta[k] ~ dnorm(0, 0.000001)
      rm[k] <- exp(beta[k])
      prob[k] <- step(beta[k])
    }
    tau ~ dgamma(0.001, 0.001)
    sigma <- sqrt(1 / tau)
    junk1 <- exp(os[1])
  }
  inits <- function() {
    list(beta = rep(0, p + 1), tau = 1)
  }
  # Fit one candidate model and return its posterior summary table.
  fit.one <- function(model.fun, dat) {
    fit <- jags(model.file = model.fun, data = dat, inits = inits,
                parameters.to.save = c("beta", "tau", "sigma"),
                n.chains = nc, n.iter = ni)
    data.frame(fit$BUGSoutput$summary)
  }
  dat.time <- list(os = d12$os, stcen = d12$stcen, cen = d12$cen,
                   X = X, N = nr, P = p)
  dat.log <- list(os = d12$ls, stcen = d12$stcen1, cen = d12$cen,
                  X = X, N = nr, P = p)
  f1 <- fit.one(model.weib, dat.time)   # Weibull, raw time scale
  f2 <- fit.one(model.lnorm, dat.time)  # log normal, raw time scale
  f3 <- fit.one(model.llogis, dat.log)  # log logistic, log time scale
  cat("Estimates for variables: ", vname, "\n")
  cat("First initialization belongs to Weibull distribution.", "\n")
  cat("Second initialization belongs to log normal distribution.", "\n")
  cat("Third initialization belongs to log logistic distribution.", "\n")
  # Pick the model with smallest posterior mean deviance (DIC proxy).
  # Addressing the row by name fixes the original f1[(len+2), 1] lookup,
  # which indexed past the summary table whenever len > 5 (only 5
  # covariates are modelled, so deviance sits at row 7, not len + 2).
  devs <- c(f1["deviance", "mean"],
            f2["deviance", "mean"],
            f3["deviance", "mean"])
  best <- which.min(devs)
  if (best == 1) {
    cat("Estimates for weibull distribution, which is found suitable with minimal DIC value: ", "\n")
    return(f1)
  }
  if (best == 2) {
    cat("Estimates for log normal distribution, which is found suitable with minimal DIC value:", "\n")
    return(f2)
  }
  cat("Estimates for log logistic, which is found suitable with minimal DIC value:", "\n")
  f3
}
# Symbols only meaningful inside the JAGS model code; silence R CMD check.
utils::globalVariables(c("N", "P", "X", "sd", "tau", "step", "inprod"))
| /R/aftbybmv.R | no_license | cran/afthd | R | false | false | 20,240 | r | #' @title
#'
#' Bayesian multivariate analysis of parametric AFT model with minimum deviance (DIC) among
#' weibull, log normal and log logistic distribution.
#'
#' @description Provides better estimates (which has minimum deviance(DIC) ) for survival data among weibull,
#' log normal and log logistic distribution of parametric AFT model using MCMC for
#' multivariable (maximum 5 at a time) in high dimensional data.
#'
#' @details
#' This function deals covariates (in data) with missing values. Missing value in any column (covariate) is replaced by mean of that particular covariate.
#' AFT model is log-linear regression model for survival time \eqn{ T_{1}},\eqn{ T_{2}},..,\eqn{T_{n}}.
#' i.e., \deqn{log(T_i)= x_i'\beta +\sigma\epsilon_i ;~\epsilon_i \sim F_\epsilon (.)~which~is~iid }
#' i.e., \deqn{T_i \sim AFT(F_\epsilon ,\beta,\tau|x_i)}
#' Where \eqn{ F_\epsilon } is known cdf which is defined on real line.
#' Here, when baseline distribution is extreme value then T follows weibull distribution.
#' To make interpretation of regression coefficients simpler, using extreme value distribution with median 0.
#' So using weibull distribution that leads to AFT model when
#' \deqn{ T \sim Weib(\sqrt{\tau},log(2)\exp(-x'\beta \sqrt{\tau})) }
#'
#' When baseline distribution is normal then T follows log normal distribution.
#' \deqn{ T \sim LN(x'\beta,1/\tau) }
#' When baseline distribution is logistic then T follows log logistic distribution.
#' \deqn{ T \sim Log-Logis(x'\beta,\sqrt{\tau)} }
#'
#' @param m Starting column number of covariates of study from high dimensional entered data.
#' @param n Ending column number of covariates of study from high dimensional entered data.
#' @param STime name of survival time in data.
#' @param Event name of event in data. 0 is for censored and 1 for occurrence of event.
#' @param nc number of MCMC chain.
#' @param ni number of MCMC iteration to update the outcome.
#' @param data High dimensional gene expression data that contains event status, survival time and and set of covariates.
#' @return Data frame is containing posterior estimates mean, sd, credible intervals, n.eff and Rhat for beta's, sigma, tau and deviance of the model for the selected covariates. beta's of regression coefficient of the model. beta[1] is for intercept and others are for covariates (which is/are chosen order as columns in data). 'sigma' is the scale parameter of the distribution. DIC is the estimate of expected predictive error (so lower deviance denotes better estimation).
#' @import R2jags
#'
#' @references Prabhash et al(2016) <doi:10.21307/stattrans-2016-046>
#'
#' @examples
#' ##
#' data(hdata)
#' aftbybmv(10,12,STime="os",Event="death",2,100,hdata)
#' ##
#' @export
#' @author Atanu Bhattacharjee, Gajendra Kumar Vishwakarma and Pragya Kumari
#' @seealso wbysmv, lgnbymv, lgstbymv
#'
aftbybmv=function(m,n,STime,Event,nc,ni,data){
nr<-nrow(data)
if(STime!="os"){
names(data)[names(data) == STime] <- "os"
}
if(Event!="death"){
names(data)[names(data) == Event] <- "death"
}
d11 <- subset(data, select = c(m:n))
le<-length(d11)
for(i in 1:nr) {
for(j in 1:le) {
d11[i,j] = ifelse(is.na(d11[i,j])=="TRUE", mean(d11[,j], na.rm=TRUE),
d11[i,j])
}
}
pnt<-NULL
for(j in 1:le)
{
if(sum(d11[,j])==0) {
pnt<-c(pnt,j)
}
}
if(is.null(pnt)==F){
d11 <- d11[,-pnt]
}
len<-length(d11)
d12<-data.frame(data[,c('death','os')],d11)
nnr<-nrow(d12)
mx<-max(d12$os) + 100
surt<-ifelse(d12$death == 1, d12$os, NA)
stcen<-ifelse(d12$death == 0, d12$os, mx)
stcen1<-log(stcen)
ls<-log(surt)
d12$os<-surt
cen<-as.numeric(is.na(surt))
d12<-data.frame(d12,stcen,cen,ls,stcen1)
if(len>5){
cat("Outcome for first 5 covariates : ")
vv<-subset(d11,select = c(1:5))
} else {
vv<-d11
}
vname<-colnames(vv)
if(len==1){
data1<-list(os=d12$os, stcen=d12$stcen, cen=d12$cen, v1=vv[,1], N = nr)
modelj1<-function(){
for (i in 1:N) {
sV1[i] <- (v1[i]-mean(v1[]))/sd(v1[])
os[i] ~ dweib(alpha,lambda[i])
cen[i] ~ dinterval(os[i],stcen[i])
lambda[i] <- log(2)*exp(-mu[i]*sqrt(tau))
mu[i] <- beta[1] + beta[2]*sV1[i]
}
alpha <- sqrt(tau)
for(i in 1:2){
beta[i] ~ dnorm(0,0.000001)
rm[i] <- exp(beta[i])
prob[i] <- step(beta[i])
}
tau ~ dgamma(0.001,0.001)
sigma <- sqrt(1/tau)
}
inits1 <- function() {
list(beta=c(0,0), tau=1)
}
jagsft1 <- jags(model.file=modelj1, data=data1, inits = inits1,
parameters.to.save = c('beta','tau','sigma'), n.chains=nc, n.iter = ni)
f1=data.frame(jagsft1$BUGSoutput$summary)
data2<-list(os=d12$os, stcen=d12$stcen, cen=d12$cen, v1=vv[,1], N = nr)
modelj2<-function(){
for (i in 1:N) {
sV1[i] <- (v1[i]-mean(v1[]))/sd(v1[])
os[i] ~ dlnorm(mu[i], tau)
cen[i] ~ dinterval(os[i],stcen[i])
mu[i] <- beta[1] + beta[2]*sV1[i]
}
for(i in 1:2){
beta[i] ~ dnorm(0,0.000001)
rm[i] <- exp(beta[i])
prob[i] <- step(beta[i])
}
tau ~ dgamma(0.001,0.001)
sigma <- sqrt(1/tau)
}
inits2 <- function() {
list(beta=c(0,0), tau=1)
}
jagsft2 <- jags(model.file=modelj2, data=data2, inits = inits2,
parameters.to.save = c('beta','tau','sigma'), n.chains=nc, n.iter = ni)
f2=data.frame(jagsft2$BUGSoutput$summary)
data3<-list(os=d12$ls, stcen=d12$stcen1, cen=d12$cen, v1=vv[,1], N = nr)
modelj3<-function(){
for (i in 1:N) {
sV1[i] <- (v1[i]-mean(v1[]))/sd(v1[])
os[i] ~ dlogis(mu[i], taustar)
cen[i] ~ dinterval(os[i],stcen[i])
mu[i] <- beta[1] + beta[2]*sV1[i]
}
taustar <- sqrt(tau)
for(i in 1:2){
beta[i] ~ dnorm(0,0.000001)
rm[i] <- exp(beta[i])
prob[i] <- step(beta[i])
}
tau ~ dgamma(0.001,0.001)
sigma <- sqrt(1/tau)
junk1 <- exp(os[1])
}
inits3 <- function() {
list(beta=c(0,0), tau=1)
}
jagsft3 <- jags(model.file=modelj3, data=data3, inits = inits3,
parameters.to.save = c('beta','tau','sigma'), n.chains=nc,
n.iter = ni)
f3=data.frame(jagsft3$BUGSoutput$summary)
} else if(len==2){
data1<-list(os=d12$os, stcen=d12$stcen, cen=d12$cen, v1=vv[,1], v2=vv[,2], N = nr)
modelj1<-function(){
for (i in 1:N) {
sV1[i] <- (v1[i]-mean(v1[]))/sd(v1[])
sV2[i] <- (v2[i]-mean(v2[]))/sd(v2[])
os[i] ~ dweib(alpha,lambda[i])
cen[i] ~ dinterval(os[i],stcen[i])
lambda[i] <- log(2)*exp(-mu[i]*sqrt(tau))
mu[i] <- beta[1] + beta[2]*sV1[i]+beta[3]*sV2[i]
}
alpha <- sqrt(tau)
for(i in 1:3){
beta[i] ~ dnorm(0,0.000001)
rm[i] <- exp(beta[i])
prob[i] <- step(beta[i])
}
tau ~ dgamma(0.001,0.001)
sigma <- sqrt(1/tau)
}
inits1 <- function() {
list(beta=c(0,0,0), tau=1)
}
jagsft1 <- jags(model.file=modelj1, data=data1, inits = inits1,
parameters.to.save = c('beta','tau','sigma'), n.chains=nc, n.iter = ni)
f1=data.frame(jagsft1$BUGSoutput$summary)
data2<-list(os=d12$os, stcen=d12$stcen, cen=d12$cen, v1=vv[,1],v2=vv[,2], N = nr)
modelj2<-function(){
for (i in 1:N) {
sV1[i] <- (v1[i]-mean(v1[]))/sd(v1[])
sV2[i] <- (v2[i]-mean(v2[]))/sd(v2[])
os[i] ~ dlnorm(mu[i], tau)
cen[i] ~ dinterval(os[i],stcen[i])
mu[i] <- beta[1] + beta[2]*sV1[i] + beta[3]*sV2[i]
}
for(i in 1:3){
beta[i] ~ dnorm(0,0.000001)
rm[i] <- exp(beta[i])
prob[i] <- step(beta[i])
}
tau ~ dgamma(0.001,0.001)
sigma <- sqrt(1/tau)
}
inits2 <- function() {
list(beta=c(0,0,0), tau=1)
}
jagsft2 <- jags(model.file=modelj2, data=data2, inits = inits2,
parameters.to.save = c('beta','tau','sigma'), n.chains=nc, n.iter = ni)
f2=data.frame(jagsft2$BUGSoutput$summary)
data3<-list(os=d12$ls, stcen=d12$stcen1, cen=d12$cen, v1=vv[,1],v2=vv[,2], N = nr)
modelj3<-function(){
for (i in 1:N) {
sV1[i] <- (v1[i]-mean(v1[]))/sd(v1[])
sV2[i] <- (v2[i]-mean(v2[]))/sd(v2[])
os[i] ~ dlogis(mu[i], taustar)
cen[i] ~ dinterval(os[i],stcen[i])
mu[i] <- beta[1] + beta[2]*sV1[i] +beta[3]*sV2[i]
}
taustar <- sqrt(tau)
for(i in 1:3){
beta[i] ~ dnorm(0,0.000001)
rm[i] <- exp(beta[i])
prob[i] <- step(beta[i])
}
tau ~ dgamma(0.001,0.001)
sigma <- sqrt(1/tau)
junk1 <- exp(os[1])
}
inits3 <- function() {
list(beta=c(0,0,0), tau=1)
}
jagsft3 <- jags(model.file=modelj3, data=data3, inits = inits3,
parameters.to.save = c('beta','tau','sigma'), n.chains=nc,
n.iter = ni)
f3=data.frame(jagsft3$BUGSoutput$summary)
} else if(len==3){
data1<-list(os=d12$os, stcen=d12$stcen, cen=d12$cen, v1=vv[,1], v2=vv[,2],v3=vv[,3], N = nr)
modelj1<-function(){
for (i in 1:N) {
sV1[i] <- (v1[i]-mean(v1[]))/sd(v1[])
sV2[i] <- (v2[i]-mean(v2[]))/sd(v2[])
sV3[i] <- (v3[i]-mean(v3[]))/sd(v3[])
os[i] ~ dweib(alpha,lambda[i])
cen[i] ~ dinterval(os[i],stcen[i])
lambda[i] <- log(2)*exp(-mu[i]*sqrt(tau))
mu[i] <- beta[1] + beta[2]*sV1[i]+beta[3]*sV2[i] + beta[4]*sV3[i]
}
alpha <- sqrt(tau)
for(i in 1:4){
beta[i] ~ dnorm(0,0.000001)
rm[i] <- exp(beta[i])
prob[i] <- step(beta[i])
}
tau ~ dgamma(0.001,0.001)
sigma <- sqrt(1/tau)
}
inits1 <- function() {
list(beta=c(0,0,0,0), tau=1)
}
jagsft1 <- jags(model.file=modelj1, data=data1, inits = inits1,
parameters.to.save = c('beta','tau','sigma'), n.chains=nc, n.iter = ni)
f1=data.frame(jagsft1$BUGSoutput$summary)
data2<-list(os=d12$os, stcen=d12$stcen, cen=d12$cen, v1=vv[,1],v2=vv[,2], v3=vv[,3], N = nr)
# JAGS model definition (deparsed to BUGS text by R2jags; never evaluated
# as R code): log-normal survival model with three standardized covariates
# and right censoring via dinterval().
modelj2<-function(){
for (i in 1:N) {
# Standardize covariates to mean 0, sd 1 within the model.
sV1[i] <- (v1[i]-mean(v1[]))/sd(v1[])
sV2[i] <- (v2[i]-mean(v2[]))/sd(v2[])
sV3[i] <- (v3[i]-mean(v3[]))/sd(v3[])
# Log-normal survival time with log-scale mean mu and precision tau.
os[i] ~ dlnorm(mu[i], tau)
# Observed cen[i] constrains os[i] relative to censoring time stcen[i].
cen[i] ~ dinterval(os[i],stcen[i])
mu[i] <- beta[1] + beta[2]*sV1[i] + beta[3]*sV2[i] + beta[4]*sV3[i]
}
for(i in 1:4){
# Vague normal priors; rm = exp(beta) effect size; prob = P(beta > 0).
beta[i] ~ dnorm(0,0.000001)
rm[i] <- exp(beta[i])
prob[i] <- step(beta[i])
}
# Vague gamma prior on precision; sigma is the implied scale.
tau ~ dgamma(0.001,0.001)
sigma <- sqrt(1/tau)
}
# Chain initial values for the log-normal model (3 covariates): zero
# coefficients, unit precision.
inits2 <- function() {
  list(beta = rep(0, 4), tau = 1)
}
jagsft2 <- jags(model.file=modelj2, data=data2, inits = inits2,
parameters.to.save = c('beta','tau','sigma'), n.chains=nc, n.iter = ni)
f2=data.frame(jagsft2$BUGSoutput$summary)
data3<-list(os=d12$ls, stcen=d12$stcen1, cen=d12$cen, v1=vv[,1],v2=vv[,2], v3=vv[,3], N = nr)
# JAGS model definition (deparsed to BUGS text by R2jags; never evaluated
# as R code): log-logistic survival model on log survival time with three
# standardized covariates.
modelj3<-function(){
for (i in 1:N) {
# Standardize covariates to mean 0, sd 1 within the model.
sV1[i] <- (v1[i]-mean(v1[]))/sd(v1[])
sV2[i] <- (v2[i]-mean(v2[]))/sd(v2[])
sV3[i] <- (v3[i]-mean(v3[]))/sd(v3[])
# Logistic distribution on the log scale -> log-logistic survival times.
os[i] ~ dlogis(mu[i], taustar)
# Observed cen[i] constrains os[i] relative to censoring time stcen[i].
cen[i] ~ dinterval(os[i],stcen[i])
mu[i] <- beta[1] + beta[2]*sV1[i] +beta[3]*sV2[i] + beta[4]*sV3[i]
}
# Scale parameter for dlogis derived from the precision tau.
taustar <- sqrt(tau)
for(i in 1:4){
# Vague normal priors; rm = exp(beta) effect size; prob = P(beta > 0).
beta[i] ~ dnorm(0,0.000001)
rm[i] <- exp(beta[i])
prob[i] <- step(beta[i])
}
# Vague gamma prior on precision; sigma is the implied scale.
tau ~ dgamma(0.001,0.001)
sigma <- sqrt(1/tau)
# NOTE(review): junk1 presumably keeps the os[1] node alive — confirm.
junk1 <- exp(os[1])
}
# Chain initial values for the log-logistic model (3 covariates): zero
# coefficients, unit precision.
inits3 <- function() {
  list(beta = rep(0, 4), tau = 1)
}
jagsft3 <- jags(model.file=modelj3, data=data3, inits = inits3,
parameters.to.save = c('beta','tau','sigma'), n.chains=nc,
n.iter = ni)
f3=data.frame(jagsft3$BUGSoutput$summary)
} else if(len==4){
data1<-list(os=d12$os, stcen=d12$stcen, cen=d12$cen, v1=vv[,1], v2=vv[,2],v3=vv[,3],v4=vv[,4], N = nr)
# JAGS model definition (deparsed to BUGS text by R2jags; never evaluated
# as R code): Weibull survival model with four standardized covariates
# and right censoring via dinterval().
modelj1<-function(){
for (i in 1:N) {
# Standardize covariates to mean 0, sd 1 within the model.
sV1[i] <- (v1[i]-mean(v1[]))/sd(v1[])
sV2[i] <- (v2[i]-mean(v2[]))/sd(v2[])
sV3[i] <- (v3[i]-mean(v3[]))/sd(v3[])
sV4[i] <- (v4[i]-mean(v4[]))/sd(v4[])
os[i] ~ dweib(alpha,lambda[i])
# Observed cen[i] constrains os[i] relative to censoring time stcen[i].
cen[i] ~ dinterval(os[i],stcen[i])
# NOTE(review): presumably an AFT-style rate link; log(2) sets the median
# scale — confirm.
lambda[i] <- log(2)*exp(-mu[i]*sqrt(tau))
mu[i] <- beta[1] + beta[2]*sV1[i]+beta[3]*sV2[i] + beta[4]*sV3[i] + beta[5]*sV4[i]
}
# Weibull shape parameter derived from the precision tau.
alpha <- sqrt(tau)
for(i in 1:5){
# Vague normal priors; rm = exp(beta) effect size; prob = P(beta > 0).
beta[i] ~ dnorm(0,0.000001)
rm[i] <- exp(beta[i])
prob[i] <- step(beta[i])
}
# Vague gamma prior on precision; sigma is the implied scale.
tau ~ dgamma(0.001,0.001)
sigma <- sqrt(1/tau)
}
# Chain initial values for the Weibull model (4 covariates): zero
# coefficients, unit precision.
inits1 <- function() {
  list(beta = rep(0, 5), tau = 1)
}
jagsft1 <- jags(model.file=modelj1, data=data1, inits = inits1,
parameters.to.save = c('beta','tau','sigma'), n.chains=nc, n.iter = ni)
f1=data.frame(jagsft1$BUGSoutput$summary)
data2<-list(os=d12$os, stcen=d12$stcen, cen=d12$cen, v1=vv[,1],v2=vv[,2], v3=vv[,3],v4=vv[,4], N = nr)
# JAGS model definition (deparsed to BUGS text by R2jags; never evaluated
# as R code): log-normal survival model with four standardized covariates
# and right censoring via dinterval().
modelj2<-function(){
for (i in 1:N) {
# Standardize covariates to mean 0, sd 1 within the model.
sV1[i] <- (v1[i]-mean(v1[]))/sd(v1[])
sV2[i] <- (v2[i]-mean(v2[]))/sd(v2[])
sV3[i] <- (v3[i]-mean(v3[]))/sd(v3[])
sV4[i] <- (v4[i]-mean(v4[]))/sd(v4[])
# Log-normal survival time with log-scale mean mu and precision tau.
os[i] ~ dlnorm(mu[i], tau)
# Observed cen[i] constrains os[i] relative to censoring time stcen[i].
cen[i] ~ dinterval(os[i],stcen[i])
mu[i] <- beta[1] + beta[2]*sV1[i] + beta[3]*sV2[i] + beta[4]*sV3[i] + beta[5]*sV4[i]
}
for(i in 1:5){
# Vague normal priors; rm = exp(beta) effect size; prob = P(beta > 0).
beta[i] ~ dnorm(0,0.000001)
rm[i] <- exp(beta[i])
prob[i] <- step(beta[i])
}
# Vague gamma prior on precision; sigma is the implied scale.
tau ~ dgamma(0.001,0.001)
sigma <- sqrt(1/tau)
}
# Chain initial values for the log-normal model (4 covariates): zero
# coefficients, unit precision.
inits2 <- function() {
  list(beta = rep(0, 5), tau = 1)
}
jagsft2 <- jags(model.file=modelj2, data=data2, inits = inits2,
parameters.to.save = c('beta','tau','sigma'), n.chains=nc, n.iter = ni)
f2=data.frame(jagsft2$BUGSoutput$summary)
data3<-list(os=d12$ls, stcen=d12$stcen1, cen=d12$cen, v1=vv[,1],v2=vv[,2], v3=vv[,3],v4=vv[,4], N = nr)
# JAGS model definition (deparsed to BUGS text by R2jags; never evaluated
# as R code): log-logistic survival model on log survival time with four
# standardized covariates.
modelj3<-function(){
for (i in 1:N) {
# Standardize covariates to mean 0, sd 1 within the model.
sV1[i] <- (v1[i]-mean(v1[]))/sd(v1[])
sV2[i] <- (v2[i]-mean(v2[]))/sd(v2[])
sV3[i] <- (v3[i]-mean(v3[]))/sd(v3[])
sV4[i] <- (v4[i]-mean(v4[]))/sd(v4[])
# Logistic distribution on the log scale -> log-logistic survival times.
os[i] ~ dlogis(mu[i], taustar)
# Observed cen[i] constrains os[i] relative to censoring time stcen[i].
cen[i] ~ dinterval(os[i],stcen[i])
mu[i] <- beta[1] + beta[2]*sV1[i] +beta[3]*sV2[i] + beta[4]*sV3[i] + beta[5]*sV4[i]
}
# Scale parameter for dlogis derived from the precision tau.
taustar <- sqrt(tau)
for(i in 1:5){
# Vague normal priors; rm = exp(beta) effect size; prob = P(beta > 0).
beta[i] ~ dnorm(0,0.000001)
rm[i] <- exp(beta[i])
prob[i] <- step(beta[i])
}
# Vague gamma prior on precision; sigma is the implied scale.
tau ~ dgamma(0.001,0.001)
sigma <- sqrt(1/tau)
# NOTE(review): junk1 presumably keeps the os[1] node alive — confirm.
junk1 <- exp(os[1])
}
# Chain initial values for the log-logistic model (4 covariates): zero
# coefficients, unit precision.
inits3 <- function() {
  list(beta = rep(0, 5), tau = 1)
}
jagsft3 <- jags(model.file=modelj3, data=data3, inits = inits3,
parameters.to.save = c('beta','tau','sigma'), n.chains=nc,
n.iter = ni)
f3=data.frame(jagsft3$BUGSoutput$summary)
} else{
data1<-list(os=d12$os, stcen=d12$stcen, cen=d12$cen, v1=vv[,1], v2=vv[,2],v3=vv[,3],v4=vv[,4],v5=vv[,5], N = nr)
# JAGS model definition (deparsed to BUGS text by R2jags; never evaluated
# as R code): Weibull survival model with five standardized covariates
# and right censoring via dinterval().
modelj1<-function(){
for (i in 1:N) {
# Standardize covariates to mean 0, sd 1 within the model.
sV1[i] <- (v1[i]-mean(v1[]))/sd(v1[])
sV2[i] <- (v2[i]-mean(v2[]))/sd(v2[])
sV3[i] <- (v3[i]-mean(v3[]))/sd(v3[])
sV4[i] <- (v4[i]-mean(v4[]))/sd(v4[])
sV5[i] <- (v5[i]-mean(v5[]))/sd(v5[])
os[i] ~ dweib(alpha,lambda[i])
# Observed cen[i] constrains os[i] relative to censoring time stcen[i].
cen[i] ~ dinterval(os[i],stcen[i])
# NOTE(review): presumably an AFT-style rate link; log(2) sets the median
# scale — confirm.
lambda[i] <- log(2)*exp(-mu[i]*sqrt(tau))
mu[i] <- beta[1] + beta[2]*sV1[i]+beta[3]*sV2[i] + beta[4]*sV3[i] + beta[5]*sV4[i]+ beta[6]*sV5[i]
}
# Weibull shape parameter derived from the precision tau.
alpha <- sqrt(tau)
for(i in 1:6){
# Vague normal priors; rm = exp(beta) effect size; prob = P(beta > 0).
beta[i] ~ dnorm(0,0.000001)
rm[i] <- exp(beta[i])
prob[i] <- step(beta[i])
}
# Vague gamma prior on precision; sigma is the implied scale.
tau ~ dgamma(0.001,0.001)
sigma <- sqrt(1/tau)
}
# Chain initial values for the Weibull model (5 covariates): zero
# coefficients, unit precision.
inits1 <- function() {
  list(beta = rep(0, 6), tau = 1)
}
jagsft1 <- jags(model.file=modelj1, data=data1, inits = inits1,
parameters.to.save = c('beta','tau','sigma'), n.chains=nc, n.iter = ni)
f1=data.frame(jagsft1$BUGSoutput$summary)
data2<-list(os=d12$os, stcen=d12$stcen, cen=d12$cen, v1=vv[,1],v2=vv[,2], v3=vv[,3],v4=vv[,4], v5=vv[,5],N = nr)
# JAGS model definition (deparsed to BUGS text by R2jags; never evaluated
# as R code): log-normal survival model with five standardized covariates
# and right censoring via dinterval().
modelj2<-function(){
for (i in 1:N) {
# Standardize covariates to mean 0, sd 1 within the model.
sV1[i] <- (v1[i]-mean(v1[]))/sd(v1[])
sV2[i] <- (v2[i]-mean(v2[]))/sd(v2[])
sV3[i] <- (v3[i]-mean(v3[]))/sd(v3[])
sV4[i] <- (v4[i]-mean(v4[]))/sd(v4[])
sV5[i] <- (v5[i]-mean(v5[]))/sd(v5[])
# Log-normal survival time with log-scale mean mu and precision tau.
os[i] ~ dlnorm(mu[i], tau)
# Observed cen[i] constrains os[i] relative to censoring time stcen[i].
cen[i] ~ dinterval(os[i],stcen[i])
mu[i] <- beta[1] + beta[2]*sV1[i] + beta[3]*sV2[i] + beta[4]*sV3[i] + beta[5]*sV4[i]+ beta[6]*sV5[i]
}
for(i in 1:6){
# Vague normal priors; rm = exp(beta) effect size; prob = P(beta > 0).
beta[i] ~ dnorm(0,0.000001)
rm[i] <- exp(beta[i])
prob[i] <- step(beta[i])
}
# Vague gamma prior on precision; sigma is the implied scale.
tau ~ dgamma(0.001,0.001)
sigma <- sqrt(1/tau)
}
# Chain initial values for the log-normal model (5 covariates): zero
# coefficients, unit precision.
inits2 <- function() {
  list(beta = rep(0, 6), tau = 1)
}
jagsft2 <- jags(model.file=modelj2, data=data2, inits = inits2,
parameters.to.save = c('beta','tau','sigma'), n.chains=nc, n.iter = ni)
f2=data.frame(jagsft2$BUGSoutput$summary)
data3<-list(os=d12$ls, stcen=d12$stcen1, cen=d12$cen, v1=vv[,1],v2=vv[,2], v3=vv[,3],v4=vv[,4], v5=vv[,5],N = nr)
# JAGS model definition (deparsed to BUGS text by R2jags; never evaluated
# as R code): log-logistic survival model on log survival time with five
# standardized covariates.
modelj3<-function(){
for (i in 1:N) {
# Standardize covariates to mean 0, sd 1 within the model.
sV1[i] <- (v1[i]-mean(v1[]))/sd(v1[])
sV2[i] <- (v2[i]-mean(v2[]))/sd(v2[])
sV3[i] <- (v3[i]-mean(v3[]))/sd(v3[])
sV4[i] <- (v4[i]-mean(v4[]))/sd(v4[])
sV5[i] <- (v5[i]-mean(v5[]))/sd(v5[])
# Logistic distribution on the log scale -> log-logistic survival times.
os[i] ~ dlogis(mu[i], taustar)
# Observed cen[i] constrains os[i] relative to censoring time stcen[i].
cen[i] ~ dinterval(os[i],stcen[i])
mu[i] <- beta[1] + beta[2]*sV1[i] +beta[3]*sV2[i] + beta[4]*sV3[i] + beta[5]*sV4[i] + beta[6]*sV5[i]
}
# Scale parameter for dlogis derived from the precision tau.
taustar <- sqrt(tau)
for(i in 1:6){
# Vague normal priors; rm = exp(beta) effect size; prob = P(beta > 0).
beta[i] ~ dnorm(0,0.000001)
rm[i] <- exp(beta[i])
prob[i] <- step(beta[i])
}
# Vague gamma prior on precision; sigma is the implied scale.
tau ~ dgamma(0.001,0.001)
sigma <- sqrt(1/tau)
# NOTE(review): junk1 presumably keeps the os[1] node alive — confirm.
junk1 <- exp(os[1])
}
# Chain initial values for the log-logistic model (5 covariates): zero
# coefficients, unit precision.
inits3 <- function() {
  list(beta = rep(0, 6), tau = 1)
}
jagsft3 <- jags(model.file=modelj3, data=data3, inits = inits3,
parameters.to.save = c('beta','tau','sigma'), n.chains=nc,
n.iter = ni)
f3=data.frame(jagsft3$BUGSoutput$summary)
}
cat("Estimates for variables: ", vname,"\n")
# Pick the model with the smallest value in row (len+2), column 1 of each
# BUGSoutput summary. NOTE(review): presumably row (len+2) is the deviance
# row (rows sort as beta[1..len+1], deviance, sigma, tau) — confirm the
# row ordering of BUGSoutput$summary before trusting this index.
f <- min(f1[(len+2),1],f2[(len+2),1],f3[(len+2),1])
# Exact == comparison is safe here because f is one of the three values.
# The first matching branch returns, so ties resolve in model-1,2,3 order.
if(f1[(len+2),1]==f){
cat("First initialization belongs to Weibull distribution.","\n")
cat("Second initialization belongs to log normal distribution.","\n")
cat("Third initialization belongs to log logistic distribution.","\n")
cat("Estimates for weibull distribution, which is found suitable with minimal DIC value: ","\n")
return(f1)
}
if(f2[(len+2),1]==f){
cat("First initialization belongs to Weibull distribution.","\n")
cat("Second initialization belongs to log normal distribution.","\n")
cat("Third initialization belongs to log logistic distribution.","\n")
cat("Estimates for log normal distribution, which is found suitable with minimal DIC value:","\n")
return(f2)
}
if(f3[(len+2),1]==f){
cat("First initialization belongs to Weibull distribution.","\n")
cat("Second initialization belongs to log normal distribution.","\n")
cat("Third initialization belongs to log logistic distribution.","\n")
cat("Estimates for log logistic, which is found suitable with minimal DIC value:","\n")
return(f3)
}
}
utils::globalVariables(c("N","v1","sd","v2","v3","v4","v5","tau","step","phi"))
|
library(class)
# Number of observations per fold for 10-fold cross-validation.
# (Fixed: use <- for assignment, not =.)
numPerFold <- floor(nrow(known.Scrambled) / 10)

### Try K from 1 to 20: for each K, run 10-fold CV and record the mean
### misclassification rate of a KNN classifier on the held-out fold.
### vapply() replaces sapply() so the result is guaranteed to be a plain
### numeric vector (sapply's return type silently varies with its input).
knnMeanErrors <- vapply(1:20, function(k) {
  # Misclassification rate on each of the 10 folds.
  error.Rates <- vapply(1:10, function(index) {
    # KFold.helper (project-defined) carves out fold `index` of size
    # numPerFold from known.Scrambled and applies the modelling function.
    KFold.helper(known.Scrambled, function(train, test) {
      # Proportion of held-out subjects misclassified by KNN with k
      # neighbours; predictors come from the shared formula object.
      mean(
        knn(
          train = model.frame(subject.FullFormula.PredictorVars, train),
          test = model.frame(subject.FullFormula.PredictorVars, test),
          cl = train$subject,
          k = k
        ) != test$subject
      )
    }, index, numPerFold)
  }, numeric(1))
  mean(error.Rates)
}, numeric(1))
# library() errors immediately if a package is missing; require() only
# warns and returns FALSE, letting the script fail later with a confusing
# error — so use library() for hard dependencies.
library(ggplot2)
library(dplyr)

# Mean 10-fold CV error versus K, as a line-plus-points plot.
plot.KNN <- data.frame(
  x = 1:20,
  y = knnMeanErrors
) %>%
  ggplot(aes(x, y)) +
  geom_line() +
  geom_point(shape = 21, size = 2, fill = "blue") +
  labs(
    title = "KNN - Mean 10-fold CV error across different k's",
    y = "Error rate",
    x = "K"
  )

# Best (lowest) mean CV error across the candidate K values.
min.error <- min(knnMeanErrors)
| /Final-Modelling.KNN.r | no_license | landon-thompson/stat-702-final | R | false | false | 1,015 | r |
library(class)
# Number of observations per fold for 10-fold cross-validation.
# (Fixed: use <- for assignment, not =.)
numPerFold <- floor(nrow(known.Scrambled) / 10)

### Try K from 1 to 20: for each K, run 10-fold CV and record the mean
### misclassification rate of a KNN classifier on the held-out fold.
### vapply() replaces sapply() so the result is guaranteed to be a plain
### numeric vector (sapply's return type silently varies with its input).
knnMeanErrors <- vapply(1:20, function(k) {
  # Misclassification rate on each of the 10 folds.
  error.Rates <- vapply(1:10, function(index) {
    # KFold.helper (project-defined) carves out fold `index` of size
    # numPerFold from known.Scrambled and applies the modelling function.
    KFold.helper(known.Scrambled, function(train, test) {
      # Proportion of held-out subjects misclassified by KNN with k
      # neighbours; predictors come from the shared formula object.
      mean(
        knn(
          train = model.frame(subject.FullFormula.PredictorVars, train),
          test = model.frame(subject.FullFormula.PredictorVars, test),
          cl = train$subject,
          k = k
        ) != test$subject
      )
    }, index, numPerFold)
  }, numeric(1))
  mean(error.Rates)
}, numeric(1))
# library() errors immediately if a package is missing; require() only
# warns and returns FALSE, letting the script fail later with a confusing
# error — so use library() for hard dependencies.
library(ggplot2)
library(dplyr)

# Mean 10-fold CV error versus K, as a line-plus-points plot.
plot.KNN <- data.frame(
  x = 1:20,
  y = knnMeanErrors
) %>%
  ggplot(aes(x, y)) +
  geom_line() +
  geom_point(shape = 21, size = 2, fill = "blue") +
  labs(
    title = "KNN - Mean 10-fold CV error across different k's",
    y = "Error rate",
    x = "K"
  )

# Best (lowest) mean CV error across the candidate K values.
min.error <- min(knnMeanErrors)
|
#' Create a matrix with output for multiple parameter combinations
#'
#' Runs \code{Multstoch()} for every combination of the candidate parameter
#' values supplied in the \code{v*} vectors and collects, for each
#' combination, summary statistics (mean, median, variance, 5\% and 95\%
#' quantiles) of the final-season outputs into a single row.
#'
#' Updated 2018-08-21
#' @param vpHSinit candidate initial proportions of healthy seed
#' @param vKx candidate total numbers of plants
#' @param vbetax candidate maximum seasonal transmission rates
#' @param vwxtnormm candidate means of the environmental effect on transmission
#' @param vwxtnormsd candidate standard deviations of the environmental effect
#' @param vhx candidate host effects on transmission rate
#' @param vmxtnormm candidate means of the vector-management effect on transmission
#' @param vmxtnormsd candidate standard deviations of the vector-management effect
#' @param vaxtnormm candidate means of the roguing effect (decreased DP)
#' @param vaxtnormsd candidate standard deviations of the roguing effect
#' @param vrx candidate reversion rates
#' @param vzxtnormm candidate means of the proportional selection against diseased plants
#' @param vzxtnormsd candidate standard deviations of the selection effect
#' @param vgx candidate seed production rates in healthy plants
#' @param vcx candidate proportional seed production rates in diseased plants
#' @param vphix candidate proportions of clean seed purchased
#' @param vnseasons candidate numbers of seasons
#' @param vnsim candidate numbers of simulations
#' @param vHPcut candidate healthy-plant cutoffs (passed to \code{Multstoch})
#' @param vpHScut candidate healthy-seed-proportion cutoffs (passed to \code{Multstoch})
#' @param vmaY candidate maximum attainable end-of-season yields (no disease)
#' @param vmiY candidate minimum yields when all plants are diseased
#' @param vthetax candidate rates of decline of yield with increasing disease incidence
#' @param vEx candidate amounts of external inoculum around the field
#' @return A data frame with one row per parameter combination: 24 input
#'   columns followed by 60 summary columns (12 outputs x 5 statistics).
#' @keywords seed health
#' @export
#' @examples
#' \dontrun{mpc()}
mpc <- function(vpHSinit=c(0.2,0.5,0.8), vKx = 100, vbetax=c(0.02,0.04), vwxtnormm=c(0.3,0.7), vwxtnormsd=c(0.3, 0.1), vhx=1, vmxtnormm=1, vmxtnormsd=0.1, vaxtnormm=c(1,0.5), vaxtnormsd= c(0.3, 0.1), vrx=c(0.1,0.3), vzxtnormm=c(1,0.5), vzxtnormsd= c(0.3, 0.1), vgx=4, vcx=0.9, vphix=c(0,0.5), vnseasons=10, vnsim=1, vHPcut=0.5, vpHScut=0.5,vmaY=100, vmiY=0, vthetax=c(-0.5,0,0.5), vEx=0.02){
# Total number of parameter combinations = product of candidate counts.
ncomb <- length(vpHSinit) * length(vKx) * length(vbetax) * length(vwxtnormm) * length(vwxtnormsd)*length(vhx) * length(vmxtnormm)*length(vmxtnormsd) * length(vaxtnormm) * length(vaxtnormsd) * length(vrx) * length(vzxtnormm) * length(vzxtnormsd) * length(vgx) * length(vcx) * length(vphix) * length(vnseasons) * length(vnsim)*length(vHPcut)*length(vpHScut)*length(vmaY)*length(vmiY)*length(vthetax)*length(vEx)
# Base names for the 12 summarized outputs; each gets 5 summary statistics.
basn <- c('fHP', 'fDP', 'fHS', 'fDS', 'fpHS', 'fpDS', 'HPtrans', 'pHStrans', 'HPpseas', 'pHSpseas', 'fYld', 'fYL')
# Preallocated output: 24 parameter columns + 12 outputs x 5 statistics = 84
# columns; -999 marks rows not yet filled.
outmpc <- as.data.frame(matrix(data=-999, nrow=ncomb, ncol=84, dimnames = list(1:ncomb,c('pHSinit','Kx','betax','wxtnormm','wxtnormsd','hx','mxtnormm','mxtnormsd','axtnormm','axtnormsd','rx','zxtnormm','zxtnormsd','gx','cx','phix','nseasons','nsim', 'HPcut', 'pHScut', 'maY','miY','thetax','Ex', paste(basn,'mean',sep=''),paste(basn,'median',sep=''),paste(basn,'var',sep=''),paste(basn,'q0.05',sep=''),paste(basn,'q0.95',sep='')))))
# Exhaustive grid via 24 nested loops; each loop binds the current candidate
# value to a t*-prefixed scalar. NOTE(review): 1:length(x) misbehaves for
# zero-length inputs — confirm callers always pass non-empty vectors.
icomb <- 1 # indicates number of current parameter combination, and corresponding row of outmpc
for(i1 in 1:length(vpHSinit)){ tpHSinit <- vpHSinit[i1]
for(i2 in 1:length(vKx)){tKx <- vKx[i2]
for(i3 in 1:length(vbetax)){tbetax <- vbetax[i3]
for(i4 in 1:length(vwxtnormm)){twxtnormm <- vwxtnormm[i4]
for(i5 in 1:length(vwxtnormsd)){twxtnormsd<- vwxtnormsd[i5]
for(i6 in 1:length(vhx)){thx <- vhx[i6]
for(i7 in 1:length(vmxtnormm)){tmxtnormm <- vmxtnormm[i7]
for(i8 in 1:length(vmxtnormsd)){tmxtnormsd <- vmxtnormsd[i8]
for(i9 in 1:length(vaxtnormm)){taxtnormm <-vaxtnormm[i9]
for(i10 in 1:length(vaxtnormsd)){taxtnormsd <- vaxtnormsd[i10]
for(i11 in 1:length(vrx)){trx <- vrx[i11]
for(i12 in 1:length(vzxtnormm)){tzxtnormm <-vzxtnormm[i12]
for(i13 in 1:length(vzxtnormsd)){tzxtnormsd <- vzxtnormsd[i13]
for(i14 in 1:length(vgx)){tgx <- vgx[i14]
for(i15 in 1:length(vcx)){tcx <- vcx[i15]
for(i16 in 1:length(vphix)){tphix <- vphix[i16]
for(i17 in 1:length(vnseasons)){tnseasons <- vnseasons[i17]
for(i18 in 1:length(vnsim)){tnsim <- vnsim[i18]
for(i19 in 1:length(vHPcut)){tHPcut<- vHPcut[i19]
for(i20 in 1:length(vpHScut)){tpHScut<- vpHScut[i20]
for(i21 in 1:length(vmaY)){tmaY<- vmaY[i21]
for(i22 in 1:length(vmiY)){tmiY<- vmiY[i22]
for(i23 in 1:length(vthetax)){tthetax<- vthetax[i23]
for(i24 in 1:length(vEx)){tEx<-vEx[i24]
# Run the stochastic multi-season simulation for this combination and keep
# its 5-row summary matrix (mean/median/var/q0.05/q0.95 x 12 outputs).
temp <- Multstoch(pHSinit2=tpHSinit, Kx2 = tKx, betax2=tbetax, wxtnormm2=twxtnormm, wxtnormsd2=twxtnormsd, hx2=thx, mxtnormm2=tmxtnormm, mxtnormsd2=tmxtnormsd, axtnormm2=taxtnormm, axtnormsd2=taxtnormsd, rx2=trx, zxtnormm2=tzxtnormm, zxtnormsd2=tzxtnormsd, gx2=tgx, cx2=tcx, phix2=tphix, nseasons2=tnseasons, nsim2=tnsim, HPcut2=tHPcut, pHScut2=tpHScut,maY2=tmaY,miY2=tmiY, thetax2=tthetax, Ex2=tEx)$outfsum
# Row icomb: parameters first, then one 12-column slice per statistic.
outmpc[icomb,1:24] <- c(tpHSinit,tKx,tbetax,twxtnormm,twxtnormsd,thx,tmxtnormm, tmxtnormsd,taxtnormm,taxtnormsd,trx,tzxtnormm,tzxtnormsd,tgx,tcx, tphix,tnseasons,tnsim,tHPcut,tpHScut,tmaY,tmiY,tthetax, tEx)
outmpc[icomb,25:36] <- temp[1,]
outmpc[icomb,37:48] <- temp[2,]
outmpc[icomb,49:60] <- temp[3,]
outmpc[icomb,61:72] <- temp[4,]
outmpc[icomb,73:84] <- temp[5,]
icomb <- icomb + 1
}}}}}}}}}}}}}}}}}}}}}}}}
outmpc
}
| /R/mpc.R | no_license | kelseyandersen/seedHealth | R | false | false | 5,347 | r |
#' Create a matrix with output for multiple parameter combinations
#'
#' This function creates a matrix with output for multiple parameter combinations across nseasons
#'
#' Updated 2018-08-21
#' @param Kx total number of plants
#' @param betax maximum seasonal transmission rate
#' @param wx environmental effect on transmission rate
#' @param hx host effect on transmission rate
#' @param mx vector management effect on transmission rate
#' @param ax roguing effect in terms of decreased DP
#' @param maY maximum attainable yield, end of season, in the absence of disease
#' @param miY minimum yield when all plants are diseased (useable yield despite disease)
#' @param thetax rate of decline of Yld with increasing disease incidence
#' @param Ex amount of external inoculum around field
#' @param rx reversion rate
#' @param zx proportional selection against diseased plants
#' @param gx seed production rate in healthy plants
#' @param cx proportional seed production rate in diseased plants
#' @param phix proportion clean seed purchased
#' @param nseasons number of seasons
#' @keywords seed health
#' @export
#' @examples
#' Multstoch()
# to do - GENERAL TESTING
# to do - check whether parameter list is correct
# Columns of output matrix
# col 1 - timestep (initial time step is season 0)
# col 2 - HP healthy plant number
# col 3 - DP diseased plant number (after roguing)
# col 4 - HS healthy seed number
# col 5 - DS diseased seed number
# col 6 - pHS proportion healthy seed
# col 7 - pDS proportion diseased seed
# col 8 - Yld end of season yield
# col 9 - YL end of season yield loss
# col 10 - DPbr (diseased plants before roguing)
# Weather (wx), vector management (mx), positive selection (zx) and roguing (zx) are stochastic
# Each have a mean and associated standard deviation
# set.seed(1234)
mpc <- function(vpHSinit=c(0.2,0.5,0.8), vKx = 100, vbetax=c(0.02,0.04), vwxtnormm=c(0.3,0.7), vwxtnormsd=c(0.3, 0.1), vhx=1, vmxtnormm=1, vmxtnormsd=0.1, vaxtnormm=c(1,0.5), vaxtnormsd= c(0.3, 0.1), vrx=c(0.1,0.3), vzxtnormm=c(1,0.5), vzxtnormsd= c(0.3, 0.1), vgx=4, vcx=0.9, vphix=c(0,0.5), vnseasons=10, vnsim=1, vHPcut=0.5, vpHScut=0.5,vmaY=100, vmiY=0, vthetax=c(-0.5,0,0.5), vEx=0.02){
ncomb <- length(vpHSinit) * length(vKx) * length(vbetax) * length(vwxtnormm) * length(vwxtnormsd)*length(vhx) * length(vmxtnormm)*length(vmxtnormsd) * length(vaxtnormm) * length(vaxtnormsd) * length(vrx) * length(vzxtnormm) * length(vzxtnormsd) * length(vgx) * length(vcx) * length(vphix) * length(vnseasons) * length(vnsim)*length(vHPcut)*length(vpHScut)*length(vmaY)*length(vmiY)*length(vthetax)*length(vEx)
basn <- c('fHP', 'fDP', 'fHS', 'fDS', 'fpHS', 'fpDS', 'HPtrans', 'pHStrans', 'HPpseas', 'pHSpseas', 'fYld', 'fYL')
outmpc <- as.data.frame(matrix(data=-999, nrow=ncomb, ncol=84, dimnames = list(1:ncomb,c('pHSinit','Kx','betax','wxtnormm','wxtnormsd','hx','mxtnormm','mxtnormsd','axtnormm','axtnormsd','rx','zxtnormm','zxtnormsd','gx','cx','phix','nseasons','nsim', 'HPcut', 'pHScut', 'maY','miY','thetax','Ex', paste(basn,'mean',sep=''),paste(basn,'median',sep=''),paste(basn,'var',sep=''),paste(basn,'q0.05',sep=''),paste(basn,'q0.95',sep='')))))
icomb <- 1 # indicates number of current parameter combination, and corresponding row of outmpc
for(i1 in 1:length(vpHSinit)){ tpHSinit <- vpHSinit[i1]
for(i2 in 1:length(vKx)){tKx <- vKx[i2]
for(i3 in 1:length(vbetax)){tbetax <- vbetax[i3]
for(i4 in 1:length(vwxtnormm)){twxtnormm <- vwxtnormm[i4]
for(i5 in 1:length(vwxtnormsd)){twxtnormsd<- vwxtnormsd[i5]
for(i6 in 1:length(vhx)){thx <- vhx[i6]
for(i7 in 1:length(vmxtnormm)){tmxtnormm <- vmxtnormm[i7]
for(i8 in 1:length(vmxtnormsd)){tmxtnormsd <- vmxtnormsd[i8]
for(i9 in 1:length(vaxtnormm)){taxtnormm <-vaxtnormm[i9]
for(i10 in 1:length(vaxtnormsd)){taxtnormsd <- vaxtnormsd[i10]
for(i11 in 1:length(vrx)){trx <- vrx[i11]
for(i12 in 1:length(vzxtnormm)){tzxtnormm <-vzxtnormm[i12]
for(i13 in 1:length(vzxtnormsd)){tzxtnormsd <- vzxtnormsd[i13]
for(i14 in 1:length(vgx)){tgx <- vgx[i14]
for(i15 in 1:length(vcx)){tcx <- vcx[i15]
for(i16 in 1:length(vphix)){tphix <- vphix[i16]
for(i17 in 1:length(vnseasons)){tnseasons <- vnseasons[i17]
for(i18 in 1:length(vnsim)){tnsim <- vnsim[i18]
for(i19 in 1:length(vHPcut)){tHPcut<- vHPcut[i19]
for(i20 in 1:length(vpHScut)){tpHScut<- vpHScut[i20]
for(i21 in 1:length(vmaY)){tmaY<- vmaY[i21]
for(i22 in 1:length(vmiY)){tmiY<- vmiY[i22]
for(i23 in 1:length(vthetax)){tthetax<- vthetax[i23]
for(i24 in 1:length(vEx)){tEx<-vEx[i24]
temp <- Multstoch(pHSinit2=tpHSinit, Kx2 = tKx, betax2=tbetax, wxtnormm2=twxtnormm, wxtnormsd2=twxtnormsd, hx2=thx, mxtnormm2=tmxtnormm, mxtnormsd2=tmxtnormsd, axtnormm2=taxtnormm, axtnormsd2=taxtnormsd, rx2=trx, zxtnormm2=tzxtnormm, zxtnormsd2=tzxtnormsd, gx2=tgx, cx2=tcx, phix2=tphix, nseasons2=tnseasons, nsim2=tnsim, HPcut2=tHPcut, pHScut2=tpHScut,maY2=tmaY,miY2=tmiY, thetax2=tthetax, Ex2=tEx)$outfsum
outmpc[icomb,1:24] <- c(tpHSinit,tKx,tbetax,twxtnormm,twxtnormsd,thx,tmxtnormm, tmxtnormsd,taxtnormm,taxtnormsd,trx,tzxtnormm,tzxtnormsd,tgx,tcx, tphix,tnseasons,tnsim,tHPcut,tpHScut,tmaY,tmiY,tthetax, tEx)
outmpc[icomb,25:36] <- temp[1,]
outmpc[icomb,37:48] <- temp[2,]
outmpc[icomb,49:60] <- temp[3,]
outmpc[icomb,61:72] <- temp[4,]
outmpc[icomb,73:84] <- temp[5,]
icomb <- icomb + 1
}}}}}}}}}}}}}}}}}}}}}}}}
outmpc
}
|
\name{itab}
\alias{itab}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Independence Table
}
\description{
Computes the independence table for a data table with non-negative entries.
The entries of the independence table are defined by the multiplication of all corresponding marginal relative frequencies.
}
\usage{
itab(x)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
A data table of any dimension but with non-negative entries.
}
}
\value{
A data table of the same dimension as the input table.}
\author{
Alexander Pilhoefer
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\examples{
A <- optile(arsim(4000,c(13,17),4,0.1),iter=10)
fluctile(A)
fluctile(itab(A))
D <- A-itab(A)
G <- (D)^2/itab(A)
fluctile(G, tile.col = c(2,4)[1+ (D>0)])
}
| /man/itab.Rd | no_license | heike/extracat | R | false | false | 834 | rd | \name{itab}
\alias{itab}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Independence Table
}
\description{
Computes the independence table for a data table with non-negative entries.
The entries of the independence table are defined by the multiplication of all corresponding marginal relative frequencies.
}
\usage{
itab(x)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
A data table of any dimension but with non-negative entries.
}
}
\value{
A data table of the same dimension as the input table.}
\author{
Alexander Pilhoefer
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\examples{
A <- optile(arsim(4000,c(13,17),4,0.1),iter=10)
fluctile(A)
fluctile(itab(A))
D <- A-itab(A)
G <- (D)^2/itab(A)
fluctile(G, tile.col = c(2,4)[1+ (D>0)])
}
|
library(Seurat)
library(Signac)
library(dplyr)
library(data.table)
library(stringr)
library(harmony)
library(patchwork)
# NOTE(review): setwd() in a script makes it non-portable; prefer running
# from the project root or using here::here() — kept as-is for now.
setwd('~/RWorkSpace/CITE-seq/Duerr/DOGMA-seq/DIG_CITE_rerun_1/code/')
# Load the previously computed Harmony-integrated CITE-seq object `cite`.
load('../output/cite_harmony_independent.RData')
# Order WNN cluster labels 0..10 and use them as the active identity.
cite$wnn_clusters <- factor(cite$wnn_clusters, levels = 0:10)
Idents(cite) <- "wnn_clusters"
# Azimuth-style PBMC reference used for label transfer below.
reference <- readRDS('~/CITE-seq/Seurat/PBMC.RDS')
# Map the query onto the PBMC reference: anchors are found in the
# reference's supervised PCA space using SCT-normalized expression.
DefaultAssay(cite) <- 'SCT'
anchors <- FindTransferAnchors(
reference = reference,
query = cite,
normalization.method = "SCT",
reference.reduction = "spca",
dims = 1:50
)
# Transfer level-1 and level-2 cell-type labels plus imputed ADT values,
# and project the query into the reference's WNN UMAP.
cite <- MapQuery(
anchorset = anchors,
query = cite,
reference = reference,
refdata = list(
celltype.l1 = "celltype.l1",
celltype.l2 = "celltype.l2",
predicted_ADT = "ADT"
),
reference.reduction = "spca",
reduction.model = "wnn.umap"
)
# UMAPs: de novo WNN clusters vs. transferred level-1 and level-2 labels.
p1 <- DimPlot(cite, reduction = 'wnn.umap', label = TRUE, repel = TRUE, label.size = 4)
p2 <- DimPlot(cite, reduction = 'wnn.umap', label = TRUE, repel = TRUE, group.by = "predicted.celltype.l1", label.size = 4)
p3 <- DimPlot(cite, reduction = 'wnn.umap', label = TRUE, repel = TRUE, group.by = "predicted.celltype.l2", label.size = 4)
pdf('../plots/harmony/cite_predicted.pdf', width = 24, height = 8)
p1 | p2 | p3
dev.off()
# Two-panel version (clusters vs. level-1 labels only).
p1 <- DimPlot(cite, reduction = 'wnn.umap', label = TRUE, repel = TRUE, label.size = 4)
p2 <- DimPlot(cite, reduction = 'wnn.umap', label = TRUE, repel = TRUE, group.by = "predicted.celltype.l1", label.size = 4)
pdf('../plots/harmony/cite_predicted_2.pdf', width = 16, height = 8)
p1 | p2
dev.off()
library(ggplot2)
# Per-condition composition of predicted level-1 cell types, as percent
# of cells within each condition (prop.table over columns).
data_plot <- table(cite$predicted.celltype.l1, cite$condition)
data_plot <- reshape2::melt(prop.table(data_plot, 2))
colnames(data_plot) <- c('Clusters', 'Conditions', 'Proportions')
data_plot$Clusters <- as.factor(data_plot$Clusters)
data_plot$Proportions <- data_plot$Proportions*100
p1 <- ggplot(data_plot, aes(x = Clusters, y = Proportions, col = Conditions, fill = Conditions))+
geom_col(position = 'dodge')+
theme_bw()+
theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=0))
pdf('../plots/harmony/cite_proportion_predicted_slide.pdf', width = 8, height = 4)
p1
dev.off()
# Same composition plot for the finer level-2 labels.
data_plot <- table(cite$predicted.celltype.l2, cite$condition)
data_plot <- reshape2::melt(prop.table(data_plot, 2))
colnames(data_plot) <- c('Clusters', 'Conditions', 'Proportions')
data_plot$Clusters <- as.factor(data_plot$Clusters)
data_plot$Proportions <- data_plot$Proportions*100
p1 <- ggplot(data_plot, aes(x = Clusters, y = Proportions, col = Conditions, fill = Conditions))+
geom_col(position = 'dodge')+
theme_bw()+
theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=0))
pdf('../plots/harmony/cite_proportion_predicted_l2_slide.pdf', width = 8, height = 4)
p1
dev.off()
# Persist per-cell metadata (including predicted labels) for downstream use.
write.csv(cite@meta.data, file = '../plots/harmony/cite_predicted.csv')
# Th17 signature: cytokine/TF RNA markers plus the corresponding surface
# proteins (ADT), clipped to the 2nd-98th percentile for display.
p1 <- FeaturePlot(cite, c('sct_IL17A'), reduction = 'wnn.umap', min.cutoff = 'q2', max.cutoff = 'q98') + ggtitle('IL17A (RNA)')
p2 <- FeaturePlot(cite, c('sct_CCR6'), reduction = 'wnn.umap', min.cutoff = 'q2', max.cutoff = 'q98') + ggtitle('CCR6 (RNA)')
p3 <- FeaturePlot(cite, c('sct_RORC'), reduction = 'wnn.umap', min.cutoff = 'q2', max.cutoff = 'q98') + ggtitle('RORC (RNA)')
p4 <- FeaturePlot(cite, c('adt_CD194-A0071'), reduction = 'wnn.umap', cols = c("lightgrey", "darkgreen"), min.cutoff = 'q2', max.cutoff = 'q98') + ggtitle('CCR4 (ADT)')
p5 <- FeaturePlot(cite, c('adt_CD196-A0143'), reduction = 'wnn.umap', cols = c("lightgrey", "darkgreen"), min.cutoff = 'q2', max.cutoff = 'q98') + ggtitle('CCR6 (ADT)')
pdf('../plots/harmony/cite_Th17.pdf', width = 12, height = 8)
wrap_plots(p1, p2, p3, p4, p5, ncol = 3)
dev.off()
# Th1 signature: same layout with Th1-associated RNA and ADT markers.
p1 <- FeaturePlot(cite, c('sct_IFNG'), reduction = 'wnn.umap', min.cutoff = 'q2', max.cutoff = 'q98') + ggtitle('IFNG (RNA)')
p2 <- FeaturePlot(cite, c('sct_IL12RB2'), reduction = 'wnn.umap', min.cutoff = 'q2', max.cutoff = 'q98') + ggtitle('IL12RB2 (RNA)')
p3 <- FeaturePlot(cite, c('sct_TBX21'), reduction = 'wnn.umap', min.cutoff = 'q2', max.cutoff = 'q98') + ggtitle('TBX21 (RNA)')
p4 <- FeaturePlot(cite, c('adt_CD183-A0140'), reduction = 'wnn.umap', cols = c("lightgrey", "darkgreen"), min.cutoff = 'q2', max.cutoff = 'q98') + ggtitle('CXCR3 (ADT)')
p5 <- FeaturePlot(cite, c('adt_CD195-A0141'), reduction = 'wnn.umap', cols = c("lightgrey", "darkgreen"), min.cutoff = 'q2', max.cutoff = 'q98') + ggtitle('CCR5 (ADT)')
pdf('../plots/harmony/cite_Th1.pdf', width = 12, height = 8)
wrap_plots(p1, p2, p3, p4, p5, ncol = 3)
dev.off()
# Save the annotated object for downstream analyses.
save(cite, file = '../output/cite_harmony_independent_predicted.RData')
| /DOGMA_CITE/09.1_analysis_cite.R | no_license | xzlandy/Benchmark_CITEseq_DOGMAseq | R | false | false | 4,598 | r | library(Seurat)
library(Signac)
library(dplyr)
library(data.table)
library(stringr)
library(harmony)
library(patchwork)
setwd('~/RWorkSpace/CITE-seq/Duerr/DOGMA-seq/DIG_CITE_rerun_1/code/')
load('../output/cite_harmony_independent.RData')
cite$wnn_clusters <- factor(cite$wnn_clusters, levels = 0:10)
Idents(cite) <- "wnn_clusters"
reference <- readRDS('~/CITE-seq/Seurat/PBMC.RDS')
DefaultAssay(cite) <- 'SCT'
anchors <- FindTransferAnchors(
reference = reference,
query = cite,
normalization.method = "SCT",
reference.reduction = "spca",
dims = 1:50
)
cite <- MapQuery(
anchorset = anchors,
query = cite,
reference = reference,
refdata = list(
celltype.l1 = "celltype.l1",
celltype.l2 = "celltype.l2",
predicted_ADT = "ADT"
),
reference.reduction = "spca",
reduction.model = "wnn.umap"
)
p1 <- DimPlot(cite, reduction = 'wnn.umap', label = TRUE, repel = TRUE, label.size = 4)
p2 <- DimPlot(cite, reduction = 'wnn.umap', label = TRUE, repel = TRUE, group.by = "predicted.celltype.l1", label.size = 4)
p3 <- DimPlot(cite, reduction = 'wnn.umap', label = TRUE, repel = TRUE, group.by = "predicted.celltype.l2", label.size = 4)
pdf('../plots/harmony/cite_predicted.pdf', width = 24, height = 8)
p1 | p2 | p3
dev.off()
p1 <- DimPlot(cite, reduction = 'wnn.umap', label = TRUE, repel = TRUE, label.size = 4)
p2 <- DimPlot(cite, reduction = 'wnn.umap', label = TRUE, repel = TRUE, group.by = "predicted.celltype.l1", label.size = 4)
pdf('../plots/harmony/cite_predicted_2.pdf', width = 16, height = 8)
p1 | p2
dev.off()
library(ggplot2)
data_plot <- table(cite$predicted.celltype.l1, cite$condition)
data_plot <- reshape2::melt(prop.table(data_plot, 2))
colnames(data_plot) <- c('Clusters', 'Conditions', 'Proportions')
data_plot$Clusters <- as.factor(data_plot$Clusters)
data_plot$Proportions <- data_plot$Proportions*100
p1 <- ggplot(data_plot, aes(x = Clusters, y = Proportions, col = Conditions, fill = Conditions))+
geom_col(position = 'dodge')+
theme_bw()+
theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=0))
pdf('../plots/harmony/cite_proportion_predicted_slide.pdf', width = 8, height = 4)
p1
dev.off()
data_plot <- table(cite$predicted.celltype.l2, cite$condition)
data_plot <- reshape2::melt(prop.table(data_plot, 2))
colnames(data_plot) <- c('Clusters', 'Conditions', 'Proportions')
data_plot$Clusters <- as.factor(data_plot$Clusters)
data_plot$Proportions <- data_plot$Proportions*100
p1 <- ggplot(data_plot, aes(x = Clusters, y = Proportions, col = Conditions, fill = Conditions))+
geom_col(position = 'dodge')+
theme_bw()+
theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=0))
pdf('../plots/harmony/cite_proportion_predicted_l2_slide.pdf', width = 8, height = 4)
p1
dev.off()
write.csv(cite@meta.data, file = '../plots/harmony/cite_predicted.csv')
p1 <- FeaturePlot(cite, c('sct_IL17A'), reduction = 'wnn.umap', min.cutoff = 'q2', max.cutoff = 'q98') + ggtitle('IL17A (RNA)')
p2 <- FeaturePlot(cite, c('sct_CCR6'), reduction = 'wnn.umap', min.cutoff = 'q2', max.cutoff = 'q98') + ggtitle('CCR6 (RNA)')
p3 <- FeaturePlot(cite, c('sct_RORC'), reduction = 'wnn.umap', min.cutoff = 'q2', max.cutoff = 'q98') + ggtitle('RORC (RNA)')
p4 <- FeaturePlot(cite, c('adt_CD194-A0071'), reduction = 'wnn.umap', cols = c("lightgrey", "darkgreen"), min.cutoff = 'q2', max.cutoff = 'q98') + ggtitle('CCR4 (ADT)')
p5 <- FeaturePlot(cite, c('adt_CD196-A0143'), reduction = 'wnn.umap', cols = c("lightgrey", "darkgreen"), min.cutoff = 'q2', max.cutoff = 'q98') + ggtitle('CCR6 (ADT)')
pdf('../plots/harmony/cite_Th17.pdf', width = 12, height = 8)
wrap_plots(p1, p2, p3, p4, p5, ncol = 3)
dev.off()
p1 <- FeaturePlot(cite, c('sct_IFNG'), reduction = 'wnn.umap', min.cutoff = 'q2', max.cutoff = 'q98') + ggtitle('IFNG (RNA)')
p2 <- FeaturePlot(cite, c('sct_IL12RB2'), reduction = 'wnn.umap', min.cutoff = 'q2', max.cutoff = 'q98') + ggtitle('IL12RB2 (RNA)')
p3 <- FeaturePlot(cite, c('sct_TBX21'), reduction = 'wnn.umap', min.cutoff = 'q2', max.cutoff = 'q98') + ggtitle('TBX21 (RNA)')
p4 <- FeaturePlot(cite, c('adt_CD183-A0140'), reduction = 'wnn.umap', cols = c("lightgrey", "darkgreen"), min.cutoff = 'q2', max.cutoff = 'q98') + ggtitle('CXCR3 (ADT)')
p5 <- FeaturePlot(cite, c('adt_CD195-A0141'), reduction = 'wnn.umap', cols = c("lightgrey", "darkgreen"), min.cutoff = 'q2', max.cutoff = 'q98') + ggtitle('CCR5 (ADT)')
pdf('../plots/harmony/cite_Th1.pdf', width = 12, height = 8)
wrap_plots(p1, p2, p3, p4, p5, ncol = 3)
dev.off()
save(cite, file = '../output/cite_harmony_independent_predicted.RData')
|
###########################
# File: League Settings.R
# Description: User sets league settings
# Date: 6/1/2013
# Author: Isaac Petersen (isaac@fantasyfootballanalytics.net)
# Notes:
# To do:
###########################
#Roster
# Starters by position; note 1 + 2 + 2 + 1 = 6 named starters, so
# numTotalStarters = 7 presumably includes one flex slot -- TODO confirm.
numQBstarters <- 1
numRBstarters <- 2
numWRstarters <- 2
numTEstarters <- 1
numTotalStarters <- 7
numTotalPlayers <- 20
#League settings
defaultCap <- 200 #what the typical cap is for your service (ESPN, Yahoo, etc.) -- used for placing "avg cost" in context
leagueCap <- 225 #your league's cap
# Budget available for starters: the cap minus $1 reserved for each of the
# (numTotalPlayers - numTotalStarters) bench spots.
maxCost <- leagueCap - (numTotalPlayers - numTotalStarters)
#Variable names
prefix <- c("name","pos")
varNames <- c("name","team","positionRank","overallRank","pts",
"passAtt","passComp","passYds","passTds","passInt",
"rushAtt","rushYds","rushTds","rec","recYds","recTds",
"returnTds","twoPts","fumbles")
#"solo","ast","idpSack","idpFumlRec","idpFumlForce","idpInt","idpPD",
#"ptsAllowed","dstSack","dstInt","dstFumlRec","blk","to","intTd","kRetTd","pRetTd",
#"fg","fg3039","fg4049","fg50","xp"
#Scoring
passAttMultiplier <- 0 #0 pts per passing attempt
passCompMultiplier <- 0 #0 pts per passing completion
passIncompMultiplier <- 0 #0 pts per passing incompletion
passYdsMultiplier <- (1/25) #1 pt per 25 passing yds
passTdsMultiplier <- 4 #4 pts per passing td
passIntMultiplier <- -3 #-3 pts per passing interception
rushAttMultiplier <- 0 #0 pts per rushing attempt
rushYdsMultiplier <- (1/10) #1 pt per 10 rushing yds
rushTdsMultiplier <- 6 #6 pts per rushing touchdown
recMultiplier <- 0 #0 pts per reception
recYdsMultiplier <- (1/8) #1 pt per 8 receiving yds
recTdsMultiplier <- 6 #6 pts per receiving touchdown
returnTdsMultiplier <- 6 #6 pts per return touchdown
twoPtsMultiplier <- 2 #2 pts per 2-point conversion
fumlMultiplier <- -3 #-3 pts per fumble lost
#Projections
sourcesOfProjections <- c("Accuscore", "CBS1", "CBS2", "ESPN", "FantasyFootballNerd", "FantasyPros", "FantasySharks", "FFtoday", "Footballguys1", "Footballguys2", "Footballguys3", "Footballguys4", "FOX", "NFL", "WalterFootball", "Yahoo")
sourcesOfProjectionsAbbreviation <- c("accu", "cbs1", "cbs2", "espn", "ffn", "fp", "fs", "fftoday", "fbg1", "fbg2", "fbg3", "fbg4", "fox", "nfl", "wf", "yahoo")
#Number of players at each position drafted in Top 100 (adjust for your league)
qbReplacements <- 15
rbReplacements <- 37
wrReplacements <- 36
teReplacements <- 11
#Alternative way of calculating the number of players at each position drafted in Top 100 based on league settings
#numTeams <- 10 #number of teams in league
#numQB <- 1 #number of avg QBs in starting lineup
#numRB <- 2.5 #number of avg RBs in starting lineup
#numWR <- 2.5 #number of avg WRs in starting lineup
#numTE <- 1 #number of avg TEs in starting lineup
#qbReplacements <- print(ceiling(numQB*numTeams*1.7))
#rbReplacements <- print(ceiling(numRB*numTeams*1.4))
#wrReplacements <- print(ceiling(numWR*numTeams*1.4))
#teReplacements <- print(ceiling(numTE*numTeams*1.3)) | /R Scripts/Functions/League Settings.R | no_license | tjalex12/FantasyFootballAnalyticsR | R | false | false | 3,101 | r | ###########################
# File: League Settings.R
# Description: User sets league settings
# Date: 6/1/2013
# Author: Isaac Petersen (isaac@fantasyfootballanalytics.net)
# Notes:
# To do:
###########################
#Roster
numQBstarters <- 1
numRBstarters <- 2
numWRstarters <- 2
numTEstarters <- 1
numTotalStarters <- 7
numTotalPlayers <- 20
#League settings
defaultCap <- 200 #what the typical cap is for your service (ESPN, Yahoo, etc.) -- used for placing "avg cost" in context
leagueCap <- 225 #your league's cap
maxCost <- leagueCap - (numTotalPlayers - numTotalStarters)
#Variable names
prefix <- c("name","pos")
varNames <- c("name","team","positionRank","overallRank","pts",
"passAtt","passComp","passYds","passTds","passInt",
"rushAtt","rushYds","rushTds","rec","recYds","recTds",
"returnTds","twoPts","fumbles")
#"solo","ast","idpSack","idpFumlRec","idpFumlForce","idpInt","idpPD",
#"ptsAllowed","dstSack","dstInt","dstFumlRec","blk","to","intTd","kRetTd","pRetTd",
#"fg","fg3039","fg4049","fg50","xp"
#Scoring
passAttMultiplier <- 0 #0 pts per passing attempt
passCompMultiplier <- 0 #0 pts per passing completion
passIncompMultiplier <- 0 #0 pts per passing incompletion
passYdsMultiplier <- (1/25) #1 pt per 25 passing yds
passTdsMultiplier <- 4 #4 pts per passing td
passIntMultiplier <- -3 #-3 pts per passing interception
rushAttMultiplier <- 0 #0 pts per rushing attempt
rushYdsMultiplier <- (1/10) #1 pt per 10 rushing yds
rushTdsMultiplier <- 6 #6 pts per rushing touchdown
recMultiplier <- 0 #0 pts per reception
recYdsMultiplier <- (1/8) #1 pt per 8 receiving yds
recTdsMultiplier <- 6 #6 pts per receiving touchdown
returnTdsMultiplier <- 6 #6 pts per return touchdown
twoPtsMultiplier <- 2 #2 pts per 2-point conversion
fumlMultiplier <- -3 #-3 pts per fumble lost
#Projections
sourcesOfProjections <- c("Accuscore", "CBS1", "CBS2", "ESPN", "FantasyFootballNerd", "FantasyPros", "FantasySharks", "FFtoday", "Footballguys1", "Footballguys2", "Footballguys3", "Footballguys4", "FOX", "NFL", "WalterFootball", "Yahoo")
sourcesOfProjectionsAbbreviation <- c("accu", "cbs1", "cbs2", "espn", "ffn", "fp", "fs", "fftoday", "fbg1", "fbg2", "fbg3", "fbg4", "fox", "nfl", "wf", "yahoo")
#Number of players at each position drafted in Top 100 (adjust for your league)
qbReplacements <- 15
rbReplacements <- 37
wrReplacements <- 36
teReplacements <- 11
#Alternative way of calculating the number of players at each position drafted in Top 100 based on league settings
#numTeams <- 10 #number of teams in league
#numQB <- 1 #number of avg QBs in starting lineup
#numRB <- 2.5 #number of avg RBs in starting lineup
#numWR <- 2.5 #number of avg WRs in starting lineup
#numTE <- 1 #number of avg TEs in starting lineup
#qbReplacements <- print(ceiling(numQB*numTeams*1.7))
#rbReplacements <- print(ceiling(numRB*numTeams*1.4))
#wrReplacements <- print(ceiling(numWR*numTeams*1.4))
#teReplacements <- print(ceiling(numTE*numTeams*1.3)) |
library("CVXR")
weights_Markowitz <- function(lambda, Sigma, mu){
w <- Variable(length(mu))
res <- solve(Problem(Maximize(t(mu) %*% w - lambda*quad_form(w, Sigma)),
constraints = list(w >= 0, sum(w) == 1)))
w_Markowitz <- as.matrix(res$getValue(w))
rownames(w_Markowitz) <- stock_namels
return(w_Markowitz)
}
library(xts)
library(quantmod)
# Download adjusted close prices for a fixed basket of US large caps.
begin_date <- "2015-01-01"
end_date <- "2019-07-13"
stock_namels <- c("AAPL", "IBM", "AMZN", "BAC", "MS", "FDX", "MMM", "KO", "PFE")
prices <- xts()
stock = "IBM"
for (stock in stock_namels){
# Ad() extracts the adjusted-close column; auto.assign = FALSE returns the
# xts object instead of assigning it into the global environment.
prices <- cbind(prices, Ad(getSymbols(stock, from = begin_date, to = end_date, auto.assign = FALSE)))
}
# Normalise every series by its first observation so all start at 1.
plot(prices/rep(prices[1, ], each = nrow(prices)), legend.loc = "topleft",
main = "Normalized prices")
# Daily log-returns; first row is NA by construction of diff().
logret <- diff(log(prices))
colnames(logret) <- stock_namels
mu <- colMeans(logret, na.rm = TRUE)
Sigma <- cov(na.omit(logret))
# Generally, lambda is between 0 and 4; the larger lambda, the more risk-averse.
par(mfrow = c(3,2))
for (lmd in c(0.5,1,2,4,8,10)){
# NOTE(review): "??" in the xlab looks like a mangled lambda glyph -- left
# untouched here since it is a runtime string.
barplot(t(weights_Markowitz(lmd, Sigma, mu)), main = "Portfolio allocation", xlab = paste("?? = ", lmd))
}
par(mfrow = c(1,1))
#remark: if \(\lambda\) goes to positive infinity, it will converge to GMVP(Global Minimum Variance Portfolio) situation
## Naive risk-parity weights (closed form): inversely proportional to volatility.
w_naive <- 1 / sqrt(diag(Sigma))
w_naive <- w_naive / sum(w_naive)
# BUG FIX: w_Markowitz_lmd1 / w_Markowitz_lmd10 were referenced below but never
# created anywhere in the script (the plotting loop above discards the weights),
# so cbind() failed with "object not found". Compute them explicitly here.
w_Markowitz_lmd1 <- weights_Markowitz(1, Sigma, mu)
w_Markowitz_lmd10 <- weights_Markowitz(10, Sigma, mu)
# Side-by-side comparison of the two Markowitz allocations and the naive one.
w_all <- cbind(w_Markowitz_lmd1, w_Markowitz_lmd10, w_naive)
colnames(w_all) <- c("Mkwtz_lmd1", "Mkwtz_lmd10", "risk-parity")
barplot(t(w_all), beside = TRUE, legend = colnames(w_all))
## Test for outsample performance (waited to be added)
## Sensitivity to parameters (particularly to ??).(waited to be added) | /Risk_parity_portfolio.R | no_license | Karagul/R-for-Trading-Strategy_RMarkdown | R | false | false | 1,773 | r | library("CVXR")
weights_Markowitz <- function(lambda, Sigma, mu){
w <- Variable(length(mu))
res <- solve(Problem(Maximize(t(mu) %*% w - lambda*quad_form(w, Sigma)),
constraints = list(w >= 0, sum(w) == 1)))
w_Markowitz <- as.matrix(res$getValue(w))
rownames(w_Markowitz) <- stock_namels
return(w_Markowitz)
}
library(xts)
library(quantmod)
begin_date <- "2015-01-01"
end_date <- "2019-07-13"
stock_namels <- c("AAPL", "IBM", "AMZN", "BAC", "MS", "FDX", "MMM", "KO", "PFE")
prices <- xts()
stock = "IBM"
for (stock in stock_namels){
prices <- cbind(prices, Ad(getSymbols(stock, from = begin_date, to = end_date, auto.assign = FALSE)))
}
plot(prices/rep(prices[1, ], each = nrow(prices)), legend.loc = "topleft",
main = "Normalized prices")
logret <- diff(log(prices))
colnames(logret) <- stock_namels
mu <- colMeans(logret, na.rm = TRUE)
Sigma <- cov(na.omit(logret))
#generally, \lambda is between 0 and 4. The larger \lamba the more risk-averse.
par(mfrow = c(3,2))
for (lmd in c(0.5,1,2,4,8,10)){
barplot(t(weights_Markowitz(lmd, Sigma, mu)), main = "Portfolio allocation", xlab = paste("?? = ", lmd))
}
par(mfrow = c(1,1))
#remark: if \(\lambda\) goes to positive infinity, it will converge to GMVP(Global Minimum Variance Portfolio) situation
## Closed-form solution for most simple case
w_naive <- 1/sqrt(diag(Sigma))
w_naive <- w_naive/sum(w_naive)
w_all <- cbind(w_Markowitz_lmd1,w_Markowitz_lmd10, w_naive)
colnames(w_all) <- c("Mkwtz_lmd1", "Mkwtz_lmd10","risk-parity" )
barplot(t(w_all), beside = TRUE, legend = colnames(w_all))
## Test for outsample performance (waited to be added)
## Sensitivity to parameters (particularly to ??).(waited to be added) |
#Required libraries
suppressPackageStartupMessages(c(library(shiny),
                                 library(shinythemes),
                                 library(dplyr),
                                 library(tidytext),
                                 library(plotly)))
#Word frequency dictionaries: precomputed n-gram lookup tables. Based on usage
#below they hold word1..wordK context columns plus Next_Word and Probability.
unigram <- readRDS("unigram.rds")
bigram <- readRDS("bigram.rds")
trigram <- readRDS("trigram.rds")
quadgram <- readRDS("quadgram.rds")
#Function to transform input from shiny into separate words
#' Normalise free text into the last three tokens for n-gram lookup.
#'
#' Tokenises the input (lower-cased, punctuation and numbers stripped) and
#' returns a one-row tibble with columns x, y, z holding up to the last three
#' words, earliest first and left-aligned: one word -> (w, "", ""), two words
#' -> (w1, w2, ""), three or more -> the final three. This matches the column
#' convention NextWord() expects.
#'
#' @param a Character input from the UI (may be NULL, "", or multi-element).
#' @return One-row tibble with character columns x, y, z.
cleanInput <- function(a){
  # Guard: NULL / character(0) input (e.g. before the UI initialises).
  if (length(a) == 0) {
    return(tibble(x = "", y = "", z = ""))
  }
  # FIX: the original handled only length(a) == 1 and returned NULL otherwise;
  # collapse a multi-element vector so all its words are considered.
  tokens <- tibble(sentence = paste(a, collapse = " ")) %>%
    unnest_tokens(output = word,
                  input = sentence,
                  to_lower = TRUE,
                  strip_punct = TRUE,
                  strip_numeric = TRUE)
  words <- tail(tokens$word, 3)
  # FIX: the original fell through (leaking a 0-row token tibble) when the
  # text tokenised to zero words; pad on the right so the shape is constant.
  words <- c(words, rep("", 3 - length(words)))
  tibble(x = words[1], y = words[2], z = words[3])
}
#Function for text Prediction
#' Predict the next word via back-off over 4/3/2/1-gram tables.
#'
#' Tries the quadgram table on the full (x, y, z) context, backing off to
#' trigrams (x, y), then bigrams (x), and finally the overall unigram
#' frequencies when no higher-order context matches.
#'
#' @param x,y,z Preceding words, earliest first; "" when absent.
#' @param n Number of suggestions to return.
#' @return Data frame with columns Next_Word and Probability (top n rows).
#'
#' NOTE(review): each guard tests column membership independently
#' (x %in% word1 & y %in% word2 & ...), so a context whose words occur
#' separately but never together passes the guard and filter() yields an
#' empty table instead of backing off -- worth confirming against the data.
NextWord <- function(x = "", y = "", z = "", n = 5){
  if(length(x) == 0 & length(y) == 0 & length(z) == 0) print("Please ignore the warning, it will disappear if you insert some Text")
  else if (x %in% quadgram$word1 & y %in% quadgram$word2 & z %in% quadgram$word3){
    quadgram %>% filter(x == word1, y == word2, z == word3) %>% select(Next_Word, Probability) %>% head(n)
  }
  else if (x %in% trigram$word1 & y %in% trigram$word2){
    trigram %>% filter(x == word1, y == word2) %>% select(Next_Word, Probability) %>% head(n)
  }
  else if (x %in% bigram$word1){
    bigram %>% filter(x == word1) %>% select(Next_Word, Probability) %>% head(n)
  }
  else unigram[1:n,]
}
# Define UI for application that Predicts Words
ui <- fluidPage(theme = shinytheme("cerulean"),
tabsetPanel(
tabPanel(title = "App",
# Application title
titlePanel("Words Prediction With R"),
# Inputs
fluidRow(column(width = 6,
textInput(inputId = "sentence",
label = "Predict the Next Word of this text",
value = "hi"),
sliderInput(inputId = "suggestions",
label = "Nº of Suggestions",
min = 1,
max = 15,
value = 5),
submitButton(text = "Submit Text")
),
# Outputs
column(width = 6,
tableOutput("prediction")
)
),
fluidRow(column(width = 12,
plotlyOutput("bar")
)
)
),
#About this App Tab
tabPanel(title = "About this App",
withMathJax(includeHTML("about_app.html"))
))
)
# Server logic required to Make the Prediction and plot it
# Server: recompute the prediction table whenever the submit button delivers
# new input, then render it both as a table and as an interactive bar chart.
server <- function(input, output) {
  # Reactive prediction: tokenise the sentence and run the n-gram back-off,
  # returning the top `suggestions` candidates with their probabilities.
  prediction <- reactive({
    word <- cleanInput(input$sentence)
    NextWord(word$x,word$y,word$z, input$suggestions)
  })
  #Table of suggested words and their estimated probabilities
  output$prediction <- renderTable(prediction(),
                                   bordered = TRUE,
                                   hover = TRUE,
                                   striped = TRUE,
                                   colnames = TRUE)
  #Interactive bar chart of the same suggestions (plotly)
  output$bar <- renderPlotly({
    plot_ly(prediction(),
            y = ~Probability,
            x = ~Next_Word,
            type = "bar",
            color = ~Probability)
  })
}
# Run the application
shinyApp(ui = ui, server = server)
| /App/app.R | no_license | giunide/datascience_capstone | R | false | false | 4,766 | r | #Required libaries
suppressPackageStartupMessages(c(library(shiny),
library(shinythemes),
library(dplyr),
library(tidytext),
library(plotly)))
#Word Frequency Dictionaries
unigram <- readRDS("unigram.rds")
bigram <- readRDS("bigram.rds")
trigram <- readRDS("trigram.rds")
quadgram <- readRDS("quadgram.rds")
#Function to transform input from shiny into separate words
# Tokenise UI text and return the last three words as a one-row tibble
# (columns x, y, z, left-aligned, "" for missing positions) for the n-gram
# back-off lookup in NextWord().
# NOTE(review): if the text tokenises to zero words (e.g. input ""), none of
# the nrow branches match and the raw 0-row token tibble is returned; the
# caller then sees word$x == NULL, which NextWord happens to tolerate.
cleanInput <- function(a){
  # NULL / character(0) input: empty (x, y, z) triple.
  if(length(a) == 0) {
    word <- tibble(x = "",
                   y = "",
                   z = "")
  }
  else if(length(a) == 1){
    # Lower-case tokens with punctuation and numbers stripped.
    word <- tibble(sentence = a) %>% unnest_tokens(output = word,
                                                   input = sentence,
                                                   to_lower = TRUE,
                                                   strip_punct = TRUE,
                                                   strip_numeric = TRUE)
    if(nrow(word) == 1){
      word <- tibble(x = word[1,] %>% pull(),
                     y = "",
                     z = "")
    }
    else if(nrow(word) == 2){
      word <- tibble(x = word[1,] %>% pull(),
                     y = word[2,] %>% pull(),
                     z = "")
    }
    else if(nrow(word) >= 3){
      # Keep only the final three tokens, earliest first.
      word = tibble(x = tail(word, 3)[1,] %>% pull(),
                    y = tail(word, 2)[1,] %>% pull(),
                    z = tail(word, 1) %>% pull())
    }
  }
}
#Function for text Prediction
# Next-word prediction by back-off: try the quadgram context (x, y, z) first,
# then trigram (x, y), then bigram (x), finally the top unigram frequencies.
# Returns a table with Next_Word and Probability columns (top n rows).
NextWord <- function(x = "", y = "", z = "", n = 5){
  no_input <- length(x) == 0 & length(y) == 0 & length(z) == 0
  if (no_input) {
    return(print("Please ignore the warning, it will disapear if you insert some Text"))
  }
  if (x %in% quadgram$word1 & y %in% quadgram$word2 & z %in% quadgram$word3) {
    hits <- quadgram %>% filter(x == word1, y == word2, z == word3)
  } else if (x %in% trigram$word1 & y %in% trigram$word2) {
    hits <- trigram %>% filter(x == word1, y == word2)
  } else if (x %in% bigram$word1) {
    hits <- bigram %>% filter(x == word1)
  } else {
    # No matching context at any order: fall back to overall frequencies.
    return(unigram[1:n,])
  }
  hits %>% select(Next_Word, Probability) %>% head(n)
}
# Define UI for application that Predicts Words
ui <- fluidPage(theme = shinytheme("cerulean"),
tabsetPanel(
tabPanel(title = "App",
# Application title
titlePanel("Words Prediction With R"),
# Inputs
fluidRow(column(width = 6,
textInput(inputId = "sentence",
label = "Predict the Next Word of this text",
value = "hi"),
sliderInput(inputId = "suggestions",
label = "Nº of Suggestions",
min = 1,
max = 15,
value = 5),
submitButton(text = "Submit Text")
),
# Outputs
column(width = 6,
tableOutput("prediction")
)
),
fluidRow(column(width = 12,
plotlyOutput("bar")
)
)
),
#About this App Tab
tabPanel(title = "About this App",
withMathJax(includeHTML("about_app.html"))
))
)
# Server logic required to Make the Prediction and plot it
server <- function(input, output) {
prediction <- reactive({
word <- cleanInput(input$sentence)
NextWord(word$x,word$y,word$z, input$suggestions)
})
#Table
output$prediction <- renderTable(prediction(),
bordered = TRUE,
hover = TRUE,
striped = TRUE,
colnames = TRUE)
#Plot
output$bar <- renderPlotly({
plot_ly(prediction(),
y = ~Probability,
x = ~Next_Word,
type = "bar",
color = ~Probability)
})
}
# Run the application
shinyApp(ui = ui, server = server)
|
#' ---
#' title: 02 Project Management With RStudio
#' output: html_document
#' ---
#'
#' ## R projects
#' ## Git
| /workspace/02.R | no_license | csiu/2017-04-26-r_workshop | R | false | false | 115 | r | #' ---
#' title: 02 Project Management With RStudio
#' output: html_document
#' ---
#'
#' ## R projects
#' ## Git
|
# Hi there,
# the following code is collecting and merging all necessary variables for the analysis
# of the association between ELS and psycho-cardio-metabolic multi-morbidity in children
# This includes the outcomes of interest (internalizing problems and cardio-metabolic
# risk), the covariates that are going to be used as well as the auxiliary variables
# used in the imputation of the final dataset.
# It does not include data used to build the ELS score exposure.
#### ---------------------------- Dependencies ---------------------------- ####
# First, let's point to the necessary libraries
library(foreign)
library(stats)
# Defining the path to the data
# check if the path to the data is already in memory, otherwise ask for it.
#if (exists("pathtodata") == F) { pathtodata = readline(prompt="Enter path to data: ") }
# ATTENTION!!! If prompted with an "Enter path to data:" message -> Enter the location
# of your datafiles. The code assumes that all (raw) data is stored in ONE folder.
# Do not forget the final slash in your path, and, speaking of slashes, beware of
# OS sensitive changes when you want to modify the structure of your dirs!
################################################################################
#### ------------------ INTERNALIZING PROBLEMS ( @ 9 ) -------------------- ####
################################################################################
#load data from previous scripts
#load('prenatal_stress.Rdata')
#load('postnatal_stress.Rdata')
#load('alspac.table.collapsed.Rdata')
names(alspac.table)=tolower(names(alspac.table))
# Internalizing scale @ 9 yrs # informant: MOTHER.
internalizing = alspac.table[,c('cidb2957', # note: lowercase 'b' here
'qlet',
'kv9991a', #age 10 (months)
'tb9991a', #age 13
'fh0011a', # age 15
'fj003a', # age 17? VB: not in dataset? but was in original?
'ypb9992', # age 22 VB: also not in?
'kv8603',# age 10 #'agechild_cbcl9m',
'tb8603', # age 13
'fh6876', # age 15
'fjci350', # age 17 (CIS-R)
'ypb5180')] # age 22 (SMFQ; might not be the best measure)
#'nmisint_9m', # number of missing values in internalizing scale items
#'sum_int_9m')] # weighted sum score internalizing scale (allowing 25% missing)
# Convert the age/score columns to numeric. Factor columns must go through
# their level labels -- as.numeric() on a factor returns the internal level
# codes, not the stored values.
# FIX: fh6876 was previously converted with a bare as.numeric(), which yields
# level codes if that column is a factor (as the loop treats its siblings).
# Guard on the actual class so both factor and character/numeric columns are
# handled correctly. Assumes the underlying values are numeric strings --
# non-numeric labels would become NA with a warning; TODO confirm on the data.
for (i in c('kv9991a', 'tb9991a', 'fh0011a', 'fj003a', 'ypb9992',
            'kv8603', 'tb8603', 'fh6876', 'fjci350', 'ypb5180')){
  col <- internalizing[, i]
  internalizing[, i] <- if (is.factor(col)) as.numeric(levels(col))[col] else as.numeric(col)
}
#corbetw2mat(internalizing.n[,-c(1:2)], internalizing[,-c(1:2)], what = "paired")
# Calculation (based on SPSS script available on the V: drive):
# if 24 out of 32 items are available (i.e. 75%), sum of the item
# scores * (32 / 32 - nmisnt_9m).
# Let's make it a bit more reader friendly
#colnames(internalizing)[3:4] <- c("n_missing_intern", "intern_score");
internalizing = internalizing %>%
rename(
int.age.10y = kv9991a,
int.age.13y = tb9991a,
int.age.15y = fh0011a,
int.age.17y = fj003a,
int.age.22y = ypb9992,
intern_score.10y = kv8603,
intern_score.13y = tb8603,
intern_score.15y = fh6876,
intern_score.17y = fjci350,
intern_score.22y = ypb5180
)
# One alternative could be to use the anxious/depressed empirical sub-scale (9y, mother report)
# However I did give it a shot and it does not seem to perfom better than the internalizing one.
################################################################################
#### -------------------------- FAT MASS ( @ 9 ) -------------------------- ####
################################################################################
# Although the original plan for this project involved a more comprehensive measure
# of child metabolic syndrome (see CMR_score.R file for specifics), android fat mass
# was the only metabolic risk variable that showed appreciable variance in such young
# children and the highest correlation with internalizing (small but significant r =.12)
# It was also selected on the base of data availability both cross-sectionally and
# for future longitudinal assessment.
# Read in the datasets
#fatmass <- readquick("CHILDFATMASS9_13092016.sav") # 5862 obs of 28 vars
# Select only the necessary measures
fat_mass = alspac.table[,c('cidb2957','qlet',
'f9003c', # age 10 (months) #'agechild9_visit1', # this value (age) is the same for all other datasets
'kg998a', # age13
'fkar0010', # age24
'f9dx126', #trunkFM at age10y #'fat_mass_androidchild9')]
'fg3257', # andrFM at age13y
'fh2257', # andrFM at age15y
'fjdx138', # andrFM at age17y
'fkdx1041')] # andrFM at age24y
for (i in c('f9003c',
'kg998a',
'fkar0010',
'f9dx126',
'fg3257',
'fh2257',
'fjdx138',
'fkdx1041')){
fat_mass[,i] = as.numeric(levels(fat_mass[,i]))[fat_mass[,i]]
}
#colnames(fat_mass)[3] <- "fat_mass"
fat_mass = fat_mass %>%
rename(
fm.age.10y = f9003c,
fm.age.13y = kg998a,
fm.age.24y = fkar0010,
fat_mass.10y = f9dx126,
fat_mass.13y = fg3257,
fat_mass.15y = fh2257,
fat_mass.17y = fjdx138,
fat_mass.24y = fkdx1041
)
################################################################################
# merge the two main (mental and physical) outcomes with child sex into one dataset
PCM_outcome <- merge(internalizing, fat_mass, by = c('cidb2957','qlet'), all.x = TRUE)
# ------------------------------------------------------------------------------
# Before we can use them in the analysis, the outcome variables need to be standardized.
# so, here we take the standard deviation score.
# Standardise both outcomes to SD units; scale() returns a one-column matrix,
# hence the as.numeric(). Columns are created in the same order as before
# (internalizing 10-22y, then fat mass 10-24y), with "_z" inserted in the name.
for (v in c("intern_score.10y", "intern_score.13y", "intern_score.15y",
            "intern_score.17y", "intern_score.22y",
            "fat_mass.10y", "fat_mass.13y", "fat_mass.15y",
            "fat_mass.17y", "fat_mass.24y")) {
  z_name <- sub("(intern_score|fat_mass)", "\\1_z", v)
  PCM_outcome[[z_name]] <- as.numeric(scale(PCM_outcome[[v]]))
}
################################################################################
#### ---------------------------- COVARIATES ------------------------------ ####
################################################################################
# Variables that will be used in the covariate models of this project are those
# marked with ###. they include: 'sex', 'age_child', 'm_smoking', 'm_drinking'
# and 'm_bmi_before_pregnancy'.
# For the other demographic auxiliary variables (used for imputation): when they
# were assessed both prenatally and postnatally, both measures are included.
# Auxiliary variables for this project include: 'ethnicity', 'm_age', parity',
# 'm_smoking', 'gest_age_birth', 'gest_weight', 'm_bmi_pregnancy', 'm_bmi_5yrs',
# 'sex', 'm_dep_pregnancy', 'p_dep_pregnacy', "m_dep_3yrs", "p_dep_3yrs"
# ------------------------------------------------------------------------------
### AGE of the child
# Combine age of the child measured during first visit and at CBCL administration
# This value will serve as a covariate in the first adjusted model.
#PCM_outcome$age_child = (PCM_outcome$agechild9_visit1 + PCM_outcome$agechild_cbcl9m) / 2
# in ALSPAC: average across intS and fatmass age
PCM_outcome$age_child.10y = (PCM_outcome$int.age.10y + PCM_outcome$fm.age.10y) / 2
PCM_outcome$age_child.13y = (PCM_outcome$int.age.13y + PCM_outcome$fm.age.13y) / 2
PCM_outcome$age_child.15y = PCM_outcome$int.age.15y
PCM_outcome$age_child.17y = PCM_outcome$int.age.17y
PCM_outcome$age_child.23y = (PCM_outcome$int.age.22y + PCM_outcome$fm.age.24y) / 2
# OPTIONAL: check age differnce between measurements
# plot(PCM_outcome$agechild9_visit1, PCM_outcome$agechild_cbcl9m)
# summary(PCM_outcome$agechild9_visit1 - PCM_outcome$agechild_cbcl9m)
#-------------------------------------------------------------------------------
### MATERNAL SMOKING during pregnancy
# smokingv1 <- readquick("MATERNALSMOKING_22112016.sav") # 9778 obs of 11 variables
#
# smoking = smokingv1[,c('idm', 'smoke_all')] # (1) never a smoker;
# # (2) smoked until pregnancy was known (i.e., first trimester only);
# # (3) continued smoking during pregnancy.
# colnames(smoking)[2] = "m_smoking"
# ALSPAC: 2 categories (sustained vs (stopped or never))
alspac.table$m_smoking=NA
alspac.table[which(alspac.table$e170=='N'),"m_smoking"] <- 0
alspac.table[which(alspac.table$e170=='Y'),"m_smoking"] <- 1
#-------------------------------------------------------------------------------
### MATERNAL ALCOHOL CONSUMPTION during pregnancy
# drinkingv1 <- readquick("GEDRAGSGROEP_MaternalDrinking_22112016.sav") #drinkingv2 <- readquick("MATERNALALCOHOL_22112016.sav") # old variable
#
# drinking = drinkingv1[,c('idm', 'mdrink_updated')] # (0) never;
# # (1) until pregnancy was known (i.e., first trimester only);
# # (2) continued during pregnancy occasionally;
# # (3) continued during pregnancy frequently.
# colnames(drinking)[2] = "m_drinking"
# ALSPAC
alspac.table$m_drinking=NA
alspac.table[which(alspac.table$b721=='never' & alspac.table$e220=='Not at all'),"m_drinking"] <- 0
alspac.table[which(!(alspac.table$b721=='never' & alspac.table$e220=='Not at all') &
!(is.na(alspac.table$b721) | is.na(alspac.table$e220))),"m_drinking"] <- 1
#-------------------------------------------------------------------------------
## Other variables
# child_general <- readquick("CHILD-ALLGENERALDATA_07072020.sav") # 9901 obs of 122
#
# # Ethnicity recode - dichotomized into: dutch, western and non-western;
# for (i in 1:9901) {
# if (is.na(child_general$ethnfv2[i])) { child_general$ethnicity[i] <- NA
# } else if (child_general$ethnfv2[i] == 1) { # Dutch
# child_general$ethnicity[i] <- 0
# } else if (child_general$ethnfv2[i] == 300 | child_general$ethnfv2[i] == 500 | child_general$ethnfv2[i] >= 700) {
# # American, western (300) Asian, western (500) European (700), Oceanie (800)
# child_general$ethnicity[i] <- 1
# } else {
# child_general$ethnicity[i] <- 2 }
# # Indonesian (2), Cape Verdian (3), Maroccan (4) Dutch Antilles (5) Surinamese
# # (6) Turkish (7) African (200), American, non western (400), Asian, non western (600)
# }
alspac.table$ethnicity=NA
alspac.table[which(alspac.table$c800=='White' & alspac.table$c801=='White'),"ethnicity"] <- 1
alspac.table[which(!(alspac.table$c800=='White' & alspac.table$c801=='White') &
!(is.na(alspac.table$c800) | is.na(alspac.table$c801))),"ethnicity"] <- 0
# pre-pregnancy BMI
alspac.table$dw002=as.numeric(levels(alspac.table$dw002))[alspac.table$dw002] # Pre-pregnancy weight (Kg) at 12w gest
alspac.table$m4221=as.numeric(levels(alspac.table$m4221))[alspac.table$m4221] # Height (cm) 7y 1m
alspac.table$bmi_0 = alspac.table$dw002/((alspac.table$m4221/100)^2) # calculating ore-pregnancy BMI
general_cov_aux = alspac.table[,c('cidb2957', 'qlet',
'm_smoking',
'm_drinking',
'kz021', ### SEX - 1 = boy; 2 = girl.
'ethnicity', ### ETHNICITY - dutch, western, non-western
'bmi_0', ### MATERNAL BMI - self-reported, before pregnancy
'mult', # used for exclusion criteria
#'mother', # mother id used to identify siblings (for exclusion)
'b032', # parity (used for imputation)
'bestgest', # gestational age at birth (used for imputation)
'kz030', # gestational weight (used for imputation)
#'bmi_1', # maternal BMI during pregnancy (used for imputation)
'mz028b')] # maternal age at intake (used for imputation)
# ALSPAC: bmi tbd
# Below is what ALSPAC has - should we use all of these time points? Or too many? And 9y already too late?
# dw043 = weight/(height in meters) 12w gest
# ew002 = mothers postnatal weight (kg) 8w
# m4220 = Respondent's weight (kg) 7y 1m
# n1140 = Respondent's weight (kg) 8y 1m
# p1290 = Respondent's weight (kg) 9y 1m
for (i in c('mult',
'b032',
'bestgest',
'kz030',
'mz028b')){
general_cov_aux[,i] = as.numeric(levels(general_cov_aux[,i]))[general_cov_aux[,i]]
}
# Again, let's try to keep it user friendly
general_cov_aux = general_cov_aux %>%
rename(
sex = kz021,
gest_age_birth = bestgest,
gest_weight = kz030,
m_age_cont = mz028b,
twin = mult,
parity = b032,
m_bmi_before_pregnancy = bmi_0)
# ALSPAC: the following section still needs to be done
# #-------------------------------------------------------------------------------
# # Maternal BMI at age 5 (used for imputation)
# m_anthropometry_5yrs = readquick('MOTHERANTHROPOMETRY_18022013.sav')
# m_bmi_5yrs = m_anthropometry_5yrs[,c('mother','bmimotherf5')]; colnames(m_bmi_5yrs)[2] = "m_bmi_5yrs"
#
# # Merge with the other general variables
# general_cov_aux = merge(general_cov_aux, m_bmi_5yrs, by = 'mother', all.x = TRUE)
#
#
# #-------------------------------------------------------------------------------
# # Maternal and paternal depression during pregnancy and at age 3
# bsi_pregnancy_m = readquick('GR1003-BSI D1_22112016.sav') # 9778 obs of 261 vars
# bsi_pregnancy_p = readquick('GR1004-BSI G1_22112016.sav') # 9778 obs of 261 vars
# bsi_3yrs = readquick('BSI 3 years of age_GR1065 G1-GR1066 C1_22112016.sav') # 9897 obs of 49 vars
#
# # Depression during pregnancy
# dep_pregnancy_m = bsi_pregnancy_m[, c('idm','dep')]; colnames(dep_pregnancy_m)[2] = c("m_dep_cont_pregnancy")
# dep_pregnancy_p = bsi_pregnancy_p[, c('idm','dep_p')]; colnames(dep_pregnancy_p)[2] = c("p_dep_cont_pregnancy")
#
# # Merge it with the previous dataset
# general_cov_aux <- Reduce(function(x,y) merge(x = x, y = y, by = 'idm', all.x = TRUE),
# list(general_cov_aux, dep_pregnancy_m, dep_pregnancy_p))
#
# # Depression @ 3y: Items 9, 16, 17, 18, 35, and 50
# d <- data.frame(bsi_3yrs$g0100365, bsi_3yrs$g0100665, bsi_3yrs$g0100765, bsi_3yrs$g0100865, bsi_3yrs$g0101365, bsi_3yrs$g0102165, # mother report
# bsi_3yrs$c0100366, bsi_3yrs$c0100666, bsi_3yrs$c0100766, bsi_3yrs$c0100866, bsi_3yrs$c0101366, bsi_3yrs$c0102166) # father report
# n_items_m <- rowSums(!is.na(d[,1:6])); n_items_p <- rowSums(!is.na(d[,7:12]))
# bsi_3yrs$m_dep_cont_3yrs <- ifelse(n_items_m >= 5, yes = (rowSums(d[,1:6])/n_items_m)-1, no = NA)
# bsi_3yrs$p_dep_cont_3yrs <- ifelse(n_items_p >= 5, yes = (rowSums(d[,7:12])/n_items_p)-1, no = NA)
#
# dep_3yrs = bsi_3yrs[, c('idc','m_dep_cont_3yrs', 'p_dep_cont_3yrs')]
# # Merge it with the previous dataset
# general_cov_aux = merge(general_cov_aux, dep_3yrs, by = 'idc', all.x = TRUE)
#-------------------------------------------------------------------------------
################################################################################
# Merge all covariates / auxiliary variables together
# covariates_and_auxiliary <- Reduce(function(x,y) merge(x = x, y = y, by = 'idm', all.x = TRUE),
# list(general_cov_aux, smoking, drinking))
# ALSPAC for now:
covariates_and_auxiliary = general_cov_aux
################################################################################
################################################################################
# merging outcome variables, covariates and auxiliary variables in one dataset
PCM_project = merge(PCM_outcome, covariates_and_auxiliary, by = c('cidb2957','qlet'), all.x = T)
# A bit of a quick and dirty fix to make merging with ELS easier
# colnames(PCM_project)[which(colnames(PCM_project) == 'idc')] <- toupper('idc')
# colnames(PCM_project)[which(colnames(PCM_project) == 'idm')] <- toupper('idm')
################################################################################
#-------------------------------------------------------------------------------
################################################################################
#### --------------------------- save and run ----------------------------- ####
################################################################################
# Save the dataset in an .RData file, in the directory where the raw data are stored
save(PCM_project, file = "PCM_project.RData")
# Save covariates and auxiliary variables separately as well
save(covariates_and_auxiliary, file = 'covariates_and_auxiliary.RData')
| /4.1.PCM_outcomes_covs_aux.R | no_license | TomWoofenden/ELS_and_PCM | R | false | false | 17,704 | r | # Hi there,
# the following code is collecting and merging all necessary variables for the analysis
# of the association between ELS and psycho-cardio-metabolic multi-morbidity in children
# This includes the outcomes of interest (internalizing problems and cardio-metabolic
# risk), the covariates that are going to be used as well as the auxiliary variables
# used in the imputation of the final dataset.
# It does not include data used to build the ELS score exposure.
#### ---------------------------- Dependencies ---------------------------- ####
# First, let's point to the necessary libraries
library(foreign)
library(stats)
# NOTE(review): the pipe (%>%) and rename() used below come from dplyr/magrittr,
# which are not loaded here -- presumably attached by an earlier script; confirm.
# Defining the path to the data
# check if the path to the data is already in memory, otherwise ask for it.
#if (exists("pathtodata") == F) { pathtodata = readline(prompt="Enter path to data: ") }
# ATTENTION!!! If prompted with an "Enter path to data:" message -> Enter the location
# of your datafiles. The code assumes that all (raw) data is stored in ONE folder.
# Do not forget the final slash in your path, and, speaking of slashes, beware of
# OS sensitive changes when you want to modify the structure of your dirs!
################################################################################
#### ------------------ INTERNALIZING PROBLEMS ( @ 9 ) -------------------- ####
################################################################################
#load data from previous scripts
#load('prenatal_stress.Rdata')
#load('postnatal_stress.Rdata')
#load('alspac.table.collapsed.Rdata')
# Harmonise variable names: ALSPAC exports mix upper and lower case.
names(alspac.table)=tolower(names(alspac.table))
# Internalizing scale @ 9 yrs # informant: MOTHER.
internalizing = alspac.table[,c('cidb2957', # note: lowercase 'b' here
                                'qlet',
                                'kv9991a', #age 10 (months)
                                'tb9991a', #age 13
                                'fh0011a', # age 15
                                'fj003a', # age 17? VB: not in dataset? but was in original?
                                'ypb9992', # age 22 VB: also not in?
                                'kv8603',# age 10 #'agechild_cbcl9m',
                                'tb8603', # age 13
                                'fh6876', # age 15
                                'fjci350', # age 17 (CIS-R)
                                'ypb5180')] # age 22 (SMFQ; might not be the best measure)
#'nmisint_9m', # number of missing values in internalizing scale items
#'sum_int_9m')] # weighted sum score internalizing scale (allowing 25% missing)
# Convert factor columns to their numeric level VALUES;
# as.numeric(levels(x))[x] is the standard idiom for numeric-valued factors.
for (i in c('kv9991a',
            'tb9991a',
            'fh0011a',
            'fj003a',
            'ypb9992',
            'kv8603',
            'tb8603',
            'fjci350',
            'ypb5180')){
  internalizing[,i] = as.numeric(levels(internalizing[,i]))[internalizing[,i]]
}
# NOTE(review): unlike the loop above, this converts the factor directly with
# as.numeric(), which returns the underlying level CODES, not the level values.
# Confirm this is intended; otherwise use as.numeric(levels(x))[x] here too.
internalizing$fh6876=as.numeric(internalizing$fh6876)
#corbetw2mat(internalizing.n[,-c(1:2)], internalizing[,-c(1:2)], what = "paired")
# Calculation (based on SPSS script available on the V: drive):
# if 24 out of 32 items are available (i.e. 75%), sum of the item
# scores * (32 / 32 - nmisnt_9m).
# Let's make it a bit more reader friendly
#colnames(internalizing)[3:4] <- c("n_missing_intern", "intern_score");
# Rename to self-describing columns: int.age.* = child age at assessment,
# intern_score.* = internalizing score at that age.
internalizing = internalizing %>%
  rename(
    int.age.10y = kv9991a,
    int.age.13y = tb9991a,
    int.age.15y = fh0011a,
    int.age.17y = fj003a,
    int.age.22y = ypb9992,
    intern_score.10y = kv8603,
    intern_score.13y = tb8603,
    intern_score.15y = fh6876,
    intern_score.17y = fjci350,
    intern_score.22y = ypb5180
  )
# One alternative could be to use the anxious/depressed empirical sub-scale (9y, mother report)
# However I did give it a shot and it does not seem to perfom better than the internalizing one.
################################################################################
#### -------------------------- FAT MASS ( @ 9 ) -------------------------- ####
################################################################################
# Although the original plan for this project involved a more comprehensive measure
# of child metabolic syndrome (see CMR_score.R file for specifics), android fat mass
# was the only metabolic risk variable that showed appreciable variance in such young
# children and the highest correlation with internalizing (small but significant r =.12)
# It was also selected on the base of data availability both cross-sectionally and
# for future longitudinal assessment.
# Read in the datasets
#fatmass <- readquick("CHILDFATMASS9_13092016.sav") # 5862 obs of 28 vars
# Select only the necessary measures
fat_mass = alspac.table[,c('cidb2957','qlet',
                           'f9003c', # age 10 (months) #'agechild9_visit1', # this value (age) is the same for all other datasets
                           'kg998a', # age13
                           'fkar0010', # age24
                           'f9dx126', #trunkFM at age10y #'fat_mass_androidchild9')]
                           'fg3257', # andrFM at age13y
                           'fh2257', # andrFM at age15y
                           'fjdx138', # andrFM at age17y
                           'fkdx1041')] # andrFM at age24y
# Same factor-to-numeric-value conversion as above.
for (i in c('f9003c',
            'kg998a',
            'fkar0010',
            'f9dx126',
            'fg3257',
            'fh2257',
            'fjdx138',
            'fkdx1041')){
  fat_mass[,i] = as.numeric(levels(fat_mass[,i]))[fat_mass[,i]]
}
#colnames(fat_mass)[3] <- "fat_mass"
# fm.age.* = child age at the scan, fat_mass.* = (android/trunk) fat mass.
fat_mass = fat_mass %>%
  rename(
    fm.age.10y = f9003c,
    fm.age.13y = kg998a,
    fm.age.24y = fkar0010,
    fat_mass.10y = f9dx126,
    fat_mass.13y = fg3257,
    fat_mass.15y = fh2257,
    fat_mass.17y = fjdx138,
    fat_mass.24y = fkdx1041
  )
################################################################################
# Merge the two main (mental and physical) outcomes into one dataset
# (left join on child id + birth-order letter).
PCM_outcome <- merge(internalizing, fat_mass, by = c('cidb2957','qlet'), all.x = TRUE)
# ------------------------------------------------------------------------------
# Before we can use them in the analysis, the outcome variables need to be
# standardized, so here we take the standard deviation (z) score: each
# "<stem>.<age>" column gets a matching "<stem>_z.<age>" column.
raw_outcome_cols <- c(
  paste0("intern_score.", c("10y", "13y", "15y", "17y", "22y")),
  paste0("fat_mass.",     c("10y", "13y", "15y", "17y", "24y"))
)
for (col in raw_outcome_cols) {
  z_col <- sub(".", "_z.", col, fixed = TRUE)  # e.g. fat_mass.10y -> fat_mass_z.10y
  PCM_outcome[[z_col]] <- as.numeric(scale(PCM_outcome[[col]]))
}
################################################################################
#### ---------------------------- COVARIATES ------------------------------ ####
################################################################################
# Variables that will be used in the covariate models of this project are those
# marked with ###. they include: 'sex', 'age_child', 'm_smoking', 'm_drinking'
# and 'm_bmi_before_pregnancy'.
# For the other demographic auxiliary variables (used for imputation): when they
# were assessed both prenatally and postnatally, both measures are included.
# Auxiliary variables for this project include: 'ethnicity', 'm_age', parity',
# 'm_smoking', 'gest_age_birth', 'gest_weight', 'm_bmi_pregnancy', 'm_bmi_5yrs',
# 'sex', 'm_dep_pregnancy', 'p_dep_pregnacy', "m_dep_3yrs", "p_dep_3yrs"
# ------------------------------------------------------------------------------
### AGE of the child
# Combine age of the child measured during first visit and at CBCL administration
# This value will serve as a covariate in the first adjusted model.
#PCM_outcome$age_child = (PCM_outcome$agechild9_visit1 + PCM_outcome$agechild_cbcl9m) / 2
# in ALSPAC: average across intS and fatmass age
PCM_outcome$age_child.10y = (PCM_outcome$int.age.10y + PCM_outcome$fm.age.10y) / 2
PCM_outcome$age_child.13y = (PCM_outcome$int.age.13y + PCM_outcome$fm.age.13y) / 2
PCM_outcome$age_child.15y = PCM_outcome$int.age.15y
PCM_outcome$age_child.17y = PCM_outcome$int.age.17y
# labelled "23y": the midpoint of the 22y (internalizing) and 24y (fat mass) waves
PCM_outcome$age_child.23y = (PCM_outcome$int.age.22y + PCM_outcome$fm.age.24y) / 2
# OPTIONAL: check age differnce between measurements
# plot(PCM_outcome$agechild9_visit1, PCM_outcome$agechild_cbcl9m)
# summary(PCM_outcome$agechild9_visit1 - PCM_outcome$agechild_cbcl9m)
#-------------------------------------------------------------------------------
### MATERNAL SMOKING during pregnancy
# smokingv1 <- readquick("MATERNALSMOKING_22112016.sav") # 9778 obs of 11 variables
#
# smoking = smokingv1[,c('idm', 'smoke_all')] # (1) never a smoker;
# # (2) smoked until pregnancy was known (i.e., first trimester only);
# # (3) continued smoking during pregnancy.
# colnames(smoking)[2] = "m_smoking"
# ALSPAC: 2 categories (sustained vs (stopped or never))
# e170 ('Y'/'N') -- presumably smoking during pregnancy; rows with e170 missing
# stay NA. Confirm the exact item wording in the ALSPAC data dictionary.
alspac.table$m_smoking=NA
alspac.table[which(alspac.table$e170=='N'),"m_smoking"] <- 0
alspac.table[which(alspac.table$e170=='Y'),"m_smoking"] <- 1
#-------------------------------------------------------------------------------
### MATERNAL ALCOHOL CONSUMPTION during pregnancy
# drinkingv1 <- readquick("GEDRAGSGROEP_MaternalDrinking_22112016.sav") #drinkingv2 <- readquick("MATERNALALCOHOL_22112016.sav") # old variable
#
# drinking = drinkingv1[,c('idm', 'mdrink_updated')] # (0) never;
# # (1) until pregnancy was known (i.e., first trimester only);
# # (2) continued during pregnancy occasionally;
# # (3) continued during pregnancy frequently.
# colnames(drinking)[2] = "m_drinking"
# ALSPAC
# 0 = never drank (both b721 and e220 say "never"/"Not at all");
# 1 = any drinking; rows with either item missing stay NA.
alspac.table$m_drinking=NA
alspac.table[which(alspac.table$b721=='never' & alspac.table$e220=='Not at all'),"m_drinking"] <- 0
alspac.table[which(!(alspac.table$b721=='never' & alspac.table$e220=='Not at all') &
                     !(is.na(alspac.table$b721) | is.na(alspac.table$e220))),"m_drinking"] <- 1
#-------------------------------------------------------------------------------
## Other variables
# child_general <- readquick("CHILD-ALLGENERALDATA_07072020.sav") # 9901 obs of 122
#
# # Ethnicity recode - dichotomized into: dutch, western and non-western;
# for (i in 1:9901) {
#  if (is.na(child_general$ethnfv2[i])) { child_general$ethnicity[i] <- NA
#  } else if (child_general$ethnfv2[i] == 1) { # Dutch
#    child_general$ethnicity[i] <- 0
#  } else if (child_general$ethnfv2[i] == 300 | child_general$ethnfv2[i] == 500 | child_general$ethnfv2[i] >= 700) {
#    # American, western (300) Asian, western (500) European (700), Oceanie (800)
#    child_general$ethnicity[i] <- 1
#  } else {
#    child_general$ethnicity[i] <- 2 }
#  # Indonesian (2), Cape Verdian (3), Maroccan (4) Dutch Antilles (5) Surinamese
#  # (6) Turkish (7) African (200), American, non western (400), Asian, non western (600)
# }
# ALSPAC recode: 1 = both parents reported White (c800 & c801), 0 = otherwise;
# rows with either item missing stay NA.
alspac.table$ethnicity=NA
alspac.table[which(alspac.table$c800=='White' & alspac.table$c801=='White'),"ethnicity"] <- 1
alspac.table[which(!(alspac.table$c800=='White' & alspac.table$c801=='White') &
                     !(is.na(alspac.table$c800) | is.na(alspac.table$c801))),"ethnicity"] <- 0
# pre-pregnancy BMI
alspac.table$dw002=as.numeric(levels(alspac.table$dw002))[alspac.table$dw002] # Pre-pregnancy weight (Kg) at 12w gest
alspac.table$m4221=as.numeric(levels(alspac.table$m4221))[alspac.table$m4221] # Height (cm) 7y 1m
# BMI = kg / m^2; height is taken from the 7y-1m questionnaire (see above) --
# presumably acceptable because adult height is stable; confirm.
alspac.table$bmi_0 = alspac.table$dw002/((alspac.table$m4221/100)^2) # calculating pre-pregnancy BMI
general_cov_aux = alspac.table[,c('cidb2957', 'qlet',
                                  'm_smoking',
                                  'm_drinking',
                                  'kz021', ### SEX - 1 = boy; 2 = girl.
                                  'ethnicity', ### ETHNICITY - dutch, western, non-western
                                  'bmi_0', ### MATERNAL BMI - self-reported, before pregnancy
                                  'mult', # used for exclusion criteria
                                  #'mother', # mother id used to identify siblings (for exclusion)
                                  'b032', # parity (used for imputation)
                                  'bestgest', # gestational age at birth (used for imputation)
                                  'kz030', # gestational weight (used for imputation)
                                  #'bmi_1', # maternal BMI during pregnancy (used for imputation)
                                  'mz028b')] # maternal age at intake (used for imputation)
# ALSPAC: bmi tbd
# Below is what ALSPAC has - should we use all of these time points? Or too many? And 9y already too late?
# dw043 = weight/(height in meters) 12w gest
# ew002 = mothers postnatal weight (kg) 8w
# m4220 = Respondent's weight (kg) 7y 1m
# n1140 = Respondent's weight (kg) 8y 1m
# p1290 = Respondent's weight (kg) 9y 1m
# Convert the remaining factor columns to their numeric level values.
for (i in c('mult',
            'b032',
            'bestgest',
            'kz030',
            'mz028b')){
  general_cov_aux[,i] = as.numeric(levels(general_cov_aux[,i]))[general_cov_aux[,i]]
}
# Again, let's try to keep it user friendly
general_cov_aux = general_cov_aux %>%
  rename(
    sex = kz021,
    gest_age_birth = bestgest,
    gest_weight = kz030,
    m_age_cont = mz028b,
    twin = mult,
    parity = b032,
    m_bmi_before_pregnancy = bmi_0)
# ALSPAC: the following section still needs to be done
# #-------------------------------------------------------------------------------
# # Maternal BMI at age 5 (used for imputation)
# m_anthropometry_5yrs = readquick('MOTHERANTHROPOMETRY_18022013.sav')
# m_bmi_5yrs = m_anthropometry_5yrs[,c('mother','bmimotherf5')]; colnames(m_bmi_5yrs)[2] = "m_bmi_5yrs"
#
# # Merge with the other general variables
# general_cov_aux = merge(general_cov_aux, m_bmi_5yrs, by = 'mother', all.x = TRUE)
#
#
# #-------------------------------------------------------------------------------
# # Maternal and paternal depression during pregnancy and at age 3
# bsi_pregnancy_m = readquick('GR1003-BSI D1_22112016.sav') # 9778 obs of 261 vars
# bsi_pregnancy_p = readquick('GR1004-BSI G1_22112016.sav') # 9778 obs of 261 vars
# bsi_3yrs = readquick('BSI 3 years of age_GR1065 G1-GR1066 C1_22112016.sav') # 9897 obs of 49 vars
#
# # Depression during pregnancy
# dep_pregnancy_m = bsi_pregnancy_m[, c('idm','dep')]; colnames(dep_pregnancy_m)[2] = c("m_dep_cont_pregnancy")
# dep_pregnancy_p = bsi_pregnancy_p[, c('idm','dep_p')]; colnames(dep_pregnancy_p)[2] = c("p_dep_cont_pregnancy")
#
# # Merge it with the previous dataset
# general_cov_aux <- Reduce(function(x,y) merge(x = x, y = y, by = 'idm', all.x = TRUE),
#                      list(general_cov_aux, dep_pregnancy_m, dep_pregnancy_p))
#
# # Depression @ 3y: Items 9, 16, 17, 18, 35, and 50
# d <- data.frame(bsi_3yrs$g0100365, bsi_3yrs$g0100665, bsi_3yrs$g0100765, bsi_3yrs$g0100865, bsi_3yrs$g0101365, bsi_3yrs$g0102165, # mother report
#                 bsi_3yrs$c0100366, bsi_3yrs$c0100666, bsi_3yrs$c0100766, bsi_3yrs$c0100866, bsi_3yrs$c0101366, bsi_3yrs$c0102166) # father report
# n_items_m <- rowSums(!is.na(d[,1:6])); n_items_p <- rowSums(!is.na(d[,7:12]))
# bsi_3yrs$m_dep_cont_3yrs <- ifelse(n_items_m >= 5, yes = (rowSums(d[,1:6])/n_items_m)-1, no = NA)
# bsi_3yrs$p_dep_cont_3yrs <- ifelse(n_items_p >= 5, yes = (rowSums(d[,7:12])/n_items_p)-1, no = NA)
#
# dep_3yrs = bsi_3yrs[, c('idc','m_dep_cont_3yrs', 'p_dep_cont_3yrs')]
# # Merge it with the previous dataset
# general_cov_aux = merge(general_cov_aux, dep_3yrs, by = 'idc', all.x = TRUE)
#-------------------------------------------------------------------------------
################################################################################
# Merge all covariates / auxiliary variables together
# covariates_and_auxiliary <- Reduce(function(x,y) merge(x = x, y = y, by = 'idm', all.x = TRUE),
#                      list(general_cov_aux, smoking, drinking))
# ALSPAC for now:
covariates_and_auxiliary = general_cov_aux
################################################################################
################################################################################
# merging outcome variables, covariates and auxiliary variables in one dataset
# (left join: keeps every row of PCM_outcome)
PCM_project = merge(PCM_outcome, covariates_and_auxiliary, by = c('cidb2957','qlet'), all.x = T)
# A bit of a quick and dirty fix to make merging with ELS easier
# colnames(PCM_project)[which(colnames(PCM_project) == 'idc')] <- toupper('idc')
# colnames(PCM_project)[which(colnames(PCM_project) == 'idm')] <- toupper('idm')
################################################################################
#-------------------------------------------------------------------------------
################################################################################
#### --------------------------- save and run ----------------------------- ####
################################################################################
# Save the dataset in an .RData file, in the directory where the raw data are stored
save(PCM_project, file = "PCM_project.RData")
# Save covariates and auxiliary variables separately as well
save(covariates_and_auxiliary, file = 'covariates_and_auxiliary.RData')
|
# Load the tidyverse suite plus helper packages used in these exercises.
library(tidyverse)
# NOTE(review): tibble, tidyr, dplyr and ggplot2 are already attached by
# tidyverse; the explicit calls below are redundant but harmless.
library(tibble)
library(tidyr)
library(dplyr)
library(readxl)
library(ggplot2)
library(lubridate)
# Covariance and Correlation
# Small worked example: two score vectors, X and Y, in one data frame.
df <- data.frame(
  X = c(90, 90, 60, 60, 30),
  Y = c(60, 90, 60, 60, 30)
)
cov(df) # 2 x 2 covariance matrix
cor(df) # 2 x 2 correlation matrix
# Activity: Covariance and Correlation
# Download the UCI Cleveland heart-disease dataset; '?' encodes missing values.
# NOTE(review): requires network access at run time.
heart<- read.csv("https://archive.ics.uci.edu/ml/machine-learning-databases/heart-disease/processed.cleveland.data",header=FALSE,sep=",",na.strings = '?')
# Attach the conventional column names for this dataset.
names(heart) <- c( "age", "sex", "cp", "trestbps", "chol","fbs", "restecg",
                   "thalach","exang", "oldpeak","slope", "ca", "thal", "num")
# Covariance and correlation matrices for five numeric columns.
heart %>%
  select(age,chol,fbs,thalach,exang) %>%
  cov()
heart %>%
  select(age,chol,fbs,thalach,exang) %>%
  cor()
# Linear Regression
# Fit y on x for five points, inspect the fit, predict at x = 6, and draw
# the fitted line over the scatterplot.
x <- seq_len(5) # same as 1:5
y <- c(1.3, 4.3, 5.5, 8.4, 14.2)
plot(x, y)
m <- lm(y ~ x)
summary(m)
predict(m, newdata = data.frame(x = 6))
abline(m)
# Activity: Linear Regression
# Regress cholesterol on age in the heart data and predict at age 60.
age <- heart$age
chol <- heart$chol
plot(age,chol)
m <- lm(chol~age)
predict(m,data.frame(age=60))
abline(m)
# Multivariate Regression
# mpg modelled on weight and horsepower (built-in mtcars data).
m <- lm(mpg~wt+hp,data=mtcars)
# Activity: Multivariate Regression
# Cholesterol modelled on age and maximum heart rate (thalach); predict for
# a 60-year-old with thalach = 180.
age <- heart$age
chol <- heart$chol
thalach<- heart$thalach
m <- lm(chol~age+thalach)
predict(m,data.frame(age=60,thalach=180))
# Hypothesis Testing
# Student's sleep data: compare the two drug groups, first two-sided, then
# with the one-sided alternative that group 1's mean is smaller.
boxplot(extra ~ group, data = sleep)
tt_two_sided <- t.test(extra ~ group, data = sleep)
tt_two_sided
tt_one_sided <- t.test(extra ~ group, data = sleep, alternative = "less")
tt_one_sided
# Activity : Hypothesis Testing
# Compare chick weights between two feed supplements (built-in chickwts data).
boxplot(weight~feed,data=chickwts)
# Keep only the two feeds of interest; t.test() re-factors the grouping
# variable, so the unused feed levels are dropped automatically.
d <- subset(chickwts,feed == "casein" | feed =="horsebean")
t.test(weight~feed,data=d)
# BUG FIX: the two one-sided tests below referenced the undefined object
# `chickwts.test`, which stopped the script with an error; they now reuse
# the subset `d` created above.
t.test(weight~feed,data=d,alternative='less')
t.test(weight~feed,data=d,alternative='greater')
# ANOVA
# One-way analysis of variance of weight across all six feed types.
m <- aov(weight~feed,data=chickwts)
summary(m)
# Activity : Hypothesis Testing (one-way ANOVA across three shampoo brands)
shampoo = data.frame('A'=c(36.6,39.2,30.4,37.1,34.1),'B' = c(17.5,20.6,18.7,25.7,22.0),'C'=c(15.0,10.4,18.9,10.5,15.2))
shampoo <- as_tibble(shampoo)
# Reshape wide -> long (brand, effect), then draw one boxplot per brand.
# NOTE(review): gather() is superseded by pivot_longer() in current tidyr.
shampoo %>%
  gather(brand, effect) %>%
  boxplot(effect~brand,.)
# Same reshape, then fit the one-way ANOVA (the fit is printed, not stored).
shampoo %>%
  gather(brand, effect) %>%
  aov(effect~brand,.)
iris | /exercises/topic3.R | no_license | tertiarycourses/WSQ-R-Data-Analytics | R | false | false | 1,971 | r | library(tidyverse)
# Helper packages used in these exercises.
library(tibble)
library(tidyr)
library(dplyr)
library(readxl)
library(ggplot2)
library(lubridate)
# Covariance and Correlation
# Small worked example on two score vectors.
df<-data.frame(X=c(90,90,60,60,30),Y=c(60,90,60,60,30))
cov(df)
cor(df)
# Activity: Covariance and Correlation
# Download the UCI Cleveland heart-disease data; '?' encodes missing values.
# NOTE(review): requires network access at run time.
heart<- read.csv("https://archive.ics.uci.edu/ml/machine-learning-databases/heart-disease/processed.cleveland.data",header=FALSE,sep=",",na.strings = '?')
names(heart) <- c( "age", "sex", "cp", "trestbps", "chol","fbs", "restecg",
                   "thalach","exang", "oldpeak","slope", "ca", "thal", "num")
# Covariance and correlation matrices for five numeric columns.
heart %>%
  select(age,chol,fbs,thalach,exang) %>%
  cov()
heart %>%
  select(age,chol,fbs,thalach,exang) %>%
  cor()
# Linear Regression
# Fit y on x, inspect the fit, predict at x = 6, draw the fitted line.
x <- 1:5
y <- c(1.3,4.3,5.5,8.4,14.2)
plot(x,y)
m <- lm(y~x)
summary(m)
predict(m,data.frame(x=6))
abline(m)
# Activity: Linear Regression
# Cholesterol regressed on age; predict at age 60.
age <- heart$age
chol <- heart$chol
plot(age,chol)
m <- lm(chol~age)
predict(m,data.frame(age=60))
abline(m)
# Multivariate Regression
# mpg modelled on weight and horsepower (built-in mtcars data).
m <- lm(mpg~wt+hp,data=mtcars)
# Activity: Multivariate Regression
# Cholesterol on age + maximum heart rate; predict at (age 60, thalach 180).
age <- heart$age
chol <- heart$chol
thalach<- heart$thalach
m <- lm(chol~age+thalach)
predict(m,data.frame(age=60,thalach=180))
# Hypothesis Testing
# Student's sleep data: two-sided, then one-sided t-test.
boxplot(extra~group,data=sleep)
t.test(extra~group,data=sleep)
t.test(extra~group,data=sleep,alternative="less")
# Activity : Hypothesis Testing
# Compare chick weights between two feed supplements (built-in chickwts data).
boxplot(weight~feed,data=chickwts)
# Keep only the two feeds of interest; t.test() re-factors the grouping
# variable, so the unused feed levels are dropped automatically.
d <- subset(chickwts,feed == "casein" | feed =="horsebean")
t.test(weight~feed,data=d)
# BUG FIX: the two one-sided tests below referenced the undefined object
# `chickwts.test`, which stopped the script with an error; they now reuse
# the subset `d` created above.
t.test(weight~feed,data=d,alternative='less')
t.test(weight~feed,data=d,alternative='greater')
# ANOVA
# One-way analysis of variance of weight across all six feed types.
m <- aov(weight~feed,data=chickwts)
summary(m)
# Activity : Hypothesis Testing (one-way ANOVA across three shampoo brands)
shampoo = data.frame('A'=c(36.6,39.2,30.4,37.1,34.1),'B' = c(17.5,20.6,18.7,25.7,22.0),'C'=c(15.0,10.4,18.9,10.5,15.2))
shampoo <- as_tibble(shampoo)
# Reshape wide -> long (brand, effect), then draw one boxplot per brand.
# NOTE(review): gather() is superseded by pivot_longer() in current tidyr.
shampoo %>%
  gather(brand, effect) %>%
  boxplot(effect~brand,.)
# Same reshape, then fit the one-way ANOVA (the fit is printed, not stored).
shampoo %>%
  gather(brand, effect) %>%
  aov(effect~brand,.)
iris |
# Auto-generated fuzzing harness (AFL/valgrind test case): calls the package's
# internal C++ entry point with one fixed argument list and prints the result's
# structure. The extreme doubles in `in_m` are fuzzer-chosen edge values.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.32784507357645e-308, 9.53818251908624e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(CNull:::communities_individual_based_sampling_beta,testlist)
str(result) | /CNull/inst/testfiles/communities_individual_based_sampling_beta/AFL_communities_individual_based_sampling_beta/communities_individual_based_sampling_beta_valgrind_files/1615831300-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 362 | r | testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.32784507357645e-308, 9.53818251908624e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(CNull:::communities_individual_based_sampling_beta,testlist)
str(result) |
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# Thin auto-generated wrapper: forwards to the compiled routine registered as
# `_rpackage_kmeans_interface` via Rcpp. Arguments are passed through untouched
# (presumably a data matrix and a cluster count -- see the C++ source).
kmeans_interface <- function(data_mat, clusters) {
    .Call(`_rpackage_kmeans_interface`, data_mat, clusters)
}
| /R/RcppExports.R | no_license | jkaufy/rpackage | R | false | false | 241 | r | # Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# Thin auto-generated wrapper: forwards to the compiled routine registered as
# `_rpackage_kmeans_interface` via Rcpp. Arguments are passed through untouched
# (presumably a data matrix and a cluster count -- see the C++ source).
kmeans_interface <- function(data_mat, clusters) {
    .Call(`_rpackage_kmeans_interface`, data_mat, clusters)
}
|
# modify input arguments so not order dependent; a1 = a1, etc...
#' Negative log-likelihood function: recovery model
#'
#' Function returning negative log-likelihood (nll) for patterns of survival in
#' infected and uninfected treatments, when infected hosts can recover from
#' infection.
#'
#' This model assumes all the hosts in an infected treatment are all initially
#' infected, and they can all potentially recover from infection. Recovered
#' hosts are assumed to only experience background mortality equivalent to that
#' experienced by matching uninfected or control individuals; no assumptions are
#' made as to whether they are still infected or infectious. It is also assumed
#' that the timing of recovery from infection is not directly observed, but an
#' individual's infected/recovery status can be determined after they have died
#' or been censored.
#'
#' The probability that an infection 'survives' over time, i.e., the host does
#' not recover from infection, is assumed to follow a probability distribution
#' which acts independently of the probability distributions determining
#' background mortality or mortality due to infection.
#'
#' This function only estimates location and scale parameters as constants, it
#' is not designed to estimate them as functions.
#'
#'
#' @param a1,b1 location & scale parameters for background mortality
#' @param a2,b2 location & scale parameters for mortality due to infection
#' @param a3,b3 location & scale parameters for how long infection 'survives'
#' @param data a data.frame with the data
#' @param d1,d2,d3 probability distributions for background mortality, mortality
#' due to infection & how long infection 'survives' ("Weibull", "Gumbel",
#' "Frechet")
#' @return numeric
#' @section Warning: requires the data to be specified in a specific format;
#' see vignette 'data format' for details
#' @examples
#' \donttest{
#' # NB the data to analyse needs to be in a data frame of a specific form
#' head(recovery_data)
#'
#' # step #1: prepare nll function for analysis
#' m01_prep_function <- function(a1, b1, a2, b2, a3, b3){
#' nll_recovery(a1, b1, a2, b2, a3, b3,
#' data = recovery_data,
#' d1 = "Weibull", d2 = "Weibull", d3 = "Weibull"
#' )}
#'
#' # step #2: send 'prep_function' to mle2 for maximum likelihood estimation,
#' # specifying starting values
#' m01 <- mle2(m01_prep_function,
#' start = list(a1 = 2, b1 = 0.5, a2 = 2, b2 = 0.5, a3 = 2, b3 = 0.5)
#' )
#'
#' summary(m01)
#'
#' # values used to simulate data were for the Weibull distribution;
#' # a1 = 2.8, b1 = 0.5, a2 = 2.2, b2 = 0.35, a3 = 2.35, b3 = 0.35
#' }
nll_recovery <- function(
  a1 = a1, b1 = b1, a2 = a2, b2 = b2, a3 = a3, b3 = b3,
  data = data, d1 = "", d2 = "", d3 = ""
  ){
  # Negative log-likelihood of the recovery model; all work is delegated to
  # P_recovery_calc_likelihood_matrix.
  # BUG FIX: d1, d2 and d3 were previously passed down as the literal "",
  # so the distributions requested by the caller (e.g. "Weibull", "Gumbel",
  # "Frechet" -- see the roxygen docs above) were silently ignored.
  # They are now forwarded unchanged.
  nll <- P_recovery_calc_likelihood_matrix(
    a1 = a1, b1 = b1, a2 = a2, b2 = b2, a3 = a3, b3 = b3,
    data = data, d1 = d1, d2 = d2, d3 = d3)
}
### subfunctions used by above ###
P_recovery_calc_survival_functions <- function(
  a1 = a1, b1 = b1, a2 = a2, b2 = b2, a3 = a3, b3 = b3,
  data = data, d1 = "", d2 = "", d3 = ""){
  # Returns a matrix with one row per time step t = 1 .. max(data$t) holding
  # the density (f), survival (S) and hazard (h) functions of the three
  # processes (1 = background mortality, 2 = mortality due to infection,
  # 3 = 'survival' of the infection), plus the three density-times-survival
  # products used in the likelihood, for the given a/b/d parameter values.
  tmax <- max(data$t)
  calc.matrix <- matrix(0, tmax, 13)
  colnames(calc.matrix) <- c("t", "f1", "f2", "f3", "S1", "S2", "S3",
                             "h1", "h2", "h3", "f1S2S3", "f2S1S3", "f3S1S2")
  for (t in seq_len(tmax)) {
    z1 <- P_get_zx(t, a1, b1, d1)
    z2 <- P_get_zx(t, a2, b2, d2)
    z3 <- P_get_zx(t, a3, b3, d3)
    f1 <- P_get_fx(t, z1, b1, d1)
    f2 <- P_get_fx(t, z2, b2, d2)
    f3 <- P_get_fx(t, z3, b3, d3)
    S1 <- P_get_Sx(t, z1, d1)
    S2 <- P_get_Sx(t, z2, d2)
    S3 <- P_get_Sx(t, z3, d3)
    h1 <- P_get_hx(t, z1, b1, d1)
    h2 <- P_get_hx(t, z2, b2, d2)
    h3 <- P_get_hx(t, z3, b3, d3)
    # fill the whole row at once instead of column-by-column
    calc.matrix[t, ] <- c(t,
                          f1, f2, f3,
                          S1, S2, S3,
                          h1, h2, h3,
                          f1 * S2 * S3,
                          f2 * S1 * S3,
                          f3 * S1 * S2)
  }
  calc.matrix
}
###
P_recovery_calc_f3S1S2 <- function(a1 = a1, b1 = b1,
                                   a2 = a2, b2 = b2,
                                   a3 = a3, b3 = b3,
                                   data = data, d1 = "", d2 = "", d3 = ""){
  # For each pair of times (t, u) with u <= t, returns the probability density
  # of recovering at time u while still alive: f3(u) * S1(u) * S2(u), i.e. the
  # infection ends at u and the host has survived both background mortality
  # and infection-induced mortality up to u. Entries with u > t stay zero.
  # FIX: removed the dead local `matrix01`, which was filled with column
  # indices but never used or returned.
  tmax <- max(data$t)
  matrix02 <- matrix(0, tmax, tmax)
  for (t in seq_len(tmax)) {
    for (u in 1:t) {
      zu1 <- P_get_zu(u, a1, b1, d1)
      zu2 <- P_get_zu(u, a2, b2, d2)
      zu3 <- P_get_zu(u, a3, b3, d3)
      Su1 <- P_get_Su(u, zu1, d1)
      Su2 <- P_get_Su(u, zu2, d2)
      fu3 <- P_get_fu(u, zu3, b3, d3)
      matrix02[t, u] <- fu3 * Su1 * Su2
    }
  }
  matrix02
}
###
P_recovery_calc_St_Sr <- function(
  a1 = a1, b1 = b1, a2 = a2, b2 = b2, a3 = a3, b3 = b3,
  data = data, d1 = "", d2 = "", d3 = ""){
  # For each pair of times (t, u) with u <= t, returns the conditional
  # probability of surviving background mortality from the time of recovery
  # u (S[r]) to time t (S[t]): S1(t) / S1(u). Entries with u > t stay zero.
  # Only the background-mortality parameters a1, b1, d1 are used; the other
  # arguments are kept for a uniform subfunction interface.
  # FIX: removed the dead local `matrix03`, which was filled with row indices
  # but never used or returned.
  tmax <- max(data$t)
  matrix04 <- matrix(0, tmax, tmax)
  for (t in seq_len(tmax)) {
    for (u in 1:t) {
      z1t <- P_get_zx(t, a1, b1, d1)
      z1u <- P_get_zu(u, a1, b1, d1)
      S1t <- P_get_Sx(t, z1t, d1)
      S1u <- P_get_Su(u, z1u, d1)
      matrix04[t, u] <- S1t / S1u
    }
  }
  matrix04
}
###
P_recovery_calc_f3S1S2_St_Sr <- function(
  a1 = a1, b1 = b1, a2 = a2, b2 = b2, a3 = a3, b3 = b3,
  data = data, d1 = "", d2 = "", d3 = ""){
  # Probability of recovering at each time u and then surviving (background
  # mortality only) on to time t: the element-wise product of
  # P_recovery_calc_f3S1S2 and P_recovery_calc_St_Sr.
  # BUG FIX: d1, d2 and d3 are now forwarded to the subfunctions; previously
  # the literal "" was passed, so the caller's distribution choice was
  # silently ignored.
  f3S1S2 <- P_recovery_calc_f3S1S2(
    a1 = a1, b1 = b1, a2 = a2, b2 = b2, a3 = a3, b3 = b3,
    data = data, d1 = d1, d2 = d2, d3 = d3)
  St.Sr <- P_recovery_calc_St_Sr(
    a1 = a1, b1 = b1, a2 = a2, b2 = b2, a3 = a3, b3 = b3,
    data = data, d1 = d1, d2 = d2, d3 = d3)
  f3S1S2_St_Sr <- f3S1S2 * St.Sr
}
###
P_recovery_calc_sum_f3S1S2_St_Sr <- function(
  a1 = a1, b1 = b1, a2 = a2, b2 = b2, a3 = a3, b3 = b3,
  data = data, d1 = "", d2 = "", d3 = ""){
  # One-column matrix (one row per time t) with the summed probability of
  # having recovered at any time u <= t and survived to t (row sums of
  # P_recovery_calc_f3S1S2_St_Sr).
  # BUG FIX: d1, d2 and d3 are now forwarded (previously the literal "" was
  # passed, so the caller's distribution choice was silently ignored).
  # Also removed the unused local `tmax`.
  f3S1S2.St.Sr <- P_recovery_calc_f3S1S2_St_Sr(
    a1 = a1, b1 = b1, a2 = a2, b2 = b2, a3 = a3, b3 = b3,
    data = data, d1 = d1, d2 = d2, d3 = d3)
  vector.sum.f3S1S2.St.Sr <- rowSums(f3S1S2.St.Sr)
  # keep as a one-column matrix so downstream code can index it like the others
  sum_f3S1S2_St_Sr <- matrix(vector.sum.f3S1S2.St.Sr, , 1)
}
###
P_recovery_calc_likelihood_matrix <- function(
  a1 = a1, b1 = b1, a2 = a2, b2 = b2, a3 = a3, b3 = b3,
  data = data, d1 = "", d2 = "", d3 = ""){
  # Calculates the negative log-likelihood of the recovery model from the
  # survival functions and the recovery probabilities.
  # BUG FIX: d1, d2 and d3 are now forwarded to every subfunction; previously
  # the literal "" was passed, so the distributions chosen by the caller were
  # silently ignored. Dead intermediates (tmax, f3S1S2, St.Sr, f3S1S2.St.Sr)
  # that were computed but never used have been removed; the sum is obtained
  # via P_recovery_calc_sum_f3S1S2_St_Sr below.
  # NOTE(review): assumes `data` holds one row per time step t = 1..max(t),
  # so the element-wise products below align rows by time -- confirm against
  # the 'data format' vignette.
  # convert data.frame(data) to matrix
  matrix01 <- as.matrix(data)
  survival.functions <- P_recovery_calc_survival_functions(
    a1 = a1, b1 = b1, a2 = a2, b2 = b2, a3 = a3, b3 = b3,
    data = data, d1 = d1, d2 = d2, d3 = d3)
  sum.f3S1S2.St.Sr <- P_recovery_calc_sum_f3S1S2_St_Sr(
    a1 = a1, b1 = b1, a2 = a2, b2 = b2, a3 = a3, b3 = b3,
    data = data, d1 = d1, d2 = d2, d3 = d3)
  h1 <- matrix(survival.functions[, 'h1'], , 1) # background hazard, by time
  # create & fill matrix for likelihood calculations; columns:
  #  1 time; 2 control deaths; 3 control censored; 4 infected deaths;
  #  5 infected censored; 6-8 recovered-then-died; 9-11 recovered-then-censored;
  # 12 total likelihood per row; 13 its log; 14 log * frequency;
  # 15 the overall sum of column 14 (same value in every row)
  likelihood.matrix <- matrix(0,nrow(matrix01), 15)
  likelihood.matrix[, 01] <- matrix01[, "t"]
  likelihood.matrix[, 02] <- matrix01[, 'control.d'] *
    survival.functions[, 'f1']
  likelihood.matrix[, 03] <- matrix01[, 'control.c'] *
    survival.functions[, 'S1']
  likelihood.matrix[, 04] <- matrix01[, 'infected.d'] *
    (survival.functions[, 'f1S2S3'] + survival.functions[, 'f2S1S3'])
  likelihood.matrix[, 05] <- matrix01[, 'infected.c'] *
    (survival.functions[, 'S1'] *
       survival.functions[, 'S2'] *
       survival.functions[, 'S3'])
  likelihood.matrix[, 06] <- sum.f3S1S2.St.Sr * h1 # recovered, died
  likelihood.matrix[, 07] <- matrix01[, 'recovered.d']
  likelihood.matrix[, 08] <- likelihood.matrix[, 6] * likelihood.matrix[, 7]
  likelihood.matrix[, 09] <- sum.f3S1S2.St.Sr # recovered, censored
  likelihood.matrix[, 10] <- matrix01[, 'recovered.c']
  likelihood.matrix[, 11] <- likelihood.matrix[, 9] * likelihood.matrix[, 10]
  likelihood.matrix[, 12] <- likelihood.matrix[, 2] +
    likelihood.matrix[, 3] +
    likelihood.matrix[, 4] +
    likelihood.matrix[, 5] +
    likelihood.matrix[, 8] +
    likelihood.matrix[, 11]
  likelihood.matrix[, 13] <- log(likelihood.matrix[, 12])
  likelihood.matrix[, 14] <- likelihood.matrix[, 13] * matrix01[, 'fq']
  likelihood.matrix[, 15] <- sum(likelihood.matrix[, 14])
  # return negative log-likelihood calculated in likelihood matrix
  # (column 15 is constant, so any row works; row 1 is used)
  neg.log.likelihood <- -(likelihood.matrix[1, 15])
}
| /R/nll_recovery.R | no_license | cran/anovir | R | false | false | 10,041 | r |
# modify input arguments so not order dependent; a1 = a1, etc...
#' Negative log-likelihood function: recovery model
#'
#' Function returning negative log-likelihood (nll) for patterns of survival in
#' infected and uninfected treatments, when infected hosts can recover from
#' infection.
#'
#' This model assumes all the hosts in an infected treatment are all initially
#' infected, and they can all potentially recover from infection. Recovered
#' hosts are assumed to only experience background mortality equivalent to that
#' experienced by matching uninfected or control individuals; no assumptions are
#' made as to whether they are still infected or infectious. It is also assumed
#' that the timing of recovery from infection is not directly observed, but an
#' individual's infected/recovery status can be determined after they have died
#' or been censored.
#'
#' The probability that an infection 'survives' over time, i.e., the host does
#' not recover from infection, is assumed to follow a probability distribution
#' which acts independently of the probability distributions determining
#' background mortality or mortality due to infection.
#'
#' This function only estimates location and scale parameters as constants, it
#' is not designed to estimate them as functions.
#'
#'
#' @param a1,b1 location & scale parameters for background mortality
#' @param a2,b2 location & scale parameters for mortality due to infection
#' @param a3,b3 location & scale parameters for how long infection 'survives'
#' @param data a data.frame with the data
#' @param d1,d2,d3 probability distributions for background mortality, mortality
#' due to infection & how long infection 'survives' ("Weibull", "Gumbel",
#' "Frechet")
#' @return numeric
#' @section Warning: requires the data to be specified in a specific format;
#' see vignette 'data format' for details
#' @examples
#' \donttest{
#' # NB the data to analyse needs to be in a data frame of a specific form
#' head(recovery_data)
#'
#' # step #1: prepare nll function for analysis
#' m01_prep_function <- function(a1, b1, a2, b2, a3, b3){
#' nll_recovery(a1, b1, a2, b2, a3, b3,
#' data = recovery_data,
#' d1 = "Weibull", d2 = "Weibull", d3 = "Weibull"
#' )}
#'
#' # step #2: send 'prep_function' to mle2 for maximum likelihood estimation,
#' # specifying starting values
#' m01 <- mle2(m01_prep_function,
#' start = list(a1 = 2, b1 = 0.5, a2 = 2, b2 = 0.5, a3 = 2, b3 = 0.5)
#' )
#'
#' summary(m01)
#'
#' # values used to simulate data were for the Weibull distribution;
#' # a1 = 2.8, b1 = 0.5, a2 = 2.2, b2 = 0.35, a3 = 2.35, b3 = 0.35
#' }
nll_recovery <- function(
  a1 = a1, b1 = b1, a2 = a2, b2 = b2, a3 = a3, b3 = b3,
  data = data, d1 = "", d2 = "", d3 = ""
  ){
  # Negative log-likelihood for the recovery model.
  # FIX: the original forwarded d1 = "", d2 = "", d3 = "" to the likelihood
  # calculation, silently discarding the distributions supplied by the caller
  # (e.g. d1 = "Weibull" in the documented example). They are now passed on.
  nll <- P_recovery_calc_likelihood_matrix(
    a1 = a1, b1 = b1, a2 = a2, b2 = b2, a3 = a3, b3 = b3,
    data = data, d1 = d1, d2 = d2, d3 = d3)
  nll
}
### subfunctions used by above ###
P_recovery_calc_survival_functions <- function(
  a1 = a1, b1 = b1, a2 = a2, b2 = b2, a3 = a3, b3 = b3,
  data = data, d1 = "", d2 = "", d3 = ""){
  # Build a (tmax x 13) matrix of survival quantities evaluated at t = 1..tmax:
  # densities (f), survival functions (S) and hazards (h) for the background
  # (1), infection (2) and recovery (3) processes, plus the three f*S*S
  # products used by the likelihood calculation.
  tmax <- max(data$t)
  out <- matrix(0, tmax, 13)
  colnames(out) <- c("t", "f1", "f2", "f3", "S1", "S2", "S3",
                     "h1", "h2", "h3", "f1S2S3", "f2S1S3", "f3S1S2")
  for (tt in seq_len(tmax)){
    z1 <- P_get_zx(tt, a1, b1, d1)
    z2 <- P_get_zx(tt, a2, b2, d2)
    z3 <- P_get_zx(tt, a3, b3, d3)
    f1 <- P_get_fx(tt, z1, b1, d1)
    f2 <- P_get_fx(tt, z2, b2, d2)
    f3 <- P_get_fx(tt, z3, b3, d3)
    S1 <- P_get_Sx(tt, z1, d1)
    S2 <- P_get_Sx(tt, z2, d2)
    S3 <- P_get_Sx(tt, z3, d3)
    h1 <- P_get_hx(tt, z1, b1, d1)
    h2 <- P_get_hx(tt, z2, b2, d2)
    h3 <- P_get_hx(tt, z3, b3, d3)
    # fill the whole row at once rather than column-by-column
    out[tt, ] <- c(tt, f1, f2, f3, S1, S2, S3, h1, h2, h3,
                   f1 * S2 * S3, f2 * S1 * S3, f3 * S1 * S2)
  }
  out
}
###
P_recovery_calc_f3S1S2 <- function(a1 = a1, b1 = b1,
                                   a2 = a2, b2 = b2,
                                   a3 = a3, b3 = b3,
                                   data = data, d1 = "", d2 = "", d3 = ""){
  # Lower-triangular (tmax x tmax) matrix of f3(u) * S1(u) * S2(u): the
  # probability density of the host recovering at time u while still alive,
  # stored in row t for every u <= t (entries with u > t stay 0).
  #
  # Improvements over the original:
  #  * f3(u)*S1(u)*S2(u) depends on u only, so it is now computed once per u
  #    instead of being recomputed inside every row t (was O(tmax^2) calls
  #    to the P_get_* helpers);
  #  * dropped 'matrix01', which was filled but never used or returned.
  tmax <- max(data$t)
  fS <- numeric(tmax)
  for (u in seq_len(tmax)){
    zu1 <- P_get_zu(u, a1, b1, d1)
    zu2 <- P_get_zu(u, a2, b2, d2)
    zu3 <- P_get_zu(u, a3, b3, d3)
    fS[u] <- P_get_fu(u, zu3, b3, d3) * P_get_Su(u, zu1, d1) * P_get_Su(u, zu2, d2)
  }
  matrix02 <- matrix(0, tmax, tmax)
  for (t in seq_len(tmax)){
    matrix02[t, 1:t] <- fS[1:t]
  }
  matrix02
}
###
P_recovery_calc_St_Sr <- function(
  a1 = a1, b1 = b1, a2 = a2, b2 = b2, a3 = a3, b3 = b3,
  data = data, d1 = "", d2 = "", d3 = ""){
  # Lower-triangular (tmax x tmax) matrix of S1(t) / S1(u): the probability a
  # recovered host survives background mortality from its recovery time u up
  # to time t (entries with u > t stay 0).
  #
  # Improvements over the original:
  #  * S1(t) and S1(u) were recomputed for every (t, u) cell; they are now
  #    precomputed once per time point;
  #  * dropped 'matrix03', which was written but never read.
  tmax <- max(data$t)
  St <- numeric(tmax)
  Su <- numeric(tmax)
  for (x in seq_len(tmax)){
    St[x] <- P_get_Sx(x, P_get_zx(x, a1, b1, d1), d1)
    Su[x] <- P_get_Su(x, P_get_zu(x, a1, b1, d1), d1)
  }
  matrix04 <- matrix(0, tmax, tmax)
  for (t in seq_len(tmax)){
    matrix04[t, 1:t] <- St[t] / Su[1:t]
  }
  matrix04
}
###
P_recovery_calc_f3S1S2_St_Sr <- function(
  a1 = a1, b1 = b1, a2 = a2, b2 = b2, a3 = a3, b3 = b3,
  data = data, d1 = "", d2 = "", d3 = ""){
  # Elementwise product: probability of recovering at time u (f3 S1 S2) times
  # the probability of then surviving background mortality from u to t
  # (S1[t] / S1[u]).
  # FIX: d1/d2/d3 are now forwarded to the sub-functions; the original passed
  # "" down, ignoring the caller's choice of distributions.
  f3S1S2 <- P_recovery_calc_f3S1S2(
    a1 = a1, b1 = b1, a2 = a2, b2 = b2, a3 = a3, b3 = b3,
    data = data, d1 = d1, d2 = d2, d3 = d3)
  St.Sr <- P_recovery_calc_St_Sr(
    a1 = a1, b1 = b1, a2 = a2, b2 = b2, a3 = a3, b3 = b3,
    data = data, d1 = d1, d2 = d2, d3 = d3)
  f3S1S2 * St.Sr
}
###
P_recovery_calc_sum_f3S1S2_St_Sr <- function(
  a1 = a1, b1 = b1, a2 = a2, b2 = b2, a3 = a3, b3 = b3,
  data = data, d1 = "", d2 = "", d3 = ""){
  # Column vector (tmax x 1): for each time t, the total probability of
  # having recovered at some u <= t and survived to t (row sums of the
  # f3S1S2 * St/Sr matrix).
  # FIX: d1/d2/d3 are now forwarded (the original passed "" down).
  # Also dropped an unused 'tmax' computation.
  f3S1S2.St.Sr <- P_recovery_calc_f3S1S2_St_Sr(
    a1 = a1, b1 = b1, a2 = a2, b2 = b2, a3 = a3, b3 = b3,
    data = data, d1 = d1, d2 = d2, d3 = d3)
  matrix(rowSums(f3S1S2.St.Sr), , 1)
}
###
P_recovery_calc_likelihood_matrix <- function(
  a1 = a1, b1 = b1, a2 = a2, b2 = b2, a3 = a3, b3 = b3,
  data = data, d1 = "", d2 = "", d3 = ""){
  # Assemble the per-observation likelihood contributions and return the
  # total negative log-likelihood (a single numeric) for the recovery model.
  #
  # Fixes vs the original:
  #  * d1/d2/d3 are forwarded to the sub-functions (the original passed "",
  #    discarding the caller's distribution choices);
  #  * dropped three intermediates (f3S1S2, St.Sr and their product) that
  #    were computed but never used here.
  #
  # NOTE(review): as in the original, the code assumes 'data' has one row
  # per time point t = 1..tmax, in order — confirm against the package's
  # documented data format.
  matrix01 <- as.matrix(data)
  survival.functions <- P_recovery_calc_survival_functions(
    a1 = a1, b1 = b1, a2 = a2, b2 = b2, a3 = a3, b3 = b3,
    data = data, d1 = d1, d2 = d2, d3 = d3)
  sum.f3S1S2.St.Sr <- P_recovery_calc_sum_f3S1S2_St_Sr(
    a1 = a1, b1 = b1, a2 = a2, b2 = b2, a3 = a3, b3 = b3,
    data = data, d1 = d1, d2 = d2, d3 = d3)
  h1 <- matrix(survival.functions[, 'h1'], , 1)
  likelihood.matrix <- matrix(0, nrow(matrix01), 15)
  likelihood.matrix[, 01] <- matrix01[, "t"]
  # uninfected treatment: died of background mortality / right-censored
  likelihood.matrix[, 02] <- matrix01[, 'control.d'] *
    survival.functions[, 'f1']
  likelihood.matrix[, 03] <- matrix01[, 'control.c'] *
    survival.functions[, 'S1']
  # still-infected hosts: died (background or infection) / censored
  likelihood.matrix[, 04] <- matrix01[, 'infected.d'] *
    (survival.functions[, 'f1S2S3'] + survival.functions[, 'f2S1S3'])
  likelihood.matrix[, 05] <- matrix01[, 'infected.c'] *
    (survival.functions[, 'S1'] *
       survival.functions[, 'S2'] *
       survival.functions[, 'S3'])
  # recovered hosts: died of background mortality / censored
  likelihood.matrix[, 06] <- sum.f3S1S2.St.Sr * h1
  likelihood.matrix[, 07] <- matrix01[, 'recovered.d']
  likelihood.matrix[, 08] <- likelihood.matrix[, 6] * likelihood.matrix[, 7]
  likelihood.matrix[, 09] <- sum.f3S1S2.St.Sr
  likelihood.matrix[, 10] <- matrix01[, 'recovered.c']
  likelihood.matrix[, 11] <- likelihood.matrix[, 9] * likelihood.matrix[, 10]
  # total contribution per observation, its log, weighted by frequency 'fq'
  likelihood.matrix[, 12] <- likelihood.matrix[, 2] +
    likelihood.matrix[, 3] +
    likelihood.matrix[, 4] +
    likelihood.matrix[, 5] +
    likelihood.matrix[, 8] +
    likelihood.matrix[, 11]
  likelihood.matrix[, 13] <- log(likelihood.matrix[, 12])
  likelihood.matrix[, 14] <- likelihood.matrix[, 13] * matrix01[, 'fq']
  likelihood.matrix[, 15] <- sum(likelihood.matrix[, 14])
  # return the negative log-likelihood
  -(likelihood.matrix[1, 15])
}
|
\name{MethylSet-class}
\Rdversion{1.1}
\docType{class}
\alias{MethylSet-class}
\alias{MethylSet}
%% generics
\alias{getMeth}
\alias{getUnmeth}
\alias{getBeta}
\alias{getM}
\alias{preprocessMethod}
%%methods
\alias{getMeth,MethylSet-method}
\alias{getUnmeth,MethylSet-method}
\alias{getBeta,MethylSet-method}
\alias{getM,MethylSet-method}
\alias{getCN,MethylSet-method}
\alias{preprocessMethod,MethylSet-method}
\alias{getManifest,MethylSet-method}
\alias{show,MethylSet-method}
\alias{initialize,MethylSet-method}
\alias{dropMethylationLoci}
\title{MethylSet instances}
\description{
This class holds preprocessed data for Illumina methylation
microarrays.
}
\usage{
## Constructor
MethylSet(Meth, Unmeth, phenoData, annotation = NULL)
## Data extraction / Accessors
\S4method{getMeth}{MethylSet}(object)
\S4method{getUnmeth}{MethylSet}(object)
\S4method{getBeta}{MethylSet}(object, type = "", offset = 0, betaThreshold = 0)
\S4method{getM}{MethylSet}(object, type = "", \dots)
\S4method{getCN}{MethylSet}(object, \dots)
\S4method{getManifest}{MethylSet}(object)
\S4method{preprocessMethod}{MethylSet}(object)
## Utilities
dropMethylationLoci(object, dropRS = TRUE, dropCH = TRUE)
}
\arguments{
\item{object}{A \code{MethylSet}.}
\item{Meth}{A matrix of methylation values (between zero and infinity)
with each row being a methylation loci and each column a sample.}
\item{Unmeth}{See the \code{Meth} argument.}
\item{phenoData}{a \code{phenoData} object.}
\item{annotation}{An annotation string, optional.}
\item{type}{How are the values calculated? For \code{getBeta} setting
\code{type="Illumina"} sets \code{offset=100} as per Genome Studio.
For \code{getM} setting \code{type=""} computes M-values as the
logarithm of \code{Meth}/\code{Unmeth}, otherwise it is computed as
the logit of \code{getBeta(object)}.}
\item{offset}{Offset in the beta ratio, see detail.}
  \item{betaThreshold}{Constrains the beta values to be in the interval
    between \code{betaThreshold} and 1-\code{betaThreshold}.}
\item{dropRS}{Should SNP probes be dropped?}
  \item{dropCH}{Should CH probes be dropped?}
  \item{\dots}{For the constructor: additional objects passed to the
    \code{eSet} constructor, in particular a \code{phenoData} slot. For
    \code{getM} these values get passed on to \code{getBeta}.}
}
\details{
This class inherits from \code{eSet}. Essentially the class is a representation of a
\code{Meth} matrix and a \code{Unmeth} matrix linked to a \code{pData} data frame.
In addition, an annotation and a preprocessMethod slot is present. The annotation slot describes
the type of array and also which annotation package to use. The preprocessMethod slot describes
the kind of preprocessing that resulted in this dataset.
A \code{MethylSet} stores \code{meth} and \code{Unmeth}. From these it is easy to compute Beta
values, defined as
\deqn{\beta = \frac{\textrm{Meth}}{\textrm{Meth} + \textrm{Unmeth} + \textrm{offset}}}{%
Beta = Meth / (Meth + Unmeth + offset)}
The offset is chosen to avoid dividing with small values. Illumina uses a default of 100.
M-values (an unfortunate bad name) are defined as
\deqn{M = \textrm{logit}(\beta) = \log(\textrm{Meth} / \textrm{Unmeth})}{%
M = logit(Beta) = log( Meth / Unmeth )}
This formula has problems if either Meth or Unmeth is zero. For this reason, we can use
  \code{betaThreshold} to make sure Beta is neither 0 nor 1, before taking the logit. What makes
  sense for the \code{offset} and \code{betaThreshold} depends crucially on how the data was
  preprocessed. Do not expect the default values to be particularly good.
}
\section{Constructor}{
Instances are constructed using the \code{MethylSet} function with the
arguments outlined above.
}
\section{Accessors}{
In the following code, \code{object} is a \code{MethylSet}.
\describe{
\item{\code{getMeth(object)}, \code{getUnmeth(object)}}{Get the
Meth or the Unmeth matrix }
\item{\code{getBeta(object)}}{Get Beta, see details.}
\item{\code{getM(object)}}{get M-values, see details.}
\item{\code{getCN(object)}}{get copy number values which are defined
as the sum of the methylation and unmethylation channel.}
\item{\code{getManifest(object)}}{get the manifest associated with
the object.}
\item{\code{preprocessMethod(object)}}{Get the preprocess method \code{character}.}
}
}
\section{Utilities}{
In the following code, \code{object} is a \code{MethylSet}.
\describe{
    \item{\code{dropMethylationLoci}(object)}{A unified interface to removing methylation loci. You
    can drop SNP probes (probes that measure SNPs, not probes containing SNPs) or CH probes (non-CpG
    methylation).}
}
}
\author{Kasper Daniel Hansen \email{khansen@jhsph.edu}}
\seealso{
\code{\linkS4class{eSet}} for the basic class structure.
Objects of this class are typically created from an
\code{\link{RGChannelSet}} using \code{\link{preprocessRaw}} or
another preprocessing function.
}
\examples{
showClass("MethylSet")
}
| /man/MethylSet-class.Rd | no_license | aleiyishi/minfi | R | false | false | 5,088 | rd | \name{MethylSet-class}
\Rdversion{1.1}
\docType{class}
\alias{MethylSet-class}
\alias{MethylSet}
%% generics
\alias{getMeth}
\alias{getUnmeth}
\alias{getBeta}
\alias{getM}
\alias{preprocessMethod}
%%methods
\alias{getMeth,MethylSet-method}
\alias{getUnmeth,MethylSet-method}
\alias{getBeta,MethylSet-method}
\alias{getM,MethylSet-method}
\alias{getCN,MethylSet-method}
\alias{preprocessMethod,MethylSet-method}
\alias{getManifest,MethylSet-method}
\alias{show,MethylSet-method}
\alias{initialize,MethylSet-method}
\alias{dropMethylationLoci}
\title{MethylSet instances}
\description{
This class holds preprocessed data for Illumina methylation
microarrays.
}
\usage{
## Constructor
MethylSet(Meth, Unmeth, phenoData, annotation = NULL)
## Data extraction / Accessors
\S4method{getMeth}{MethylSet}(object)
\S4method{getUnmeth}{MethylSet}(object)
\S4method{getBeta}{MethylSet}(object, type = "", offset = 0, betaThreshold = 0)
\S4method{getM}{MethylSet}(object, type = "", \dots)
\S4method{getCN}{MethylSet}(object, \dots)
\S4method{getManifest}{MethylSet}(object)
\S4method{preprocessMethod}{MethylSet}(object)
## Utilities
dropMethylationLoci(object, dropRS = TRUE, dropCH = TRUE)
}
\arguments{
\item{object}{A \code{MethylSet}.}
\item{Meth}{A matrix of methylation values (between zero and infinity)
with each row being a methylation loci and each column a sample.}
\item{Unmeth}{See the \code{Meth} argument.}
\item{phenoData}{a \code{phenoData} object.}
\item{annotation}{An annotation string, optional.}
\item{type}{How are the values calculated? For \code{getBeta} setting
\code{type="Illumina"} sets \code{offset=100} as per Genome Studio.
For \code{getM} setting \code{type=""} computes M-values as the
logarithm of \code{Meth}/\code{Unmeth}, otherwise it is computed as
the logit of \code{getBeta(object)}.}
\item{offset}{Offset in the beta ratio, see detail.}
\item{betaThreshold}{Constrains the beta values to be in the interval
between \code{betaThreshold} and 1-\code{betaThreshold}.}
\item{dropRS}{Should SNP probes be dropped?}
\item{dropCH}{Should CH probes be dropped?}
\item{\dots}{For the constructor: additional objects passed to the
\code{eSet} constructor, in particular a \code{phenoData} slot. For
\code{getM} these values get passed on to \code{getBeta}.}
}
\details{
This class inherits from \code{eSet}. Essentially the class is a representation of a
\code{Meth} matrix and a \code{Unmeth} matrix linked to a \code{pData} data frame.
In addition, an annotation and a preprocessMethod slot is present. The annotation slot describes
the type of array and also which annotation package to use. The preprocessMethod slot describes
the kind of preprocessing that resulted in this dataset.
A \code{MethylSet} stores \code{meth} and \code{Unmeth}. From these it is easy to compute Beta
values, defined as
\deqn{\beta = \frac{\textrm{Meth}}{\textrm{Meth} + \textrm{Unmeth} + \textrm{offset}}}{%
Beta = Meth / (Meth + Unmeth + offset)}
The offset is chosen to avoid dividing with small values. Illumina uses a default of 100.
M-values (an unfortunate bad name) are defined as
\deqn{M = \textrm{logit}(\beta) = \log(\textrm{Meth} / \textrm{Unmeth})}{%
M = logit(Beta) = log( Meth / Unmeth )}
This formula has problems if either Meth or Unmeth is zero. For this reason, we can use
\code{betaThreshold} to make sure Beta is neither 0 nor 1, before taking the logit. What makes
sense for the \code{offset} and \code{betaThreshold} depends crucially on how the data was
preprocessed. Do not expect the default values to be particularly good.
}
\section{Constructor}{
Instances are constructed using the \code{MethylSet} function with the
arguments outlined above.
}
\section{Accessors}{
In the following code, \code{object} is a \code{MethylSet}.
\describe{
\item{\code{getMeth(object)}, \code{getUnmeth(object)}}{Get the
Meth or the Unmeth matrix }
\item{\code{getBeta(object)}}{Get Beta, see details.}
\item{\code{getM(object)}}{get M-values, see details.}
\item{\code{getCN(object)}}{get copy number values which are defined
as the sum of the methylation and unmethylation channel.}
\item{\code{getManifest(object)}}{get the manifest associated with
the object.}
\item{\code{preprocessMethod(object)}}{Get the preprocess method \code{character}.}
}
}
\section{Utilities}{
In the following code, \code{object} is a \code{MethylSet}.
\describe{
\item{\code{dropMethylationLoci}(object)}{A unified interface to removing methylation loci. You
can drop SNP probes (probes that measure SNPs, not probes containing SNPs) or CH probes (non-CpG
methylation).}
}
}
\author{Kasper Daniel Hansen \email{khansen@jhsph.edu}}
\seealso{
\code{\linkS4class{eSet}} for the basic class structure.
Objects of this class are typically created from an
\code{\link{RGChannelSet}} using \code{\link{preprocessRaw}} or
another preprocessing function.
}
\examples{
showClass("MethylSet")
}
|
########
# --- Analysis set-up ---------------------------------------------------------
# Script fragment; relies on globals defined upstream (pheno.ori,
# meta.results.burden, the.GQ.AML, ...).
cancer.group<-"cancer" # columns names in pheno.ori used to get samples
control.group<-"Control" # columns names in pheno.ori used to get samples
PD.group<-c("PD") # columns names in pheno.ori used to get samples boolean are stored there
to.unwind.name<-"TOP.550"
# Fall back to the AML genotype-quality table when no cancer-specific one exists.
if(!exists("the.GQ.cancer")){the.GQ.cancer<-the.GQ.AML}
#the.QG.AML,the.QG.PD,the.QG.Controls ## the.GQ.Control
# NOTE(review): the.top is assigned twice; only the second assignment
# (top 550) takes effect — the first line is kept to switch back to "everything".
the.top<-1:dim(meta.results.burden)[1] # everything
the.top<-1:550 # top 550
to.unwind<-c(meta.results.burden[the.top,"gene"])# ,meta.results.skatO[1:the.top,"gene"])
################################################################################
################################################################################
################################################################################
################################################################################
# --- Build numeric genotype matrix from a.indel ------------------------------
# Subset to passing variants / samples, recode GATK-style genotype strings to
# allele counts, and transpose to samples x SNPs.
genotypes<-a.indel[pass,the.samples.use] ## ordered correctly for phenotypes
snp.names<-key[pass] ## GEFOS ony name with start
#### snpinfo now A different size than a.indel since added pathways!!! snpinfo[snpinfo[,"gene"]=="KNSTRN",]
snpinfo<-snpinfo.ori[snpinfo.ori[,"Name"] %in% snp.names,]
if(!exists("gene.weights")){
gene.weights.subset<-1
}else{
gene.weights.subset<-gene.weights[snpinfo.ori[,"Name"] %in% snp.names] # weight in same order as snpinfo.ori
}
snpinfo<-cbind(snpinfo,gene.weights.subset)
#snpinfo[1:5,]
sum(is.na(as.numeric(snpinfo[,"gene.weights.subset"])))
###################################################
# FIX: corrected typo "WARINING" in the diagnostic message below.
if( sum(!(snp.names %in% snpinfo.ori[,"Name"]))>0){print("WARNING snp.names not in snpinfo- unusual!")}
dim(snpinfo)
length(snp.names)
dim(genotypes)
dim(genotypes)
print("start QC")
# Recode genotype strings to allele counts; "NA" strings become real NAs first.
genotypes[genotypes=="NA"]<-NA
genotypes[genotypes=="0/0"]<-0
genotypes[genotypes=="0/1"]<-1
genotypes[genotypes=="1/1"]<-2
########### prevent any averaging
dim(genotypes)
genotypes[is.na(genotypes)]<-0 # missing calls treated as homozygous reference
dim(genotypes)
########### prevent any averaging
num.col<-dim(genotypes)[2]
num.row<-dim(genotypes)[1]
genotypes<-as.numeric(as.matrix(genotypes))
dim(genotypes)<-c(num.row,num.col)
genotypes<-t(genotypes) # samples x SNPS
colnames(genotypes)<-snp.names
rownames(genotypes)<-gsub(".GT$","",the.samples.use)
################################################################################
################################################################################
################################################################################
################################################################################
# --- Select loci/genes to "unwind" and run burden tests on them --------------
#to.unwind.name<-to.unwind[1]
#to.unwind.name<-"EVERYTHING"
# to.unwind.name<-"TOP500"
#match(net,meta.results.burden[,"gene"])
# to.unwind.name<-"SYNON_test"
# to.unwind.name<-"Pathways"
# to.unwind.name<-"ALL_significant"
# to.unwind.name<-"ALL_significant"
snpinfo.ex<-snpinfo[snpinfo[,"cluster"] %in% to.unwind,]
loci<-snpinfo[snpinfo[,"cluster"] %in% to.unwind,"Name"] # this is IDH1 not IDH1 in cluster # are the snp.names
loci<-unique(loci)
# NOTE(review): the.genes is assigned twice; only the gene-column version
# survives, after which cluster names are filtered out via clusters.wanted.
the.genes<-unique(snpinfo.ex[,"cluster"])
the.genes<-unique(snpinfo.ex[,"gene"])
the.genes<-the.genes[!(the.genes %in% clusters.wanted)]
sort(the.genes) #245 ### if used a cluster name need to do back up to (**) the.genes<-c(the.genes,"STAG2")
############repest to clean out cluster names
########### single point only
## length(loci)
## meta.results.burden[1:5,]
## loci<-meta.results.burden[1:550,"Name"]
###############
dim(genotypes)
#genotypes[1:5,1:5]
# Subset the genotype matrix to the selected loci; set missing calls to 0.
genotypes.ex<-genotypes[,loci]
dim(genotypes.ex)
genotypes.ex[is.na(genotypes.ex)]<-0
dim(genotypes.ex)
snpinfo.ex<-snpinfo[snpinfo[,"cluster"] %in% to.unwind,]
dim(snpinfo.ex)
dim(genotypes.ex)
dim(pheno)
#snpinfo.ex[1:5,]
########### single point p values
# Per-SNP ("Name") burden test, then the same aggregated per cluster.
cohort.seq.ex <- skatCohort(genotypes.ex, formula, SNPInfo = snpinfo.ex, data=pheno,aggregateBy = "Name",verbose=FALSE)
## meta.results.skat.ex<-skatMeta(cohort.seq,SNPInfo = snpinfo)
meta.results.burden.ex<-burdenMeta(cohort.seq.ex,wts=1,mafRange = c(0,1),SNPInfo = snpinfo.ex,aggregateBy = "Name")
#meta.results.burden.ex[1:5,]
#pheno[1:5,]
dim(meta.results.burden.ex)
dim(genotypes.ex)
cohort.seq.test <- skatCohort(genotypes.ex, formula, SNPInfo = snpinfo.ex, data=pheno,aggregateBy = "cluster",verbose=FALSE)
meta.results.burden.test<-burdenMeta(cohort.seq.test,wts=1,mafRange = c(0,1),SNPInfo = snpinfo.ex,aggregateBy = "cluster")
#meta.results.burden.test
## meta.results.skat.ex<-skatMeta(cohort.seq,SNPInfo = snpinfo)
#meta.results.skatO.test<-skatOMeta(cohort.seq.test,burden.wts =1,SNPInfo = snpinfo.ex,aggregateBy="cluster")
#meta.results.skatO.test
# Map the selected loci back to row positions in a.indel via the global key.
figure<- match(loci,key)
#genotypes.PD<-a.indel[figure, c("LPH-001-27_PD.GT",paste(pheno.ori[pheno.ori[,"PD"],"SAMPLE"],".GT",sep="")) ]
# Build the PD sample list by OR-ing the boolean PD.group columns of pheno.ori.
PDs.get<-rep(FALSE,times=dim(pheno.ori)[1])
for(ipd in 1:length(PD.group)){
PDs.get<-PDs.get | pheno.ori[,PD.group[ipd]]
}
PDs<-pheno.ori[PDs.get,"SAMPLE"]
# NOTE(review): the next line immediately overwrites the PD.group-derived
# list with a hard-coded AML/Asian-AML child selection — confirm intended.
PDs<-pheno.ori[pheno.ori[,"AML-Child"] | pheno.ori[,"Asian-AML-Child"] | pheno.ori[,"Asian-AML"] | pheno.ori[,"AML-NotDiagnosis-Child"] | pheno.ori[, "Asian-AML-NotDiagnosis-Child"],"SAMPLE"]
genotypes.PD<-a.indel[figure, c(paste(PDs,".GT",sep="")) ]
genotypes.PD<-t(genotypes.PD)
# Recode genotype strings to allele counts, as done for the main matrix.
genotypes.PD[genotypes.PD=="NA"]<-NA
genotypes.PD[genotypes.PD=="0/0"]<-0
genotypes.PD[genotypes.PD=="0/1"]<-1
genotypes.PD[genotypes.PD=="1/1"]<-2
# NOTE(review): pattern ".GT" is unanchored here (".GT$" is used elsewhere).
rownames(genotypes.PD)<-gsub(".GT","",rownames(genotypes.PD))
dim(genotypes.ex)
dim(genotypes.PD)
options(max.print=200)
# For each locus (column), list the carrier sample names as a comma string.
muts.in.PD<-apply(genotypes.PD,2,function(x) { paste(names(x)[x!=0 & !is.na(x)],collapse=",")})
muts.in.cases<-apply(genotypes.ex[pheno[,cancer.group],],2,function(x) { paste(names(x)[x!=0 & !is.na(x)],collapse=",")})
muts.in.controls<-apply(genotypes.ex[pheno[,control.group],],2,function(x) { paste(names(x)[x!=0 & !is.na(x)],collapse=",")})
controls<- paste(pheno[pheno[,"Control"],"SAMPLE"],".GT",sep="")
########################################################
# Pre-allocate per-locus result vectors (filled in the loop further below).
# NOTE(review): check<-16 looks like leftover debugging state.
check<-16
quality.cases<-rep("",times=length(loci))
quality.controls<-rep("",times=length(loci))
quality.PD<-rep("",times=length(loci))
depth.cases<-rep("",times=length(loci))
depth.fad.cases<-rep("",times=length(loci))
dup.cases<-rep("",times=length(loci))
depth.controls<-rep("",times=length(loci))
depth.fad.controls<-rep("",times=length(loci))
dup.controls<-rep("",times=length(loci))
depth.PD<-rep("",times=length(loci))
depth.fad.PD<-rep("",times=length(loci))
dup.PD<-rep("",times=length(loci))
a.indel.sub<-a.indel[figure,]
a.indel.stats.sub<-a.indel.stats[figure,]
########## for somatic guessing
## somatic.matrix.desc.full.sub<-somatic.matrix.desc.full[figure,]
## somatic.matrix.p.full.sub<-somatic.matrix.p.full[figure,]
somatic.cases<-rep("",times=length(loci))
somatic.PD<-rep("",times=length(loci))
somatic.p.cases<-rep("",times=length(loci))
somatic.p.PD<-rep("",times=length(loci))
pval.cases<-rep("",times=length(loci))
pval.PD<-rep("",times=length(loci))
# Optionally recover per-genotype p-values for cancer and PD samples.
# FIX: the condition used exits() — not an R function — instead of exists();
# as written it raised 'could not find function "exits"' at runtime.
if(exists("geno.pvalues")){
posns<-match(rownames(a.indel.sub),rownames(geno.pvalues)) # rownames(geno.pvalues)[1:100] key[good.quality][1:100]
missing<-is.na(posns)
sum(missing)
sum(!missing)
recovered.pvalues<-geno.pvalues[posns,c(pheno.ori[pheno.ori[,"cancer"],"SAMPLE"], pheno.ori[pheno.ori[,"PD"],"SAMPLE"])]
colnames(recovered.pvalues)<-paste(colnames(recovered.pvalues),"PVAL",sep=".")
recovered.pvalues[1:5,1:10]
}
# a.indel.stats.sub[1:5,1:20]
# dim(a.indel.sub)
# For each selected locus, collect per-carrier genotype quality (GQ), allele
# depth (AD), filtered allele depth (FAD), duplicate metrics (DUP) and — when
# genotype p-values are available — -log10 p-values, separately for the PD,
# case and control carrier lists built above.
# FIX: two inner conditions used exits() (not an R function) instead of
# exists(); as written they raised 'could not find function "exits"'.
check<-1
for(check in 1:length(loci)){
posn<-check
if(muts.in.PD[check]!=""){ # locus has PD carriers
#the.gt<-paste(unlist(strsplit(muts.in.cases[check],split=",")),"GT",sep=".")
the.gq<-paste(unlist(strsplit(muts.in.PD[check],split=",")),"GQ",sep=".")
quality.PD[check]<-paste(a.indel.sub[posn,the.gq],collapse=",")
the.ad<-paste(unlist(strsplit(muts.in.PD[check],split=",")),"AD",sep=".")
depth.PD[check]<-paste(a.indel.sub[posn,the.ad],collapse=";")
the.fad<-paste(unlist(strsplit(muts.in.PD[check],split=",")),"FAD",sep=".")
depth.fad.PD[check]<-paste(a.indel.stats.sub[posn,the.fad],collapse=";")
the.dup<-paste(unlist(strsplit(muts.in.PD[check],split=",")),"DUP",sep=".")
dup.PD[check]<-paste(a.indel.stats.sub[posn,the.dup],collapse=";")
if(exists("geno.pvalues")){ # FIX: was exits()
the.pval<-paste(unlist(strsplit(muts.in.PD[check],split=",")),"PVAL",sep=".")
pval.PD[check]<-paste(-1*log10(as.numeric(recovered.pvalues[posn,the.pval])),collapse=";")
}
## the.ad.soma<-paste(unlist(strsplit(muts.in.PD[check],split=",")),"GT",sep=".")
## somatic.PD[check]<-paste(somatic.matrix.desc.full.sub[posn,the.ad.soma],collapse=";")
## the.ad.soma<-paste(unlist(strsplit(muts.in.PD[check],split=",")),"GT",sep=".")
## somatic.p.PD[check]<-paste(signif(somatic.matrix.p.full.sub[posn,the.ad.soma],digits=4),collapse=";")
a.indel[posn,the.gq]
## a.indel[posn,the.gt]
## a.indel[posn,the.dp]
}
if(muts.in.cases[check]!=""){ # locus has case carriers
#the.gt<-paste(unlist(strsplit(muts.in.cases[check],split=",")),"GT",sep=".")
the.gq<-paste(unlist(strsplit(muts.in.cases[check],split=",")),"GQ",sep=".")
quality.cases[check]<-paste(a.indel.sub[posn,the.gq],collapse=",")
the.ad<-paste(unlist(strsplit(muts.in.cases[check],split=",")),"AD",sep=".")
depth.cases[check]<-paste(a.indel.sub[posn,the.ad],collapse=";")
the.fad<-paste(unlist(strsplit(muts.in.cases[check],split=",")),"FAD",sep=".")
depth.fad.cases[check]<-paste(a.indel.stats.sub[posn,the.fad],collapse=";")
the.dup<-paste(unlist(strsplit(muts.in.cases[check],split=",")),"DUP",sep=".")
dup.cases[check]<-paste(a.indel.stats.sub[posn,the.dup],collapse=";")
if(exists("geno.pvalues")){ # FIX: was exits()
the.pval<-paste(unlist(strsplit(muts.in.cases[check],split=",")),"PVAL",sep=".")
pval.cases[check]<-paste(-1*log10(as.numeric(recovered.pvalues[posn,the.pval])),collapse=";")
}
## the.ad.soma<-paste(unlist(strsplit(muts.in.cases[check],split=",")),"GT",sep=".")
## somatic.cases[check]<-paste(somatic.matrix.desc.full.sub[posn,the.ad.soma],collapse=";")
## the.ad.soma<-paste(unlist(strsplit(muts.in.cases[check],split=",")),"GT",sep=".")
## somatic.p.cases[check]<-paste(signif(somatic.matrix.p.full.sub[posn,the.ad.soma],digits=4),collapse=";")
a.indel[posn,the.gq]
## a.indel[posn,the.gt]
## a.indel[posn,the.dp]
}
if(muts.in.controls[check]!=""){ # locus has control carriers (no p-values collected)
#the.gt<-paste(unlist(strsplit(muts.in.controls[check],split=",")),"GT",sep=".")
the.gq<-paste(unlist(strsplit(muts.in.controls[check],split=",")),"GQ",sep=".")
quality.controls[check]<-paste(a.indel.sub[posn,the.gq],collapse=",")
the.ad<-paste(unlist(strsplit(muts.in.controls[check],split=",")),"AD",sep=".")
depth.controls[check]<-paste(a.indel.sub[posn,the.ad],collapse=";")
the.fad<-paste(unlist(strsplit(muts.in.controls[check],split=",")),"FAD",sep=".")
depth.fad.controls[check]<-paste(a.indel.stats.sub[posn,the.fad],collapse=";")
the.dup<-paste(unlist(strsplit(muts.in.controls[check],split=",")),"DUP",sep=".")
dup.controls[check]<-paste(a.indel.stats.sub[posn,the.dup],collapse=";")
a.indel[posn,the.gq]
## a.indel[posn,the.gt]
## a.indel[posn,the.dp]
}
} # end check
##########################################################################
#figure
length(figure)
dim(meta.results.burden.ex)
length(muts.in.cases)
length(muts.in.controls)
#pass[figure]
#help[figure,]
## muts.in.cases[1:10]
## x<-muts.in.cases[6]
# Helper: for each comma-separated sample list, tabulate how many of those
# samples fall into each level of a pheno.ori column (e.g. capture kit,
# aligner, sample source). Returns "" for empty lists; otherwise strings of
# the form "level1=n1 ;level2=n2".
# Refactor: the original repeated this anonymous-apply closure six times
# (cases/PD x capture/Aligner/sample.Source); behaviour is unchanged.
tally.samples.by.column<-function(sample.lists,column){
apply(as.matrix(sample.lists),1,function(x){
if(x!=""){
x<-unlist(strsplit(x,split=","))
x<-pheno.ori[pheno.ori[,"SAMPLE"] %in% x,column]
x<-table(x)
x<-x[x!=0]
x<-paste(paste(names(x),x,sep="="),collapse=" ;")
}else{x<-""}
x
}
)
}
if("capture" %in% colnames(pheno.ori)){
capture.counts.cases<-tally.samples.by.column(muts.in.cases,"capture")
capture.counts.PD<-tally.samples.by.column(muts.in.PD,"capture")
}# capture present
if("Aligner" %in% colnames(pheno.ori)){
Aligner.counts.cases<-tally.samples.by.column(muts.in.cases,"Aligner")
Aligner.counts.PD<-tally.samples.by.column(muts.in.PD,"Aligner")
}# aligner present
if("sample.Source" %in% colnames(pheno.ori)){
source.counts.cases<-tally.samples.by.column(muts.in.cases,"sample.Source")
source.counts.PD<-tally.samples.by.column(muts.in.PD,"sample.Source")
} # sample.source present
# Combine into one annotation block only when all three columns exist.
if(sum( c("capture","Aligner","sample.Source") %in% colnames(pheno.ori))==3){
extra.lib.info.cases<-cbind(capture.counts.cases,Aligner.counts.cases,source.counts.cases)
extra.lib.info.PD<-cbind(capture.counts.PD,Aligner.counts.PD,source.counts.PD)
}else{
extra.lib.info.cases<-{}
extra.lib.info.PD<-{}
}
################################ end counting in
#toString(colnames(a.indel)[c(1:6,8,11,16,28,7,30,34,35,36,37:42,43,14,32,33)])
#colnames(a.indel)[1:60]
ann.cols<-c("chr","start","end","REF","ALT","TYPE","refGene::type","knownGene::type","ensGene::type","Gene.Names","Genes.mentioned.at.ASH","refGene::location","knownGene::location","ensGene::location","OMIM (Gene::Status::OMIM::description::disease)","Consequence.Embl","Uploaded_variation.Embl","Gene.Embl","Feature.Embl", "Protein_position.Embl", "Amino_acids.Embl" , "ensGene::type","ID::maf","FILTER")# ,"rs.id")
annotations<-a.indel[,ann.cols]
dim(annotations)
dim(help)
dim(summary.geno.extra)
dim(a.indel)
dim(poss.model)
length(quality.cases)
length(figure)
dim(meta.results.burden.ex)
gerp.scores<-a.indel[,"gerp.scores"]
#sum(meta.results.burden.ex[,"gene"]!=loci)
## colnames(a.indel)[1:50]
## key[grep("chr17",key)[1:100]]
## grep("chr17:41197708",key)
## key[grep("10088407",key)]
#out<-cbind(meta.results.burden.ex,a.indel[figure,c(1:6,16,28,7,30,34,37:42,43)],summary.geno.extra[figure,],high.missing[figure,],help[figure,])
## out<-cbind(meta.results.burden.ex,a.indel[figure,c(1:6,16,28,7,30,34,37:42,43,14,32,33)],summary.geno.extra[figure,c("GENO.AML","GENO.Control","GENO.AML.filt","GENO.Control.filt")],high.missing[figure,])
## summary.geno.extra[figure,]
## annotations[figure,]
## help[figure,]
dim(meta.results.burden.ex)
if(!exists("summary.geno.extra.ori")){summary.geno.extra.ori<-summary.geno.extra}
if(is.null(dim(summary.geno.extra.ori))){summary.geno.extra.ori<-summary.geno.extra}
## if(!exists("pass.old")){pass.old<-pass}
## if(!exists("pass.new")){pass.new<-pass}
#out<-cbind(meta.results.burden.ex,a.indel[figure,c(1:6,16,43,28,7,30,34,37:42)],summary.geno.extra[figure,c("GENO.AML","GENO.Control","GENO.AML.filt","GENO.Control.filt")],help[figure,],muts.in.cases,muts.in.controls)
a.functions<-a.indel[,c("PolyPhen.scores","SIFT.scores","PolyPhen.desc","SIFT.desc")]
posns<-match(key,filt.key)
missing<-is.na(posns)
sum(missing)
filt.sub<-filt[posns,]
if(types[itypes]=="sliding.window"){ ### att the cluster name:
posns<-match(meta.results.burden.ex[,"gene"],snpinfo.sliding[,"Name"])
missing<-is.na(posns)
sum(missing)
the.window<-snpinfo.sliding[posns,"cluster"]
meta.results.burden.ex<-cbind(the.window,meta.results.burden.ex)
}
all.GQ<-cbind(the.QG.cancer,the.QG.PD,the.QG.Controls)
colnames(all.GQ)<-paste(colnames(all.GQ),"GQ",sep=".")
# Build the per-variant library-enrichment p-value matrix, but only when all
# of the bias-test vectors are available in the workspace.
# BUG FIX: the original called the non-existent function exits(); even spelled
# exists() the call would be wrong, because exists() accepts a single name.
# The cbind() below needs every vector, so require all of them to exist.
abundance.vars<-c("test.nextera","test.trueSeq","test.bwa","test.novoalign",
                  "test.PD.nextera","test.PD.trueSeq","test.PD.bwa","test.PD.novoalign")
if(all(vapply(abundance.vars, exists, logical(1)))){
abundance<-cbind(test.nextera,test.trueSeq,test.bwa,test.novoalign,test.PD.nextera,test.PD.trueSeq,test.PD.bwa,test.PD.novoalign)
abundance[1:5,]
colnames(abundance)<-paste(colnames(abundance),"Pval",sep=".")
colnames(abundance)<-gsub("^test","EnRiched",colnames(abundance))
}else{
abundance<-{} # NULL: downstream cbind() drops it silently
}
#truth.table<-cbind(GQ.cancer.pass,GQ.Control.pass,novoalign.bias,bwa.bias,trueSeq.bias,nextera.bias)
truth.table<-cbind(GQ.cancer.pass,GQ.Control.pass)
enum<-1:dim(meta.results.burden.ex)[1]
## out<-cbind(enum,meta.results.burden.ex,a.functions[figure,],gerp.scores[figure],annotations[figure,],maf.lt.all[figure,],is.benign.missense[figure],annotations[figure,],summary.geno.extra[figure,colnames(summary.geno.extra)[grep("^GENO",colnames(summary.geno.extra))]], filt.sub[figure,c("FILTER_SUMMARY","SUMMARY_CALLED","SUMMARY_NOT_CALLED")],pass[figure],pass.all.cohorts[figure],test.nextera[figure],test.trueSeq[figure],test.bwa[figure],test.novoalign[figure],test.PD.nextera[figure],test.PD.trueSeq[figure],test.PD.bwa[figure],test.PD.novoalign[figure],alt.counts.thresh.4.rare.in.Controls[figure],all.GQ[figure,],abundance[figure,],truth.table[figure,],help[figure,],high.missing.table[figure,],poss.model[figure,],poss.model.lib[figure,],muts.in.cases,somatic.cases,somatic.p.cases,quality.cases,depth.fad.cases,depth.cases,dup.cases,extra.lib.info.cases,muts.in.PD,somatic.PD,somatic.p.PD,quality.PD,depth.fad.PD,depth.PD,extra.lib.info.PD,muts.in.controls,quality.controls,depth.fad.controls,depth.controls,dup.controls,summary.geno.extra.ori[figure,colnames(summary.geno.extra.ori)[grep("^GENO",colnames(summary.geno.extra.ori))]]) ### use for out
out<-cbind(enum,meta.results.burden.ex,a.functions[figure,],gerp.scores[figure],annotations[figure,],maf.lt.all[figure,],is.benign.missense[figure],annotations[figure,],summary.geno.extra[figure,colnames(summary.geno.extra)[grep("^GENO",colnames(summary.geno.extra))]], filt.sub[figure,c("FILTER_SUMMARY","SUMMARY_CALLED","SUMMARY_NOT_CALLED")],pass[figure],pass.all.cohorts[figure],test.nextera[figure],test.trueSeq[figure],test.bwa[figure],test.novoalign[figure],test.PD.nextera[figure],test.PD.trueSeq[figure],test.PD.bwa[figure],test.PD.novoalign[figure],alt.counts.thresh.4.rare.in.Controls[figure],all.GQ[figure,],abundance[figure,],truth.table[figure,],help[figure,],high.missing.table[figure,],poss.model[figure,],poss.model.lib[figure,],muts.in.cases,somatic.cases,somatic.p.cases,quality.cases,depth.fad.cases,depth.cases,dup.cases,extra.lib.info.cases,muts.in.PD,somatic.PD,somatic.p.PD,quality.PD,depth.fad.PD,depth.PD,extra.lib.info.PD,muts.in.controls,quality.controls,depth.fad.controls,depth.controls,dup.controls,summary.geno.extra.ori[figure,colnames(summary.geno.extra.ori)[grep("^GENO",colnames(summary.geno.extra.ori))]]) ### use for out
#all.data[figure,]
#out<-cbind(meta.results.burden.ex,annotations[figure,],muts.in.cases,muts.in.controls)
dim(out)
#out[,1:13]
# help["chr7:150700484:150700484:G:A:snp",]
## table(out[,"refGene::location"])
## table(out[,"Consequence.Embl"]) # to.unwind.name<-"IDH"
getwd()
setwd(analysis.dir)
paste(paste(to.unwind,collapse="."))
paste(to.unwind.name,collapse=".")
paste(paste(to.unwind.name,collapse="."),p,"GENOTYPE.conponents.","SkatO","clusters",snap.file,"txt",sep=".")
order.by<-order(out[,"p"],decreasing=FALSE)
#enum<-1:dim(meta.results.burden.ex)[1]
out[order.by,][1:10,1:10]
setwd(analysis.dir)
write.table(out[order.by,],file=paste(paste(to.unwind.name,collapse="."),p,"GENOTYPE.conponents.",snap.file,"txt",sep="."),col.names=TRUE,row.names=FALSE,sep="\t",quote=FALSE)
getwd()
| /genotype.components.4.skatmeta.LEO.r | no_license | achalneupane/rcodes | R | false | false | 19,696 | r |
######## Configuration: phenotype groups and which top-ranked genes to unwind
cancer.group<-"cancer" # column name in pheno.ori flagging the case samples
control.group<-"Control" # column name in pheno.ori flagging the control samples
PD.group<-c("PD") # column name(s) in pheno.ori holding boolean PD-sample flags
to.unwind.name<-"TOP.550" # label used in the output file name
if(!exists("the.GQ.cancer")){the.GQ.cancer<-the.GQ.AML} # fall back to the AML GQ table when no cancer-specific one was built
#the.QG.AML,the.QG.PD,the.QG.Controls ## the.GQ.Control
the.top<-1:dim(meta.results.burden)[1] # everything
the.top<-1:550 # top 550 -- NOTE(review): this overwrites the line above, so only the top 550 genes are used
to.unwind<-c(meta.results.burden[the.top,"gene"])# ,meta.results.skatO[1:the.top,"gene"])
################################################################################
################################################################################
################################################################################
################################################################################
# Extract the genotype columns for the samples in use, restricted to variants
# that passed filtering; snpinfo is subset to the same variant set.
genotypes<-a.indel[pass,the.samples.use] ## columns ordered to match the phenotype table
snp.names<-key[pass] ## variant keys (chr:start:end:ref:alt:type)
#### snpinfo now A different size than a.indel since added pathways!!! snpinfo[snpinfo[,"gene"]=="KNSTRN",]
snpinfo<-snpinfo.ori[snpinfo.ori[,"Name"] %in% snp.names,]
# Optional per-variant weights; default to the scalar 1 (recycled by cbind)
# when no gene.weights vector was defined upstream.
if(!exists("gene.weights")){
gene.weights.subset<-1
}else{
gene.weights.subset<-gene.weights[snpinfo.ori[,"Name"] %in% snp.names] # same order as snpinfo.ori
}
snpinfo<-cbind(snpinfo,gene.weights.subset)
#snpinfo[1:5,]
# Diagnostic: how many weights fail numeric coercion (should be 0)
sum(is.na(as.numeric(snpinfo[,"gene.weights.subset"])))
###################################################/media/scratch/software/matlab/network.lic
# Sanity check: every passing variant should be present in snpinfo.ori
if( sum(!(snp.names %in% snpinfo.ori[,"Name"]))>0){print("WARINING snp.names not in snpinfo- unusual!")}
dim(snpinfo)
length(snp.names)
dim(genotypes)
dim(genotypes)
# Recode VCF-style genotype strings to numeric allele dosages (0/1/2) and
# reshape into a numeric samples-x-SNPs matrix for skatMeta.
print("start QC")
genotypes[genotypes=="NA"]<-NA
genotypes[genotypes=="0/0"]<-0
genotypes[genotypes=="0/1"]<-1
genotypes[genotypes=="1/1"]<-2
########### prevent any averaging: treat missing genotypes as homozygous reference
dim(genotypes)
genotypes[is.na(genotypes)]<-0
dim(genotypes)
########### prevent any averaging
# Coercing through as.numeric() drops the dim attribute, so capture the
# dimensions first and restore them afterwards.
num.col<-dim(genotypes)[2]
num.row<-dim(genotypes)[1]
genotypes<-as.numeric(as.matrix(genotypes))
dim(genotypes)<-c(num.row,num.col)
genotypes<-t(genotypes) # samples x SNPS
colnames(genotypes)<-snp.names
# NOTE(review): the '.' in ".GT$" is a regex wildcard; "\\.GT$" would be stricter
rownames(genotypes)<-gsub(".GT$","",the.samples.use)
################################################################################
################################################################################
################################################################################
################################################################################
#to.unwind.name<-to.unwind[1]
#to.unwind.name<-"EVERYTHING"
# to.unwind.name<-"TOP500"
#match(net,meta.results.burden[,"gene"])
# to.unwind.name<-"SYNON_test"
# to.unwind.name<-"Pathways"
# to.unwind.name<-"ALL_significant"
# to.unwind.name<-"ALL_significant"
snpinfo.ex<-snpinfo[snpinfo[,"cluster"] %in% to.unwind,]
loci<-snpinfo[snpinfo[,"cluster"] %in% to.unwind,"Name"] # this is IDH1 not IDH1 in cluster # are the snp.names
loci<-unique(loci)
the.genes<-unique(snpinfo.ex[,"cluster"])
the.genes<-unique(snpinfo.ex[,"gene"])
the.genes<-the.genes[!(the.genes %in% clusters.wanted)]
sort(the.genes) #245 ### if used a cluster name need to do back up to (**) the.genes<-c(the.genes,"STAG2")
############repest to clean out cluster names
########### single point only
## length(loci)
## meta.results.burden[1:5,]
## loci<-meta.results.burden[1:550,"Name"]
###############
dim(genotypes)
#genotypes[1:5,1:5]
genotypes.ex<-genotypes[,loci]
dim(genotypes.ex)
genotypes.ex[is.na(genotypes.ex)]<-0
dim(genotypes.ex)
snpinfo.ex<-snpinfo[snpinfo[,"cluster"] %in% to.unwind,]
dim(snpinfo.ex)
dim(genotypes.ex)
dim(pheno)
#snpinfo.ex[1:5,]
########### single point p values
cohort.seq.ex <- skatCohort(genotypes.ex, formula, SNPInfo = snpinfo.ex, data=pheno,aggregateBy = "Name",verbose=FALSE)
## meta.results.skat.ex<-skatMeta(cohort.seq,SNPInfo = snpinfo)
meta.results.burden.ex<-burdenMeta(cohort.seq.ex,wts=1,mafRange = c(0,1),SNPInfo = snpinfo.ex,aggregateBy = "Name")
#meta.results.burden.ex[1:5,]
#pheno[1:5,]
dim(meta.results.burden.ex)
dim(genotypes.ex)
cohort.seq.test <- skatCohort(genotypes.ex, formula, SNPInfo = snpinfo.ex, data=pheno,aggregateBy = "cluster",verbose=FALSE)
meta.results.burden.test<-burdenMeta(cohort.seq.test,wts=1,mafRange = c(0,1),SNPInfo = snpinfo.ex,aggregateBy = "cluster")
#meta.results.burden.test
## meta.results.skat.ex<-skatMeta(cohort.seq,SNPInfo = snpinfo)
#meta.results.skatO.test<-skatOMeta(cohort.seq.test,burden.wts =1,SNPInfo = snpinfo.ex,aggregateBy="cluster")
#meta.results.skatO.test
# Row indices of the selected loci in the full variant table.
figure<- match(loci,key)
#genotypes.PD<-a.indel[figure, c("LPH-001-27_PD.GT",paste(pheno.ori[pheno.ori[,"PD"],"SAMPLE"],".GT",sep="")) ]
# Collect the PD samples flagged by the PD.group boolean column(s).
PDs.get<-rep(FALSE,times=dim(pheno.ori)[1])
for(ipd in 1:length(PD.group)){
PDs.get<-PDs.get | pheno.ori[,PD.group[ipd]]
}
PDs<-pheno.ori[PDs.get,"SAMPLE"]
# NOTE(review): the next line overwrites the PDs selection computed above,
# replacing the PD.group-driven choice with a hard-coded set of AML columns.
PDs<-pheno.ori[pheno.ori[,"AML-Child"] | pheno.ori[,"Asian-AML-Child"] | pheno.ori[,"Asian-AML"] | pheno.ori[,"AML-NotDiagnosis-Child"] | pheno.ori[, "Asian-AML-NotDiagnosis-Child"],"SAMPLE"]
# Extract and recode the PD genotypes at the selected loci (SNPs x samples,
# then transposed to samples x SNPs).
genotypes.PD<-a.indel[figure, c(paste(PDs,".GT",sep="")) ]
genotypes.PD<-t(genotypes.PD)
genotypes.PD[genotypes.PD=="NA"]<-NA
genotypes.PD[genotypes.PD=="0/0"]<-0
genotypes.PD[genotypes.PD=="0/1"]<-1
genotypes.PD[genotypes.PD=="1/1"]<-2
rownames(genotypes.PD)<-gsub(".GT","",rownames(genotypes.PD))
dim(genotypes.ex)
dim(genotypes.PD)
options(max.print=200)
muts.in.PD<-apply(genotypes.PD,2,function(x) { paste(names(x)[x!=0 & !is.na(x)],collapse=",")})
muts.in.cases<-apply(genotypes.ex[pheno[,cancer.group],],2,function(x) { paste(names(x)[x!=0 & !is.na(x)],collapse=",")})
muts.in.controls<-apply(genotypes.ex[pheno[,control.group],],2,function(x) { paste(names(x)[x!=0 & !is.na(x)],collapse=",")})
controls<- paste(pheno[pheno[,"Control"],"SAMPLE"],".GT",sep="")
########################################################
check<-16
quality.cases<-rep("",times=length(loci))
quality.controls<-rep("",times=length(loci))
quality.PD<-rep("",times=length(loci))
depth.cases<-rep("",times=length(loci))
depth.fad.cases<-rep("",times=length(loci))
dup.cases<-rep("",times=length(loci))
depth.controls<-rep("",times=length(loci))
depth.fad.controls<-rep("",times=length(loci))
dup.controls<-rep("",times=length(loci))
depth.PD<-rep("",times=length(loci))
depth.fad.PD<-rep("",times=length(loci))
dup.PD<-rep("",times=length(loci))
a.indel.sub<-a.indel[figure,]
a.indel.stats.sub<-a.indel.stats[figure,]
########## for somatic guessing
## somatic.matrix.desc.full.sub<-somatic.matrix.desc.full[figure,]
## somatic.matrix.p.full.sub<-somatic.matrix.p.full[figure,]
somatic.cases<-rep("",times=length(loci))
somatic.PD<-rep("",times=length(loci))
somatic.p.cases<-rep("",times=length(loci))
somatic.p.PD<-rep("",times=length(loci))
pval.cases<-rep("",times=length(loci))
pval.PD<-rep("",times=length(loci))
# Pull per-sample genotype QC p-values for the cancer and PD samples at the
# selected loci, when a geno.pvalues matrix was produced upstream.
# BUG FIX: the original tested exits("geno.pvalues") -- exits() does not
# exist, so this branch always aborted with "could not find function".
if(exists("geno.pvalues")){
posns<-match(rownames(a.indel.sub),rownames(geno.pvalues)) # rownames(geno.pvalues)[1:100] key[good.quality][1:100]
missing<-is.na(posns)
sum(missing)
sum(!missing)
recovered.pvalues<-geno.pvalues[posns,c(pheno.ori[pheno.ori[,"cancer"],"SAMPLE"], pheno.ori[pheno.ori[,"PD"],"SAMPLE"])]
colnames(recovered.pvalues)<-paste(colnames(recovered.pvalues),"PVAL",sep=".")
recovered.pvalues[1:5,1:10]
}
# a.indel.stats.sub[1:5,1:20]
# dim(a.indel.sub)
check<-1
# For every selected locus, collect per-carrier quality (GQ), read depth
# (AD/FAD), duplicate-rate (DUP) and optional genotype p-value strings for
# the PD, case and control carriers identified in muts.in.*.
# BUG FIXES: the two exits("geno.pvalues") guards are now exists(); the loop
# uses seq_along() so an empty loci vector no longer iterates over c(1, 0).
for(check in seq_along(loci)){
posn<-check
if(muts.in.PD[check]!=""){
#the.gt<-paste(unlist(strsplit(muts.in.cases[check],split=",")),"GT",sep=".")
the.gq<-paste(unlist(strsplit(muts.in.PD[check],split=",")),"GQ",sep=".")
quality.PD[check]<-paste(a.indel.sub[posn,the.gq],collapse=",")
the.ad<-paste(unlist(strsplit(muts.in.PD[check],split=",")),"AD",sep=".")
depth.PD[check]<-paste(a.indel.sub[posn,the.ad],collapse=";")
the.fad<-paste(unlist(strsplit(muts.in.PD[check],split=",")),"FAD",sep=".")
depth.fad.PD[check]<-paste(a.indel.stats.sub[posn,the.fad],collapse=";")
the.dup<-paste(unlist(strsplit(muts.in.PD[check],split=",")),"DUP",sep=".")
dup.PD[check]<-paste(a.indel.stats.sub[posn,the.dup],collapse=";")
if(exists("geno.pvalues")){ # was exits(): always errored before
the.pval<-paste(unlist(strsplit(muts.in.PD[check],split=",")),"PVAL",sep=".")
pval.PD[check]<-paste(-1*log10(as.numeric(recovered.pvalues[posn,the.pval])),collapse=";")
}
## the.ad.soma<-paste(unlist(strsplit(muts.in.PD[check],split=",")),"GT",sep=".")
## somatic.PD[check]<-paste(somatic.matrix.desc.full.sub[posn,the.ad.soma],collapse=";")
## the.ad.soma<-paste(unlist(strsplit(muts.in.PD[check],split=",")),"GT",sep=".")
## somatic.p.PD[check]<-paste(signif(somatic.matrix.p.full.sub[posn,the.ad.soma],digits=4),collapse=";")
a.indel[posn,the.gq]
## a.indel[posn,the.gt]
## a.indel[posn,the.dp]
}
if(muts.in.cases[check]!=""){
#the.gt<-paste(unlist(strsplit(muts.in.cases[check],split=",")),"GT",sep=".")
the.gq<-paste(unlist(strsplit(muts.in.cases[check],split=",")),"GQ",sep=".")
quality.cases[check]<-paste(a.indel.sub[posn,the.gq],collapse=",")
the.ad<-paste(unlist(strsplit(muts.in.cases[check],split=",")),"AD",sep=".")
depth.cases[check]<-paste(a.indel.sub[posn,the.ad],collapse=";")
the.fad<-paste(unlist(strsplit(muts.in.cases[check],split=",")),"FAD",sep=".")
depth.fad.cases[check]<-paste(a.indel.stats.sub[posn,the.fad],collapse=";")
the.dup<-paste(unlist(strsplit(muts.in.cases[check],split=",")),"DUP",sep=".")
dup.cases[check]<-paste(a.indel.stats.sub[posn,the.dup],collapse=";")
if(exists("geno.pvalues")){ # was exits(): always errored before
the.pval<-paste(unlist(strsplit(muts.in.cases[check],split=",")),"PVAL",sep=".")
pval.cases[check]<-paste(-1*log10(as.numeric(recovered.pvalues[posn,the.pval])),collapse=";")
}
## the.ad.soma<-paste(unlist(strsplit(muts.in.cases[check],split=",")),"GT",sep=".")
## somatic.cases[check]<-paste(somatic.matrix.desc.full.sub[posn,the.ad.soma],collapse=";")
## the.ad.soma<-paste(unlist(strsplit(muts.in.cases[check],split=",")),"GT",sep=".")
## somatic.p.cases[check]<-paste(signif(somatic.matrix.p.full.sub[posn,the.ad.soma],digits=4),collapse=";")
a.indel[posn,the.gq]
## a.indel[posn,the.gt]
## a.indel[posn,the.dp]
}
if(muts.in.controls[check]!=""){
#the.gt<-paste(unlist(strsplit(muts.in.controls[check],split=",")),"GT",sep=".")
the.gq<-paste(unlist(strsplit(muts.in.controls[check],split=",")),"GQ",sep=".")
quality.controls[check]<-paste(a.indel.sub[posn,the.gq],collapse=",")
the.ad<-paste(unlist(strsplit(muts.in.controls[check],split=",")),"AD",sep=".")
depth.controls[check]<-paste(a.indel.sub[posn,the.ad],collapse=";")
the.fad<-paste(unlist(strsplit(muts.in.controls[check],split=",")),"FAD",sep=".")
depth.fad.controls[check]<-paste(a.indel.stats.sub[posn,the.fad],collapse=";")
the.dup<-paste(unlist(strsplit(muts.in.controls[check],split=",")),"DUP",sep=".")
dup.controls[check]<-paste(a.indel.stats.sub[posn,the.dup],collapse=";")
a.indel[posn,the.gq]
## a.indel[posn,the.gt]
## a.indel[posn,the.dp]
}
} # end check
##########################################################################
#figure
length(figure)
dim(meta.results.burden.ex)
length(muts.in.cases)
length(muts.in.controls)
#pass[figure]
#help[figure,]
## muts.in.cases[1:10]
## x<-muts.in.cases[6]
# Per-variant tally of the capture kits used by the mutated case / PD
# samples, formatted as "kit=count" pairs joined by " ;"; "" when no carrier.
if("capture" %in% colnames(pheno.ori)){
# Local helper: tabulate pheno.ori$capture over each comma-separated sample
# list. Replaces the previous copy/paste duplication of the apply() body.
count.capture<-function(mut.list){
apply(as.matrix(mut.list),1,function(x){
if(x!=""){
ids<-unlist(strsplit(x,split=","))
cnt<-table(pheno.ori[pheno.ori[,"SAMPLE"] %in% ids,"capture"])
cnt<-cnt[cnt!=0]
x<-paste(paste(names(cnt),cnt,sep="="),collapse=" ;")
}else{x<-""}
x
}
)
}
capture.counts.cases<-count.capture(muts.in.cases)
capture.counts.PD<-count.capture(muts.in.PD)
}# capture present
# Per-variant tally of the aligners used by the mutated case / PD samples,
# formatted as "aligner=count" pairs joined by " ;"; "" when no carrier.
if("Aligner" %in% colnames(pheno.ori)){
# Local helper: tabulate pheno.ori$Aligner over each comma-separated sample
# list. Replaces the previous copy/paste duplication of the apply() body.
count.aligner<-function(mut.list){
apply(as.matrix(mut.list),1,function(x){
if(x!=""){
ids<-unlist(strsplit(x,split=","))
cnt<-table(pheno.ori[pheno.ori[,"SAMPLE"] %in% ids,"Aligner"])
cnt<-cnt[cnt!=0]
x<-paste(paste(names(cnt),cnt,sep="="),collapse=" ;")
}else{x<-""}
x
}
)
}
Aligner.counts.cases<-count.aligner(muts.in.cases)
Aligner.counts.PD<-count.aligner(muts.in.PD)
}# aligner present
# Per-variant tally of the sample sources for the mutated case / PD samples,
# formatted as "source=count" pairs joined by " ;"; "" when no carrier.
if("sample.Source" %in% colnames(pheno.ori)){
# Local helper: tabulate pheno.ori$sample.Source over each comma-separated
# sample list. Replaces the previous copy/paste duplication.
count.source<-function(mut.list){
apply(as.matrix(mut.list),1,function(x){
if(x!=""){
ids<-unlist(strsplit(x,split=","))
cnt<-table(pheno.ori[pheno.ori[,"SAMPLE"] %in% ids,"sample.Source"])
cnt<-cnt[cnt!=0]
x<-paste(paste(names(cnt),cnt,sep="="),collapse=" ;")
}else{x<-""}
x
}
)
}
source.counts.cases<-count.source(muts.in.cases)
source.counts.PD<-count.source(muts.in.PD)
} # sample.source presnt
# Combine the per-variant library-metadata counts (capture kit, aligner and
# sample source) into one annotation matrix per cohort, but only when all
# three columns exist in the phenotype table; otherwise leave them empty.
if(sum( c("capture","Aligner","sample.Source") %in% colnames(pheno.ori))==3){
extra.lib.info.cases<-cbind(capture.counts.cases,Aligner.counts.cases,source.counts.cases)
extra.lib.info.PD<-cbind(capture.counts.PD,Aligner.counts.PD,source.counts.PD)
}else{
# '{}' evaluates to NULL, so later cbind() calls silently skip these columns
extra.lib.info.cases<-{}
extra.lib.info.PD<-{}
}
################################ end counting in
#toString(colnames(a.indel)[c(1:6,8,11,16,28,7,30,34,35,36,37:42,43,14,32,33)])
#colnames(a.indel)[1:60]
ann.cols<-c("chr","start","end","REF","ALT","TYPE","refGene::type","knownGene::type","ensGene::type","Gene.Names","Genes.mentioned.at.ASH","refGene::location","knownGene::location","ensGene::location","OMIM (Gene::Status::OMIM::description::disease)","Consequence.Embl","Uploaded_variation.Embl","Gene.Embl","Feature.Embl", "Protein_position.Embl", "Amino_acids.Embl" , "ensGene::type","ID::maf","FILTER")# ,"rs.id")
annotations<-a.indel[,ann.cols]
dim(annotations)
dim(help)
dim(summary.geno.extra)
dim(a.indel)
dim(poss.model)
length(quality.cases)
length(figure)
dim(meta.results.burden.ex)
gerp.scores<-a.indel[,"gerp.scores"]
#sum(meta.results.burden.ex[,"gene"]!=loci)
## colnames(a.indel)[1:50]
## key[grep("chr17",key)[1:100]]
## grep("chr17:41197708",key)
## key[grep("10088407",key)]
#out<-cbind(meta.results.burden.ex,a.indel[figure,c(1:6,16,28,7,30,34,37:42,43)],summary.geno.extra[figure,],high.missing[figure,],help[figure,])
## out<-cbind(meta.results.burden.ex,a.indel[figure,c(1:6,16,28,7,30,34,37:42,43,14,32,33)],summary.geno.extra[figure,c("GENO.AML","GENO.Control","GENO.AML.filt","GENO.Control.filt")],high.missing[figure,])
## summary.geno.extra[figure,]
## annotations[figure,]
## help[figure,]
dim(meta.results.burden.ex)
if(!exists("summary.geno.extra.ori")){summary.geno.extra.ori<-summary.geno.extra}
if(is.null(dim(summary.geno.extra.ori))){summary.geno.extra.ori<-summary.geno.extra}
## if(!exists("pass.old")){pass.old<-pass}
## if(!exists("pass.new")){pass.new<-pass}
#out<-cbind(meta.results.burden.ex,a.indel[figure,c(1:6,16,43,28,7,30,34,37:42)],summary.geno.extra[figure,c("GENO.AML","GENO.Control","GENO.AML.filt","GENO.Control.filt")],help[figure,],muts.in.cases,muts.in.controls)
a.functions<-a.indel[,c("PolyPhen.scores","SIFT.scores","PolyPhen.desc","SIFT.desc")]
posns<-match(key,filt.key)
missing<-is.na(posns)
sum(missing)
filt.sub<-filt[posns,]
if(types[itypes]=="sliding.window"){ ### att the cluster name:
posns<-match(meta.results.burden.ex[,"gene"],snpinfo.sliding[,"Name"])
missing<-is.na(posns)
sum(missing)
the.window<-snpinfo.sliding[posns,"cluster"]
meta.results.burden.ex<-cbind(the.window,meta.results.burden.ex)
}
all.GQ<-cbind(the.QG.cancer,the.QG.PD,the.QG.Controls)
colnames(all.GQ)<-paste(colnames(all.GQ),"GQ",sep=".")
# Build the per-variant library-enrichment p-value matrix, but only when all
# of the bias-test vectors are available in the workspace.
# BUG FIX: the original called the non-existent function exits(); even spelled
# exists() the call would be wrong, because exists() accepts a single name.
# The cbind() below needs every vector, so require all of them to exist.
abundance.vars<-c("test.nextera","test.trueSeq","test.bwa","test.novoalign",
                  "test.PD.nextera","test.PD.trueSeq","test.PD.bwa","test.PD.novoalign")
if(all(vapply(abundance.vars, exists, logical(1)))){
abundance<-cbind(test.nextera,test.trueSeq,test.bwa,test.novoalign,test.PD.nextera,test.PD.trueSeq,test.PD.bwa,test.PD.novoalign)
abundance[1:5,]
colnames(abundance)<-paste(colnames(abundance),"Pval",sep=".")
colnames(abundance)<-gsub("^test","EnRiched",colnames(abundance))
}else{
abundance<-{} # NULL: downstream cbind() drops it silently
}
#truth.table<-cbind(GQ.cancer.pass,GQ.Control.pass,novoalign.bias,bwa.bias,trueSeq.bias,nextera.bias)
truth.table<-cbind(GQ.cancer.pass,GQ.Control.pass)
enum<-1:dim(meta.results.burden.ex)[1]
## out<-cbind(enum,meta.results.burden.ex,a.functions[figure,],gerp.scores[figure],annotations[figure,],maf.lt.all[figure,],is.benign.missense[figure],annotations[figure,],summary.geno.extra[figure,colnames(summary.geno.extra)[grep("^GENO",colnames(summary.geno.extra))]], filt.sub[figure,c("FILTER_SUMMARY","SUMMARY_CALLED","SUMMARY_NOT_CALLED")],pass[figure],pass.all.cohorts[figure],test.nextera[figure],test.trueSeq[figure],test.bwa[figure],test.novoalign[figure],test.PD.nextera[figure],test.PD.trueSeq[figure],test.PD.bwa[figure],test.PD.novoalign[figure],alt.counts.thresh.4.rare.in.Controls[figure],all.GQ[figure,],abundance[figure,],truth.table[figure,],help[figure,],high.missing.table[figure,],poss.model[figure,],poss.model.lib[figure,],muts.in.cases,somatic.cases,somatic.p.cases,quality.cases,depth.fad.cases,depth.cases,dup.cases,extra.lib.info.cases,muts.in.PD,somatic.PD,somatic.p.PD,quality.PD,depth.fad.PD,depth.PD,extra.lib.info.PD,muts.in.controls,quality.controls,depth.fad.controls,depth.controls,dup.controls,summary.geno.extra.ori[figure,colnames(summary.geno.extra.ori)[grep("^GENO",colnames(summary.geno.extra.ori))]]) ### use for out
out<-cbind(enum,meta.results.burden.ex,a.functions[figure,],gerp.scores[figure],annotations[figure,],maf.lt.all[figure,],is.benign.missense[figure],annotations[figure,],summary.geno.extra[figure,colnames(summary.geno.extra)[grep("^GENO",colnames(summary.geno.extra))]], filt.sub[figure,c("FILTER_SUMMARY","SUMMARY_CALLED","SUMMARY_NOT_CALLED")],pass[figure],pass.all.cohorts[figure],test.nextera[figure],test.trueSeq[figure],test.bwa[figure],test.novoalign[figure],test.PD.nextera[figure],test.PD.trueSeq[figure],test.PD.bwa[figure],test.PD.novoalign[figure],alt.counts.thresh.4.rare.in.Controls[figure],all.GQ[figure,],abundance[figure,],truth.table[figure,],help[figure,],high.missing.table[figure,],poss.model[figure,],poss.model.lib[figure,],muts.in.cases,somatic.cases,somatic.p.cases,quality.cases,depth.fad.cases,depth.cases,dup.cases,extra.lib.info.cases,muts.in.PD,somatic.PD,somatic.p.PD,quality.PD,depth.fad.PD,depth.PD,extra.lib.info.PD,muts.in.controls,quality.controls,depth.fad.controls,depth.controls,dup.controls,summary.geno.extra.ori[figure,colnames(summary.geno.extra.ori)[grep("^GENO",colnames(summary.geno.extra.ori))]]) ### use for out
#all.data[figure,]
#out<-cbind(meta.results.burden.ex,annotations[figure,],muts.in.cases,muts.in.controls)
dim(out)
#out[,1:13]
# help["chr7:150700484:150700484:G:A:snp",]
## table(out[,"refGene::location"])
## table(out[,"Consequence.Embl"]) # to.unwind.name<-"IDH"
# Write the unwound per-variant result table, ordered by burden-test p-value.
getwd()
setwd(analysis.dir)
# Diagnostic previews of the output file-name components
paste(paste(to.unwind,collapse="."))
paste(to.unwind.name,collapse=".")
# NOTE(review): "conponents" is a typo but it is part of the output file
# name, so correcting it would break any downstream scripts reading the file.
paste(paste(to.unwind.name,collapse="."),p,"GENOTYPE.conponents.","SkatO","clusters",snap.file,"txt",sep=".")
order.by<-order(out[,"p"],decreasing=FALSE)
#enum<-1:dim(meta.results.burden.ex)[1]
out[order.by,][1:10,1:10]
setwd(analysis.dir)
write.table(out[order.by,],file=paste(paste(to.unwind.name,collapse="."),p,"GENOTYPE.conponents.",snap.file,"txt",sep="."),col.names=TRUE,row.names=FALSE,sep="\t",quote=FALSE)
getwd()
|
## This script explores the National Emissions Inventory data to answer:
## Across the United States, how have emissions from coal combustion-related
## sources changed from 1999-2008?
## Output: plot4.png (total coal-combustion PM2.5 emissions per year).

#### Load data ----
# File locations, relative to the working directory set below.
zipfile <- "./exdata-data-NEI_data.zip"
nei_file <- "./summarySCC_PM25.rds"
scc_file <- "./Source_Classification_Code.rds"
# Set working directory.
# NOTE(review): hard-coded, machine-specific path; kept for compatibility.
setwd("/Users/inesv/Coursera/4-Exploratory/w3")
# Unzip only if the extracted files are not already present.
if (!(file.exists(nei_file) && file.exists(scc_file))) {
  unzip(zipfile, exdir = "./", junkpaths = TRUE)
}
# Load the (large) data sets only once per session.
if (!(exists("NEI") && exists("SCC"))) {
  NEI <- readRDS(nei_file)
  SCC <- readRDS(scc_file)
}

#### Prepare data ----
# Concatenate the four SCC level descriptions so "coal" and "comb" can be
# matched anywhere in the source classification text.
SCC$text <- paste(SCC$SCC.Level.One, SCC$SCC.Level.Two,
                  SCC$SCC.Level.Three, SCC$SCC.Level.Four,
                  sep = " ")
scc_index1 <- grepl("coal", SCC$text, ignore.case = TRUE)
scc_index2 <- grepl("comb", SCC$text, ignore.case = TRUE)
scc_coal <- SCC[scc_index1 & scc_index2,]$SCC
# Keep coal-combustion records and total their emissions per year.
NEI_coal <- NEI[NEI$SCC %in% scc_coal, ]
data_coal <- aggregate(Emissions ~ year, data = NEI_coal, sum)

#### Plot total emissions (tons) per year ----
plot_name <- "plot4.png"
png(filename = plot_name, width = 480, height = 480, units = "px", pointsize = 12)
library(ggplot2)
a <- ggplot(data = data_coal, aes(x = year, y = Emissions)) +
  geom_line() +
  xlab("Year") + ylab("Emissions (Tons)") +
  ggtitle("Evolution of Emissions of coal combustion")
print(a)
dev.off()
| /w3/plot4.R | no_license | inesvidal/4-Exploratory | R | false | false | 1,559 | r | ## This script explores the National Emissions Inventory data to answer:
# Across the United States, how have emissions from coal combustion-related
# sources changed from 1999-2008?
#### load data
# variable initialisation
zipfile = "./exdata-data-NEI_data.zip"
nei_file = "./summarySCC_PM25.rds"
scc_file = "./Source_Classification_Code.rds"
# set working directory
setwd("/Users/inesv/Coursera/4-Exploratory/w3")
# unzip if required
if(!(file.exists(nei_file) && file.exists(scc_file))) {
unzip (zipfile, exdir = "./", junkpaths = TRUE)
}
# load data, if not done yet
if(!(exists("NEI") && exists("SCC"))){
NEI <- readRDS(nei_file)
SCC <- readRDS(scc_file)
}
#### Prepare data
# filter coal + combustion related codes
SCC$text <- paste(SCC$SCC.Level.One, SCC$SCC.Level.Two,
SCC$SCC.Level.Three, SCC$SCC.Level.Four,
sep = " ")
scc_index1 <- grepl("coal", SCC$text, ignore.case = T)
scc_index2 <- grepl("comb", SCC$text, ignore.case = T)
scc_coal <- SCC[scc_index1 & scc_index2,]$SCC
# get levels for these particles
NEI_coal <- NEI[NEI$SCC %in% scc_coal, ]
data_coal <- aggregate (Emissions ~ year, data = NEI_coal, sum)
# Plot total emissions (tons) per year
plot_name <- "plot4.png"
png(filename = plot_name, width = 480, height = 480, units = "px", pointsize = 12)
library(ggplot2)
a <- ggplot(data = data_coal, aes(x = year, y = Emissions)) +
geom_line() +
xlab("Year") + ylab("Emissions (Tons)") +
ggtitle("Evolution of Emissions of coal combustion")
print(a)
dev.off();
|
################################################################################
################################################################################
# Associate dbGaP analyses and studies with the PubMed articles that cite them,
# by scraping the dbGaP CGI reference pages, and save the mappings plus the
# downloaded PubMed metadata back into the rds files under <path>/r/.
AddDbGapPubMed<-function(path=paste(Sys.getenv("RCHIVE_HOME"), 'data/gwas/public/dbgap', sep='/'), update.all.pubmed=FALSE) {
# path Path to the existing dbGaP data previously parsed (expects r/*.rds files)
# update.all.pubmed If TRUE, re-fetch all PubMed-related information; otherwise re-use previously saved results when available
###################################################################################################################
###################################################################################################################
# Load the previously parsed study/analysis tables and their per-ID records
std<-readRDS(file=paste(path, 'r/study.rds', sep='/'));
ana<-readRDS(file=paste(path, 'r/analysis.rds', sep='/'));
id2std<-readRDS(file=paste(path, 'r/study_by_id.rds', sep='/'));
id2ana<-readRDS(file=paste(path, 'r/analysis_by_id.rds', sep='/'));
library(RCurl);
library(rchive);
# Fetch and group the PubMed IDs linked from a set of dbGaP CGI pages.
# 'urls' is a named list: one element per dbGaP entity, each holding the
# page URL(s) to query. Returns a list keyed like 'urls'; entities with no
# linked articles map to the placeholder ID '-1'.
getPMID<-function(urls) {
  flat.url<-unlist(urls, use.names=FALSE);
  entity<-rep(names(urls), sapply(urls, length)); # one entity label per URL
  # Download the pages, keeping only the lines that mention a PubMed link
  pages<-GetSetURL(flat.url, clean.tags=FALSE, select.lines='db=pubmed');
  # The PubMed ID is the second-to-last field when splitting on '=' or '['
  per.page<-lapply(pages, function(lns) sapply(strsplit(lns, '[=\\[]'), function(f) f[length(f)-1]));
  # Collapse per-page hits into one sorted, de-duplicated set per entity
  by.entity<-split(per.page, entity);
  by.entity<-lapply(by.entity, function(hits) {
    found<-unlist(hits, use.names=FALSE);
    if (length(found) == 0) '-1' else sort(unique(found));
  });
  by.entity[names(urls)];
}
##################################################################
########################
# Analysis to pubmed
########################
###############################################
ana<-readRDS(file=paste(path, 'r/analysis.rds', sep='/'));
url0<-as.vector(ana$URL); # URL to each analysis
names(url0)<-rownames(ana);
# Get URL to CGI script that reports pubmed linked to each analysis
if (!update.all.pubmed & file.exists(paste(path, 'r/url_analysis2pubmed.rds', sep='/'))) {
url1<-readRDS(file=paste(path, 'r/url_analysis2pubmed.rds', sep='/'));
url0<-url0[!(names(url0) %in% names(url1)) | is.na(url0) | url0==''];
} else {
url1<-c();
}
if (length(url0) > 0) {
# Get URL of new pubmed
url.new<-paste("http://www.ncbi.nlm.nih.gov/projects/gap/cgi-bin/GetAnalysisReference.cgi?pha=",
as.numeric(sub('pha', '', names(url0))), '&version=1&page_number=1', sep='');
names(url.new)<-names(url0);
url1<-c(url1, url.new);
}
###
saveRDS(url1[rownames(ana)], file=paste(path, 'r/url_analysis2pubmed.rds', sep='/'));
###
###############################################
# Get Pubmed mapped to the analysis from a cgi script of dbGap
url1<-readRDS(file=paste(path, 'r/url_analysis2pubmed.rds', sep='/'));
if (!update.all.pubmed & file.exists(paste(path, 'r/analysis2pubmed.rds', sep='/'))) {
pmid<-readRDS(file=paste(path, 'r/analysis2pubmed.rds', sep='/'));
url1<-url1[!(names(url1) %in% names(pmid)) & !is.na(url1) & url1!=''];
} else {
pmid<-list();
}
if (length(url1) > 0) {
pmid0<-getPMID(url1);
pmid<-c(pmid, pmid0);
}
pmid<-pmid[rownames(ana)];
###############################################
# Attach the PubMed IDs to each analysis record.
# BUG FIX: the original lapply() body returned NULL for analyses without a
# PubMed mapping (the outer if() had no else), so those records were
# overwritten with NULL; and when a 'PubMed' slot already existed, the value
# of the assignment expression (the bare ID vector) replaced the whole
# record. Work on a local copy and always return the full record.
id2ana[seq_along(id2ana)]<-lapply(names(id2ana), function(nm) {
rec<-id2ana[[nm]];
if (!is.null(pmid[[nm]])) {
if (is.null(rec[['PubMed']])) {
rec<-append(rec, list(PubMed=pmid[[nm]]), after=which(names(rec)=='URL'));
} else {
rec[['PubMed']]<-pmid[[nm]];
}
}
rec;
});
###
saveRDS(id2ana,file=paste(path, 'r/analysis_by_id.rds', sep='/'));
saveRDS(lapply(id2ana, function(x) x$PubMed), file=paste(path, 'r/analysis2pubmed.rds', sep='/'));
###
##################################################################
##################################################################
########################
# Study to pubmed
########################
###############################################
std<-readRDS(file=paste(path, 'r/study.rds', sep='/'));
url0<-as.vector(std$URL);
names(url0)<-rownames(std);
# Get URL to CGI script that reports pubmed linked to each study
if (!update.all.pubmed & file.exists(paste(path, 'r/url_study2pubmed.rds', sep='/'))) {
url1<-readRDS(file=paste(path, 'r/url_study2pubmed.rds', sep='/'));
url0<-url0[!(names(url0) %in% names(url1)) | is.na(url0) | url0==''];
} else {
url1<-c();
}
if (length(url0) > 0) {
pgs<-GetSetURL(url0, select.lines="initializeReferences", clean.tags=FALSE);
lns<-sapply(strsplit(sapply(pgs, function(x) x[1]), '[\";]'), function(x) x[grep('^initializeReferences', x)][1]);
lns<-strsplit(lns, '[(\',)]');
url.new<-lapply(lns, function(ln) if (length(ln) !=7) list() else {
url<-paste("http://www.ncbi.nlm.nih.gov/projects/gap/cgi-bin/GetReference.cgi?study_id=", ln[3], '&study_key=', ln[5], sep='');
ln<-GetSetURL(url, select.lines="There (is)|(are) [0-9]+ selected publications? related to this study.")[[1]];
n<-as.numeric(strsplit(ln, ' ')[[1]][3]);
paste(url, '&page_number=', 1:ceiling(n/10), sep='');
});
names(url.new)<-names(url0);
url1<-c(url1, url.new);
}
###
saveRDS(url1[rownames(std)], file=paste(path, 'r/url_study2pubmed.rds', sep='/'));
###
###############################################
# Get Pubmed mapped to the study from a cgi script of dbGap
url1<-readRDS(file=paste(path, 'r/url_study2pubmed.rds', sep='/'));
if (!update.all.pubmed & file.exists(paste(path, 'r/study2pubmed.rds', sep='/'))) {
pmid<-readRDS(file=paste(path, 'r/study2pubmed.rds', sep='/'));
url1<-url1[!(names(url1) %in% names(pmid)) & !is.na(url1) & url1!=''];
} else {
pmid<-list();
}
# Retrieve new PubMed IDs from dbGaP
if (length(url1) > 0) {
pmid0<-getPMID(url1);
pmid<-c(pmid, pmid0);
}
pmid<-pmid[rownames(std)];
###############################################
names(id2std)<-sapply(id2std, function(x) x$ID);
id2std[1:length(id2std)]<-lapply(names(id2std), function(nm) {
if (!is.null(pmid[[nm]])) {
if (is.null(id2std[[nm]][['PubMed']])) {
append(id2std[[nm]], list(PubMed=pmid[[nm]]), after=which(names(id2std[[nm]])=='URL'));
} else id2std[[nm]];
}
});
###
saveRDS(id2std,file=paste(path, 'r/study_by_id.rds', sep='/'));
saveRDS(lapply(id2std, function(x) x$PubMed), file=paste(path, 'r/study2pubmed.rds', sep='/'));
###
#################################################################
#################################################################
# Retrieve metadata of pubmed articles
pmid<-c(lapply(id2ana, function(x) x$PubMed), lapply(id2std, function(x) x$PubMed)); # All PubMed IDs
pmid<-sort(unique(unlist(pmid)));
pmid<-pmid[pmid!='-1' & pmid!='' & !is.na(pmid)];
if (!update.all.pubmed & file.exists(paste(path, 'r/pubmed_downloaded.rds', sep='/'))) {
pm<-readRDS(file=paste(path, 'r/pubmed_downloaded.rds', sep='/'));
pmid0<-pmid[!(pmid %in% names(pm))];
if (length(pmid0) > 0) {
pm0<-GetPubMedAbstract(pmid0);
pm<-c(pm, pm0);
}
} else {
pm<-GetPubMedAbstract(pmid);
}
saveRDS(pm, file=paste(path, 'r/pubmed_downloaded.rds', sep='/'));
pubmed<-GetPubMedFields(pm);
id2pub<-lapply(rownames(pubmed), function(id) c(ID=id, pubmed[id, ]));
names(id2pub)<-rownames(pubmed);
###
saveRDS(pubmed, file=paste(path, 'r/pubmed.rds', sep='/'));
saveRDS(id2pub, file=paste(path, 'r/pubmed_by_id.rds', sep='/'));
###
}
| /dev/gwas/AddDbGapPubMed.r | no_license | leipzig/rchive | R | false | false | 7,761 | r | ################################################################################
# Associate dbGaP analysis and study with Pubmed articles
AddDbGapPubMed <- function(path=paste(Sys.getenv("RCHIVE_HOME"), 'data/gwas/public/dbgap', sep='/'), update.all.pubmed=FALSE) {
  # Associate dbGaP analysis and study records with the PubMed articles that
  # cite them, and cache the retrieved PubMed metadata under <path>/r.
  # path               Path to the existing dbGaP data previously parsed
  # update.all.pubmed  If TRUE, re-download all pubmed-related information;
  #                    otherwise reuse previously saved information when available
  ###################################################################################################################
  # Previously parsed dbGaP tables and their per-ID record lists
  std<-readRDS(file=paste(path, 'r/study.rds', sep='/'));
  ana<-readRDS(file=paste(path, 'r/analysis.rds', sep='/'));
  id2std<-readRDS(file=paste(path, 'r/study_by_id.rds', sep='/'));
  id2ana<-readRDS(file=paste(path, 'r/analysis_by_id.rds', sep='/'));

  # library() rather than require(): fail immediately if a dependency is missing
  library(RCurl);
  library(rchive);

  ##################################################################
  # Retrieve the PubMed IDs listed on each given dbGaP CGI page.
  # urls: named vector/list of page URLs (one or more pages per name).
  # Returns, per name, a sorted and de-duplicated character vector of PubMed
  # IDs, or '-1' when the page(s) list none.
  getPMID<-function(urls) {
    url<-unlist(urls, use.names=FALSE);
    id<-rep(names(urls), sapply(urls, length));
    pgs<-GetSetURL(url, clean.tags=FALSE, select.lines='db=pubmed'); # Download from dbGaP CGI page
    pmid<-lapply(pgs, function(ln) sapply(strsplit(ln, '[=\\[]'), function(ln) ln[length(ln)-1])); # parse page to get PubMed IDs
    # Group PubMed IDs by the name of the URL they came from
    mp<-split(pmid, id);
    mp<-lapply(mp, function(x) {
      ids<-unlist(x, use.names=FALSE);
      if (length(ids) == 0) '-1' else sort(unique(ids));
    });
    mp[names(urls)];
  }

  ##################################################################
  ########################
  # Analysis to pubmed
  ########################
  url0<-as.vector(ana$URL); # URL to each analysis
  names(url0)<-rownames(ana);
  # Get URL to CGI script that reports pubmed linked to each analysis
  if (!update.all.pubmed & file.exists(paste(path, 'r/url_analysis2pubmed.rds', sep='/'))) {
    url1<-readRDS(file=paste(path, 'r/url_analysis2pubmed.rds', sep='/'));
    url0<-url0[!(names(url0) %in% names(url1)) | is.na(url0) | url0==''];
  } else {
    url1<-c();
  }
  if (length(url0) > 0) {
    # Build the CGI URL of each new analysis from its dbGaP accession ('pha...')
    url.new<-paste("http://www.ncbi.nlm.nih.gov/projects/gap/cgi-bin/GetAnalysisReference.cgi?pha=",
                   as.numeric(sub('pha', '', names(url0))), '&version=1&page_number=1', sep='');
    names(url.new)<-names(url0);
    url1<-c(url1, url.new);
  }
  saveRDS(url1[rownames(ana)], file=paste(path, 'r/url_analysis2pubmed.rds', sep='/'));

  # Get Pubmed mapped to the analysis from a cgi script of dbGap
  url1<-readRDS(file=paste(path, 'r/url_analysis2pubmed.rds', sep='/'));
  if (!update.all.pubmed & file.exists(paste(path, 'r/analysis2pubmed.rds', sep='/'))) {
    pmid<-readRDS(file=paste(path, 'r/analysis2pubmed.rds', sep='/'));
    url1<-url1[!(names(url1) %in% names(pmid)) & !is.na(url1) & url1!=''];
  } else {
    pmid<-list();
  }
  if (length(url1) > 0) {
    pmid0<-getPMID(url1);
    pmid<-c(pmid, pmid0);
  }
  pmid<-pmid[rownames(ana)];

  ###############################################
  # Attach the PubMed IDs to each analysis record.
  # BUG FIX: the lapply() callback must return the full (possibly updated)
  # record. The original returned NULL (which erases the list element) when no
  # PubMed ID was found, and returned only the bare assignment value
  # (pmid[[nm]]) when refreshing an existing 'PubMed' field.
  id2ana[seq_along(id2ana)]<-lapply(names(id2ana), function(nm) {
    entry<-id2ana[[nm]];
    if (!is.null(pmid[[nm]])) {
      if (is.null(entry[['PubMed']])) {
        entry<-append(entry, list(PubMed=pmid[[nm]]), after=which(names(entry)=='URL'));
      } else entry[['PubMed']]<-pmid[[nm]];
    }
    entry;
  });
  saveRDS(id2ana, file=paste(path, 'r/analysis_by_id.rds', sep='/'));
  saveRDS(lapply(id2ana, function(x) x$PubMed), file=paste(path, 'r/analysis2pubmed.rds', sep='/'));

  ##################################################################
  ########################
  # Study to pubmed
  ########################
  url0<-as.vector(std$URL);
  names(url0)<-rownames(std);
  # Get URL to CGI script that reports pubmed linked to each study
  if (!update.all.pubmed & file.exists(paste(path, 'r/url_study2pubmed.rds', sep='/'))) {
    url1<-readRDS(file=paste(path, 'r/url_study2pubmed.rds', sep='/'));
    url0<-url0[!(names(url0) %in% names(url1)) | is.na(url0) | url0==''];
  } else {
    url1<-c();
  }
  if (length(url0) > 0) {
    # Parse the 'initializeReferences(...)' javascript call on each study page
    # to recover the study_id/study_key pair, then page through the reference
    # list 10 entries at a time.
    pgs<-GetSetURL(url0, select.lines="initializeReferences", clean.tags=FALSE);
    lns<-sapply(strsplit(sapply(pgs, function(x) x[1]), '[\";]'), function(x) x[grep('^initializeReferences', x)][1]);
    lns<-strsplit(lns, '[(\',)]');
    url.new<-lapply(lns, function(ln) if (length(ln) !=7) list() else {
      url<-paste("http://www.ncbi.nlm.nih.gov/projects/gap/cgi-bin/GetReference.cgi?study_id=", ln[3], '&study_key=', ln[5], sep='');
      ln<-GetSetURL(url, select.lines="There (is)|(are) [0-9]+ selected publications? related to this study.")[[1]];
      n<-as.numeric(strsplit(ln, ' ')[[1]][3]);
      paste(url, '&page_number=', 1:ceiling(n/10), sep='');
    });
    names(url.new)<-names(url0);
    url1<-c(url1, url.new);
  }
  saveRDS(url1[rownames(std)], file=paste(path, 'r/url_study2pubmed.rds', sep='/'));

  # Get Pubmed mapped to the study from a cgi script of dbGap
  url1<-readRDS(file=paste(path, 'r/url_study2pubmed.rds', sep='/'));
  if (!update.all.pubmed & file.exists(paste(path, 'r/study2pubmed.rds', sep='/'))) {
    pmid<-readRDS(file=paste(path, 'r/study2pubmed.rds', sep='/'));
    url1<-url1[!(names(url1) %in% names(pmid)) & !is.na(url1) & url1!=''];
  } else {
    pmid<-list();
  }
  # Retrieve new PubMed IDs from dbGaP
  if (length(url1) > 0) {
    pmid0<-getPMID(url1);
    pmid<-c(pmid, pmid0);
  }
  pmid<-pmid[rownames(std)];

  ###############################################
  names(id2std)<-sapply(id2std, function(x) x$ID);
  # Same fix as for the analyses above; additionally, the original never
  # refreshed an existing 'PubMed' field of a study record (its else-branch
  # returned the record unchanged), which was inconsistent with the analysis
  # branch's intent.
  id2std[seq_along(id2std)]<-lapply(names(id2std), function(nm) {
    entry<-id2std[[nm]];
    if (!is.null(pmid[[nm]])) {
      if (is.null(entry[['PubMed']])) {
        entry<-append(entry, list(PubMed=pmid[[nm]]), after=which(names(entry)=='URL'));
      } else entry[['PubMed']]<-pmid[[nm]];
    }
    entry;
  });
  saveRDS(id2std, file=paste(path, 'r/study_by_id.rds', sep='/'));
  saveRDS(lapply(id2std, function(x) x$PubMed), file=paste(path, 'r/study2pubmed.rds', sep='/'));

  #################################################################
  # Retrieve metadata of pubmed articles
  pmid<-c(lapply(id2ana, function(x) x$PubMed), lapply(id2std, function(x) x$PubMed)); # All PubMed IDs
  pmid<-sort(unique(unlist(pmid)));
  pmid<-pmid[pmid!='-1' & pmid!='' & !is.na(pmid)];
  if (!update.all.pubmed & file.exists(paste(path, 'r/pubmed_downloaded.rds', sep='/'))) {
    pm<-readRDS(file=paste(path, 'r/pubmed_downloaded.rds', sep='/'));
    pmid0<-pmid[!(pmid %in% names(pm))]; # only fetch IDs not downloaded before
    if (length(pmid0) > 0) {
      pm0<-GetPubMedAbstract(pmid0);
      pm<-c(pm, pm0);
    }
  } else {
    pm<-GetPubMedAbstract(pmid);
  }
  saveRDS(pm, file=paste(path, 'r/pubmed_downloaded.rds', sep='/'));
  pubmed<-GetPubMedFields(pm);
  id2pub<-lapply(rownames(pubmed), function(id) c(ID=id, pubmed[id, ]));
  names(id2pub)<-rownames(pubmed);
  saveRDS(pubmed, file=paste(path, 'r/pubmed.rds', sep='/'));
  saveRDS(id2pub, file=paste(path, 'r/pubmed_by_id.rds', sep='/'));
}
|
test_that("suggest_to_df works as expected", {
  # A typical two-element API payload: empty 'popular', five 'recommended' tags.
  api_response <- list(
    list(popular = list()),
    list(recommended = c("culture", "religion", "taiwan", "chinese", "humor"))
  )
  want <- data.frame(
    tag = c("culture", "religion", "taiwan", "chinese", "humor"),
    popular = FALSE,
    recommended = TRUE
  )
  expect_equal(suggest_to_df(api_response), want)
})
test_that("remove empty vars excluded correctly", {
  # A 'key=' entry with no value should be dropped from the query parts.
  query_parts <- c("url=https://www.example.com", "title=an example", "extended=")
  expect_equal(
    remove_empty_vars(query_parts),
    c("url=https://www.example.com", "title=an example")
  )
})
# concat_args
test_that("tags_parser works as expected", {
  # Stray whitespace/commas should be stripped (with a warning) and the
  # cleaned tags joined with '+'; no arguments yields an empty string.
  raw_tags <- c("dog", "cat ", "bird,")
  expect_warning(parsed <- tags_parser(raw_tags))
  expect_equal(parsed, "dog+cat+bird")
  expect_equal(tags_parser(), "")
})
test_that("pinboard_dataframe_to_logical_names works", {
  # Raw Pinboard API column names...
  api_df <- data.frame(
    href = "https://nope.com",
    description = "wait this is a title",
    extended = "this is actually a description",
    meta = "ASDFADF8723",
    hash = "ASDFA422723",
    time = "2020-05-11T04:53:53Z",
    shared = "yes",
    toread = "yes",
    tags = "inspiration anothertag"
  )
  # ...should be mapped to sensible ones: description->title,
  # extended->description, shared->public; everything else untouched.
  renamed <- data.frame(
    href = "https://nope.com",
    title = "wait this is a title",
    description = "this is actually a description",
    meta = "ASDFADF8723",
    hash = "ASDFA422723",
    time = "2020-05-11T04:53:53Z",
    public = "yes",
    toread = "yes",
    tags = "inspiration anothertag"
  )
  expect_equal(pinboard_dataframe_to_logical_names(api_df), renamed)
})
test_that("rename_column works", {
  # Only the targeted column is renamed; values and other columns are kept.
  input_df <- data.frame(falafal = c("one"), house = "dog")
  expect_equal(
    rename_column(input_df, "house", "pet"),
    data.frame(falafal = c("one"), pet = "dog")
  )
})
| /tests/testthat/test_posts.R | permissive | RMHogervorst/pinboardr | R | false | false | 1,876 | r |
test_that("suggest_to_df works as expected", {
  # A typical two-element API payload: empty 'popular', five 'recommended' tags.
  api_response <- list(
    list(popular = list()),
    list(recommended = c("culture", "religion", "taiwan", "chinese", "humor"))
  )
  want <- data.frame(
    tag = c("culture", "religion", "taiwan", "chinese", "humor"),
    popular = FALSE,
    recommended = TRUE
  )
  expect_equal(suggest_to_df(api_response), want)
})
test_that("remove empty vars excluded correctly", {
  # A 'key=' entry with no value should be dropped from the query parts.
  query_parts <- c("url=https://www.example.com", "title=an example", "extended=")
  expect_equal(
    remove_empty_vars(query_parts),
    c("url=https://www.example.com", "title=an example")
  )
})
# concat_args
test_that("tags_parser works as expected", {
  # Stray whitespace/commas should be stripped (with a warning) and the
  # cleaned tags joined with '+'; no arguments yields an empty string.
  raw_tags <- c("dog", "cat ", "bird,")
  expect_warning(parsed <- tags_parser(raw_tags))
  expect_equal(parsed, "dog+cat+bird")
  expect_equal(tags_parser(), "")
})
test_that("pinboard_dataframe_to_logical_names works", {
  # Raw Pinboard API column names...
  api_df <- data.frame(
    href = "https://nope.com",
    description = "wait this is a title",
    extended = "this is actually a description",
    meta = "ASDFADF8723",
    hash = "ASDFA422723",
    time = "2020-05-11T04:53:53Z",
    shared = "yes",
    toread = "yes",
    tags = "inspiration anothertag"
  )
  # ...should be mapped to sensible ones: description->title,
  # extended->description, shared->public; everything else untouched.
  renamed <- data.frame(
    href = "https://nope.com",
    title = "wait this is a title",
    description = "this is actually a description",
    meta = "ASDFADF8723",
    hash = "ASDFA422723",
    time = "2020-05-11T04:53:53Z",
    public = "yes",
    toread = "yes",
    tags = "inspiration anothertag"
  )
  expect_equal(pinboard_dataframe_to_logical_names(api_df), renamed)
})
test_that("rename_column works", {
  # Only the targeted column is renamed; values and other columns are kept.
  input_df <- data.frame(falafal = c("one"), house = "dog")
  expect_equal(
    rename_column(input_df, "house", "pet"),
    data.frame(falafal = c("one"), pet = "dog")
  )
})
|
# Dashboard tab holding the "speed_spin" plot output in a half-width column.
tabItem(
  tabName = "speed_spin",
  fluidRow(column(width = 6, plotOutput("speed_spin_plot")))
)
| /02_speedspin_ui.R | no_license | Yuki-Tigers0626/MLB_vis | R | false | false | 141 | r | tabItem(
tabName = "speed_spin",
fluidRow(
column(
width = 6,
plotOutput(
"speed_spin_plot"
)
)
)
)
|
# remove NOTE about no visible binding for global variable during R CMD check --
# These names are column/variable names used inside non-standard-evaluation
# calls; declaring them via utils::globalVariables() keeps R CMD check from
# flagging them as undefined globals. globalVariables() exists since R 2.15.1,
# hence the version guard.
if (getRversion() >= "2.15.1") {
  utils::globalVariables(
    # One flat character vector of every such name used across the package.
    # NOTE(review): the vector contains a few duplicates (e.g. "HET_O", "GL",
    # "GOF", "MARKERS", "POLYMORPHIC") -- harmless, but could be de-duplicated.
    c("ID", "CloneID", "SnpPosition", "CallRate", "AvgCountRef", "AvgCountSnp",
      "RepAvg", "NOT_USEFUL", "SNP", "CALL_RATE", "AVG_COUNT_REF",
      "AVG_COUNT_SNP", "REP_AVG", "NEW_ID", "SNP_N", "ALLELE_NAME", "ALLELE_NUMBER",
      "ALLELES_COUNT", "GENOTYPED_PROP", "MISSING_IND_PROP", "AlleleID", "HET_NUMBER",
      "HET_PERCENT", "HET_PROP", "everything", "DP", "AD", "vcf.headers",
      "GT_VCF", "INDIVIDUALS2", "ALLELE_REF_DEPTH",
      "ALLELE_ALT_DEPTH", "GT_BIN", "GT_HAPLO", "REF_NEW", "REF_ALT_CHANGE",
      "GT_VCF_A1", "GT_VCF_A2", "MAF", "Allele1", "Allele2", "POP", "IN_GROUP",
      "OUT_GROUP", "ID.FILTER", "ANCESTRAL", "SEQUENCES", "GARBAGE", "SNP_READ_POS",
      "FASTA_REF", "BP", "Chr", "Locus", "Locus ID", "Col", "PP", "ALLELE_GROUP",
      "PROBLEM", "IND_LEVEL_POLYMORPHISM", "HOM", "HET", "N_GENOT", "DIPLO",
      "FREQ_ALLELES", "HOM_E", "HOM_O", "FH", "HET_O", "HET_E", "PI", "pi",
      "MONOMORPHIC", "POLYMORPHIC", "CONSENSUS", "PARALOGS", "Seg Dist",
      "REF.x", "ALT.x", "REF.y", "ALT.y", "BLACKLIST", "ALLELE_COVERAGE_RATIO",
      "..scaled..", "GENOTYPE_LIKELIHOOD_GROUP", "GL_MAX", "GL_MIN", "VALUE",
      "GL_DIFF", "ALLELE_ALT_DEPTH_NEW", "ALLELE_REF_DEPTH_NEW", "ALT_NEW",
      "CHANGE", "n.al.pop", "n.al.tot", "TOTAL_READ", "Missingness",
      "MISSING_GENOTYPE", "INDIVIDUALS_NUMBER", "PERC", "Axis.1", "Axis.2", "V1",
      "Axis.3", "Axis.4", "ids", "GQ", "PL", "ARTIFACTS", "GENOTYPED",
      "IND_THRESHOLD", "N_IND", "PERCENT", "POP_GENOTYPED", "PROB_POP", "LOCUS",
      "HET_DIFF", "HET_O", "HET_MAX", "GT", "GL", "INDIVIDUALS", "POP_ID", "N",
      "HET_O", "HOM_O", "HET_E", "HOM_E", "FREQ_ALT", "FREQ_REF",
      "GLOBAL_MAF", "PP", "PQ", "QQ", "AND", "OR", "filters.parameters",
      "het.global.summary", "het.global.threshold",
      "plot.distribution.het.local", "violinplot.het.global",
      "violinplot.het.local", "HET_GROUP", "HET_MIN",
      "HET_THRESHOLD", "POP_THRESHOLD", "PROP", "0.4", "0.9","DIF_0.1", "DIF_0.9",
      "DIF_OUTLIERS", "DIF_THRESHOLD", "HET_DIF", "HET_MEAN", "MAX_0.4",
      "MAX_0.9", "MAX_DIF_OUTLIERS", "MAX_DIF_THRESHOLD", "MAX_OUTLIERS",
      "MAX_THRESHOLD", "OUTLIERS", "THRESHOLD", "WHITELIST", "het.dif.threshold",
      "ALT2", "REF2", "#CHROM", "A1", "A2", "ALLELES", "ALLELES_GROUP", "ALT",
      "COL1", "COL3", "COL4", "COUNT", "ERASE", "FILTER", "FORMAT", "FORMAT_ID",
      "INDIVIDUALS_ALLELES", "INFO", "KEEPER", "MAF_GLOBAL", "MAF_LOCAL",
      "MARKERS", "MARKERS_ALLELES", "MARKER_NUMBER", "MAX_COUNT_MARKERS",
      "NUMBER", "QUAL", "READ_DEPTH", "REF", "STRATA", "TOTAL",
      "path.folder.step1", "path.folder.step2", "sum.pop", "EIGENVALUES",
      "MISSING_GENOTYPE_PROP", "A1_A2", "ALLELE_COPIES", "READ_DEPTH_MEAN",
      "READ_DEPTH_MIN", "READ_DEPTH_MAX", "ALLELE_REF_DEPTH", "ALLELE_REF_DEPTH_MEAN",
      "ALLELE_REF_DEPTH_MIN", "ALLELE_REF_DEPTH_MAX", "ALLELE_ALT_DEPTH",
      "ALLELE_ALT_DEPTH_MEAN", "ALLELE_ALT_DEPTH_MIN", "ALLELE_ALT_DEPTH_MAX",
      "GL", "GL_MEAN", "ALLELE_SUM", "FIS", "FIS_MAX", "FIS_MIN", "FIS_DIFF",
      "FREQ", "HOM_ALT",
      "fh", "HOM_REF", "BETAI", "HB", "HW", "NN", "NN_C", "N_POP", "N_POP_C",
      "DIFFERENT", "DISTANCE", "ID1", "ID2", "IDENTICAL", "IDENTICAL_GT",
      "PAIRWISE", "POP_COMP", "PROP_IDENTICAL", "TOTAL_MARKERS_GENOTYPED", "X1",
      "HET_RANGE", "MISSING_GROUP", "MISSING_PROP", "MISSING_PROP_OVERALL",
      "MISSING_PROP_POP", ".", "ALLELE", "ALLELE1", "ALLELE2", "ALLELES_FREQ",
      "Catalog ID", "Cnt", "GENOTYPE", "GROUP", "HAPLOTYPES", "MARKERS.ALLELE_GROUP",
      "POLYMORPHISM", "ALT_ALLELE_NUMBER", "PROB_HET", "PROB_HOM_ALT", "PROB_HOM_REF",
      "NAPL", "DISTANCE_RELATIVE", "MARKERS_COMMON", "id.pairwise", "input.prep",
      "CHROM_LOCUS", "NEW_MARKERS", "POP_ID_N", "INDIVIDUALS_N", "GT_N", "GT_IMP",
      "INDIVIDUALS_REP", "KEEP", "GT_VCF_NUC", "INTEGERS", "GOF", "NR", "NV",
      "A1_NUC", "A2_NUC", "RANK", "MAF_P", "MAF_L", "BAD_ID", "strata",
      "SPLIT_VEC", "ALLELES_COUNT_OVERALL", "N_INV", "HS", "GIS", "PI_NEI",
      "FQ_FILES", "PARAMETER", "SPLIT", "mtime", "REF_ALT", "ARTIFACT",
      "HETEROZYGOSITY", "HOMOZYGOSITY", "LOCUS_NUMBER", "BLACKLIST_ARTIFACT",
      "HAPLOTYPE", "MAX_NUMBER_SNP_LOCUS", "MEAN_NUMBER_SNP_LOCUS",
      "NUMBER_LOCUS_4SNP", "SNP_LOCUS", "SNP_NUMBER", "BLACKLISTED_FLAG",
      "BLACKLIST_USTACKS", "FILTERED", "FOR_CATALOG",
      "HAPLOTYPE_NUMBER", "LOCUS_TOTAL", "LUMBERJACKSTACK_FLAG", "SEQUENCE",
      "SEQ_TYPE", "BLACKLISTED", "BLACKLISTED_TOTAL", "BLACKLIST_CALL_HAPLOTYPE",
      "BLACKLIST_CONFOUNDED_LOCI", "CATALOG_ID", "HOM_TO_UNK", "LNL_FILTERED_LOCI",
      "LOG_LIKELIHOOD", "NUCS_CONVERTED", "NUC_NOT_CONVERTED", "PROPORTION",
      "PRUNED_HAPLOTYPES", "PRUNED_HAPLOTYPES_RARE", "PRUNED_HAPLOTYPES_TREE",
      "RXSTACKS", "SAMPLE_ID", "STATS", "TOTAL_NUCS", "TOTAL_NUCS_CONVERTED",
      "UNK_TO_HET", "UNK_TO_HOM", "ALGORITHM", "CORR_VALUE", "FROM_TO",
      "ORIG_VALUE", "SQL_ID", "mst", "rare_step_1", "RXSTACKS_MST",
      "RXSTACKS_RARE_STEP_1", "BARCODES", "DESCRIPTION", "FILENAME", "LANES",
      "LANES_SHORT", "LOWQUALITY", "LOW_QUALITY", "NORADTAG", "NO_RADTAG", "READS",
      "REPLICATES", "RETAINED", "MISMATCH", "ALL_LOCUS", "INPUT", "MATCH_PERCENT",
      "GENOTYPES", "TYPE", "PATTERN", "BANDS_OBS", "BANDS_EXP", "POLYMORPHIC",
      "MISSING_BAND", "MEAN_LOG_LIKELIHOOD", "CHISQ", "GOF", "GOF_PVALUE",
      "ONEMAP", "JOINMAP", "TOTAL_GENOTYPES", "MARKERS", "BLACKLIST_PSTACKS", "n"
    )
  )
}
| /R/global_variables.R | no_license | IdoBar/stackr | R | false | false | 5,644 | r | # remove NOTE about no visible binding for global variable during R CMD check --
if (getRversion() >= "2.15.1") {
utils::globalVariables(
c("ID", "CloneID", "SnpPosition", "CallRate", "AvgCountRef", "AvgCountSnp",
"RepAvg", "NOT_USEFUL", "SNP", "CALL_RATE", "AVG_COUNT_REF",
"AVG_COUNT_SNP", "REP_AVG", "NEW_ID", "SNP_N", "ALLELE_NAME", "ALLELE_NUMBER",
"ALLELES_COUNT", "GENOTYPED_PROP", "MISSING_IND_PROP", "AlleleID", "HET_NUMBER",
"HET_PERCENT", "HET_PROP", "everything", "DP", "AD", "vcf.headers",
"GT_VCF", "INDIVIDUALS2", "ALLELE_REF_DEPTH",
"ALLELE_ALT_DEPTH", "GT_BIN", "GT_HAPLO", "REF_NEW", "REF_ALT_CHANGE",
"GT_VCF_A1", "GT_VCF_A2", "MAF", "Allele1", "Allele2", "POP", "IN_GROUP",
"OUT_GROUP", "ID.FILTER", "ANCESTRAL", "SEQUENCES", "GARBAGE", "SNP_READ_POS",
"FASTA_REF", "BP", "Chr", "Locus", "Locus ID", "Col", "PP", "ALLELE_GROUP",
"PROBLEM", "IND_LEVEL_POLYMORPHISM", "HOM", "HET", "N_GENOT", "DIPLO",
"FREQ_ALLELES", "HOM_E", "HOM_O", "FH", "HET_O", "HET_E", "PI", "pi",
"MONOMORPHIC", "POLYMORPHIC", "CONSENSUS", "PARALOGS", "Seg Dist",
"REF.x", "ALT.x", "REF.y", "ALT.y", "BLACKLIST", "ALLELE_COVERAGE_RATIO",
"..scaled..", "GENOTYPE_LIKELIHOOD_GROUP", "GL_MAX", "GL_MIN", "VALUE",
"GL_DIFF", "ALLELE_ALT_DEPTH_NEW", "ALLELE_REF_DEPTH_NEW", "ALT_NEW",
"CHANGE", "n.al.pop", "n.al.tot", "TOTAL_READ", "Missingness",
"MISSING_GENOTYPE", "INDIVIDUALS_NUMBER", "PERC", "Axis.1", "Axis.2", "V1",
"Axis.3", "Axis.4", "ids", "GQ", "PL", "ARTIFACTS", "GENOTYPED",
"IND_THRESHOLD", "N_IND", "PERCENT", "POP_GENOTYPED", "PROB_POP", "LOCUS",
"HET_DIFF", "HET_O", "HET_MAX", "GT", "GL", "INDIVIDUALS", "POP_ID", "N",
"HET_O", "HOM_O", "HET_E", "HOM_E", "FREQ_ALT", "FREQ_REF",
"GLOBAL_MAF", "PP", "PQ", "QQ", "AND", "OR", "filters.parameters",
"het.global.summary", "het.global.threshold",
"plot.distribution.het.local", "violinplot.het.global",
"violinplot.het.local", "HET_GROUP", "HET_MIN",
"HET_THRESHOLD", "POP_THRESHOLD", "PROP", "0.4", "0.9","DIF_0.1", "DIF_0.9",
"DIF_OUTLIERS", "DIF_THRESHOLD", "HET_DIF", "HET_MEAN", "MAX_0.4",
"MAX_0.9", "MAX_DIF_OUTLIERS", "MAX_DIF_THRESHOLD", "MAX_OUTLIERS",
"MAX_THRESHOLD", "OUTLIERS", "THRESHOLD", "WHITELIST", "het.dif.threshold",
"ALT2", "REF2", "#CHROM", "A1", "A2", "ALLELES", "ALLELES_GROUP", "ALT",
"COL1", "COL3", "COL4", "COUNT", "ERASE", "FILTER", "FORMAT", "FORMAT_ID",
"INDIVIDUALS_ALLELES", "INFO", "KEEPER", "MAF_GLOBAL", "MAF_LOCAL",
"MARKERS", "MARKERS_ALLELES", "MARKER_NUMBER", "MAX_COUNT_MARKERS",
"NUMBER", "QUAL", "READ_DEPTH", "REF", "STRATA", "TOTAL",
"path.folder.step1", "path.folder.step2", "sum.pop", "EIGENVALUES",
"MISSING_GENOTYPE_PROP", "A1_A2", "ALLELE_COPIES", "READ_DEPTH_MEAN",
"READ_DEPTH_MIN", "READ_DEPTH_MAX", "ALLELE_REF_DEPTH", "ALLELE_REF_DEPTH_MEAN",
"ALLELE_REF_DEPTH_MIN", "ALLELE_REF_DEPTH_MAX", "ALLELE_ALT_DEPTH",
"ALLELE_ALT_DEPTH_MEAN", "ALLELE_ALT_DEPTH_MIN", "ALLELE_ALT_DEPTH_MAX",
"GL", "GL_MEAN", "ALLELE_SUM", "FIS", "FIS_MAX", "FIS_MIN", "FIS_DIFF",
"FREQ", "HOM_ALT",
"fh", "HOM_REF", "BETAI", "HB", "HW", "NN", "NN_C", "N_POP", "N_POP_C",
"DIFFERENT", "DISTANCE", "ID1", "ID2", "IDENTICAL", "IDENTICAL_GT",
"PAIRWISE", "POP_COMP", "PROP_IDENTICAL", "TOTAL_MARKERS_GENOTYPED", "X1",
"HET_RANGE", "MISSING_GROUP", "MISSING_PROP", "MISSING_PROP_OVERALL",
"MISSING_PROP_POP", ".", "ALLELE", "ALLELE1", "ALLELE2", "ALLELES_FREQ",
"Catalog ID", "Cnt", "GENOTYPE", "GROUP", "HAPLOTYPES", "MARKERS.ALLELE_GROUP",
"POLYMORPHISM", "ALT_ALLELE_NUMBER", "PROB_HET", "PROB_HOM_ALT", "PROB_HOM_REF",
"NAPL", "DISTANCE_RELATIVE", "MARKERS_COMMON", "id.pairwise", "input.prep",
"CHROM_LOCUS", "NEW_MARKERS", "POP_ID_N", "INDIVIDUALS_N", "GT_N", "GT_IMP",
"INDIVIDUALS_REP", "KEEP", "GT_VCF_NUC", "INTEGERS", "GOF", "NR", "NV",
"A1_NUC", "A2_NUC", "RANK", "MAF_P", "MAF_L", "BAD_ID", "strata",
"SPLIT_VEC", "ALLELES_COUNT_OVERALL", "N_INV", "HS", "GIS", "PI_NEI",
"FQ_FILES", "PARAMETER", "SPLIT", "mtime", "REF_ALT", "ARTIFACT",
"HETEROZYGOSITY", "HOMOZYGOSITY", "LOCUS_NUMBER", "BLACKLIST_ARTIFACT",
"HAPLOTYPE", "MAX_NUMBER_SNP_LOCUS", "MEAN_NUMBER_SNP_LOCUS",
"NUMBER_LOCUS_4SNP", "SNP_LOCUS", "SNP_NUMBER", "BLACKLISTED_FLAG",
"BLACKLIST_USTACKS", "FILTERED", "FOR_CATALOG",
"HAPLOTYPE_NUMBER", "LOCUS_TOTAL", "LUMBERJACKSTACK_FLAG", "SEQUENCE",
"SEQ_TYPE", "BLACKLISTED", "BLACKLISTED_TOTAL", "BLACKLIST_CALL_HAPLOTYPE",
"BLACKLIST_CONFOUNDED_LOCI", "CATALOG_ID", "HOM_TO_UNK", "LNL_FILTERED_LOCI",
"LOG_LIKELIHOOD", "NUCS_CONVERTED", "NUC_NOT_CONVERTED", "PROPORTION",
"PRUNED_HAPLOTYPES", "PRUNED_HAPLOTYPES_RARE", "PRUNED_HAPLOTYPES_TREE",
"RXSTACKS", "SAMPLE_ID", "STATS", "TOTAL_NUCS", "TOTAL_NUCS_CONVERTED",
"UNK_TO_HET", "UNK_TO_HOM", "ALGORITHM", "CORR_VALUE", "FROM_TO",
"ORIG_VALUE", "SQL_ID", "mst", "rare_step_1", "RXSTACKS_MST",
"RXSTACKS_RARE_STEP_1", "BARCODES", "DESCRIPTION", "FILENAME", "LANES",
"LANES_SHORT", "LOWQUALITY", "LOW_QUALITY", "NORADTAG", "NO_RADTAG", "READS",
"REPLICATES", "RETAINED", "MISMATCH", "ALL_LOCUS", "INPUT", "MATCH_PERCENT",
"GENOTYPES", "TYPE", "PATTERN", "BANDS_OBS", "BANDS_EXP", "POLYMORPHIC",
"MISSING_BAND", "MEAN_LOG_LIKELIHOOD", "CHISQ", "GOF", "GOF_PVALUE",
"ONEMAP", "JOINMAP", "TOTAL_GENOTYPES", "MARKERS", "BLACKLIST_PSTACKS", "n"
)
)
}
|
## Matrix inversion is usually a costly computation and there may be some
## benefit to caching the inverse of a matrix rather than compute it repeatedly.
## The functions below can cache the inverse of a matrix.
## This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
  ## Wrap a matrix together with a cache slot for its inverse.
  ## Returns a list of accessor closures: set/get for the matrix,
  ## setinverse/getinverse for the cached inverse.
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # a new matrix invalidates the cached inverse
    },
    get = function() x,
    setinverse = function(inverse) cached_inverse <<- inverse,
    getinverse = function() cached_inverse
  )
}
## This function computes the inverse of the special "matrix" returned by
## makeCacheMatrix above. If the inverse has already been calculated (and the
## matrix has not changed), then the cachesolve should retrieve the inverse from
## the cache.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x' (a makeCacheMatrix object),
  ## reusing the cached inverse when one is available and computing + caching
  ## it otherwise. Extra arguments are forwarded to solve().
  cached <- x$getinverse()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
| /cachematrix.R | no_license | helen9215/ProgrammingAssignment2 | R | false | false | 1,282 | r | ## Matrix inversion is usually a costly computation and there may be some
## benefit to caching the inverse of a matrix rather than compute it repeatedly.
## The functions below can cache the inverse of a matrix.
## This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
  ## Wrap a matrix together with a cache slot for its inverse.
  ## Returns a list of accessor closures: set/get for the matrix,
  ## setinverse/getinverse for the cached inverse.
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # a new matrix invalidates the cached inverse
    },
    get = function() x,
    setinverse = function(inverse) cached_inverse <<- inverse,
    getinverse = function() cached_inverse
  )
}
## This function computes the inverse of the special "matrix" returned by
## makeCacheMatrix above. If the inverse has already been calculated (and the
## matrix has not changed), then the cachesolve should retrieve the inverse from
## the cache.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x' (a makeCacheMatrix object),
  ## reusing the cached inverse when one is available and computing + caching
  ## it otherwise. Extra arguments are forwarded to solve().
  cached <- x$getinverse()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
#' @title A function to create personalized KM curve for 1 group
#' @author Salil Deo
#' @description Provides KM curve for 1 group with CI band and also provides
#'   survival \% at the midpoint and at the end of follow-up.
#'
#' @param s survfit object
#' @param xlab x-axis label
#' @param ylab y-axis label
#' @param color string providing the fill color for the confidence band
#' @import ggplot2
#' @import broom
#' @seealso \code{\link[survival]{survfit}}
#' @return A ggplot2 object; it can be further modified if needed.
#' @examples \dontrun{
#' # do not run this
#' # needs my_theme already in .env
#' library(survival)
#' s = survfit(Surv(time, status) ~ 1, data = lung)
#' figure <- proc_kmcurve1(s = s, xlab = "follow-up", ylab = "proportion surviving",
#'                         color = "blue")
#' }
proc_kmcurve1 <- function(s, xlab, ylab, color) {
  # library() rather than require(): fail immediately if a package is missing
  library(ggplot2)
  library(broom)

  # Helper: convert a proportion to a percentage rounded to 2 decimals.
  proc_percent <- function(x) {
    round(x * 100, 2)
  }

  df <- tidy(s)           # tidy per-time summary of the survfit object
  finalt <- max(df$time)  # maximum follow-up time shown on the graph

  # KM curve with a shaded confidence band.
  g <- ggplot(data = df, aes(x = time, y = estimate)) +
    geom_line() +
    geom_ribbon(data = df, aes(ymax = conf.high, ymin = conf.low),
                alpha = 0.2, fill = color) +
    xlab(xlab) + ylab(ylab) + ylim(0, 1)

  # Label the survival estimates at the midpoint and the end of follow-up.
  t <- summary(s, times = c(finalt / 2, finalt))
  df2 <- data.frame(times = t$time, values = t$surv,
                    est = proc_percent(t$surv))
  g + geom_label(data = df2, aes(x = times, y = values, label = est))
}
| /R/proc_kmcurve1.R | no_license | svd09/Smisc | R | false | false | 1,625 | r | #' @title A function to create personalized KM curve for 1 group
#' @author Salil Deo
#' @description Provides KM curve for 1 group with CI band and also provides survival % at midpoint and end
#'
#' @param s survfit object
#' @param xlab x-axis label
#' @param ylab y-axis label
#' @import ggplot2
#' @import broom
#' @param color string providing color label
#' @seealso \code{\link[survival]{survfit}}
#' @return NULL
#' @examples \dontrun{
#' # do not run this
#' # needs my_theme already in .env
#' library(survival)
#' s = survfit(Surv(time, status) ~ 1, data = lung)
#' figure <- proc_kmcurve2(s = s, xlab = "follow-up", ylab = "proportion surviving",
#' color = "blue")
#' gets an object figure is a ggplot2 object; it can be further modified if needed.
#' }
proc_kmcurve1 <- function(s,xlab,ylab,color){
require(ggplot2)
require(broom)
df <- tidy(s) # get the tidy summary suvfit object
finalt <- max(df$time) # maximum time set for graph
# start preparing the graph
g <- ggplot(data = df, aes(x = time, y = estimate)) +
geom_line() +
geom_ribbon(data = df, aes(ymax = conf.high, ymin = conf.low), alpha = 0.2,fill = color)
g2 <- g + xlab(xlab) + ylab(ylab) + ylim(0,1)
g3 <- g2
g3
proc_percent <- function(x){
y <- round((x*100),2)
y
}
t <- summary(s,times = c(finalt/2,finalt))
times <- t$time
values <- t$surv
# est <- paste0((round(values,2)*100),"%",sep = "")
est <- proc_percent(values)
df2 <- data.frame(times, values,est)
g4 <- g3 + geom_label(data = df2, aes(x = times, y = values,
label = est))
g4
}
|
#------------------------------------------------------------------------------#
# Title:   Test of the "ajedrez" (chess) package
# Type:    Function tests
# Author:  Kenneth Roy Cabrera Torres
# Date:    Friday, June 24, 2016
# Purpose: Check that the package functions work as expected.
#------------------------------------------------------------------------------#
library(ajedrez)
# Show the positions a knight can jump to from (4, 4).
saltoCaballo(4, 4)
# Show the chess board with the knight at position (4, 4).
print(muestraTablero(4, 4))
# Show the chess board with the knight at position (1, 1).
print(muestraTablero(1, 1))
# Show the chess board with the knight at position (1, 5).
print(muestraTablero(1, 5))
| /_programacion-R/clases/prueba1.R | no_license | LabSCN-unalmed/labscn-unalmed.github.io | R | false | false | 818 | r | #------------------------------------------------------------------------------#
# Titulo: Prueba del paquete ajedrez
# Tipo: Prueba de funciones
# Nombre: Kenneth Roy Cabrera Torres
# Fecha: Viernes, 24 de junio de 2016
# Enunciado: Prueba para determinar si las funciones del paquete
# trabajan adecuadamente.
#------------------------------------------------------------------------------#
library(ajedrez)
# Muestra las posiciones a las cuales puede saltar un caballo.
saltoCaballo(4, 4)
# Muestra el tablero de ajedrez cuando el caballo está en la posición (4,4).
print(muestraTablero(4, 4))
# Muestra el tablero de ajedrez cuando el caballo está en la posición (1,1).
print(muestraTablero(1, 1))
# Muestra el tablero de ajedrez cuando el caballo está en la posición (1,5).
print(muestraTablero(1, 5))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_gen_AR.R
\name{data.gen.tar2}
\alias{data.gen.tar2}
\title{Generate predictor and response data from TAR2 model.}
\usage{
data.gen.tar2(nobs, ndim = 9, noise = 0.1)
}
\arguments{
\item{nobs}{The data length to be generated.}
\item{ndim}{The number of potential predictors (default is 9).}
\item{noise}{The white noise in the data}
}
\value{
A list of 2 elements: a vector of response (x), and a matrix of potential predictors (dp) with each column containing one potential predictor.
}
\description{
Generate predictor and response data from TAR2 model.
}
\examples{
# TAR2 model from paper with total 9 dimensions
data.tar2<-data.gen.tar2(500)
plot.ts(cbind(data.tar2$x,data.tar2$dp))
}
\references{
Sharma, A. (2000). Seasonal to interannual rainfall probabilistic forecasts for improved water supply management: Part 1 - A strategy for system predictor identification. Journal of Hydrology, 239(1-4), 232-239.
}
| /man/data.gen.tar2.Rd | no_license | zejiang-unsw/synthesis | R | false | true | 1,000 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_gen_AR.R
\name{data.gen.tar2}
\alias{data.gen.tar2}
\title{Generate predictor and response data from TAR2 model.}
\usage{
data.gen.tar2(nobs, ndim = 9, noise = 0.1)
}
\arguments{
\item{nobs}{The data length to be generated.}
\item{ndim}{The number of potential predictors (default is 9).}
\item{noise}{The white noise in the data}
}
\value{
A list of 2 elements: a vector of response (x), and a matrix of potential predictors (dp) with each column containing one potential predictor.
}
\description{
Generate predictor and response data from TAR2 model.
}
\examples{
# TAR2 model from paper with total 9 dimensions
data.tar2<-data.gen.tar2(500)
plot.ts(cbind(data.tar2$x,data.tar2$dp))
}
\references{
Sharma, A. (2000). Seasonal to interannual rainfall probabilistic forecasts for improved water supply management: Part 1 - A strategy for system predictor identification. Journal of Hydrology, 239(1-4), 232-239.
}
|
#' Energy usage data (unretrofitted) of 23 buildings.
#'
#' A dataset containing normalized (by square foot) energy usage (unretrofitted) data of 23 buildings.
#'
#' @format A data frame with 1023 rows and 8 variables:
#' \describe{
#' \item{bdbid}{building id (anonymized)}
#' \item{usage}{normalized energy usage per day per square foot (kWh for Electricity and BTU for fuel).}
#' \item{OAT}{monthly average outside air temperature in Fahrenheit}
#' \item{fiscal_year}{fiscal year}
#' \item{energy_type}{Energy type: Elec (Electricity) and Fuel}
#' \item{prepost}{this indicates whether energy usage data was collected during retrofit period or not. If the data is unretrofit, this column will be all ones. If the data is retrofit, this column will be: 1 for pre-retrofit, 2 for retrofit and 3 for post-retrofit. See \code{\link{retrofit_utility}} for more information about retrofitted data.}
#' \item{estimated}{this indicates whether energy usage data is estimated or actual data. 1 for estimated and 0 for actual.}
#' \item{end_date}{end date of the month. Format: yyyy-mm-d}
#' }
"unretrofit_utility"
#' Energy usage data (retrofitted) of 4 buildings
#'
#' A dataset containing energy usage (retrofitted) data of 4 buildings.
#' @format A data frame with 194 rows and 8 variables:
#' \describe{
#' \item{bdbid}{building id (anonymized)}
#' \item{usage}{energy usage per day (kWh for Electricity and BTU for fuel).}
#' \item{OAT}{monthly average outside air temperature in Fahrenheit}
#' \item{fiscal_year}{fiscal year}
#' \item{energy_type}{Energy type: Elec (Electricity) and Fuel}
#' \item{prepost}{this indicates whether energy usage data was collected during retrofit period or not. If the data is unretrofit, this column will be all ones. If the data is retrofit, this column will be: 1 for pre-retrofit, 2 for retrofit and 3 for post-retrofit. See \code{\link{unretrofit_utility}} for more information about unretrofitted data.}
#' \item{estimated}{this indicates whether energy usage data is estimated or actual data. 1 for estimated and 0 for actual.}
#' \item{end_date}{end date of the month. Format: yyyy-mm-d}
#' }
"retrofit_utility"
#' Normalized average monthly temperature
#'
#' A dataset containing normalized average monthly temperature (unit F) TMY3 from Laguardia airport.
#' @format A data frame with 12 rows and 2 variables:
#' \describe{
#' \item{month}{month}
#' \item{avg_temp}{normalized average monthly temperature in Fahrenheit}
#' }
"norm_temp_tmy3" | /R/data.R | permissive | cunybpl/bRema | R | false | false | 2,528 | r | #' Energy usage data (unretrofitted) of 23 buildings.
#'
#' A dataset containing nomarlized (by squarefoot) energy usage (unretrofitted) data of 23 builidings. Un
#'
#' @format A data frame with 1023 rows and 8 variables:
#' \describe{
#' \item{bdbid}{building id (anonymized)}
#' \item{usage}{normalized energy usage per day per square foot (kWh for Electricity and BTU for fuel).}
#' \item{OAT}{monthly average outside air temperature in Fahrenheit}
#' \item{fiscal_year}{fiscal year}
#' \item{energy_type}{Energy type: Elec (Electricity) and Fuel}
#' \item{prepost}{this indicates whether energy usage data was collected during retrofit period or not. If the data is unretrofit, this column will be all ones. If the data is retrofit, this column will be: 1 for pre-retrotit, 2 for retrofit and 3 for post-retrofit. See \code{\link{retrofit_utility}} for more information about retrofitted data.}
#' \item{estimated}{this indicates whether energy usage data is estimated or actual data. 1 for estimated and 0 for acutal.}
#' \item{end_date}{end date of the month. Format: yyyy-mm-d}
#' }
"unretrofit_utility"
#' Energy usage data (retrofitted) of 4 buildings
#'
#' A dataset containing energy usage (retrofitted) data of 23 builidings.
#' @format A data frame with 194 rows and 8 variables:
#' \describe{
#' \item{bdbid}{building id (anonymized)}
#' \item{usage}{energy usage per day (kWh for Electricity and BTU for fuel).}
#' \item{OAT}{monthly average outside air temperature in Fahrenheit}
#' \item{fiscal_year}{fiscal year}
#' \item{energy_type}{Energy type: Elec (Electricity) and Fuel}
#' \item{prepost}{this indicates whether energy usage data was collected during retrofit period or not. If the data is unretrofit, this column will be all ones. If the data is retrofit, this column will be: 1 for pre-retrotit, 2 for retrofit and 3 for post-retrofit. See \code{\link{unretrofit_utility}} for more information about unretrofitted data.}
#' \item{estimated}{this indicates whether energy usage data is estimated or actual data. 1 for estimated and 0 for acutal.}
#' \item{end_date}{end date of the month. Format: yyyy-mm-d}
#' }
"retrofit_utility"
#' Normalized average monthly temperature
#'
#' A dataset containing normalized average monthly temperature (unit F) TMY3 from Laguardia airport.
#' @format A data frame with 12 rows and 2 variables:
#' \describe{
#' \item{month}{month}
#' \item{avg_temp}{normalized average monthly temperature in Fahrenheit}
#' }
"norm_temp_tmy3" |
library(shiny)
library(maps)
data("us.cities")
# Upper-case column 2 of us.cities so its values compare consistently
# with the drop-down choices.
# NOTE(review): confirm column 2 is the intended field ("country.etc",
# the state abbreviation), since the choices below read that column by name.
us.cities[,2] <- toupper(us.cities[,2])
# Define UI for application: a state selector in the sidebar and
# tabbed output (capital / cities / about) in the main panel.
shinyUI(fluidPage(
  # Header or Title Panel
  titlePanel(title = h4("USA - States : Capital & Cities", align="center")),
  sidebarLayout(
    # Sidebar panel with the state drop-down.
    # BUG FIX: the original passed hr() as a stray positional argument to
    # selectInput(), where it was silently consumed as the `selected`
    # argument (a shiny.tag is not a valid selection); it has been removed.
    sidebarPanel(
      selectInput("State", "1. Select the State",
                  choices = sort(unique(us.cities$country.etc)))
    ),
    # Main panel: output IDs ("capital", "cities") are rendered by server.R.
    mainPanel(
      tabsetPanel(type="tab",
                  tabPanel("capital", verbatimTextOutput("capital")),
                  tabPanel("cities", tableOutput("cities")),
                  tabPanel("about", pre(includeText("include.txt")))
      )
    )
  )
)
) | /ui.R | no_license | joshipk/DevelopingDataProducts_Project_Part1 | R | false | false | 882 | r | library(shiny)
library(maps)
data("us.cities")
us.cities[,2] <- toupper(us.cities[,2])
# Define UI for application
shinyUI(fluidPage(
# Header or Title Panel
titlePanel(title = h4("USA - States : Capital & Cities", align="center")),
sidebarLayout(
# Sidebar panel
sidebarPanel(
selectInput("State", "1. Select the State", choices = sort(unique(us.cities$country.etc)), hr())
),
# Main Panel
mainPanel(
tabsetPanel(type="tab",
tabPanel("capital",verbatimTextOutput("capital")),
tabPanel("cities", tableOutput("cities")),
tabPanel("about", pre(includeText("include.txt")))
)
)
)
)
) |
#!/usr/bin/Rscript

# Project Euler problem 10: print the sum of all primes below two million.
#
# FIXES vs. the original:
#  * `<<-` at top level replaced with plain `<-` (identical effect; `<<-`
#    is a scoping anti-pattern outside functions).
#  * The per-number scalar loop with NA bookkeeping is replaced by a
#    vectorized Sieve of Eratosthenes, which only iterates candidates up
#    to sqrt(LIMIT) and crosses off multiples in bulk.
# The printed output is unchanged.
LIMIT <- 2e6
# nums[i] is TRUE iff i is prime: start optimistic, then knock out composites.
nums <- rep(TRUE, LIMIT)
nums[1] <- FALSE
for (num in 2:floor(sqrt(LIMIT))) {
  if (nums[num]) {
    # Multiples below num^2 were already removed by smaller prime factors.
    nums[seq(num * num, LIMIT, num)] <- FALSE
  }
}
# Sum as numeric (double) to avoid 32-bit integer overflow.
cat(sum(as.numeric(which(nums))), "\n")
| /r/010.r | no_license | darshan-/euler | R | false | false | 279 | r | #!/usr/bin/Rscript
LIMIT <<- 2e6
nums <<- rep(NA, LIMIT)
nums[1] <- FALSE
for (num in 2:LIMIT) {
if (!is.na(nums[num])) { next }
nums[num] <- TRUE
if (num < LIMIT / 2) {
nums[seq(num*2, LIMIT, num)] <- FALSE
}
}
cat(sum(as.numeric(which(nums))), "\n")
|
# prediction with gam
predict.scores.gam=function(Y,discretspace,map
,formula="~s(F1,2)+s(F2,2)+s(F1*F2,2)"
,pred.na=FALSE){
## map is a data.frame with F1 and F2 obtained after DR on the explained data Y
notespr= nbconsos=matrix(0,nrow(discretspace),ncol(discretspace))
regs=vector("list",ncol(Y))
# preference=array(0,dim=c(nrow(discretspace),ncol(discretspace),ncol(X)))
pred.conso=preference=matrix(0,nrow(discretspace),ncol(Y))
## Firts we preform all regressions
nb.NA=vector("list",ncol(Y))
pos.NA=vector("list",ncol(Y))
nbconsos=c()
for(j in 1:ncol(Y)){
map.reg=cbind.data.frame(Y[,j],map)
colnames(map.reg)[1]="Conso"
modele=as.formula(paste("Conso",formula))
regs[[j]]=gam(modele,data=map.reg)
pred.conso[,j]=predict(regs[[j]],newdata=discretspace)
if (pred.na==TRUE) {
x=pred.conso[,j]
x[x<0]=NA
x[x>10]=NA
pred.conso[,j]=x
x=as.data.frame(x)
nb.NA[[j]] <- apply(x,2,function(a) sum(is.na(a)))# nbre de NA pour chaque conso
pos.NA[[j]]=which(is.na(x))# le point qui contient NA pour chaque consom
occur.NA <- unlist(pos.NA)
occur.NA=as.vector(occur.NA)
occur.NA=as.data.frame(table(occur.NA))# nbre de NA en chaque point du plan pour tous les consos
}
else {
nb.NA=0
pos.NA=0
occur.NA=0
}
preference[,j]=(pred.conso[,j]> mean(Y[,j]))
}
return(list(regression=regs,pred.conso=pred.conso,preference=preference,nb.NA=nb.NA,
pos.NA=pos.NA, occur.NA=occur.NA))
}
| /R/GAM_model.R | no_license | garthtarr/SensMap | R | false | false | 1,586 | r | # prediction with gam
predict.scores.gam=function(Y,discretspace,map
,formula="~s(F1,2)+s(F2,2)+s(F1*F2,2)"
,pred.na=FALSE){
## map is a data.frame with F1 and F2 obtained after DR on the explained data Y
notespr= nbconsos=matrix(0,nrow(discretspace),ncol(discretspace))
regs=vector("list",ncol(Y))
# preference=array(0,dim=c(nrow(discretspace),ncol(discretspace),ncol(X)))
pred.conso=preference=matrix(0,nrow(discretspace),ncol(Y))
## Firts we preform all regressions
nb.NA=vector("list",ncol(Y))
pos.NA=vector("list",ncol(Y))
nbconsos=c()
for(j in 1:ncol(Y)){
map.reg=cbind.data.frame(Y[,j],map)
colnames(map.reg)[1]="Conso"
modele=as.formula(paste("Conso",formula))
regs[[j]]=gam(modele,data=map.reg)
pred.conso[,j]=predict(regs[[j]],newdata=discretspace)
if (pred.na==TRUE) {
x=pred.conso[,j]
x[x<0]=NA
x[x>10]=NA
pred.conso[,j]=x
x=as.data.frame(x)
nb.NA[[j]] <- apply(x,2,function(a) sum(is.na(a)))# nbre de NA pour chaque conso
pos.NA[[j]]=which(is.na(x))# le point qui contient NA pour chaque consom
occur.NA <- unlist(pos.NA)
occur.NA=as.vector(occur.NA)
occur.NA=as.data.frame(table(occur.NA))# nbre de NA en chaque point du plan pour tous les consos
}
else {
nb.NA=0
pos.NA=0
occur.NA=0
}
preference[,j]=(pred.conso[,j]> mean(Y[,j]))
}
return(list(regression=regs,pred.conso=pred.conso,preference=preference,nb.NA=nb.NA,
pos.NA=pos.NA, occur.NA=occur.NA))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iam_operations.R
\name{iam_create_instance_profile}
\alias{iam_create_instance_profile}
\title{Creates a new instance profile}
\usage{
iam_create_instance_profile(InstanceProfileName, Path)
}
\arguments{
\item{InstanceProfileName}{[required] The name of the instance profile to create.
This parameter allows (through its \href{https://en.wikipedia.org/wiki/Regex}{regex pattern}) a string of characters
consisting of upper and lowercase alphanumeric characters with no
spaces. You can also include any of the following characters: \\_+=,.@-}
\item{Path}{The path to the instance profile. For more information about paths, see
\href{https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html}{IAM Identifiers}
in the \emph{IAM User Guide}.
This parameter is optional. If it is not included, it defaults to a
slash (/).
This parameter allows (through its \href{https://en.wikipedia.org/wiki/Regex}{regex pattern}) a string of characters
consisting of either a forward slash (/) by itself or a string that must
begin and end with forward slashes. In addition, it can contain any
ASCII character from the ! (\code{U+0021}) through the DEL character
(\verb{U+007F}), including most punctuation characters, digits, and upper and
lowercased letters.}
}
\description{
Creates a new instance profile. For information about instance profiles,
go to \href{https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2_instance-profiles.html}{About Instance Profiles}.
The number and size of IAM resources in an AWS account are limited. For
more information, see \href{https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html}{IAM and STS Quotas}
in the \emph{IAM User Guide}.
}
\section{Request syntax}{
\preformatted{svc$create_instance_profile(
InstanceProfileName = "string",
Path = "string"
)
}
}
\examples{
\dontrun{
# The following command creates an instance profile named Webserver that
# is ready to have a role attached and then be associated with an EC2
# instance.
svc$create_instance_profile(
InstanceProfileName = "Webserver"
)
}
}
\keyword{internal}
| /cran/paws.security.identity/man/iam_create_instance_profile.Rd | permissive | sanchezvivi/paws | R | false | true | 2,196 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iam_operations.R
\name{iam_create_instance_profile}
\alias{iam_create_instance_profile}
\title{Creates a new instance profile}
\usage{
iam_create_instance_profile(InstanceProfileName, Path)
}
\arguments{
\item{InstanceProfileName}{[required] The name of the instance profile to create.
This parameter allows (through its \href{https://en.wikipedia.org/wiki/Regex}{regex pattern}) a string of characters
consisting of upper and lowercase alphanumeric characters with no
spaces. You can also include any of the following characters: \\_+=,.@-}
\item{Path}{The path to the instance profile. For more information about paths, see
\href{https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html}{IAM Identifiers}
in the \emph{IAM User Guide}.
This parameter is optional. If it is not included, it defaults to a
slash (/).
This parameter allows (through its \href{https://en.wikipedia.org/wiki/Regex}{regex pattern}) a string of characters
consisting of either a forward slash (/) by itself or a string that must
begin and end with forward slashes. In addition, it can contain any
ASCII character from the ! (\code{U+0021}) through the DEL character
(\verb{U+007F}), including most punctuation characters, digits, and upper and
lowercased letters.}
}
\description{
Creates a new instance profile. For information about instance profiles,
go to \href{https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2_instance-profiles.html}{About Instance Profiles}.
The number and size of IAM resources in an AWS account are limited. For
more information, see \href{https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html}{IAM and STS Quotas}
in the \emph{IAM User Guide}.
}
\section{Request syntax}{
\preformatted{svc$create_instance_profile(
InstanceProfileName = "string",
Path = "string"
)
}
}
\examples{
\dontrun{
# The following command creates an instance profile named Webserver that
# is ready to have a role attached and then be associated with an EC2
# instance.
svc$create_instance_profile(
InstanceProfileName = "Webserver"
)
}
}
\keyword{internal}
|
call(
a,
b, c
)
call(
a,
b,
c
)
call(a, )
call(a, )
call(
a,
)
| /tests/testthat/line_breaks_and_other/comma-out.R | no_license | miraisolutions/styler | R | false | false | 78 | r | call(
a,
b, c
)
call(
a,
b,
c
)
call(a, )
call(a, )
call(
a,
)
|
###########################################################################################
# Script name : desc_stat.R
# Created By : Rebecca Holsapple
# Created Dt : 07/26/2018
# Description : R code creates descriptive statistics and displays them in a table
# Source File : This script uses the source file empdata.tidy
###########################################################################################
##@knitr desc_stat
##use library(psych) for describe function to get descriptive statistics
# Keep only the demographic and pay-rate columns for the summary table.
empdata.tidy.des<- (empdata.tidy[,c("Age","Gender","Education","MaritalStat",
"DailyRate", "HourlyRate", "MonthlyInc", "MonthlyRate",
"JobRole")])
# Columns 3:5, 8:10, 13 of psych::describe output (mean/sd/median, min/max/range, se).
des <- describe(empdata.tidy.des)[,c(3:5, 8:10, 13)]
# NOTE(review): `digit` relies on R's partial argument matching for `digits`
# in print.psych -- spell it out as `digits` when next touching this line.
empdata.des <- print(des, digit=2)
##use library(kable) for a nice table of the descriptive statistics
kable(empdata.des, format = "markdown")
##use library(ggplot2) for histogram of the Employee Monthly Income
# NOTE(review): the x-axis label says "Thousands of Dollars" but the breaks
# (1000-20000) are raw dollars -- confirm which is intended.
ggplot(data=empdata.tidy, aes(MonthlyInc)) +
geom_histogram(breaks=seq(1000, 20000, by = 2000),
col="red",
fill="green",
alpha=.2) +
labs(title="Monthly Income of Employees", x="Monthly Income (In Thousands of Dollars)", y="Count")+
theme(plot.title = element_text(hjust = 0.5))
##Histogram of the Employee Ages to visually describe the company population
ggplot(data=empdata.tidy, aes(Age)) +
geom_histogram(breaks=seq(18, 62, by = 2),
col="blue",
fill="purple",
alpha=.2) +
labs(title="Age of Employees", x="Age (In Years)", y="Count")+
theme(plot.title = element_text(hjust = 0.5))
##Create Frequency tables for Gender, Education and Occupation (JobRoles) Using library(summarytools)
summarytools::freq(empdata.tidy$Gender, order = "freq")
# Count employees at each of the five education levels (coded "1".."5").
edu <- matrix(c(sum(empdata.tidy$Education == "1"),
sum(empdata.tidy$Education == "2"),
sum(empdata.tidy$Education == "3"),
sum(empdata.tidy$Education == "4"),
sum(empdata.tidy$Education == "5")),ncol=1,byrow=TRUE)
rownames(edu) <- c("Below College", "College", "Bachelor", "Master", "Doctor")
colnames(edu) <- c("Sums")
edu
summarytools::freq(empdata.tidy$JobRole, order = "freq")
##Management count and table: job roles held at the top job level ("5").
manage <- empdata.tidy %>%
filter(empdata.tidy$JobLevel=="5")
management <- table(as.character(manage$JobRole))
addmargins(management)
| /R/desc_stat.R | no_license | tanvi-arora/talentmgmt | R | false | false | 2,551 | r | ###########################################################################################
# Script name : desc_stat.R
# Created By : Rebecca Holsapple
# Created Dt : 07/26/2018
# Description : R code creates descriptive statistics and displays them in a table
# Source File : This script uses the source file empdata.tidy
###########################################################################################
##@knitr desc_stat
##use library(psych) for describe function to get descriptive statistics
empdata.tidy.des<- (empdata.tidy[,c("Age","Gender","Education","MaritalStat",
"DailyRate", "HourlyRate", "MonthlyInc", "MonthlyRate",
"JobRole")])
des <- describe(empdata.tidy.des)[,c(3:5, 8:10, 13)]
empdata.des <- print(des, digit=2)
##use library(kable) for a nice table of the descriptive statistics
kable(empdata.des, format = "markdown")
##use library(ggplot2) for histogram of the Employee Monthly Income
ggplot(data=empdata.tidy, aes(MonthlyInc)) +
geom_histogram(breaks=seq(1000, 20000, by = 2000),
col="red",
fill="green",
alpha=.2) +
labs(title="Monthly Income of Employees", x="Monthly Income (In Thousands of Dollars)", y="Count")+
theme(plot.title = element_text(hjust = 0.5))
##Historgram of the Employee Ages visually describe the company population
ggplot(data=empdata.tidy, aes(Age)) +
geom_histogram(breaks=seq(18, 62, by = 2),
col="blue",
fill="purple",
alpha=.2) +
labs(title="Age of Employees", x="Age (In Years)", y="Count")+
theme(plot.title = element_text(hjust = 0.5))
##Create Frequency tables for Gender, Education and Occupation (JobRoles) Using library(summarytools)
summarytools::freq(empdata.tidy$Gender, order = "freq")
edu <- matrix(c(sum(empdata.tidy$Education == "1"),
sum(empdata.tidy$Education == "2"),
sum(empdata.tidy$Education == "3"),
sum(empdata.tidy$Education == "4"),
sum(empdata.tidy$Education == "5")),ncol=1,byrow=TRUE)
rownames(edu) <- c("Below College", "College", "Bachelor", "Master", "Doctor")
colnames(edu) <- c("Sums")
edu
summarytools::freq(empdata.tidy$JobRole, order = "freq")
##Management count and table
manage <- empdata.tidy %>%
filter(empdata.tidy$JobLevel=="5")
management <- table(as.character(manage$JobRole))
addmargins(management)
|
#Q1
file.info("en_US.blogs.txt")$size/1024/2
#102617.2
#Q2
twitter<-readLines("en_US.twitter.txt")
length(twitter)
#2360148
#Q3
max(nchar(twitter))
#140
blogs<-readLines("en_US.blogs.txt")
max(nchar(blogs))
#40833
news<-readLines("en_US.news.txt")
max(nchar(news) )
#11384
#Q4
love<-sum(grepl("love",twitter))
hate<-sum(grepl("hate",twitter))
love_count/hate_count
#4.108592
#Q5
biostats<-grep("biostats",twitter)
twitter[biostats]
# I know how you feel..i have biostats on tuesday and i have yet to
#study=/"
#Q6
sum(grepl("A computer once beat me at chess, but it was no match for me at kickboxing", twitter))
| /quiz1.R | no_license | wenlarry/dsCapstone | R | false | false | 634 | r |
#Q1
file.info("en_US.blogs.txt")$size/1024/2
#102617.2
#Q2
twitter<-readLines("en_US.twitter.txt")
length(twitter)
#2360148
#Q3
max(nchar(twitter))
#140
blogs<-readLines("en_US.blogs.txt")
max(nchar(blogs))
#40833
news<-readLines("en_US.news.txt")
max(nchar(news) )
#11384
#Q4
love<-sum(grepl("love",twitter))
hate<-sum(grepl("hate",twitter))
love_count/hate_count
#4.108592
#Q5
biostats<-grep("biostats",twitter)
twitter[biostats]
# I know how you feel..i have biostats on tuesday and i have yet to
#study=/"
#Q6
sum(grepl("A computer once beat me at chess, but it was no match for me at kickboxing", twitter))
|
# Write out LaTeX for nominal roll
nominalRoll <- function(course, date, rooms, institution="THE UNIVERSITY OF WESTERN ONTARIO",
file = "nominal.tex") {
out <- file("nominal.tex", "wt")
on.exit(close(out))
cat('
\\documentclass[12pt]{article}
\\usepackage{fancyhdr}
\\renewcommand{\\arraystretch}{1.4}
\\setlength{\\oddsidemargin}{-0.5in}
\\setlength{\\evensidemargin}{-0.5in}
\\setlength{\\textwidth}{7.5in}
\\setlength{\\topmargin}{-0.5in}
\\setlength{\\textheight}{9in}
\\pagestyle{fancy}
\\lhead{', institution, '}
\\rhead{NOMINAL ROLL}
\\cfoot{}
\\begin{document}
', file=out)
for (room in names(rooms)) {
for (row in seq_len(rooms[room])) {
cat('\\noindent ', course, '\\hfill', date, '\\hfill', room,
'\\vspace{0.2in} \\\\
Proctor: \\underline{\\hfill}
\\vspace{0.2in} \\\\
Row number in exam room: ', row, '\\\\
\\ \\\\
\\vspace{0.2in}\\begin{tabular}{|c|c|c|c|c|c|}
\\hline
Seat & Print name & Signature & Student No. & ID & EXAM \\\\
& \\hspace{1.8in} & \\hspace{1.8in} & \\hspace{1.8in} & chk & recd \\\\
\\hline
', file=out)
for (i in 1:20)
cat(i, ' & & & & & \\\\
\\hline
', file=out)
cat('
\\end{tabular}\\\\
\\vspace{0.1in}Note irregularities here:
\\newpage', file=out)
}
}
cat('\\end{document}', file=out)
}
# Test code:
# nominalRoll("SS 1024A", "October 17, 2011", c("WSC 248"=1)) | /Sweavetest/R/nominal.R | no_license | dmurdoch/Sweavetest | R | false | false | 1,391 | r | # Write out LaTeX for nominal roll
# Write out LaTeX for a nominal roll (exam sign-in sheets), one page per
# seating row per room.
#
# course      : course label printed on every page.
# date        : exam date string printed on every page.
# rooms       : named numeric vector; names are room labels, values are the
#               number of seating rows (pages) to generate for that room.
# institution : institution name printed in the page header.
# file        : path of the LaTeX file to create (default "nominal.tex").
nominalRoll <- function(course, date, rooms, institution="THE UNIVERSITY OF WESTERN ONTARIO",
                        file = "nominal.tex") {
  # BUG FIX: the original hard-coded file("nominal.tex", "wt") and silently
  # ignored the `file` argument; honour the argument instead.  (Calling
  # file() still resolves to base::file here, because R skips non-function
  # bindings when looking up a name used as a function.)
  out <- file(file, "wt")
  on.exit(close(out))
  # LaTeX preamble plus fancyhdr header/footer setup.
  cat('
\\documentclass[12pt]{article}
\\usepackage{fancyhdr}
\\renewcommand{\\arraystretch}{1.4}
\\setlength{\\oddsidemargin}{-0.5in}
\\setlength{\\evensidemargin}{-0.5in}
\\setlength{\\textwidth}{7.5in}
\\setlength{\\topmargin}{-0.5in}
\\setlength{\\textheight}{9in}
\\pagestyle{fancy}
\\lhead{', institution, '}
\\rhead{NOMINAL ROLL}
\\cfoot{}
\\begin{document}
', file=out)
  # One page per (room, row): page header, 20-seat sign-in table, notes area.
  for (room in names(rooms)) {
    for (row in seq_len(rooms[room])) {
      cat('\\noindent ', course, '\\hfill', date, '\\hfill', room,
          '\\vspace{0.2in} \\\\
Proctor: \\underline{\\hfill}
\\vspace{0.2in} \\\\
Row number in exam room: ', row, '\\\\
\\ \\\\
\\vspace{0.2in}\\begin{tabular}{|c|c|c|c|c|c|}
\\hline
Seat & Print name & Signature & Student No. & ID & EXAM \\\\
& \\hspace{1.8in} & \\hspace{1.8in} & \\hspace{1.8in} & chk & recd \\\\
\\hline
', file=out)
      # 20 numbered, otherwise empty seat rows.
      for (i in 1:20)
        cat(i, ' & & & & & \\\\
\\hline
', file=out)
      cat('
\\end{tabular}\\\\
\\vspace{0.1in}Note irregularities here:
\\newpage', file=out)
    }
  }
  cat('\\end{document}', file=out)
}
cat('\\end{document}', file=out)
}
# Test code:
# nominalRoll("SS 1024A", "October 17, 2011", c("WSC 248"=1)) |
#
#
# This is the user-interface definition of a Shiny web application.
#
# Purpose: This Shiny application uses the mtcars data set to predict the miles per gallon (MPG)
# based on the users input speed which is used to determine the predicted gear of the car to
# perform the calculation.
library(shiny)
# Define UI for application
shinyUI(fluidPage(
# Application title
titlePanel("Predict MPG based on Forward Speed/Gear Ratio"),
# Sidebar with a slider input for the speed (10-70), two checkboxes to
# toggle the models, and text outputs for the two predictions.
sidebarLayout(
sidebarPanel(
sliderInput("sliderSpeed",
"What is the Speed of the car?",
min = 10,
max = 70,
value = 10),
checkboxInput("showModel1", "Show/Hide Model 1 (Red)", value = TRUE),
checkboxInput("showModel2", "Show/Hide Model 2 (Blue)", value = TRUE),
# submitButton defers all input changes until "Submit" is pressed.
submitButton("Submit"),
h4("Model 1 - Predicted Miles Per Gallon based on Speed/Gear Ratio:"),
# Output IDs "pred1"/"pred2"/"Plot1"/"Documentation" are presumably
# rendered by the companion server.R -- verify against that file.
textOutput("pred1"),
h4("Model 2 - Predicted Mile Per Gallon based on One Gear Upshift Speed/Gear Ratio:"),
textOutput("pred2")
),
# Show a plot of the generated distribution
mainPanel(
tabsetPanel(
tabPanel("Plot", plotOutput("Plot1")),
tabPanel("Documentation", verbatimTextOutput("Documentation"))
)
)
)
)
| /ui.R | no_license | harmondmm/DevelopingDataProductsShinyApplication | R | false | false | 1,462 | r | #
# This is the user-interface definition of a Shiny web application.
#
# Purpose: This Shiny application uses the mtcars data set to predict the miles per gallon (MPG)
# based on the users input speed which is used to determine the predicted gear of the car to
# perform the calculation.
library(shiny)
# Define UI for application
shinyUI(fluidPage(
# Application title
titlePanel("Predict MPG based on Forward Speed/Gear Ratio"),
# Sidebar with a slider input
sidebarLayout(
sidebarPanel(
sliderInput("sliderSpeed",
"What is the Speed of the car?",
min = 10,
max = 70,
value = 10),
checkboxInput("showModel1", "Show/Hide Model 1 (Red)", value = TRUE),
checkboxInput("showModel2", "Show/Hide Model 2 (Blue)", value = TRUE),
submitButton("Submit"),
h4("Model 1 - Predicted Miles Per Gallon based on Speed/Gear Ratio:"),
textOutput("pred1"),
h4("Model 2 - Predicted Mile Per Gallon based on One Gear Upshift Speed/Gear Ratio:"),
textOutput("pred2")
),
# Show a plot of the generated distribution
mainPanel(
tabsetPanel(
tabPanel("Plot", plotOutput("Plot1")),
tabPanel("Documentation", verbatimTextOutput("Documentation"))
)
)
)
)
)
|
#!/usr/bin/env Rscript
#
# Created by: Peichen Li
# Last Updated: 07/31/2021
#
# Purpose:
#
# This Rscript file is for getting 79 trades for each Stock in daily TAQ
# Start from 9:30AM, Pick the last trade for each 5-min time interval, until the last trade for the 3:50PM - 3:55PM time interval
# For the last few minutes (3:55PM - 4:05PM), we pick two trades
# One is the Closing Option (the first trade whose TR_SCOND contains character "6") and the other is the trade before the Closing Option
# Thus 79 trades in total
#
# Inputs:
#
# 1. NYSE daily TAQ data "ctm_YYYYMMDD.sas7bdat" (SAS), 2012 to 2020
# 2. "taq_location_HalfTrading.csv"
#
# Outputs:
#
# taq_79 "ctm_YYYYMMDD.csv"
#
# Steps:
# 1. Import the daily TAQ data of SAS format
# 2. Process last 5 min (before_co + closing_options = 2 trades)
# 3. Process 9:30am - 3:55pm (77)
# 4. taq_all = 79 rows, including variables:
# 5. Date | SYM_ROOT | TIME_M | Index | PRICE | SIZE | Volume_5min
# 6. fwrite to csv
# Process half day trading (ends around 13:00)
# Generally 0703, Black Friday and 1224 for each year
# Method: align to the right and fill empty with NA
# This is for taq_79
# Append_taq_20 should be the same (but only rebalance days == half trading days)
# Dependencies: haven (SAS import), data.table (fast I/O and [i, j, by]),
# tidyverse (dplyr/tidyr verbs used in the pipeline below).
library(haven)
library(data.table)
library(tidyverse)
# Single command-line argument: a 1-based row index into the
# half-trading-day file list.
args <- commandArgs(trailingOnly = TRUE)
# taq_location_HalfTrading: CSV listing the SAS file location for each half trading day.
file_loc = read.csv('/ocean/projects/ses190002p/peichen3/taq_location_HalfTrading.csv')
file_sas = toString(file_loc$file_location[as.numeric(args)])
# Extract the "ctm_YYYYMMDD" stem to name the output CSV.
file_name <- regmatches(file_sas, regexpr("ctm_[0-9]+", file_sas))
# NOTE(review): file_csv is a script-level global that half_taq_5min_79()
# reads from its enclosing environment rather than taking as a parameter.
file_csv = paste("/ocean/projects/ses190002p/peichen3/taq_79/", file_name, ".csv", sep="")
# Define function
# Build the 79-trade 5-minute series for one HALF trading day (close ~13:00).
#
# file : path to a daily TAQ SAS file ("ctm_YYYYMMDD.sas7bdat").
#
# Side effects: writes the result to the global `file_csv` path (fwrite)
# and stop()s if the file turns out to be a complete trading day.
# Returns 1 on success.
# Half-day rows are aligned to the RIGHT of the regular-day grid
# (slots for the missing afternoon are left as NA by the final left_join).
half_taq_5min_79 <- function(file) {
############################
# 1. Process the daily TAQ #
############################
# Import data and variables; keep only records with an empty SYM_SUFFIX.
DATE = read_sas(file, n_max = 1)$DATE
taq <- as.data.table(read_sas(file, col_select = c("TIME_M", "SYM_ROOT", "EX", "SYM_SUFFIX", "SIZE", "PRICE", "TR_SCOND")))[SYM_SUFFIX == "", ]
# Calculate "TIME_5min_nth": index of the 5-minute (300 s) interval since midnight.
taq$TIME_5min_nth <- as.numeric(unlist(taq$TIME_M)) %/% 300
# Calculate "TIME_5min_position": offset in seconds within the 5-minute interval.
taq$TIME_5min_position <- as.numeric(unlist(taq$TIME_M)) %% 300
# Take out closing options (TR_SCOND contains "6"); keep the largest-size one per symbol.
closing_options <- taq[grepl("6", TR_SCOND) == TRUE] %>%
group_by(SYM_ROOT) %>%
arrange(-SIZE, .by_group = TRUE) %>%
slice(1)
# Guard: all closing options must land before interval 190 (~15:50);
# otherwise this is a complete trading day and the script must not run.
if (all(closing_options$TIME_5min_nth < 190)) {print("Half trading day, continue!")
} else { # complete trading day -> abort
stop("Complete trading day, stop!")
}
# Release
rm(closing_options)
gc()
###########################
# 2. Process last 5min #
###########################
# taq after 12:55pm (interval 155):
# label with closing_option
# label with first_closing_option
# label with id_within_tick
# Align to right (the half-day close plays the role of the 4pm close)
taq_post1255_label <- as.data.table(taq)[TIME_5min_nth >= 155, ] %>%
mutate(closing_option = ifelse(grepl("6", TR_SCOND), 1, 0)) %>%
group_by(SYM_ROOT) %>%
arrange(as.numeric(unlist(TIME_M)), .by_group = TRUE) %>%
mutate(first_closing_option = closing_option== 1 & !duplicated(closing_option == 1)) %>%
mutate(id_within_tick = row_number())
# Trade just BEFORE the first closing option -> slot 191 (analogue of 3:55pm).
before_closing_options <- as.data.table(taq_post1255_label)[,if(1 %in% first_closing_option) .SD[1:min(which(1==first_closing_option)) - 1],by=SYM_ROOT] %>%
group_by(SYM_ROOT) %>%
arrange(as.numeric(unlist(TIME_M)), .by_group = TRUE) %>%
mutate(Volume_5min = sum(SIZE)) %>%
slice(n()) %>%
mutate(TIME_5min_nth = 191) %>%
select(-closing_option, -first_closing_option, -id_within_tick)
# The closing option itself -> slot 192 (analogue of 4:00pm).
# NOTE(review): the first mutate(Volume_5min = sum(SIZE)) is overwritten
# by mutate(Volume_5min = SIZE) two lines later -- dead computation,
# confirm which is intended before simplifying.
closing_options <- as.data.table(taq_post1255_label)[,if(1 %in% first_closing_option) .SD[1:min(which(TRUE==first_closing_option))],by=SYM_ROOT] %>%
group_by(SYM_ROOT) %>%
arrange(as.numeric(unlist(TIME_M)), .by_group = TRUE) %>%
mutate(Volume_5min = sum(SIZE)) %>%
slice(n()) %>%
mutate(TIME_5min_nth = 192) %>%
mutate(Volume_5min = SIZE) %>%
select(-closing_option, -first_closing_option, -id_within_tick)
last_5min <- rbind(before_closing_options, closing_options)
# Release
rm(taq_post1255_label)
rm(before_closing_options)
rm(closing_options)
gc()
#################################
## 3. Process 9:30am - 12:55pm ##
#################################
# create the fixed grid of 79 time slots (TIME_5min_nth 114..192) per ticker
taq_5min_79 <- as.data.table(taq) %>%
select(SYM_ROOT) %>%
group_by(SYM_ROOT) %>%
slice(1) %>%
mutate(freq = 79) %>%
slice(rep(seq_len(n()), freq)) %>%
select(-freq) %>%
group_by(SYM_ROOT) %>%
mutate(Index = row_number()) %>%
mutate(TIME_5min_nth = Index + 113)
# Filter for intraday trading (after 9:30am and before 12:55pm, intervals 114-154)
# Group by Ticker and each 5min time interval
# and then pick the last trade record (largest within-interval offset)
# Add trading volume per interval
taq <- as.data.table(taq)[TIME_5min_nth %in% 114:154, ] %>%
group_by(SYM_ROOT, TIME_5min_nth) %>%
mutate(Volume_5min = sum(SIZE)) %>%
arrange(-TIME_5min_position, .by_group = TRUE) %>%
slice(1) %>%
ungroup()
# Align to right (e.g. 12:55pm maps to the 3:55pm slot, so TIME_5min_nth +36 )
taq$TIME_5min_nth <- taq$TIME_5min_nth + 36
#######################################
###### taq all day 79 x 5min #####
#######################################
# Append the intraday trades and the last-5-minute pair
taq_all <- rbind(taq, last_5min) %>%
group_by(SYM_ROOT) %>%
arrange(as.numeric(unlist(TIME_M)), .by_group = TRUE) %>%
select(SYM_ROOT, TIME_M, TIME_5min_nth, PRICE, SIZE, Volume_5min) %>%
as.data.table()
# Left join onto the full 79-slot grid; slots with no trade stay NA.
taq_79 <- left_join(taq_5min_79, taq_all, by = c("SYM_ROOT", "TIME_5min_nth"), suffix = c(".x", ".y")) %>%
mutate(Date = DATE) %>%
select(Date, SYM_ROOT, TIME_M, TIME_5min_nth, Index, PRICE, SIZE, Volume_5min)
# Release
rm(taq)
rm(last_5min)
gc()
# write to disk (file_csv is a script-level global, see setup above)
fwrite(taq_79, file = file_csv, row.names = FALSE)
print(paste(file_csv, "written", sep=" "))
rm(taq_all, taq_5min_79)
gc()
return(1)
}
# Apply function
half_taq_5min_79(file_sas)
| /half_taq_5min_79.r | no_license | peichenli3/R-Coding-Sample | R | false | false | 6,452 | r | #!/usr/bin/env Rscript
#
# Created by: Peichen Li
# Last Updated: 07/31/2021
#
# Purpose:
#
# This Rscript file is for getting 79 trades for each Stock in daily TAQ
# Start from 9:30AM, Pick the last trade for each 5-min time interval, until the last trade for the 3:50PM - 3:55PM time interval
# For the last few minutes (3:55PM - 4:05PM), we pick two trades
# One is the Closing Option (the first trade whose TR_SCOND contains character "6") and the other is the trade before the Closing Option
# Thus 79 trades in total
#
# Inputs:
#
# 1. NYSE daily TAQ data "ctm_YYYYMMDD.sas7bdat" (SAS), 2012 to 2020
# 2. "taq_location_HalfTrading.csv"
#
# Outputs:
#
# taq_79 "ctm_YYYYMMDD.csv"
#
# Steps:
# 1. Import the daily TAQ data of SAS format
# 2. Process last 5 min (before_co + closing_options = 2 trades)
# 3. Process 9:30am - 3:55pm (77)
# 4. taq_all = 79 rows, including variables:
# 5. Date | SYM_ROOT | TIME_M | Index | PRICE | SIZE | Volume_5min
# 6. fwrite to csv
# Process half day trading (ends around 13:00)
# Generally 0703, Black Friday and 1224 for each year
# Method: align to the right and fill empty with NA
# This is for taq_79
# Append_taq_20 should be the same (but only rebalance days == half trading days)
library(haven)
library(data.table)
library(tidyverse)
# Command-line argument: the 1-based row index into the half-trading-day
# location table; selects which daily TAQ SAS file this batch job processes.
args <- commandArgs(trailingOnly = TRUE)
# taq_location_HalfTrading
# NOTE(review): hard-coded cluster paths below - confirm they exist on the
# target system before running.
file_loc = read.csv('/ocean/projects/ses190002p/peichen3/taq_location_HalfTrading.csv')
file_sas = toString(file_loc$file_location[as.numeric(args)])
# Extract the "ctm_YYYYMMDD" stem from the SAS path to name the output CSV.
file_name <- regmatches(file_sas, regexpr("ctm_[0-9]+", file_sas))
file_csv = paste("/ocean/projects/ses190002p/peichen3/taq_79/", file_name, ".csv", sep="")
# Define function
# Build the 79-trade, 5-minute record for ONE half trading day from a daily
# TAQ "ctm_YYYYMMDD.sas7bdat" file and write it to `file_csv`.
#
# file: path to the daily TAQ SAS file.
# Returns 1 on success; stops with an error if the file turns out to be a
# complete (full-length) trading day.
# Side effects: writes `taq_79` via fwrite() to the global `file_csv` path
# and prints a confirmation message.
half_taq_5min_79 <- function(file) {
############################
# 1. Process the daily TAQ #
############################
# Import data and variables
# DATE is taken from the first record only (constant within a daily file).
DATE = read_sas(file, n_max = 1)$DATE
# Keep only records with an empty symbol suffix.
taq <- as.data.table(read_sas(file, col_select = c("TIME_M", "SYM_ROOT", "EX", "SYM_SUFFIX", "SIZE", "PRICE", "TR_SCOND")))[SYM_SUFFIX == "", ]
# Calculate "TIME_5min_nth", which stands for n-th 5min time interval
# NOTE(review): assumes as.numeric(TIME_M) yields seconds since midnight
# (300 s = 5 min); interval 114 then corresponds to 9:30am - confirm the
# time encoding produced by haven for this TAQ vintage.
taq$TIME_5min_nth <- as.numeric(unlist(taq$TIME_M)) %/% 300
# Calculate "TIME_5min_remainder", which stands for the position in each 5min time interval
taq$TIME_5min_position <- as.numeric(unlist(taq$TIME_M)) %% 300
# Take out closing options (TR_COND contains "6" & the larger size one)
closing_options <- taq[grepl("6", TR_SCOND) == TRUE] %>%
group_by(SYM_ROOT) %>%
arrange(-SIZE, .by_group = TRUE) %>%
slice(1)
# Quit if not half trading day
# (on a full day closing options occur at/after interval 190, i.e. ~15:50)
if (all(closing_options$TIME_5min_nth < 190)) {print("Half trading day, continue!")
} else { # full trading day: abort this script
stop("Complete trading day, stop!")
}
# Release
rm(closing_options)
gc()
###########################
# 2. Process last 5min #
###########################
# taq after 12:55pm
# label with closing_option
# label with first_closing_option
# label with id_within_tick
# Align to right (1pm to 4pm)
taq_post1255_label <- as.data.table(taq)[TIME_5min_nth >= 155, ] %>%
mutate(closing_option = ifelse(grepl("6", TR_SCOND), 1, 0)) %>%
group_by(SYM_ROOT) %>%
arrange(as.numeric(unlist(TIME_M)), .by_group = TRUE) %>%
mutate(first_closing_option = closing_option== 1 & !duplicated(closing_option == 1)) %>%
mutate(id_within_tick = row_number())
# 12:55pm to 3:55pm
# NOTE(review): `1:min(which(1==first_closing_option)) - 1` parses as
# `(1:min(...)) - 1`, i.e. 0:(m-1); data.table drops the 0 index, so this
# selects the rows BEFORE the first closing option. Verify this (rather
# than `1:(min(...) - 1)`) gives the intended result when m == 1.
before_closing_options <- as.data.table(taq_post1255_label)[,if(1 %in% first_closing_option) .SD[1:min(which(1==first_closing_option)) - 1],by=SYM_ROOT] %>%
group_by(SYM_ROOT) %>%
arrange(as.numeric(unlist(TIME_M)), .by_group = TRUE) %>%
mutate(Volume_5min = sum(SIZE)) %>%
slice(n()) %>%
mutate(TIME_5min_nth = 191) %>%
select(-closing_option, -first_closing_option, -id_within_tick)
# 1:00pm to 4:00pm
# Keep the first closing option itself; its 5-min volume is its own SIZE
# (the second mutate below overwrites the cumulative sum on purpose).
closing_options <- as.data.table(taq_post1255_label)[,if(1 %in% first_closing_option) .SD[1:min(which(TRUE==first_closing_option))],by=SYM_ROOT] %>%
group_by(SYM_ROOT) %>%
arrange(as.numeric(unlist(TIME_M)), .by_group = TRUE) %>%
mutate(Volume_5min = sum(SIZE)) %>%
slice(n()) %>%
mutate(TIME_5min_nth = 192) %>%
mutate(Volume_5min = SIZE) %>%
select(-closing_option, -first_closing_option, -id_within_tick)
last_5min <- rbind(before_closing_options, closing_options)
# Release
rm(taq_post1255_label)
rm(before_closing_options)
rm(closing_options)
gc()
#################################
## 3. Process 9:30am - 12:55pm ##
#################################
# create 79 time slots for each ticker
taq_5min_79 <- as.data.table(taq) %>%
select(SYM_ROOT) %>%
group_by(SYM_ROOT) %>%
slice(1) %>%
mutate(freq = 79) %>%
slice(rep(seq_len(n()), freq)) %>%
select(-freq) %>%
group_by(SYM_ROOT) %>%
mutate(Index = row_number()) %>%
mutate(TIME_5min_nth = Index + 113)
# Filter for intraday trading (after 9:30am and before 12:55pm)
# Group by Ticker and each 5min time interval
# and then pick the last trade record
# Add trading volume
taq <- as.data.table(taq)[TIME_5min_nth %in% 114:154, ] %>%
group_by(SYM_ROOT, TIME_5min_nth) %>%
mutate(Volume_5min = sum(SIZE)) %>%
arrange(-TIME_5min_position, .by_group = TRUE) %>%
slice(1) %>%
ungroup()
# Align to right (e.g. 12:55pm to 3:55pm, so TIME_5min_nth +36 )
taq$TIME_5min_nth <- taq$TIME_5min_nth + 36
#######################################
###### taq all day 79 x 5min #####
#######################################
# Append the tables
taq_all <- rbind(taq, last_5min) %>%
group_by(SYM_ROOT) %>%
arrange(as.numeric(unlist(TIME_M)), .by_group = TRUE) %>%
select(SYM_ROOT, TIME_M, TIME_5min_nth, PRICE, SIZE, Volume_5min) %>%
as.data.table()
# left join for 79 x 5min for each ticker
# (slots with no trade in the interval are filled with NA)
taq_79 <- left_join(taq_5min_79, taq_all, by = c("SYM_ROOT", "TIME_5min_nth"), suffix = c(".x", ".y")) %>%
mutate(Date = DATE) %>%
select(Date, SYM_ROOT, TIME_M, TIME_5min_nth, Index, PRICE, SIZE, Volume_5min)
# Release
rm(taq)
rm(last_5min)
gc()
# write to disk
# NOTE(review): `file_csv` is a global defined at the top of the script,
# not a parameter of this function.
fwrite(taq_79, file = file_csv, row.names = FALSE)
print(paste(file_csv, "written", sep=" "))
rm(taq_all, taq_5min_79)
gc()
return(1)
}
# Apply function
half_taq_5min_79(file_sas)
|
#' Print method for "arfima" objects.
#'
#' Writes the original call, the estimation method, the coefficient table
#' (via printCoefmat() on summary(x)$coefmat) and the estimated innovation
#' standard deviation to the console. Output text is unchanged from the
#' previous implementation.
#'
#' @param x An object of class "arfima"; expected to carry components
#'   $call, $method and $sd.innov, and to have a summary() method that
#'   returns a list with a $coefmat element.
#' @param ... Unused; present for compatibility with the print() generic.
#' @return x, invisibly. Returning the object invisibly is the S3 print()
#'   contract, so that print(x) can be chained without losing the object.
print.arfima <-
function(x, ...)
{
cat("Call: ")
print(x$call)
cat("Estimation method: ")
cat(x$method)
cat("\n")
cat("\n Parameter estimates:\n")
printCoefmat(summary(x)$coefmat, signif.stars = TRUE)
cat("\n Estimated standard deviation of residuals (ratio of periodogram and spectrum):\n")
cat(x$sd.innov)
cat("\n")
# BUG FIX: previously the method returned cat()'s NULL; print methods
# must return their argument invisibly.
invisible(x)
}
| /R/print.arfima.R | no_license | cran/afmtools | R | false | false | 375 | r | print.arfima <-
function(x, ...)
{
cat("Call: ")
print(x$call)
cat("Estimation method: ")
cat(x$method)
cat("\n")
cat("\n Parameter estimates:\n")
printCoefmat(summary(x)$coefmat, signif.stars = TRUE)
cat("\n Estimated standard deviation of residuals (ratio of periodogram and spectrum):\n")
cat(x$sd.innov)
cat("\n")
}
|
# --- Setup: text-mining libraries and scraped comment data -------------------
library("tm")
library("SnowballC")
library("wordcloud")
library("RColorBrewer")
library("quanteda")
# One row per comment; columns include author, text, likes, time, numReplies.
electric_blue <- read.csv("electric_blue.csv")
# Inspect the most-liked comment and its metadata (printed interactively).
electric_blue[which.max(electric_blue$likes),c("author", "text", "likes", "time", "numReplies")]
max(electric_blue$likes)
electric_blue[which.max(electric_blue$likes), "text"]
# Flatten the text column into a plain character vector for lemmatization.
comments <- electric_blue$text
ddd <- unlist(comments, recursive = TRUE, use.names = TRUE)
ddddd <- as.vector(ddd)
is.vector(ddddd)
# Lemmatize with the external MyStem binary (-c copy input, -l lemmas,
# -d disambiguation). NOTE(review): hard-coded local path - this will only
# run on the author's machine; parameterize before reuse.
text.tmp <- system2("/Users/aidarzinnatullin/Downloads/mystem", c("-c", "-l", "-d"), input = ddddd, stdout = TRUE)
text.tmp
# Persist the lemmatized text so it can be re-loaded via file.choose() below.
write.table(text.tmp, "text.txt", append = FALSE, sep = " ", dec = ".",
row.names = TRUE, col.names = TRUE)
# Choose necessary file
# (interactive: select the "text.txt" written above)
text <- readLines(file.choose())
# Build a tm corpus from the lemmatized lines and normalise it.
docs <- Corpus(VectorSource(text))
inspect(docs)
# Transformer: replace a matched pattern with a space (keeps word boundaries).
toSpace <- content_transformer(function (x , pattern ) gsub(pattern, " ", x))
docs <- tm_map(docs, toSpace, "/")
#docs <- tm_map(docs, toSpace, "\")
# (the line above stays commented out: a lone backslash is not a valid
# R string literal)
docs <- tm_map(docs, toSpace, "—")
docs <- tm_map(docs, toSpace, "-")
docs <- tm_map(docs, toSpace, "–")
docs <- tm_map(docs, toSpace, "@")
docs <- tm_map(docs, toSpace, "\\|")
# Convert the text to lower case
docs <- tm_map(docs, content_transformer(tolower))
# Remove numbers
docs <- tm_map(docs, removeNumbers)
# Remove russian common stopwords
docs <- tm_map(docs, removeWords, stopwords("russian"))
docs <- tm_map(docs, removeWords, stopwords("english"))
# Remove your own stop word
# specify your stopwords as a character vector
# (custom list of high-frequency English/Portuguese words seen in the comments)
docs <- tm_map(docs, removeWords, c("you","the","que","and","your","muito","this","that","are","for","cara",
"from","very","like","have","voce","man","one","nao","com","with","mais",
"was","can","uma","but","ficou","meu","really","seu","would","sua","more",
"it's","it","is","all","i'm","mas","como","just","make","what","esse","how",
"por","favor","sempre","time","esta","every","para","i've","tem","will",
"you're","essa","not","faz","pelo","than","about","acho","isso",
"way","also","aqui","been","out","say","should","when","did","mesmo",
"minha","next","cha","pra","sei","sure","too","das","fazer","made",
"quando","ver","cada","here","need","ter","don't","este","has","tambem",
"una","want","ate","can't","could","dia","fiquei","num","seus","tinha","vez",
"ainda","any","dos","even","get","must","other","sem","vai","agora","desde",
"dessa","fez","many","most","tao","then","tudo","vou","ficaria","foi","pela",
"see","teu","those","were"))
# Remove punctuations
docs <- tm_map(docs, removePunctuation)
# Eliminate extra white spaces
docs <- tm_map(docs, stripWhitespace)
# Term-document matrix and a word/frequency table sorted descending.
docs_matrix <- TermDocumentMatrix(docs)
m <- as.matrix(docs_matrix)
v <- sort(rowSums(m),decreasing=TRUE)
d <- data.frame(word = names(v),freq=v)
head(d, 10)
# Word cloud of up to 120 most frequent words.
wordcloud(words = d$word, freq = d$freq,scale = c(4, .2), min.freq = 1,
max.words=120, random.order=FALSE, rot.per=.35,
colors=brewer.pal(8, "Dark2"))
# Horizontal bar plot of the 10 most frequent words.
# NOTE(review): assumes d has at least 10 rows and that all frequencies fit
# within xlim = c(0, 350) - verify on new data.
barplot(d[1:10,]$freq, las = 1, axes = T, names.arg = d[1:10,]$word,
col = c("green", "red", "blue", "yellow", "orange", "lightblue", "lavender", "cornsilk", "lavender", "lightcyan"), main ="Наиболее употребляемые слова",
xlab = "Частота слов", horiz = T, cex.names = 0.8, cex.axis = 0.8, xlim= c(0,350))
#### Collocations (multi-word expressions) ####
# Flatten the cleaned corpus back to text and write it out for re-reading.
docs <- unlist(docs, recursive = TRUE, use.names = TRUE)
docs
write.table(docs, "text_colloc.txt", append = FALSE, sep = " ", dec = ".",
row.names = TRUE, col.names = TRUE)
# (interactive: re-select the "text_colloc.txt" written above)
text <- readLines(file.choose())
# Extract 2- and 3-word collocations and plot them as a word cloud.
collocations <- textstat_collocations(text, size = 2:3)
wordcloud(words = collocations$collocation, freq = collocations$count,
scale=c(4,.6), min.freq = 25, max.words=Inf,
random.order=FALSE, rot.per=0.1, ordered.colors=FALSE,
random.color=TRUE, colors=brewer.pal(8, "Dark2"))
| /electric blue.R | no_license | aidar-zinnatullin/Arcade-Fire-s-Lyrics-and-Music-Analysis | R | false | false | 4,291 | r | library("tm")
library("SnowballC")
library("wordcloud")
library("RColorBrewer")
library("quanteda")
electric_blue <- read.csv("electric_blue.csv")
electric_blue[which.max(electric_blue$likes),c("author", "text", "likes", "time", "numReplies")]
max(electric_blue$likes)
electric_blue[which.max(electric_blue$likes), "text"]
comments <- electric_blue$text
ddd <- unlist(comments, recursive = TRUE, use.names = TRUE)
ddddd <- as.vector(ddd)
is.vector(ddddd)
text.tmp <- system2("/Users/aidarzinnatullin/Downloads/mystem", c("-c", "-l", "-d"), input = ddddd, stdout = TRUE)
text.tmp
write.table(text.tmp, "text.txt", append = FALSE, sep = " ", dec = ".",
row.names = TRUE, col.names = TRUE)
# Choose necessary file
text <- readLines(file.choose())
docs <- Corpus(VectorSource(text))
inspect(docs)
toSpace <- content_transformer(function (x , pattern ) gsub(pattern, " ", x))
docs <- tm_map(docs, toSpace, "/")
#docs <- tm_map(docs, toSpace, "\")
docs <- tm_map(docs, toSpace, "—")
docs <- tm_map(docs, toSpace, "-")
docs <- tm_map(docs, toSpace, "–")
docs <- tm_map(docs, toSpace, "@")
docs <- tm_map(docs, toSpace, "\\|")
# Convert the text to lower case
docs <- tm_map(docs, content_transformer(tolower))
# Remove numbers
docs <- tm_map(docs, removeNumbers)
# Remove russian common stopwords
docs <- tm_map(docs, removeWords, stopwords("russian"))
docs <- tm_map(docs, removeWords, stopwords("english"))
# Remove your own stop word
# specify your stopwords as a character vector
docs <- tm_map(docs, removeWords, c("you","the","que","and","your","muito","this","that","are","for","cara",
"from","very","like","have","voce","man","one","nao","com","with","mais",
"was","can","uma","but","ficou","meu","really","seu","would","sua","more",
"it's","it","is","all","i'm","mas","como","just","make","what","esse","how",
"por","favor","sempre","time","esta","every","para","i've","tem","will",
"you're","essa","not","faz","pelo","than","about","acho","isso",
"way","also","aqui","been","out","say","should","when","did","mesmo",
"minha","next","cha","pra","sei","sure","too","das","fazer","made",
"quando","ver","cada","here","need","ter","don't","este","has","tambem",
"una","want","ate","can't","could","dia","fiquei","num","seus","tinha","vez",
"ainda","any","dos","even","get","must","other","sem","vai","agora","desde",
"dessa","fez","many","most","tao","then","tudo","vou","ficaria","foi","pela",
"see","teu","those","were"))
# Remove punctuations
docs <- tm_map(docs, removePunctuation)
# Eliminate extra white spaces
docs <- tm_map(docs, stripWhitespace)
docs_matrix <- TermDocumentMatrix(docs)
m <- as.matrix(docs_matrix)
v <- sort(rowSums(m),decreasing=TRUE)
d <- data.frame(word = names(v),freq=v)
head(d, 10)
wordcloud(words = d$word, freq = d$freq,scale = c(4, .2), min.freq = 1,
max.words=120, random.order=FALSE, rot.per=.35,
colors=brewer.pal(8, "Dark2"))
barplot(d[1:10,]$freq, las = 1, axes = T, names.arg = d[1:10,]$word,
col = c("green", "red", "blue", "yellow", "orange", "lightblue", "lavender", "cornsilk", "lavender", "lightcyan"), main ="Наиболее употребляемые слова",
xlab = "Частота слов", horiz = T, cex.names = 0.8, cex.axis = 0.8, xlim= c(0,350))
#### Для словосочетаний
docs <- unlist(docs, recursive = TRUE, use.names = TRUE)
docs
write.table(docs, "text_colloc.txt", append = FALSE, sep = " ", dec = ".",
row.names = TRUE, col.names = TRUE)
text <- readLines(file.choose())
collocations <- textstat_collocations(text, size = 2:3)
wordcloud(words = collocations$collocation, freq = collocations$count,
scale=c(4,.6), min.freq = 25, max.words=Inf,
random.order=FALSE, rot.per=0.1, ordered.colors=FALSE,
random.color=TRUE, colors=brewer.pal(8, "Dark2"))
|
library(shiny)
library(shinydashboard)
library(reshape2)
library(dplyr)
library(ggplot2)
source("./process.R")
# UI: single-page shinydashboard with season/team selectors and five
# bar-chart slots filled by the server.
ui <- dashboardPage(
skin = "black",
dashboardHeader(title = "Football App"),
# Sidebar with a single "Dashboard" menu entry.
# NOTE(review): `collapsed` is not a documented sidebarMenu() argument -
# confirm it has the intended effect (it is likely emitted as a plain
# HTML tag attribute).
dashboardSidebar(sidebarMenu(
collapsed = TRUE,
id = "tabs",
menuItem(
"Dashboard",
tabName = "dashboard",
icon = icon("dashboard")
)
)),
dashboardBody(tabItems(
tabItem(
"dashboard",
# Promotional info box linking to the course repository.
infoBox(
title = "Don't gamble.",
subtitle = "Make milions becoming a data scientist instead.
Click this and begin the journey!",
value = NULL,
icon = shiny::icon("exclamation-triangle"),
color = "red",
width = NULL,
href = "https://github.com/mini-pw/WizualizacjaDanych2018",
fill = FALSE
),
div("Choose the season"),
# Season filter; choices come from the global `team_results` loaded
# by process.R.
selectInput(
inputId = "chosen_season",
label = "Choose a season",
choices = levels(as.factor(team_results$season))
),
# Team to highlight (drawn at full opacity in every plot).
selectInput(
inputId = "chosen_team",
label = "Highlight a team",
choices = levels(as.factor(team_results$team))
),
# NOTE(review): no observer for "sort_wins_button" exists in server();
# the button currently has no effect - confirm whether it should be
# wired up or removed.
actionButton(
style = "font-size: 10px;",
inputId = "sort_wins_button",
label = "Sort-A-Z",
icon = icon("sort-alpha-asc")
),
fluidRow(
column(5, plotOutput("wins_plot")),
column(5, plotOutput("points_plot")),
column(5, plotOutput("goals_plot")),
column(5, plotOutput("max_goals_in_match_plot")),
column(5, plotOutput("ties_plot"))
)
)
))
)
# Shiny server: builds one reactive, season-filtered copy of `team_results`
# (with a per-team highlight alpha) and renders the five bar charts.
# Interface unchanged: server(session, input, output), registered via
# shinyApp(ui, server) at the bottom of the file.
server <- function(session, input, output) {
# Reactive data: all teams of the chosen season, sorted by wins.
# `selected` is used as the bar alpha: 1 for the highlighted team,
# 0.5 for everyone else.
data_r <- reactive({
team_results$selected = 0.5
team_results[team_results$team == input[["chosen_team"]], ]$selected = 1
team_results %>% arrange(wins) %>% filter(season == input[["chosen_season"]])
})
# Horizontal bar chart of wins per team, labelled with the win counts.
# (a stray `alpha = 0.5` argument to ggplot() was removed; it had no effect)
output[["wins_plot"]] <- renderPlot({
ggplot(data = data_r(),
aes(
x = reorder(data_r()$team, data_r()$wins),
y = data_r()$wins
)) +
geom_text(
aes(label = data_r()$wins),
hjust = -0.1,
color = "black",
size = 4
) +
geom_bar(stat = "identity",
fill = "dodgerblue",
alpha = data_r()$selected) + coord_flip() +
xlab("Team") + ylab("wins") + ylim(c(0, max(data_r()$wins)+2)) +
ggtitle("Wins in the season") + theme_minimal()
})
# Horizontal bar chart of league points per team.
output[["points_plot"]] <- renderPlot({
ggplot(data = data_r(), aes(x = reorder(data_r()$team, data_r()$points), y = data_r()$points)) +
geom_text(
aes(label = data_r()$points),
hjust = -0.1,
color = "black",
size = 4
) +
geom_bar(stat = "identity",
fill = "dodgerblue",
alpha = data_r()$selected) + coord_flip() +
xlab("Team") + ylab("points") +
ggtitle("Points acquired in the season") + theme_minimal()
})
# Horizontal bar chart of goals scored per team.
output[["goals_plot"]] <- renderPlot({
ggplot(data = data_r(), aes(x = reorder(data_r()$team, data_r()$goals), y = data_r()$goals)) +
geom_text(
aes(label = data_r()$goals),
hjust = -0.1,
color = "black",
size = 4
) +
geom_bar(stat = "identity",
fill = "dodgerblue",
alpha = data_r()$selected) + coord_flip() +
xlab("Team") + ylab("goals") +
ggtitle("Goals scroed in the season") + theme_minimal()
})
# Horizontal bar chart of each team's highest goal count in a single match.
output[["max_goals_in_match_plot"]] <- renderPlot({
ggplot(data = data_r(),
aes(x = reorder(data_r()$team, data_r()$max_goals_in_match), y = data_r()$max_goals_in_match)) +
geom_text(
aes(label = data_r()$max_goals_in_match),
hjust = -0.1,
color = "black",
size = 4
) +
geom_bar(stat = "identity",
fill = "dodgerblue",
alpha = data_r()$selected) + coord_flip() +
xlab("Team") + ylab("Max goals in one match") +
ggtitle("Max goals in one match in the season") + theme_minimal()
})
# Horizontal bar chart of ties per team.
output[["ties_plot"]] <- renderPlot({
ggplot(data = data_r(),
aes(x = reorder(data_r()$team, data_r()$ties), y = data_r()$ties)) +
geom_text(
# BUG FIX: bars were previously labelled with win counts
# (data_r()$wins) on the ties plot; label with the tie counts.
aes(label = data_r()$ties),
hjust = -0.1,
color = "black",
size = 4
) +
geom_bar(stat = "identity",
fill = "dodgerblue",
alpha = data_r()$selected) + coord_flip() +
xlab("Team") + ylab("Ties") + ylim(c(0, max(data_r()$ties)+2)) +
ggtitle("Ties in the season") + theme_minimal()
})
}
shinyApp(ui, server)
| /PraceDomowe/PD5/gr2/SawickiJan/app/app.R | no_license | ramusz1/WizualizacjaDanych2018 | R | false | false | 4,628 | r | library(shiny)
library(shinydashboard)
library(reshape2)
library(dplyr)
library(ggplot2)
source("./process.R")
ui <- dashboardPage(
skin = "black",
dashboardHeader(title = "Football App"),
dashboardSidebar(sidebarMenu(
collapsed = TRUE,
id = "tabs",
menuItem(
"Dashboard",
tabName = "dashboard",
icon = icon("dashboard")
)
)),
dashboardBody(tabItems(
tabItem(
"dashboard",
infoBox(
title = "Don't gamble.",
subtitle = "Make milions becoming a data scientist instead.
Click this and begin the journey!",
value = NULL,
icon = shiny::icon("exclamation-triangle"),
color = "red",
width = NULL,
href = "https://github.com/mini-pw/WizualizacjaDanych2018",
fill = FALSE
),
div("Choose the season"),
selectInput(
inputId = "chosen_season",
label = "Choose a season",
choices = levels(as.factor(team_results$season))
),
selectInput(
inputId = "chosen_team",
label = "Highlight a team",
choices = levels(as.factor(team_results$team))
),
actionButton(
style = "font-size: 10px;",
inputId = "sort_wins_button",
label = "Sort-A-Z",
icon = icon("sort-alpha-asc")
),
fluidRow(
column(5, plotOutput("wins_plot")),
column(5, plotOutput("points_plot")),
column(5, plotOutput("goals_plot")),
column(5, plotOutput("max_goals_in_match_plot")),
column(5, plotOutput("ties_plot"))
)
)
))
)
server <- function(session, input, output) {
data_r <- reactive({
team_results$selected = 0.5
team_results[team_results$team == input[["chosen_team"]], ]$selected = 1
team_results %>% arrange(wins) %>% filter(season == input[["chosen_season"]])
})
output[["wins_plot"]] <- renderPlot({
ggplot(data = data_r(),
aes(
x = reorder(data_r()$team, data_r()$wins),
y = data_r()$wins
),
alpha = 0.5) +
geom_text(
aes(label = data_r()$wins),
hjust = -0.1,
color = "black",
size = 4
) +
geom_bar(stat = "identity",
fill = "dodgerblue",
alpha = data_r()$selected) + coord_flip() +
xlab("Team") + ylab("wins") + ylim(c(0, max(data_r()$wins)+2)) +
ggtitle("Wins in the season") + theme_minimal()
})
output[["points_plot"]] <- renderPlot({
ggplot(data = data_r(), aes(x = reorder(data_r()$team, data_r()$points), y = data_r()$points)) +
geom_text(
aes(label = data_r()$points),
hjust = -0.1,
color = "black",
size = 4
) +
geom_bar(stat = "identity",
fill = "dodgerblue",
alpha = data_r()$selected) + coord_flip() +
xlab("Team") + ylab("points") +
ggtitle("Points acquired in the season") + theme_minimal()
})
output[["goals_plot"]] <- renderPlot({
ggplot(data = data_r(), aes(x = reorder(data_r()$team, data_r()$goals), y = data_r()$goals)) +
geom_text(
aes(label = data_r()$goals),
hjust = -0.1,
color = "black",
size = 4
) +
geom_bar(stat = "identity",
fill = "dodgerblue",
alpha = data_r()$selected) + coord_flip() +
xlab("Team") + ylab("goals") +
ggtitle("Goals scroed in the season") + theme_minimal()
})
output[["max_goals_in_match_plot"]] <- renderPlot({
ggplot(data = data_r(),
aes(x = reorder(data_r()$team, data_r()$max_goals_in_match), y = data_r()$max_goals_in_match)) +
geom_text(
aes(label = data_r()$max_goals_in_match),
hjust = -0.1,
color = "black",
size = 4
) +
geom_bar(stat = "identity",
fill = "dodgerblue",
alpha = data_r()$selected) + coord_flip() +
xlab("Team") + ylab("Max goals in one match") +
ggtitle("Max goals in one match in the season") + theme_minimal()
})
output[["ties_plot"]] <- renderPlot({
ggplot(data = data_r(),
aes(x = reorder(data_r()$team, data_r()$ties), y = data_r()$ties)) +
geom_text(
aes(label = data_r()$wins),
hjust = -0.1,
color = "black",
size = 4
) +
geom_bar(stat = "identity",
fill = "dodgerblue",
alpha = data_r()$selected) + coord_flip() +
xlab("Team") + ylab("Ties") + ylim(c(0, max(data_r()$ties)+2)) +
ggtitle("Ties in the season") + theme_minimal()
})
}
shinyApp(ui, server)
|
################# TF-IDF EXAMPLE ####################
## Name: term frequency - inverse document frequency
##----------------------------------------------------------------------
## Goal: compute TF-IDF for a collection of documents.
## Using the term-frequency matrix, the IDF weight is computed;
## then the elbow method is used to choose an optimal k,
## and k-means is applied to the documents, yielding
## the contents of each cluster.
######################################################
#------------------------------------------------------------------------
#' Step 1: load the required libraries
# NOTE(review): rm(list = ls()) wipes the caller's entire workspace -
# discouraged outside throwaway demo scripts.
rm(list = ls())
library(cluster)
library(ggplot2)
library(factoextra)
library(tm)
#------------------------------------------------------------------------
#' Step 2: create a vector of example documents (Spanish sentences)
x <-
c(
"los planetas giran alrededor del sol",
"las agujas del reloj giran",
"las peonzas giran al igual que giran los planetas",
"los planetas y el sol son astros"
)
#------------------------------------------------------------------------
#' Step 3: define the corpus
doc_corpus <- Corpus(VectorSource(x))
#------------------------------------------------------------------------
#' Step 4: pre-processing shapes the corpus for statistical analysis;
#' the usual cleaning steps are applied
doc_corpus <- tm_map(doc_corpus , stripWhitespace)
doc_corpus <- tm_map(doc_corpus , removePunctuation)
doc_corpus <- tm_map(doc_corpus , removeWords, stopwords("spanish"))
doc_corpus <- tm_map(doc_corpus , stemDocument)
doc_corpus <- tm_map(doc_corpus , removeNumbers)
#------------------------------------------------------------------------
#' Step 5: represent the document corpus in a vector space for processing
# Raw term frequencies (documents x terms).
dtm <-
DocumentTermMatrix(doc_corpus, control = list(weighting = weightTf))
#inspect(dtm)
Tf <- as.matrix(dtm)
#View(Tf)
# TF-IDF weighting; transposed so rows are terms and columns are documents.
dtm <-
DocumentTermMatrix(doc_corpus, control = list(weighting = weightTfIdf))
#inspect(dtm)
TfIdf <- as.matrix(dtm)
TfIdf <- t(TfIdf)
#------------------------------------------------------------------------
#' Step 6: elbow method for k-means clustering
mydata <- TfIdf
k.max <- 4 # Maximal number of clusters
data <- mydata
# Total within-cluster sum of squares for k = 1..k.max.
wss <- sapply(1:k.max,
function(k) {
kmeans(data, k, nstart = 2)$tot.withinss
})
plot(
1:k.max,
wss,
type = "b",
pch = 19,
frame = FALSE,
xlab = "Number of clusters K",
ylab = "Total within-clusters sum of squares"
)
# Mark the chosen elbow at k = 2.
abline(v = 2, lty = 2)
#------------------------------------------------------------------------
#' Step 7: k-means
# NOTE(review): TfIdf is terms x documents at this point, so t(df) puts
# documents back on the rows before computing distances - confirm that
# clustering documents (not terms) is the intent.
df <- data.frame(TfIdf)
df <- t(df)
distancias1<-dist(df,method="euclidean")
cluster1<-hclust(distancias1)
res <- kmeans(distancias1, 2)
#res
# Second run: k-means directly on the raw term-frequency matrix; note this
# overwrites `res` from the distance-based run above.
df <- data.frame(Tf)
#df
res <- kmeans(df, 2)
#res
#------------------------------------------------------------------------
#------------------------------------------------------------------------ | /TF_IDF.R | no_license | INDUSTRIALOPALO/mineria-de-texto | R | false | false | 3,115 | r | ################# EJEMPLO TF-IDF ####################
## Nombre: frecuencia de términos y frecuencia inversa de documentos
##----------------------------------------------------------------------
## Objetivo: se Calcula el TF-IDF para la colección de documentos
## Usando la matriz de frecuencia de términos, se calcula el peso de idf.
## luego se halla el elbow method para determinar el k optimo
## y a partir de esto se aplica el k-means a los documentos obteniendo
## el resultado de cada cluster.
######################################################
#------------------------------------------------------------------------
#' paso 1: Importar las librerias necesarias
rm(list = ls())
library(cluster)
library(ggplot2)
library(factoextra)
library(tm)
#------------------------------------------------------------------------
#' Step 2: create a vector of example documents (four short Spanish sentences).
x <-
c(
"los planetas giran alrededor del sol",
"las agujas del reloj giran",
"las peonzas giran al igual que giran los planetas",
"los planetas y el sol son astros"
)
#------------------------------------------------------------------------
#' Step 3: define the corpus from the document vector.
doc_corpus <- Corpus(VectorSource(x))
#------------------------------------------------------------------------
#' Step 4: pre-processing shapes the corpus so it can be analysed with
#' statistical methods; the cleaning steps below are applied in sequence.
doc_corpus <- tm_map(doc_corpus , stripWhitespace)
doc_corpus <- tm_map(doc_corpus , removePunctuation)
doc_corpus <- tm_map(doc_corpus , removeWords, stopwords("spanish"))
# NOTE(review): stemDocument's default stemmer is English; for Spanish text
# you would normally pass language = "spanish" -- confirm this is intended.
doc_corpus <- tm_map(doc_corpus , stemDocument)
doc_corpus <- tm_map(doc_corpus , removeNumbers)
#------------------------------------------------------------------------
#' Step 5: representation -- transform the corpus into a vector space
#' (document-term matrices) so the documents can be processed numerically.
dtm <-
DocumentTermMatrix(doc_corpus, control = list(weighting = weightTf))
#inspect(dtm)
Tf <- as.matrix(dtm)  # raw term-frequency matrix (documents x terms)
#View(Tf)
dtm <-
DocumentTermMatrix(doc_corpus, control = list(weighting = weightTfIdf))
#inspect(dtm)
TfIdf <- as.matrix(dtm)
TfIdf <- t(TfIdf)  # transpose: rows are now terms, columns are documents
#------------------------------------------------------------------------
#' Step 6: Elbow method for k-means clustering
# NOTE(review): because TfIdf was transposed above, this elbow curve is
# computed over TERMS (rows), while step 7 clusters documents -- confirm
# that this is the intended input for choosing k.
mydata <- TfIdf
k.max <- 4 # Maximal number of clusters
data <- mydata
# Total within-cluster sum of squares for k = 1..k.max.
wss <- sapply(1:k.max,
function(k) {
kmeans(data, k, nstart = 2)$tot.withinss
})
plot(
1:k.max,
wss,
type = "b",
pch = 19,
frame = FALSE,
xlab = "Number of clusters K",
ylab = "Total within-clusters sum of squares"
)
abline(v = 2, lty = 2)  # mark the chosen elbow (k = 2) on the plot
#------------------------------------------------------------------------
#' Step 7: k-means clustering of the documents.
df <- data.frame(TfIdf)
df <- t(df)  # back to documents x terms
distancias1<-dist(df,method="euclidean")  # pairwise document distances
cluster1<-hclust(distancias1)  # hierarchical clustering (not used below)
# NOTE(review): kmeans() is run on the dist object here, i.e. on rows of the
# coerced distance matrix rather than on the TF-IDF features -- confirm intended.
res <- kmeans(distancias1, 2)
#res
df <- data.frame(Tf)
#df
# NOTE(review): this second kmeans (on the raw Tf matrix) overwrites `res`
# from the previous call, so only the Tf-based result survives.
res <- kmeans(df, 2)
#res
#------------------------------------------------------------------------ |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kinesis_operations.R
\name{kinesis_describe_stream_consumer}
\alias{kinesis_describe_stream_consumer}
\title{To get the description of a registered consumer, provide the ARN of the
consumer}
\usage{
kinesis_describe_stream_consumer(
StreamARN = NULL,
ConsumerName = NULL,
ConsumerARN = NULL
)
}
\arguments{
\item{StreamARN}{The ARN of the Kinesis data stream that the consumer is registered with.
For more information, see \href{https://docs.aws.amazon.com/IAM/latest/UserGuide/reference-arns.html#arn-syntax-kinesis-streams}{Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces}.}
\item{ConsumerName}{The name that you gave to the consumer.}
\item{ConsumerARN}{The ARN returned by Kinesis Data Streams when you registered the
consumer.}
}
\description{
To get the description of a registered consumer, provide the ARN of the consumer. Alternatively, you can provide the ARN of the data stream and the name you gave the consumer when you registered it. You may also provide all three parameters, as long as they don't conflict with each other. If you don't know the name or ARN of the consumer that you want to describe, you can use the \code{\link[=kinesis_list_stream_consumers]{list_stream_consumers}} operation to get a list of the descriptions of all the consumers that are currently registered with a given data stream.
See \url{https://www.paws-r-sdk.com/docs/kinesis_describe_stream_consumer/} for full documentation.
}
\keyword{internal}
| /cran/paws.analytics/man/kinesis_describe_stream_consumer.Rd | permissive | paws-r/paws | R | false | true | 1,553 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kinesis_operations.R
\name{kinesis_describe_stream_consumer}
\alias{kinesis_describe_stream_consumer}
\title{To get the description of a registered consumer, provide the ARN of the
consumer}
\usage{
kinesis_describe_stream_consumer(
StreamARN = NULL,
ConsumerName = NULL,
ConsumerARN = NULL
)
}
\arguments{
\item{StreamARN}{The ARN of the Kinesis data stream that the consumer is registered with.
For more information, see \href{https://docs.aws.amazon.com/IAM/latest/UserGuide/reference-arns.html#arn-syntax-kinesis-streams}{Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces}.}
\item{ConsumerName}{The name that you gave to the consumer.}
\item{ConsumerARN}{The ARN returned by Kinesis Data Streams when you registered the
consumer.}
}
\description{
To get the description of a registered consumer, provide the ARN of the consumer. Alternatively, you can provide the ARN of the data stream and the name you gave the consumer when you registered it. You may also provide all three parameters, as long as they don't conflict with each other. If you don't know the name or ARN of the consumer that you want to describe, you can use the \code{\link[=kinesis_list_stream_consumers]{list_stream_consumers}} operation to get a list of the descriptions of all the consumers that are currently registered with a given data stream.
See \url{https://www.paws-r-sdk.com/docs/kinesis_describe_stream_consumer/} for full documentation.
}
\keyword{internal}
|
# UI for DVRPC shiny app
# Sidebar-driven explorer for census-tract rankings: the user chooses national
# or state rankings, a metric (non-white / low-income / both), and -- for
# Pennsylvania only -- commuter data. The main panel shows the plotly chart
# and data table matching the current inputs, plus a leaflet map for PA.
library(shiny)
library(plotly)
library(leaflet)
# Pre-computed objects saved with save(); expected to supply the data frames
# `top.tracts` and `pa.commute` referenced below.
load("top.tracts")
load("pa.commute.data")
# Define UI for application
shinyUI(fluidPage(
# Application title
titlePanel("Census Tract Explorer"),
# Sidebar layout
sidebarLayout(
sidebarPanel(
# Top-level choice: national vs. state rankings; drives every
# conditionalPanel below via input.Level.
selectInput("Level", "Would you like National or State rankings?",
list("National" = "nat",
"State" = "sta")),
# National controls: number of tracts to plot and ranking metric.
conditionalPanel(condition = "input.Level == 'nat'",
sliderInput("N", "How many tracts would you like to plot?", value = 10,
min = 10, max = 30, step = 5),
selectInput("nRank", "Non-white, Low-income, or both?",
list("Non-white" = "nonwhite",
"Low-income" = "lowincome",
"Both" = "combo"))),
# State picker (only shown for state-level rankings).
conditionalPanel(condition = "input.Level == 'sta'",
selectInput("State", "Which state would you like to see rankings for?",
choices = levels(factor(top.tracts$state)))),
# NOTE(review): states are compared as ' Pennsylvania' with a leading space
# throughout -- presumably the levels of top.tracts$state carry that space;
# verify against the data.
# Metric picker for non-Pennsylvania states.
conditionalPanel(condition = "input.Level == 'sta' &&
input.State != ' Pennsylvania'",
div("(Choose Pennsylvania to see extra features)"),
br(),
selectInput("sRank", "Non-white, Low-income, or both?",
list("Non-white" = "nonwhite",
"Low-income" = "lowincome",
"Both" = "combo"))),
# Pennsylvania gets the same metrics plus a commuter-data option.
conditionalPanel(condition = "input.Level == 'sta' &&
input.State == ' Pennsylvania'",
selectInput("pRank", "Non-white, Low-income, both, or commuter data?",
list("Non-white" = "nonwhite",
"Low-income" = "lowincome",
"Both" = "combo",
"Commuting Data" = "commute"))),
# conditionalPanel(condition = "input.nRank == 'combo' || input.sRank == 'combo' || input.pRank == 'combo'",
# div("Caution: Combo rankings were created by summing low-income and non-white rankings.
# Does not account for overlap between groups!", style = "color:red"),
# br()),
# Commuter mode: pick a specific PA tract to inspect.
conditionalPanel(condition = "input.Level == 'sta' &&
input.State == ' Pennsylvania' &&
input.pRank == 'commute'",
selectInput("Tract", "Tract:",
choices = levels(factor(pa.commute$GEO.display.label)))),
# Inputs only take effect once the user presses this button.
submitButton("Run Selection")
),
# Output
# One conditionalPanel per (level, metric) combination; each shows a plotly
# chart above its data table. Only the panel matching the inputs renders.
mainPanel(
# national rankings
conditionalPanel(condition = "input.Level == 'nat' &&
input.nRank == 'nonwhite'",
plotlyOutput("national.plot1", height = 600),
dataTableOutput("national.data.out")),
conditionalPanel(condition = "input.Level == 'nat' &&
input.nRank == 'lowincome'",
plotlyOutput("national.plot2", height = 600),
dataTableOutput("national.data.out2")),
conditionalPanel(condition = "input.Level == 'nat' &&
input.nRank == 'combo'",
# add a warning about the summing might be incorrect?
plotlyOutput("national.plot3", height = 600),
dataTableOutput("national.data.out3")),
# state rankings
conditionalPanel(condition = "input.Level == 'sta' &&
input.State != ' Pennsylvania' &&
input.sRank == 'nonwhite'",
plotlyOutput("state.plot1", height = 600),
dataTableOutput("state.data.out")),
conditionalPanel(condition = "input.Level == 'sta' &&
input.State != ' Pennsylvania' &&
input.sRank == 'lowincome'",
plotlyOutput("state.plot2", height = 600),
dataTableOutput("state.data.out2")),
conditionalPanel(condition = "input.Level == 'sta' &&
input.State != ' Pennsylvania' &&
input.sRank == 'combo'",
plotlyOutput("state.plot3", height = 600),
dataTableOutput("state.data.out3")),
# PA
conditionalPanel(condition = "input.Level == 'sta' &&
input.State == ' Pennsylvania' &&
input.pRank == 'nonwhite'",
plotlyOutput("pa.plot1", height = 600),
dataTableOutput("pa.data.out")),
conditionalPanel(condition = "input.Level == 'sta' &&
input.State == ' Pennsylvania' &&
input.pRank == 'lowincome'",
plotlyOutput("pa.plot2", height = 600),
dataTableOutput("pa.data.out2")),
conditionalPanel(condition = "input.Level == 'sta' &&
input.State == ' Pennsylvania' &&
input.pRank == 'combo'",
plotlyOutput("pa.plot3", height = 600),
dataTableOutput("pa.data.out3")),
conditionalPanel(condition = "input.Level == 'sta' &&
input.State == ' Pennsylvania' &&
input.pRank == 'commute'",
plotlyOutput("commute.plot", height = 600),
dataTableOutput("pa.commute.data")),
# PA map output
# Leaflet map is shown for Pennsylvania regardless of the chosen metric.
conditionalPanel(condition = "input.Level == 'sta' &&
input.State == ' Pennsylvania'",
# imageOutput("philly.tract"), # don't need the picture anymore!
leafletOutput("philly.map", height = 750))
)
)
))
| /dvrpcexercise/ui.R | no_license | cjarayata/dvrpc | R | false | false | 6,382 | r | # UI for DVRPC shiny app
# UI for the DVRPC shiny app (census-tract explorer).
# Sidebar-driven explorer for census-tract rankings: the user chooses national
# or state rankings, a metric (non-white / low-income / both), and -- for
# Pennsylvania only -- commuter data. The main panel shows the plotly chart
# and data table matching the current inputs, plus a leaflet map for PA.
library(shiny)
library(plotly)
library(leaflet)
# Pre-computed objects saved with save(); expected to supply the data frames
# `top.tracts` and `pa.commute` referenced below.
load("top.tracts")
load("pa.commute.data")
# Define UI for application
shinyUI(fluidPage(
# Application title
titlePanel("Census Tract Explorer"),
# Sidebar layout
sidebarLayout(
sidebarPanel(
# Top-level choice: national vs. state rankings; drives every
# conditionalPanel below via input.Level.
selectInput("Level", "Would you like National or State rankings?",
list("National" = "nat",
"State" = "sta")),
# National controls: number of tracts to plot and ranking metric.
conditionalPanel(condition = "input.Level == 'nat'",
sliderInput("N", "How many tracts would you like to plot?", value = 10,
min = 10, max = 30, step = 5),
selectInput("nRank", "Non-white, Low-income, or both?",
list("Non-white" = "nonwhite",
"Low-income" = "lowincome",
"Both" = "combo"))),
# State picker (only shown for state-level rankings).
conditionalPanel(condition = "input.Level == 'sta'",
selectInput("State", "Which state would you like to see rankings for?",
choices = levels(factor(top.tracts$state)))),
# NOTE(review): states are compared as ' Pennsylvania' with a leading space
# throughout -- presumably the levels of top.tracts$state carry that space;
# verify against the data.
# Metric picker for non-Pennsylvania states.
conditionalPanel(condition = "input.Level == 'sta' &&
input.State != ' Pennsylvania'",
div("(Choose Pennsylvania to see extra features)"),
br(),
selectInput("sRank", "Non-white, Low-income, or both?",
list("Non-white" = "nonwhite",
"Low-income" = "lowincome",
"Both" = "combo"))),
# Pennsylvania gets the same metrics plus a commuter-data option.
conditionalPanel(condition = "input.Level == 'sta' &&
input.State == ' Pennsylvania'",
selectInput("pRank", "Non-white, Low-income, both, or commuter data?",
list("Non-white" = "nonwhite",
"Low-income" = "lowincome",
"Both" = "combo",
"Commuting Data" = "commute"))),
# conditionalPanel(condition = "input.nRank == 'combo' || input.sRank == 'combo' || input.pRank == 'combo'",
# div("Caution: Combo rankings were created by summing low-income and non-white rankings.
# Does not account for overlap between groups!", style = "color:red"),
# br()),
# Commuter mode: pick a specific PA tract to inspect.
conditionalPanel(condition = "input.Level == 'sta' &&
input.State == ' Pennsylvania' &&
input.pRank == 'commute'",
selectInput("Tract", "Tract:",
choices = levels(factor(pa.commute$GEO.display.label)))),
# Inputs only take effect once the user presses this button.
submitButton("Run Selection")
),
# Output
# One conditionalPanel per (level, metric) combination; each shows a plotly
# chart above its data table. Only the panel matching the inputs renders.
mainPanel(
# national rankings
conditionalPanel(condition = "input.Level == 'nat' &&
input.nRank == 'nonwhite'",
plotlyOutput("national.plot1", height = 600),
dataTableOutput("national.data.out")),
conditionalPanel(condition = "input.Level == 'nat' &&
input.nRank == 'lowincome'",
plotlyOutput("national.plot2", height = 600),
dataTableOutput("national.data.out2")),
conditionalPanel(condition = "input.Level == 'nat' &&
input.nRank == 'combo'",
# add a warning about the summing might be incorrect?
plotlyOutput("national.plot3", height = 600),
dataTableOutput("national.data.out3")),
# state rankings
conditionalPanel(condition = "input.Level == 'sta' &&
input.State != ' Pennsylvania' &&
input.sRank == 'nonwhite'",
plotlyOutput("state.plot1", height = 600),
dataTableOutput("state.data.out")),
conditionalPanel(condition = "input.Level == 'sta' &&
input.State != ' Pennsylvania' &&
input.sRank == 'lowincome'",
plotlyOutput("state.plot2", height = 600),
dataTableOutput("state.data.out2")),
conditionalPanel(condition = "input.Level == 'sta' &&
input.State != ' Pennsylvania' &&
input.sRank == 'combo'",
plotlyOutput("state.plot3", height = 600),
dataTableOutput("state.data.out3")),
# PA
conditionalPanel(condition = "input.Level == 'sta' &&
input.State == ' Pennsylvania' &&
input.pRank == 'nonwhite'",
plotlyOutput("pa.plot1", height = 600),
dataTableOutput("pa.data.out")),
conditionalPanel(condition = "input.Level == 'sta' &&
input.State == ' Pennsylvania' &&
input.pRank == 'lowincome'",
plotlyOutput("pa.plot2", height = 600),
dataTableOutput("pa.data.out2")),
conditionalPanel(condition = "input.Level == 'sta' &&
input.State == ' Pennsylvania' &&
input.pRank == 'combo'",
plotlyOutput("pa.plot3", height = 600),
dataTableOutput("pa.data.out3")),
conditionalPanel(condition = "input.Level == 'sta' &&
input.State == ' Pennsylvania' &&
input.pRank == 'commute'",
plotlyOutput("commute.plot", height = 600),
dataTableOutput("pa.commute.data")),
# PA map output
# Leaflet map is shown for Pennsylvania regardless of the chosen metric.
conditionalPanel(condition = "input.Level == 'sta' &&
input.State == ' Pennsylvania'",
# imageOutput("philly.tract"), # don't need the picture anymore!
leafletOutput("philly.map", height = 750))
)
)
))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.