content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# function V = proj_TpM_spd(V)
# %PROJ_TPM_SPD projects a set of tangent V vectors onto TpM. Symmetrization.
# %
# % See also MGLM_SPD
#
# % Hyunwoo J. Kim
# % $Revision: 0.1 $ $Date: 2014/06/23 16:59:20 $
#
# for i = 1:size(V,3)
# V(:,:,i) = (V(:,:,i)+V(:,:,i)')/2;
# end
# end
#' @title proj_TpM_spd
#' @description Projects a set of matrices V onto the tangent space of the SPD manifold (transforms each matrix into a symmetric matrix).
#' @param V A dxdxN array of dxd matrices to project onto the space of symmetric matrices.
#' @export
proj_TpM_spd <- function(V) {
# PROJ_TPM_SPD projects a set of tangent vectors V onto TpM by symmetrization:
# each slice V[,,i] is replaced by (V[,,i] + t(V[,,i])) / 2.
#
# See also MGLM_SPD
# Hyunwoo J. Kim
# $Revision: 0.1 $ $Date: 2014/06/23 16:59:20 $
# Migrated to R by Matthew RP Parker
# $Revision: 0.2 $ $Date: 2019/06/06 $
#
# A single d x d matrix is promoted to a d x d x 1 array first.
# NOTE(review): aug3() is a package-internal helper assumed to do that promotion.
if(length(dim(V))==2) { V <- aug3(V) }
# seq_len(dim(V)[3]) iterates over slices; unlike 1:length(V[1,1,]) it is
# safe (empty loop) if the third dimension were ever 0.
for(i in seq_len(dim(V)[3])) {
V[,,i] <- (V[,,i] + t(V[,,i]))/2
}
return(V)
}
| /R/proj_TpM_spd.R | permissive | zhangzjjjjjj/MGLMRiem | R | false | false | 996 | r | # function V = proj_TpM_spd(V)
# %PROJ_TPM_SPD projects a set of tangent V vectors onto TpM. Symmetrization.
# %
# % See also MGLM_SPD
#
# % Hyunwoo J. Kim
# % $Revision: 0.1 $ $Date: 2014/06/23 16:59:20 $
#
# for i = 1:size(V,3)
# V(:,:,i) = (V(:,:,i)+V(:,:,i)')/2;
# end
# end
#' @title proj_TpM_spd
#' @description Projects a set of matrices V onto the tangent space of the SPD manifold (transforms each matrix into a symmetric matrix).
#' @param V A dxdxN array of dxd matrices to project onto the space of symmetric matrices.
#' @export
proj_TpM_spd <- function(V) {
# PROJ_TPM_SPD projects a set of tangent vectors V onto TpM by symmetrization:
# each slice V[,,i] is replaced by (V[,,i] + t(V[,,i])) / 2.
#
# See also MGLM_SPD
# Hyunwoo J. Kim
# $Revision: 0.1 $ $Date: 2014/06/23 16:59:20 $
# Migrated to R by Matthew RP Parker
# $Revision: 0.2 $ $Date: 2019/06/06 $
#
# A single d x d matrix is promoted to a d x d x 1 array first.
# NOTE(review): aug3() is a package-internal helper assumed to do that promotion.
if(length(dim(V))==2) { V <- aug3(V) }
# seq_len(dim(V)[3]) iterates over slices; unlike 1:length(V[1,1,]) it is
# safe (empty loop) if the third dimension were ever 0.
for(i in seq_len(dim(V)[3])) {
V[,,i] <- (V[,,i] + t(V[,,i]))/2
}
return(V)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/transitions.R
\name{transitions}
\alias{transitions}
\title{Transitions between elements of a sequence}
\usage{
transitions(S, strucmat = NULL, xstart = TRUE, xstop = FALSE, out = "ml")
}
\arguments{
\item{S}{character, the sequence}
\item{strucmat}{an optional matrix with column and row names (see details and examples)}
\item{xstart}{logical, should start state be included}
\item{xstop}{logical, should end state be included}
\item{out}{character: either \code{"ml"} or \code{"bayes"}}
}
\description{
Transitions between elements of a sequence
}
\details{
Usage of \code{strucmat} might be helpful for at least two reasons:
1) if there are constraints on the possible transitions. For example, a start cannot be followed by a stop (that wouldn't be a sequence). Such impossible transitions are indicated by 0 in this matrix.
2) if sequences are analysed that not all contain the same/complete set of elements. This might be useful if you want to generate matching outputs even though the actual observed sequences (and hence the result of the function) might differ (e.g. in the case of letter transitions in words)
}
\examples{
transitions("AAAAABBC", out="ml")
transitions("AAAAABBC", out="bayes")
smat <- matrix(1, ncol=3, nrow=4);
colnames(smat) <- c("A", "B", "C"); rownames(smat) <- c("start", "A", "B", "C")
transitions("AAAAABBC", out="ml", strucmat = smat)
transitions("AAAAABBC", out="bayes", strucmat = smat)
# errors:
# transitions("AAAAABBC", out="ml", strucmat = smat, xstop = TRUE)
# transitions("AAAAABBC", out="bayes", strucmat = smat, xstop = TRUE)
# add a stop column, but constrain that starts cannot be followed by stops
smat <- cbind(smat, 1); colnames(smat)[4] <- "stop"; smat[1, 4] <- 0
transitions("AAAAABBC", out="ml", strucmat = smat, xstop = TRUE)
transitions("AAAAABBC", out="bayes", strucmat = smat, xstop = TRUE)
}
\references{
Alger, S. J., Larget, B. R., & Riters, L. V. (2016). A novel statistical method for behaviour sequence analysis and its application to birdsong. Animal behaviour, 116, 181-193.
}
| /man/transitions.Rd | no_license | gobbios/cfp | R | false | true | 2,129 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/transitions.R
\name{transitions}
\alias{transitions}
\title{Transitions between elements of a sequence}
\usage{
transitions(S, strucmat = NULL, xstart = TRUE, xstop = FALSE, out = "ml")
}
\arguments{
\item{S}{character, the sequence}
\item{strucmat}{an optional matrix with column and row names (see details and examples)}
\item{xstart}{logical, should start state be included}
\item{xstop}{logical, should end state be included}
\item{out}{character: either \code{"ml"} or \code{"bayes"}}
}
\description{
Transitions between elements of a sequence
}
\details{
Usage of \code{strucmat} might be helpful for at least two reasons:
1) if there are constraints on the possible transitions. For example, a start cannot be followed by a stop (that wouldn't be a sequence). Such impossible transitions are indicated by 0 in this matrix.
2) if sequences are analysed that not all contain the same/complete set of elements. This might be useful if you want to generate matching outputs even though the actual observed sequences (and hence the result of the function) might differ (e.g. in the case of letter transitions in words)
}
\examples{
transitions("AAAAABBC", out="ml")
transitions("AAAAABBC", out="bayes")
smat <- matrix(1, ncol=3, nrow=4);
colnames(smat) <- c("A", "B", "C"); rownames(smat) <- c("start", "A", "B", "C")
transitions("AAAAABBC", out="ml", strucmat = smat)
transitions("AAAAABBC", out="bayes", strucmat = smat)
# errors:
# transitions("AAAAABBC", out="ml", strucmat = smat, xstop = TRUE)
# transitions("AAAAABBC", out="bayes", strucmat = smat, xstop = TRUE)
# add a stop column, but constrain that starts cannot be followed by stops
smat <- cbind(smat, 1); colnames(smat)[4] <- "stop"; smat[1, 4] <- 0
transitions("AAAAABBC", out="ml", strucmat = smat, xstop = TRUE)
transitions("AAAAABBC", out="bayes", strucmat = smat, xstop = TRUE)
}
\references{
Alger, S. J., Larget, B. R., & Riters, L. V. (2016). A novel statistical method for behaviour sequence analysis and its application to birdsong. Animal behaviour, 116, 181-193.
}
|
# Scrape reader comments for one Naver Webtoon episode via a remote Selenium
# Chrome session, then save them to a local text file.
library(RSelenium)
library(rvest)
# Read Naver Webtoon comments
# 1. Connect to a Selenium server already running on localhost:4445 (Chrome).
remDr <- remoteDriver(remoteServerAddr = 'localhost',
port=4445, browserName = 'chrome')
# 2. Open a browser session.
remDr$open()
# 3. Navigate to the comment page (titleId 570503, episode no 135).
url = "http://comic.naver.com/comment/comment.nhn?titleId=570503&no=135"
remDr$navigate(url)
# 4. bestReview: collect the "best" (top-voted) comment texts shown by default.
bestNodes <- remDr$findElements(using= 'css', 'span.u_cbox_contents')
bestRview <- sapply(bestNodes, function(x)(x$getElementText()))
bestRview
# 5. commonReview: collect the full chronological comment list.
# ~1 Click the "view all comments" tab.
totalNodes <- remDr$findElement(using="css", 'span.u_cbox_in_view_comment')
totalNodes$clickElement()
# ~2 Page through pagination links 4..12, scraping each page's comment texts.
commonReviewTotal<-NULL
for(i in 4:12){
commonNodes <- remDr$findElements(using= 'css', 'span.u_cbox_contents')
commonReview <- sapply(commonNodes, function(x)(x$getElementText()))
commonReviewTotal <- c(commonReviewTotal,commonReview)
# CSS selector for the i-th pagination link in the comment module.
nextNodes <- paste0("#cbox_module > div > div.u_cbox_paginate > div > a:nth-child(",i,")")
nextPageTag <- remDr$findElements(using= 'css', nextNodes)
# nextPageTag$clickElement()
sapply(nextPageTag, function(x)(x$clickElement()))
# Wait for the next page of comments to load before scraping again.
Sys.sleep(3)
}
commonReviewTotal
# Combine best + common comments and write them to the working directory.
data<-unlist(c(bestRview,commonReviewTotal))
data
write(data,'webtoon1.txt')
getwd()
| /R_training/MyCode/2주_Web Basic/day5/2.webtoon1.R | no_license | BaeYS-marketing/R | R | false | false | 1,243 | r | library(RSelenium)
library(rvest)
#네이버 웹툰 댓글 읽기
#1
remDr <- remoteDriver(remoteServerAddr = 'localhost',
port=4445, browserName = 'chrome')
#2
remDr$open()
#3
url = "http://comic.naver.com/comment/comment.nhn?titleId=570503&no=135"
remDr$navigate(url)
#4 bestReview
bestNodes <- remDr$findElements(using= 'css', 'span.u_cbox_contents')
bestRview <- sapply(bestNodes, function(x)(x$getElementText()))
bestRview
#5. commonReview
#~1
totalNodes <- remDr$findElement(using="css", 'span.u_cbox_in_view_comment')
totalNodes$clickElement()
#~2
commonReviewTotal<-NULL
for(i in 4:12){
commonNodes <- remDr$findElements(using= 'css', 'span.u_cbox_contents')
commonReview <- sapply(commonNodes, function(x)(x$getElementText()))
commonReviewTotal <- c(commonReviewTotal,commonReview)
nextNodes <- paste0("#cbox_module > div > div.u_cbox_paginate > div > a:nth-child(",i,")")
nextPageTag <- remDr$findElements(using= 'css', nextNodes)
# nextPageTag$clickElement()
sapply(nextPageTag, function(x)(x$clickElement()))
Sys.sleep(3)
}
commonReviewTotal
data<-unlist(c(bestRview,commonReviewTotal))
data
write(data,'webtoon1.txt')
getwd()
|
#####
#####
# Merge RSEM per-sample expected counts into one gene x sample table.
#
# Args:
#   file.name: name of the output file (appended to the global `out.dir`).
#   folders:   character vector of directories, each containing one
#              "*.genes.results.withGeneName" file per sample.
#
# Side effects: writes a tab-delimited table to paste(out.dir, file.name).
# NOTE(review): relies on a global `out.dir` — confirm callers define it.
#
# Fix: the original header `merge_counts(file.name, folders) <- function {`
# is a syntax error in R (replacement-function form with no formals).
merge_counts <- function(file.name, folders) {
  file.list.path = c()
  file.list = c()
  for (i in seq_along(folders)){
    temp = list.files(folders[i], pattern=".genes.results.withGeneName")
    file.list = c(file.list, temp)
    file.list.path = c(file.list.path, paste(folders[i], temp, sep="/"))
  }
  # Fail loudly instead of erroring on an undefined `file.df` below.
  if (length(file.list) == 0) {
    stop("no *.genes.results.withGeneName files found in the given folders")
  }
  for(i in seq_along(file.list)){
    # All columns read as character; expected_count is converted explicitly.
    file.data = read.table(file.list.path[i], sep="\t", head=T, quote="", colClasses="character")
    # Round expected counts to integers for count-based downstream tools.
    file.sub = data.frame(file.data$gene_id, round(as.numeric(file.data$expected_count), 0))
    sample.id = gsub(".genes.results.withGeneName", "", file.list[i])
    names(file.sub) = c("gene_id", paste(sample.id, "_counts", sep=""))
    if(i == 1){
      file.df = file.sub
    }else{
      # Inner join on gene_id keeps genes present in every sample.
      file.df = merge(file.df, file.sub, by.x="gene_id", by.y="gene_id")
    }
  }
  write.table(file.df, paste(out.dir, file.name, sep=""), sep="\t", row=F, quote=F)
}
| /01_function/merge_counts.R | permissive | StanleyYang01/early-diet-study | R | false | false | 940 | r | #####
#####
# Merge RSEM per-sample expected counts into one gene x sample table.
#
# Args:
#   file.name: name of the output file (appended to the global `out.dir`).
#   folders:   character vector of directories, each containing one
#              "*.genes.results.withGeneName" file per sample.
#
# Side effects: writes a tab-delimited table to paste(out.dir, file.name).
# NOTE(review): relies on a global `out.dir` — confirm callers define it.
#
# Fix: the original header `merge_counts(file.name, folders) <- function {`
# is a syntax error in R (replacement-function form with no formals).
merge_counts <- function(file.name, folders) {
  file.list.path = c()
  file.list = c()
  for (i in seq_along(folders)){
    temp = list.files(folders[i], pattern=".genes.results.withGeneName")
    file.list = c(file.list, temp)
    file.list.path = c(file.list.path, paste(folders[i], temp, sep="/"))
  }
  # Fail loudly instead of erroring on an undefined `file.df` below.
  if (length(file.list) == 0) {
    stop("no *.genes.results.withGeneName files found in the given folders")
  }
  for(i in seq_along(file.list)){
    # All columns read as character; expected_count is converted explicitly.
    file.data = read.table(file.list.path[i], sep="\t", head=T, quote="", colClasses="character")
    # Round expected counts to integers for count-based downstream tools.
    file.sub = data.frame(file.data$gene_id, round(as.numeric(file.data$expected_count), 0))
    sample.id = gsub(".genes.results.withGeneName", "", file.list[i])
    names(file.sub) = c("gene_id", paste(sample.id, "_counts", sep=""))
    if(i == 1){
      file.df = file.sub
    }else{
      # Inner join on gene_id keeps genes present in every sample.
      file.df = merge(file.df, file.sub, by.x="gene_id", by.y="gene_id")
    }
  }
  write.table(file.df, paste(out.dir, file.name, sep=""), sep="\t", row=F, quote=F)
}
|
# Eric R. Gamazon
# Create required Expr Table: transpose a sample x gene expression matrix to
# gene x sample and attach gene names as row names (Matrix eQTL input format).
a <- read.table('/nas40t0/egamazon/VANDY/PREDIXCAN/DGN-WB.exp.IDxGENE', header=F)
# Transpose: rows become genes, columns become samples.
a.t <- t(a)
dim(a.t)
# One gene identifier per row of the transposed matrix.
# NOTE(review): assumes the gene list order matches the columns of the
# original IDxGENE file — confirm upstream.
genes <- read.table('/nas40t0/egamazon/VANDY/PREDIXCAN/DGN-WB.exp.GENE.list', header=F)
row.names(a.t) = t(genes)
# Tab-delimited GENExID table: row names kept, no column header.
write.table(a.t, file="DGN-WB.forMatrixeQTL.GENExID", sep="\t", row.names=T, quote=F, col.names=F)
| /Paper-Scripts/Eric/createDGNExprTable.r | permissive | cdesyoun/docker_predixcan | R | false | false | 366 | r | # Eric R. Gamazon
# Create required Expr Table
a <- read.table('/nas40t0/egamazon/VANDY/PREDIXCAN/DGN-WB.exp.IDxGENE', header=F)
a.t <- t(a)
dim(a.t)
genes <- read.table('/nas40t0/egamazon/VANDY/PREDIXCAN/DGN-WB.exp.GENE.list', header=F)
row.names(a.t) = t(genes)
write.table(a.t, file="DGN-WB.forMatrixeQTL.GENExID", sep="\t", row.names=T, quote=F, col.names=F)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggplot_themes.R
\name{theme_delabj}
\alias{theme_delabj}
\alias{theme_delabj_dark}
\title{My Prefered ggplot2 themes}
\usage{
theme_delabj(
base_size =12,
base_family = "Poppins",
base_line_size = .5,
base_rect_size = .5)
theme_delabj_dark(
base_size = 12,
base_family = "Poppins",
base_line_size = base_size/22,
base_rect_size = base_size/22)
theme_delabj_dark(
font = "Poppins",
title_font = "Open Sans",
main.text.color = "#D6D6D6",
sub.text.color = "#D6D6D6",
base.size = 15,
plot.background.color = "#343E48",
legend.position = "bottom",
panel.border.color = "#343E48",
panel.background = "#343E48",
panel.grid.color = "#495866",
axis.text.color = "#D6D6D6",
axis.text.size = base.size * 0.67,
subtitle.size = base.size * 0.9,
legend.text = base.size * 0.6,
legend.title = base.size * 0.93,
axis.title.size = base.size * 0.8,
title.size = 15
)
}
\arguments{
\item{base_size}{base font size}
\item{base_family}{base font family (Poppins by default)}
\item{base_line_size}{base size for line elements}
\item{base_rect_size}{base size for rect elements}
}
\description{
A custom theme for ggplot2 based on theme_minimal(). A light and dark theme are both available.
}
\details{
theme_delabj has an off white background and light gridlines.
theme_delabj_dark has a dark grey background and dark gridlines.
}
\examples{
## Not Run
library(ggplot2)
df <- data.frame(x = factor(rep(letters[1:5], each = 10)), y = rnorm(50), color=(rep(c("A", "B", "C", "B", "A"), each=10)))
plot <- ggplot(df, aes(x = x, y = y, color=color)) + geom_jitter()
plot + theme_delabj()
plot + theme_delabj_dark()
}
| /man/delabj_themes.Rd | no_license | jimsforks/delabj | R | false | true | 1,748 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggplot_themes.R
\name{theme_delabj}
\alias{theme_delabj}
\alias{theme_delabj_dark}
\title{My Prefered ggplot2 themes}
\usage{
theme_delabj(
base_size =12,
base_family = "Poppins",
base_line_size = .5,
base_rect_size = .5)
theme_delabj_dark(
base_size = 12,
base_family = "Poppins",
base_line_size = base_size/22,
base_rect_size = base_size/22)
theme_delabj_dark(
font = "Poppins",
title_font = "Open Sans",
main.text.color = "#D6D6D6",
sub.text.color = "#D6D6D6",
base.size = 15,
plot.background.color = "#343E48",
legend.position = "bottom",
panel.border.color = "#343E48",
panel.background = "#343E48",
panel.grid.color = "#495866",
axis.text.color = "#D6D6D6",
axis.text.size = base.size * 0.67,
subtitle.size = base.size * 0.9,
legend.text = base.size * 0.6,
legend.title = base.size * 0.93,
axis.title.size = base.size * 0.8,
title.size = 15
)
}
\arguments{
\item{base_size}{base font size}
\item{base_family}{base font family (Poppins by default)}
\item{base_line_size}{base size for line elements}
\item{base_rect_size}{base size for rect elements}
}
\description{
A custom theme for ggplot2 based on theme_minimal(). A light and dark theme are both available.
}
\details{
theme_delabj has an off white background and light gridlines.
theme_delabj_dark has a dark grey background and dark gridlines.
}
\examples{
## Not Run
library(ggplot2)
df <- data.frame(x = factor(rep(letters[1:5], each = 10)), y = rnorm(50), color=(rep(c("A", "B", "C", "B", "A"), each=10)))
plot <- ggplot(df, aes(x = x, y = y, color=color)) + geom_jitter()
plot + theme_delabj()
plot + theme_delabj_dark()
}
|
#code to run on summarized winter data
#+++++++++++ functions to perform and analyze Tukey test +++++++++++####
plot.CFSR.Annual.var <- function(df,maxday=150,varname,maxbaseyr=2011){
# Boxplot a daily CFSR variable by base year and run a Tukey HSD comparison
# between years. Returns the summary table produced by TukeyTable().
#   df:        data frame with columns BaseYear, DOYsinceBase and `varname`
#   maxday:    keep only observations with DOYsinceBase < maxday
#   varname:   name of the column to analyse (string)
#   maxbaseyr: last base year included (years 2000..maxbaseyr are kept)
ss<-which((df$BaseYear >1999)&(df$DOYsinceBase < maxday)&(df$BaseYear <= maxbaseyr))
# win.graph() is Windows-only; opens a 9 x 5 inch plotting device.
win.graph(width=9,h=5)
plot(as.factor(df$BaseYear[ss]),df[[varname]][ss],ylab=varname,col="grey")
# Count, per year, how often it was significantly higher / not different /
# significantly lower than the other years.
Tukeytab <- TukeyTable(classes=as.factor(df$BaseYear[ss]),dependent=df[[varname]][ss],compnames=
c(paste('higher ',varname,sep=''),'no diff.',paste('lower ',varname,sep='')))
return(Tukeytab)
}
comp <- function(year, ttt) {
  # Locate the Tukey HSD comparison rows that involve `year`.
  # Row names have the form "YYYY-YYYY"; returns a two-element list:
  #   [[1]] indices where `year` is the first year of the pair,
  #   [[2]] indices where `year` is the second.
  yr <- as.character(year)
  row_ids <- rownames(ttt)
  first_pos <- which(substr(row_ids, 1, 4) == yr)
  second_pos <- which(substr(row_ids, 6, 9) == yr)
  list(first_pos, second_pos)
}
TukeyTable <- function(classes,dependent,compnames,alpha=0.05){
# Summarize pairwise Tukey HSD results per class level: for each level, count
# in how many pairwise comparisons it was significantly higher, not different,
# or significantly lower than the other levels.
#   classes:   factor of group labels (e.g. years)
#   dependent: numeric response, same length as `classes`
#   compnames: three row labels (higher / no diff. / lower)
#   alpha:     significance threshold on the adjusted p-value
# Returns a 3 x nlevels(classes) count matrix.
classes<-droplevels(classes)
#tt<-TukeyHSD(aov(allNDVI[ss]~years.from.baseline.fac[ss]))
tt<-TukeyHSD(aov(dependent~classes))
# The "classes" element is the pairwise table: diff, lwr, upr, p adj.
ttt<-tt$classes
# a function to summarize the output from TukeyHSD
allcomps<-NULL
for(i in levels(classes)){
# comp() splits row indices into pairs where level i is first vs. second;
# column 4 is the adjusted p-value, column 1 the mean difference.
signif <- c(ttt[comp(i,ttt=ttt)[[1]],4]<alpha,ttt[comp(i,ttt=ttt)[[2]],4]<alpha)
# Sign convention: diff > 0 when i is first, diff < 0 when i is second —
# both mean level i had the larger mean.
diffs <- c(ttt[comp(i,ttt=ttt)[[1]],1]>0,ttt[comp(i,ttt=ttt)[[2]],1]<0)
newcol <- c(length(which(diffs==T & signif==T)),
length(which(signif==F)),
length(which(diffs==F & signif ==T)))
allcomps<-cbind(allcomps,newcol)}
colnames(allcomps) <- levels(classes)
rownames(allcomps) <- compnames
return(allcomps)}
#+++++++++++ a simple plot for yearly time series +++++++++++++++++####
Simple.plot <- function(CFSR.dat.d.stats,Varname=NULL,minbaseyr=2000,maxbaseyr=2011,maxday=500){
# Smoothed lattice time-series plot of one variable, one curve per BaseYear.
#   CFSR.dat.d.stats: data frame with BaseYear, DOYsinceBase and `Varname`
#   Varname:          column to plot (string)
#   minbaseyr/maxbaseyr: inclusive range of base years kept
#   maxday:           keep only observations with DOYsinceBase < maxday
df<-CFSR.dat.d.stats
df<-df[(is.element(df$BaseYear,c(minbaseyr:maxbaseyr))&(df$DOYsinceBase < maxday)),]
require(lattice)
# win.graph() is Windows-only; opens a new plotting device.
win.graph()
# type='smooth' draws a loess curve (span .3) per year group.
xyplot(get(Varname) ~ DOYsinceBase, data = df,ylab=Varname,lwd=2,
groups = as.factor(BaseYear),type='smooth',span=.3,auto.key=T)
#groups = as.factor(BaseYear),span=.3,auto.key=T)
}
#+++++++++++ calculate daily anomalies per DOY and cellnr for CFSR data +++++++++++++++++####
Daily.anom.stats <- function(CFSR.dat.d,Varname=NULL,minbaseyr=2000,maxbaseyr=2011,maxday=500){
# Add a per-observation anomaly column (`<Varname>_anom`) computed against
# the long-term mean for the same calendar day (MM-DD) and grid cell.
# Returns the restricted data frame with the extra anomaly column appended.
#restrict the data set as requested
df<-CFSR.dat.d
df<-df[(is.element(df$BaseYear,c(minbaseyr:maxbaseyr))&(df$DOYsinceBase < maxday)),]
#calculate the long-term daily means per cell
# (grouped by the MM-DD substring of imdate and by cell number)
long.term.daily.mn <- aggregate(df,by=list(substr(df$imdate,6,10),df$cellnrs),FUN=mean)
#convert the observations to anomalies from the long-term cell&day-level means
long.term.daily.mn$DOYsinceBase <- ceiling(long.term.daily.mn$DOYsinceBase)
require(plyr)
df3 <- join(df,long.term.daily.mn,by=c("DOYsinceBase","cellnrs"),type="left",match="first")
#df3 <- df3[,!duplicated(colnames(df3))]
# The join leaves `Varname` duplicated: column 1 is the observation,
# column 2 the long-term mean for that day/cell.
vardf <- df3[,colnames(df3)==Varname]
anomname <- paste(Varname,"_anom",sep="")
anom <- vardf[,1]-vardf[,2]
df[[anomname]]<-anom
return(df)
}
#++++++++###+++++++++++ load output of NCEP_CFSR_data_extract.R+++++++++++++++++++####
#load("C:\\Data\\WHRC\\Hummers\\R_data\\Mexico_CFSR_level2.rdata")
#load("C:\\Users\\pbeck.WHRC\\Dropbox\\Hummers\\Hummer_code\\P4_Extreme_events_Broad_tailed\\Results\\Mexico_CFSR_level2.rdata")
#load("A:\\Share\\pbeck\\Hummer_NASA\\Code_copy\\Mexico_CFSR_level2.rdata")
#load("C:/Users/sarah/Documents/GitHub/extreme_limits/data/Mexico_CFSR_level2.rdata")
load("C:/Users/tcormier/Dropbox (WHRC)/hb_extremelimits/DATA-USED-IN-EXTREME-LIMITS/TC_recreate/Mexico_CFSR_level2.rdata")
# Cld.dat.d;Cld.data.d.stats
# Prec.dat.d;Prec.data.d.stats
# Tmin.dat.d;Tmin.dat.d.stats
# Wnd.dat.d;Wnd.data.d.stats
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#lines of yearly NDVI and cumulative precip
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++####
precip.fac <- 6*60*60
# maxDOY<-max(annual.anom.df$DOYsinceBase.y)
# rainbowcols<-rainbow(maxDOY*5/4)[1:(maxDOY+1)]
#Simple smoothed plots of annual NCEP CFSR ts +++++++++++++++++++####
Simple.plot(Tes.dat.d.stats,"Tes_mn",minbaseyr=2004,maxday=250)
Simple.plot(Prec.dat.d.stats,"cumpre_mn",minbaseyr=2004,maxday=250)
#Simple.plot(Wnd.dat.d.stats,"wnd_mn",minbaseyr=2004,maxday=250)
#Simple.plot(Cld.dat.d.stats,"cld_mn",minbaseyr=2004,maxday=250)
Simple.plot(Tmin.dat.d.stats,"tmin_mn",minbaseyr=2004,maxday=250)
#Simple.plot(Te.dat.d.stats,"cumallbelow10_mn",minbaseyr=2004,maxbaseyr=2010,maxday=133) #this doesn't get createdin NCEP_CFSR... version that I have - do we need it?
#Tukey tests of yearly means - Can you do this on ts in non-paired fashion? +++++++++++++++++++####
Tukey.cumpre<-plot.CFSR.Annual.var(Prec.dat.d,maxday=150,varname="pre")
Tukey.tmin<-plot.CFSR.Annual.var(Tmin.dat.d,maxday=150,varname="tmin")
#Tukey.wnd<-plot.CFSR.Annual.var(Wnd.dat.d,maxday=150,varname="wnd")
#Tukey.cld<-plot.CFSR.Annual.var(Cld.dat.d,maxday=150,varname="cld")
#anomalies: plot and summary statistics of daily NCEP CFSR +++++++++++++++++++####
Tmin.dat.d <- Daily.anom.stats(Tmin.dat.d,Varname="tmin",minbaseyr=2000,maxbaseyr=2012,maxday=134)
Tukey.tesanom<-plot.CFSR.Annual.var(Tes.dat.d,maxday=134,varname="Tes_anom");abline(h=0)
Tes.dat.d <- Daily.anom.stats(Tes.dat.d,Varname="Tes",minbaseyr=2000,maxbaseyr=2012,maxday=134)
Tukey.tesanom<-plot.CFSR.Annual.var(Tes.dat.d,maxday=134,varname="Tes_anom");abline(h=0)
Prec.dat.d <- Daily.anom.stats(Prec.dat.d,Varname="cumpre",minbaseyr=2000,maxbaseyr=2012,maxday=134)
Tukey.cumpreanom <- plot.CFSR.Annual.var(Prec.dat.d,maxday=150,varname="cumpre_anom");abline(h=0)
#Te.dat.d <- Te.dat.d[,colnames(Te.dat.d)!="allbelow10"]
Te.dat.d <- Daily.anom.stats(Te.dat.d,Varname="Te",minbaseyr=2000,maxbaseyr=2012,maxday=134)
Tukey.cumpreanom <- plot.CFSR.Annual.var(Te.dat.d,maxday=150,varname="Te_anom");abline(h=0)
#Very 0-inflated distribution - do cumulative?
#Wnd.dat.d <- Daily.anom.stats(Wnd.dat.d,Varname="wnd",minbaseyr=2000,maxbaseyr=2012,maxday=250)
# Tukey.wndanom<-plot.CFSR.Annual.var(Wnd.dat.d,maxday=150,varname="wnd_anom");abline(h=0)
#Cld.dat.d <- Daily.anom.stats(Cld.dat.d,Varname="cld",minbaseyr=2000,maxbaseyr=2012,maxday=250)
# Tukey.cldanom <- plot.CFSR.Annual.var(Cld.dat.d,maxday=150,varname="cld_anom");abline(h=0)
#this df will hold yearly anomalies for meteo and NDVI over the focal period
annual.anom.df <- join(Tes.dat.d,Prec.dat.d)
annual.anom.df <- join(annual.anom.df,Tmin.dat.d)
annual.anom.df <- join(annual.anom.df,Te.dat.d)
#annual.anom.df <- join(annual.anom.df,Cld.dat.d)
#annual.anom.df <- join(annual.anom.df,Wnd.dat.d)
#make them daily (ie take averages across points on the same day)
annual.anom.df <- aggregate(annual.anom.df, by=list(annual.anom.df$imdate),FUN=mean)
#+++++++++++++++++++ load the NDVI d.f.+++++++++++++++++++####
#load(file="A:\\Share\\pbeck\\Hummer_NASA\\Code_copy\\NDVI_df.rdata")
#load("C:/Users/sarah/Documents/GitHub/extreme_limits/data/NDVI_df.rdata")
load("C:/Users/tcormier/Dropbox (WHRC)/hb_extremelimits/DATA-USED-IN-EXTREME-LIMITS/NDVI_df.rdata")
#NDVI.dat
NDVI.dat<-NDVI.dat[!duplicated(NDVI.dat),]
colnames(NDVI.dat)
#+++++++++++ make 1 smoothed NDVI ts ++++++++++####
NDVI.dat.daily<-aggregate(NDVI.dat,by=list(NDVI.dat$allDate),FUN=mean)
NDVI.dat.daily$years.from.baseline.fac <- as.factor(NDVI.dat.daily$years.from.baseline)
NDVI.dat.daily$allYEARfac <- as.factor(NDVI.dat.daily$allYEAR)
aqf <- function(x,splinemod=spl) {
zz <- predict(splinemod, x)$y;zz}
win.graph()
plot(NDVI.dat$allDate,NDVI.dat$allNDVI,pch=16,col=rgb(.5,.5,.5,.2),cex=.2,ylab="MODIS (Terra & Aqua) NDVI",
xlab="",main="Vegetation productivity on Broad-tailed Hummingbird wintering grounds",
ylim=c(.4,.9))#,xlim=as.Date(c())
spl <- smooth.spline(NDVI.dat$allDate,NDVI.dat$allNDVI,df=80)
prednrs <- c(min(spl$x):max(spl$x))
lines(as.Date(prednrs,origin="1970-01-01"),aqf(x=prednrs,splinemod=spl),col="dark green",lwd=2.5)
abline(v=as.Date(paste(c(2001:2013),"-01-01",sep="")),lty=2)
spl2 <- smooth.spline(NDVI.dat.daily$allDate,NDVI.dat.daily$allNDVI,df=80)
#plot(NDVI.dat.daily$allDate,NDVI.dat.daily$allNDVI,pch=16,col="grey",cex=.5)
lines(as.Date(prednrs,origin="1970-01-01"),aqf(x=prednrs,splinemod=spl2),col=4)
smoothNDVIdf <- cbind.data.frame(as.Date(prednrs,origin="1970-01-01"),aqf(x=prednrs,splinemod=spl))
colnames(smoothNDVIdf) <- c("imdate","smoothNDVI")
smoothNDVIdf <- cbind.data.frame(smoothNDVIdf,days.since.base(smoothNDVIdf$imdate,baseline="-11-01"))
smoothNDVIdf[["cellnrs"]]<-rep(-1,nrow(smoothNDVIdf))
smoothNDVIdf <- Daily.anom.stats(smoothNDVIdf,Varname="smoothNDVI",minbaseyr=2000,maxbaseyr=2012,maxday=250)
Tukey.NDVIanom <- plot.CFSR.Annual.var(smoothNDVIdf,maxday=150,varname="smoothNDVI_anom");abline(h=0)
#++++++++++++++++++++ A DF TO HOLD ANNUAL SERIES FOR MULTIPLE VARS ++++####smoothNDVIdf <- cbind.data.frame(as.Date(prednrs,origin="1970-01-01"),aqf(x=prednrs,splinemod=spl2))
annual.anom.df.backup <- annual.anom.df
#annual.anom.df<-annual.anom.df.backup
annual.anom.df<- merge(x=annual.anom.df,y=smoothNDVIdf,FUN=mean,by.x="Group.1",by.y="imdate")
#keep only the points in the focal years
annual.anom.df <- annual.anom.df[(annual.anom.df$BaseYear.x <= 2011)&(annual.anom.df$BaseYear.x >= 2000),] #CHECK THE ONE-FIFTY NR
#save(annual.anom.df,file="A:\\Share\\pbeck\\Hummer_NASA\\Code_copy\\annual.anom.df.rdata")
#save(annual.anom.df, file="C:/Users/sarah/Documents/GitHub/extreme_limits/data/annual.anom.df.rdata")
save(annual.anom.df, file="C:/Users/tcormier/Dropbox (WHRC)/hb_extremelimits/DATA-USED-IN-EXTREME-LIMITS/TC_recreate/annual.anom.df.rdata")
# Simple smoothed plots of annual NDVI ts+++++++++++++++++++####
Simple.plot(smoothNDVIdf,"smoothNDVI",minbaseyr=2002,maxday=250)
Tukey.NDVI<-plot.CFSR.Annual.var(smoothNDVIdf,maxday=150,varname="smoothNDVI")
smoothNDVIdf <- Daily.anom.stats(smoothNDVIdf,Varname="smoothNDVI",minbaseyr=2000,maxbaseyr=2012,maxday=250)
Tukey.NDVI <- plot.CFSR.Annual.var(smoothNDVIdf,maxday=150,varname="smoothNDVI");abline(h=0)
#++++++++++++++++++++ A FUNCTION TO COMPARE 2 VARS BETWEEN ONE YEAR AND THE OTHERS ++++####
BivariateWinterComp<- function(df=annual.anom.df,xvar,yvar,maxDOY=250,focalBaseYr=2010,
                               Focalpoints=T,PointsLegend=T,focalBag=F,
                               xlabb=NULL,
                               ylabb=NULL,
                               xarrow=c(NA,NA),yarrow=c(NA,NA),legpos="topleft")
{
# Compare two (anomaly) variables for one focal winter against all other
# winters: draws a bagplot of the non-focal years, then overlays the focal
# year's daily observations colored by days since 1 Nov.
#   df:          data frame with BaseYear.x, DOYsinceBase.y and the two columns
#   xvar, yvar:  column names (strings) to plot on x and y
#   maxDOY:      keep only observations with DOYsinceBase.y <= maxDOY
#   focalBaseYr: the winter to highlight
#   xarrow/yarrow: optional c(low-label, high-label) annotation arrows
# Draws on the current device; returns NULL.
require(aplpack)
df<-df[df$DOYsinceBase.y<=maxDOY,]
xx<-df[[xvar]];yy<-df[[yvar]]
# ss selects the focal year's observations.
ss<-df$BaseYear.x==focalBaseYr
# Grey bagplot summarizes the distribution of all NON-focal years.
bagplot(x=xx[!ss],y=yy[!ss],na.rm=T, xlab=xlabb,ylab=ylabb,
        show.outlier=T,show.bagpoints=F,show.looppoints=F,show.whiskers=F,
        transparency=T,xlim=range(xx,na.rm=T),ylim=range(yy,na.rm=T),cex=0.4
        ,col.loophull=grey(.75)
        ,col.baghull=grey(.5),col.bagpoints=1
)
cat(length(which(ss))," observations in focal year\n")
cat(length(which(!ss))," observations in all other years focal year\n")
# Optionally also draw a bagplot of the focal year itself.
if(focalBag){bagplot(x=xx[ss],y=yy[ss],na.rm=T,
                     show.outlier=F,show.bagpoints=F,show.looppoints=F,show.whiskers=F,
                     transparency=T,xlim=range(xx,na.rm=T),ylim=range(yy,na.rm=T),cex=0,add=T)}
# Rainbow ramp over DOY 0..maxDOY (only the first 4/5 of the wheel is used
# so the color scale does not wrap around).
rainbowcols<-rainbow(maxDOY*5/4)[1:(maxDOY+1)]
if(Focalpoints){
  #points(xx[ss],yy[ss],pch=16,col=rainbow(df$DOYsinceBase.y[ss]/maxDOY))
  points(xx[ss],yy[ss],pch=16,col=rainbowcols[df$DOYsinceBase.y[ss]+1])
  #points(xx[ss],yy[ss])
}
# Optional annotation arrows with low/high labels along each axis.
if(!is.na(yarrow[1])){
  arrows(x0=min(xx,na.rm=T),y0=min(yy,na.rm=T)/3*1,y1=min(yy,na.rm=T)/3*2,length=.1)
  mtext(side=2,line=-1,at=max(yy,na.rm=T)/2,text=yarrow[2])
  arrows(x0=min(xx,na.rm=T),y0=max(yy,na.rm=T)/3*1,y1=max(yy,na.rm=T)/3*2,length=.1)
  mtext(side=2,line=-1,at=min(yy,na.rm=T)/2,text=yarrow[1])
}
if(!is.na(xarrow[1])){
  arrows(y0=min(yy,na.rm=T),x0=min(xx,na.rm=T)/3*1,x1=min(xx,na.rm=T)/3*2,length=.1)
  mtext(side=1,line=-1,at=max(xx,na.rm=T)/2,text=xarrow[2])
  arrows(y0=min(yy,na.rm=T),x0=max(xx,na.rm=T)/3*1,x1=max(xx,na.rm=T)/3*2,length=.1)
  mtext(side=1,line=-1,at=min(xx,na.rm=T)/2,text=xarrow[1])
}
#browser()
if(PointsLegend){
  # Legend at ~0/20/40/60/80/100% of maxDOY (ends nudged into valid range).
  levs<-c(0,.2,.4,.6,.8,1)*maxDOY;levs[1]<-levs[1]+1;levs[length(levs)]<-levs[length(levs)]-1
  # legend("topright",pch=16,title=paste("Days since 1 Nov ", focalBaseYr,sep=""),col=grey(levs/maxDOY)
  #       ,legend=paste(round(levs)," days",sep=""),box.col=NULL,bty="n")
  #points(pch=16,-4:0,rep(-0.005,length(levs)),col=rainbowcols[round(levs)])
  legend(legpos,pch=16,title=paste("Days since 1 Nov ", focalBaseYr,sep=""),col=rainbowcols[round(levs)]
         ,legend=paste(round(levs)," days",sep=""),box.col=NULL,bty="n")
  #legend("topright",pch=1,paste("Days since 1 Nov ", focalBaseYr,sep=""),
  #      ,legend=paste(round(levs)," days",sep=""),bty="n",text.col=NA)
}
box();abline(h=0,lty=1,col="grey");abline(v=0,lty=1,col="grey")
return()
}
win.graph()
BivariateWinterComp(df=annual.anom.df,xvar="Te_anom",yvar="cumpre_anom",maxDOY=134,focalBaseYr=2010,
Focalpoints=T,PointsLegend=T,focalBag=F,
xlabb="Te anomaly (C)",ylabb="Cumulative precipitation anomaly (mm)"
,xarrow=c("colder","warmer")
,yarrow=c("dryer","wetter"))
win.graph()
BivariateWinterComp(df=annual.anom.df,xvar="smoothNDVI_anom",yvar="cumpre_anom",maxDOY=134,focalBaseYr=2010,
Focalpoints=T,PointsLegend=T,focalBag=F,
xlabb="NDVI anomaly",ylabb="Cumulative precipitation anomaly (mm)"
,xarrow=c("less productive","more productive")
,yarrow=c("dryer","wetter"))
win.graph()
BivariateWinterComp(df=annual.anom.df,xvar="smoothNDVI_anom",yvar="Te_anom",maxDOY=134,focalBaseYr=2010,
Focalpoints=T,PointsLegend=T,focalBag=F,
xlabb="NDVI anomaly",ylabb="Te anomaly (C)")
# win.graph()
# BivariateWinterComp(df=annual.anom.df,xvar="tmin_anom",yvar="cld_anom",maxDOY=134,focalBaseYr=2010,
# Focalpoints=T,PointsLegend=T,focalBag=F,
# xlabb="Tmin anomaly (C)",ylabb="Cloudiness anomaly (%)",
# xarrow=c("colder","warmer")
# ,yarrow=c("cloudier","clearer"))
# win.graph()
# BivariateWinterComp(df=annual.anom.df,xvar="tmin_anom",yvar="wnd_anom",maxDOY=134,focalBaseYr=2006,
# Focalpoints=T,PointsLegend=T,focalBag=F,
# xlabb="Tmin anomaly (C)",ylabb="Wind speed anomaly",legpos="topright",
# xarrow=c("colder","warmer")
# ,yarrow=c("more still","windier"))
win.graph()
BivariateWinterComp(df=annual.anom.df,xvar="smoothNDVI_anom",yvar="cumpre_anom",maxDOY=134,focalBaseYr=2009,
Focalpoints=T,PointsLegend=T,focalBag=F,
xlabb="NDVI anomaly",ylabb="Cumulative precipitation anomaly (mm)"
,xarrow=c("less productive","more productive")
,yarrow=c("dryer","wetter"))
# win.graph()
# BivariateWinterComp(df=annual.anom.df,xvar="tmin_anom",yvar="cld_anom",maxDOY=134,focalBaseYr=2009,
# Focalpoints=T,PointsLegend=T,focalBag=F,
# xlabb="Tmin anomaly (C)",ylabb="Cloudiness anomaly (%)",
# xarrow=c("colder","warmer")
# ,yarrow=c("cloudier","clearer"))
# win.graph()
# BivariateWinterComp(df=annual.anom.df,xvar="tmin_anom",yvar="cld_anom",maxDOY=134,focalBaseYr=2005,
# Focalpoints=T,PointsLegend=T,focalBag=F,
# xlabb="Tmin anomaly (C)",ylabb="Cloudiness anomaly (%)",
# xarrow=c("colder","warmer")
# ,yarrow=c("dryer","wetter"))
#
# win.graph()
# BivariateWinterComp(df=annual.anom.df,xvar="tmin_anom",yvar="cld_anom",maxDOY=134,focalBaseYr=2005,
# Focalpoints=T,PointsLegend=T,focalBag=F,
# xlabb="Tmin anomaly (C)",ylabb="Cloudiness anomaly (%)",
# xarrow=c("colder","warmer")
# ,yarrow=c("cloudier","clearer"))
# plot and summary statistics of daily NDVI anomalies+++++++++++++++++++####
long.term.daily.mnNDVI <- aggregate(smoothNDVIdf,by=list(substr(smoothNDVIdf$allDate,6,10)),FUN=mean)
#convert the observations to anomalies from the long-term cell&day-level means
require(plyr)
df3 <- join(smoothNDVIdf,long.term.daily.mnNDVI,by="DOYsinceBase",type="left")
df3 <- df3[,(ncol(smoothNDVIdf)+1):ncol(df3)]
anomname <- "smoothNDVI_anom"
anom <- smoothNDVIdf[["smoothNDVI"]]-df3[["smoothNDVI"]]
smoothNDVIdf[[anomname]]<-anom
################### older code ##########
#+++++++++++++++++++ load the Tmin Torpor d.f.+++++++++++++++++++####
load(file="C:\\Data\\WHRC\\Hummers\\Hummer_code\\P4_Extreme_events_Broad_tailed\\Results\\Torpor_df.rdata")
#Tmin_torpor.dat
Tmin_torpor.dat<-Tmin_torpor.dat[!duplicated(Tmin_torpor.dat),]
#+++++++++++ test Torpor days differences between years ++++++++++####
ss<-which((Tmin_torpor.dat$years.from.baseline >2000)&(Tmin_torpor.dat$days.sinceOct31 < 150)&(Tmin_torpor.dat$years.from.baseline <2012))
plot(Tmin_torpor.dat$years.from.baseline.fac[ss],Tmin_torpor.dat$allCFSR[ss],col="grey")
TukeyTminTorpor <- TukeyTable(classes=Tmin_torpor.dat$years.from.baseline.fac[ss],dependent=Tmin_torpor.dat$allCFSR[ss],
compnames=c('higher Tmin_stress','no diff.','lower Tmin_stress'))
TukeyTminTorpor
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++####
# days.since.base<-function(mydates,baseline="-01-01")
# {
# my.years<-as.numeric(substr(mydates,1,4))
# prev.baseline <- as.Date(paste(my.years-1,baseline,sep=""))
# nr.of.days.in.year <- as.numeric(as.Date(paste(my.years-1,"-12-31",sep=""))-as.Date(paste(my.years-1,"-01-01",sep="")))+1
# days.since <- as.numeric(mydates-prev.baseline)
# my.years[days.since>=nr.of.days.in.year]<-my.years[days.since>=nr.of.days.in.year]+1
# days.since <- days.since%%nr.of.days.in.year-1
# return(cbind(days.since,my.years))
# }
| /Compare_years_extracted_CFSR_data.r | permissive | sarahsupp/extreme_limits | R | false | false | 18,487 | r |
#code to run on summarized winter data
#+++++++++++ functions to perform and analyze Tukey test +++++++++++####
# Boxplot of `varname` by winter (BaseYear) plus a Tukey HSD summary table.
#
# df        data frame with columns BaseYear, DOYsinceBase and `varname`
# maxday    keep only observations with DOYsinceBase < maxday
# varname   name of the column to summarize
# maxbaseyr last base year to include
# minbaseyr first base year to include; the default 2000 reproduces the old
#           hard-coded `BaseYear > 1999` filter (BaseYear holds whole years)
#
# Side effect: opens a new device via win.graph() -- Windows-only; consider
# dev.new() for portability.  Returns the table built by TukeyTable().
plot.CFSR.Annual.var <- function(df, maxday = 150, varname, maxbaseyr = 2011,
minbaseyr = 2000){
# rows inside the requested year range and early-winter window
ss <- which((df$BaseYear >= minbaseyr)&(df$DOYsinceBase < maxday)&(df$BaseYear <= maxbaseyr))
win.graph(width=9,h=5)
plot(as.factor(df$BaseYear[ss]),df[[varname]][ss],ylab=varname,col="grey")
Tukeytab <- TukeyTable(classes=as.factor(df$BaseYear[ss]),dependent=df[[varname]][ss],compnames=
c(paste('higher ',varname,sep=''),'no diff.',paste('lower ',varname,sep='')))
return(Tukeytab)
}
# For a TukeyHSD comparison table `ttt` whose row names look like
# "2002-2001", return the row indices where `year` is the first label
# (chars 1-4) and where it is the second label (chars 6-9), as a
# two-element list.  Assumes 4-character level labels (calendar years).
comp <- function(year, ttt) {
yr <- as.character(year)
rn <- rownames(ttt)
list(which(substr(rn, 1, 4) == yr),
which(substr(rn, 6, 9) == yr))
}
# Summarize all pairwise Tukey HSD comparisons of `dependent` between the
# levels of `classes` (a factor).  Returns a 3 x nlevels matrix: for each
# level, the number of other levels it is significantly higher than, not
# different from, and significantly lower than (row labels = compnames).
# alpha is the significance threshold applied to the adjusted p-values.
#
# Self-contained: the row-lookup helper is defined locally instead of
# relying on the global comp() (level labels are assumed 4 chars wide,
# e.g. calendar years, matching TukeyHSD row names like "2002-2001").
TukeyTable <- function(classes, dependent, compnames, alpha = 0.05) {
classes <- droplevels(classes)
tt <- TukeyHSD(aov(dependent ~ classes))
ttt <- tt$classes   # matrix: one row per pairwise comparison "A-B"
rn <- rownames(ttt)
# rows where the given level is the first / the second label in "A-B"
first_rows  <- function(lev) which(substr(rn, 1, 4) == lev)
second_rows <- function(lev) which(substr(rn, 6, 9) == lev)
allcomps <- NULL
for (lev in levels(classes)) {
i1 <- first_rows(lev)
i2 <- second_rows(lev)
signif <- c(ttt[i1, 4] < alpha, ttt[i2, 4] < alpha)
# TRUE where this level's mean is the larger one in the comparison
diffs  <- c(ttt[i1, 1] > 0, ttt[i2, 1] < 0)
newcol <- c(length(which(diffs == TRUE & signif == TRUE)),
length(which(signif == FALSE)),
length(which(diffs == FALSE & signif == TRUE)))
allcomps <- cbind(allcomps, newcol)
}
colnames(allcomps) <- levels(classes)
rownames(allcomps) <- compnames
return(allcomps)
}
#+++++++++++ a simple plot for yearly time series +++++++++++++++++####
# Smoothed lattice time-series plot of one column of a CFSR daily-stats
# data frame, one curve per BaseYear.
#   CFSR.dat.d.stats  data frame with BaseYear, DOYsinceBase and `Varname`
#   Varname           column to plot (string; evaluated via get() inside xyplot)
#   minbaseyr/maxbaseyr  inclusive range of winters to show
#   maxday            keep only DOYsinceBase < maxday
# Side effects: loads lattice, opens a Windows device via win.graph()
# (not portable).  The xyplot object is the function's value; it renders
# when auto-printed at top level.
Simple.plot <- function(CFSR.dat.d.stats,Varname=NULL,minbaseyr=2000,maxbaseyr=2011,maxday=500){
df<-CFSR.dat.d.stats
df<-df[(is.element(df$BaseYear,c(minbaseyr:maxbaseyr))&(df$DOYsinceBase < maxday)),]
require(lattice)
win.graph()
# loess-smoothed line per year; get(Varname) resolves the column by name
xyplot(get(Varname) ~ DOYsinceBase, data = df,ylab=Varname,lwd=2,
groups = as.factor(BaseYear),type='smooth',span=.3,auto.key=T)
#groups = as.factor(BaseYear),span=.3,auto.key=T)
}
#+++++++++++ calculate daily anomalies per DOY and cellnr for CFSR data +++++++++++++++++####
# Add a "<Varname>_anom" column to a daily CFSR data frame: each observation
# minus the long-term mean for the same day-of-winter and grid cell.
#   CFSR.dat.d  data frame with imdate, cellnrs, BaseYear, DOYsinceBase, Varname
#   Varname     column for which to compute anomalies
# Returns the (year/day-restricted) data frame with the anomaly column added.
Daily.anom.stats <- function(CFSR.dat.d,Varname=NULL,minbaseyr=2000,maxbaseyr=2012,maxday=500){
#restrict the data set as requested
df<-CFSR.dat.d
df<-df[(is.element(df$BaseYear,c(minbaseyr:maxbaseyr))&(df$DOYsinceBase < maxday)),]
#calculate the long-term daily means per cell
# group by month-day (chars 6-10 of imdate) and by cell number
long.term.daily.mn <- aggregate(df,by=list(substr(df$imdate,6,10),df$cellnrs),FUN=mean)
#convert the observations to anomalies from the long-term cell&day-level means
# averaging fractional DOY values can yield non-integers; round up so the
# join key matches the observations
long.term.daily.mn$DOYsinceBase <- ceiling(long.term.daily.mn$DOYsinceBase)
require(plyr)
df3 <- join(df,long.term.daily.mn,by=c("DOYsinceBase","cellnrs"),type="left",match="first")
#df3 <- df3[,!duplicated(colnames(df3))]
# After the join, df3 holds two columns named Varname.  The subtraction
# below assumes column 1 is the observation and column 2 the long-term
# mean -- this depends on plyr::join's column ordering (TODO confirm).
vardf <- df3[,colnames(df3)==Varname]
anomname <- paste(Varname,"_anom",sep="")
anom <- vardf[,1]-vardf[,2]
df[[anomname]]<-anom
return(df)
}
#++++++++###+++++++++++ load output of NCEP_CFSR_data_extract.R+++++++++++++++++++####
#load("C:\\Data\\WHRC\\Hummers\\R_data\\Mexico_CFSR_level2.rdata")
#load("C:\\Users\\pbeck.WHRC\\Dropbox\\Hummers\\Hummer_code\\P4_Extreme_events_Broad_tailed\\Results\\Mexico_CFSR_level2.rdata")
#load("A:\\Share\\pbeck\\Hummer_NASA\\Code_copy\\Mexico_CFSR_level2.rdata")
#load("C:/Users/sarah/Documents/GitHub/extreme_limits/data/Mexico_CFSR_level2.rdata")
# Machine-specific absolute path; provides the *.dat.d / *.dat.d.stats objects below.
load("C:/Users/tcormier/Dropbox (WHRC)/hb_extremelimits/DATA-USED-IN-EXTREME-LIMITS/TC_recreate/Mexico_CFSR_level2.rdata")
# Cld.dat.d;Cld.data.d.stats
# Prec.dat.d;Prec.data.d.stats
# Tmin.dat.d;Tmin.dat.d.stats
# Wnd.dat.d;Wnd.data.d.stats
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#lines of yearly NDVI and cumulative precip
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++####
# seconds per 6-hour CFSR time step (precipitation flux -> depth conversion)
precip.fac <- 6*60*60
# maxDOY<-max(annual.anom.df$DOYsinceBase.y)
# rainbowcols<-rainbow(maxDOY*5/4)[1:(maxDOY+1)]
#Simple smoothed plots of annual NCEP CFSR ts +++++++++++++++++++####
Simple.plot(Tes.dat.d.stats,"Tes_mn",minbaseyr=2004,maxday=250)
Simple.plot(Prec.dat.d.stats,"cumpre_mn",minbaseyr=2004,maxday=250)
#Simple.plot(Wnd.dat.d.stats,"wnd_mn",minbaseyr=2004,maxday=250)
#Simple.plot(Cld.dat.d.stats,"cld_mn",minbaseyr=2004,maxday=250)
Simple.plot(Tmin.dat.d.stats,"tmin_mn",minbaseyr=2004,maxday=250)
#Simple.plot(Te.dat.d.stats,"cumallbelow10_mn",minbaseyr=2004,maxbaseyr=2010,maxday=133) #this doesn't get createdin NCEP_CFSR... version that I have - do we need it?
#Tukey tests of yearly means - Can you do this on ts in non-paired fashion? +++++++++++++++++++####
Tukey.cumpre<-plot.CFSR.Annual.var(Prec.dat.d,maxday=150,varname="pre")
Tukey.tmin<-plot.CFSR.Annual.var(Tmin.dat.d,maxday=150,varname="tmin")
#Tukey.wnd<-plot.CFSR.Annual.var(Wnd.dat.d,maxday=150,varname="wnd")
#Tukey.cld<-plot.CFSR.Annual.var(Cld.dat.d,maxday=150,varname="cld")
#anomalies: plot and summary statistics of daily NCEP CFSR +++++++++++++++++++####
# For each variable: compute daily anomalies, then plot/tabulate them.
Tmin.dat.d <- Daily.anom.stats(Tmin.dat.d,Varname="tmin",minbaseyr=2000,maxbaseyr=2012,maxday=134)
# FIX: this line previously plotted "Tes_anom" from Tes.dat.d before that
# column was computed (a duplicate of the Tes line below); it is the tmin
# anomaly plot that belongs here.
Tukey.tminanom<-plot.CFSR.Annual.var(Tmin.dat.d,maxday=134,varname="tmin_anom");abline(h=0)
Tes.dat.d <- Daily.anom.stats(Tes.dat.d,Varname="Tes",minbaseyr=2000,maxbaseyr=2012,maxday=134)
Tukey.tesanom<-plot.CFSR.Annual.var(Tes.dat.d,maxday=134,varname="Tes_anom");abline(h=0)
Prec.dat.d <- Daily.anom.stats(Prec.dat.d,Varname="cumpre",minbaseyr=2000,maxbaseyr=2012,maxday=134)
Tukey.cumpreanom <- plot.CFSR.Annual.var(Prec.dat.d,maxday=150,varname="cumpre_anom");abline(h=0)
#Te.dat.d <- Te.dat.d[,colnames(Te.dat.d)!="allbelow10"]
Te.dat.d <- Daily.anom.stats(Te.dat.d,Varname="Te",minbaseyr=2000,maxbaseyr=2012,maxday=134)
# FIX: previously assigned to Tukey.cumpreanom, silently overwriting the
# precipitation table above; give the Te anomaly table its own name.
Tukey.teanom <- plot.CFSR.Annual.var(Te.dat.d,maxday=150,varname="Te_anom");abline(h=0)
#Very 0-inflated distribution - do cumulative?
#Wnd.dat.d <- Daily.anom.stats(Wnd.dat.d,Varname="wnd",minbaseyr=2000,maxbaseyr=2012,maxday=250)
# Tukey.wndanom<-plot.CFSR.Annual.var(Wnd.dat.d,maxday=150,varname="wnd_anom");abline(h=0)
#Cld.dat.d <- Daily.anom.stats(Cld.dat.d,Varname="cld",minbaseyr=2000,maxbaseyr=2012,maxday=250)
# Tukey.cldanom <- plot.CFSR.Annual.var(Cld.dat.d,maxday=150,varname="cld_anom");abline(h=0)
#this df will hold yearly anomalies for meteo and NDVI over the focal period
annual.anom.df <- join(Tes.dat.d,Prec.dat.d)
annual.anom.df <- join(annual.anom.df,Tmin.dat.d)
annual.anom.df <- join(annual.anom.df,Te.dat.d)
#annual.anom.df <- join(annual.anom.df,Cld.dat.d)
#annual.anom.df <- join(annual.anom.df,Wnd.dat.d)
#make them daily (ie take averages across points on the same day)
annual.anom.df <- aggregate(annual.anom.df, by=list(annual.anom.df$imdate),FUN=mean)
#+++++++++++++++++++ load the NDVI d.f.+++++++++++++++++++####
#load(file="A:\\Share\\pbeck\\Hummer_NASA\\Code_copy\\NDVI_df.rdata")
#load("C:/Users/sarah/Documents/GitHub/extreme_limits/data/NDVI_df.rdata")
# Machine-specific absolute path; provides NDVI.dat.
load("C:/Users/tcormier/Dropbox (WHRC)/hb_extremelimits/DATA-USED-IN-EXTREME-LIMITS/NDVI_df.rdata")
#NDVI.dat
# drop exact duplicate rows
NDVI.dat<-NDVI.dat[!duplicated(NDVI.dat),]
colnames(NDVI.dat)
#+++++++++++ make 1 smoothed NDVI ts ++++++++++####
# collapse to one mean NDVI value per date, and add factor versions of the
# year columns for plotting/grouping
NDVI.dat.daily<-aggregate(NDVI.dat,by=list(NDVI.dat$allDate),FUN=mean)
NDVI.dat.daily$years.from.baseline.fac <- as.factor(NDVI.dat.daily$years.from.baseline)
NDVI.dat.daily$allYEARfac <- as.factor(NDVI.dat.daily$allYEAR)
# Evaluate a fitted smoothing spline at the locations `x` and return the
# predicted y values (splinemod defaults to the script-level spline `spl`).
aqf <- function(x, splinemod = spl) {
predict(splinemod, x)$y
}
# Plot the raw NDVI point cloud and overlay smoothing-spline fits.
win.graph()
plot(NDVI.dat$allDate,NDVI.dat$allNDVI,pch=16,col=rgb(.5,.5,.5,.2),cex=.2,ylab="MODIS (Terra & Aqua) NDVI",
xlab="",main="Vegetation productivity on Broad-tailed Hummingbird wintering grounds",
ylim=c(.4,.9))#,xlim=as.Date(c())
# spline on all observations vs. spline on the daily means (spl2)
spl <- smooth.spline(NDVI.dat$allDate,NDVI.dat$allNDVI,df=80)
# daily sequence of date-numbers spanning the fitted range
prednrs <- c(min(spl$x):max(spl$x))
lines(as.Date(prednrs,origin="1970-01-01"),aqf(x=prednrs,splinemod=spl),col="dark green",lwd=2.5)
abline(v=as.Date(paste(c(2001:2013),"-01-01",sep="")),lty=2)
spl2 <- smooth.spline(NDVI.dat.daily$allDate,NDVI.dat.daily$allNDVI,df=80)
#plot(NDVI.dat.daily$allDate,NDVI.dat.daily$allNDVI,pch=16,col="grey",cex=.5)
lines(as.Date(prednrs,origin="1970-01-01"),aqf(x=prednrs,splinemod=spl2),col=4)
# Build a daily smoothed-NDVI data frame keyed like the CFSR frames.
smoothNDVIdf <- cbind.data.frame(as.Date(prednrs,origin="1970-01-01"),aqf(x=prednrs,splinemod=spl))
colnames(smoothNDVIdf) <- c("imdate","smoothNDVI")
# NOTE(review): days.since.base() is not defined in this file (a copy is
# commented out near the bottom) -- it must come from another sourced script.
smoothNDVIdf <- cbind.data.frame(smoothNDVIdf,days.since.base(smoothNDVIdf$imdate,baseline="-11-01"))
# dummy single cell number so Daily.anom.stats() can group by cellnrs
smoothNDVIdf[["cellnrs"]]<-rep(-1,nrow(smoothNDVIdf))
smoothNDVIdf <- Daily.anom.stats(smoothNDVIdf,Varname="smoothNDVI",minbaseyr=2000,maxbaseyr=2012,maxday=250)
Tukey.NDVIanom <- plot.CFSR.Annual.var(smoothNDVIdf,maxday=150,varname="smoothNDVI_anom");abline(h=0)
#++++++++++++++++++++ A DF TO HOLD ANNUAL SERIES FOR MULTIPLE VARS ++++####smoothNDVIdf <- cbind.data.frame(as.Date(prednrs,origin="1970-01-01"),aqf(x=prednrs,splinemod=spl2))
annual.anom.df.backup <- annual.anom.df
#annual.anom.df<-annual.anom.df.backup
# Merge NDVI onto the daily meteo anomalies; "Group.1" is the date column
# created by the aggregate() call above.
annual.anom.df<- merge(x=annual.anom.df,y=smoothNDVIdf,FUN=mean,by.x="Group.1",by.y="imdate")
#keep only the points in the focal years
annual.anom.df <- annual.anom.df[(annual.anom.df$BaseYear.x <= 2011)&(annual.anom.df$BaseYear.x >= 2000),] #CHECK THE ONE-FIFTY NR
#save(annual.anom.df,file="A:\\Share\\pbeck\\Hummer_NASA\\Code_copy\\annual.anom.df.rdata")
#save(annual.anom.df, file="C:/Users/sarah/Documents/GitHub/extreme_limits/data/annual.anom.df.rdata")
save(annual.anom.df, file="C:/Users/tcormier/Dropbox (WHRC)/hb_extremelimits/DATA-USED-IN-EXTREME-LIMITS/TC_recreate/annual.anom.df.rdata")
# Simple smoothed plots of annual NDVI ts+++++++++++++++++++####
Simple.plot(smoothNDVIdf,"smoothNDVI",minbaseyr=2002,maxday=250)
Tukey.NDVI<-plot.CFSR.Annual.var(smoothNDVIdf,maxday=150,varname="smoothNDVI")
# Re-running Daily.anom.stats here recomputes smoothNDVI_anom on the
# already-restricted frame; Tukey.NDVI below is overwritten intentionally(?).
smoothNDVIdf <- Daily.anom.stats(smoothNDVIdf,Varname="smoothNDVI",minbaseyr=2000,maxbaseyr=2012,maxday=250)
Tukey.NDVI <- plot.CFSR.Annual.var(smoothNDVIdf,maxday=150,varname="smoothNDVI");abline(h=0)
#++++++++++++++++++++ A FUNCTION TO COMPARE 2 VARS BETWEEN ONE YEAR AND THE OTHERS ++++####
# Bagplot (bivariate boxplot) of yvar against xvar for all winters EXCEPT
# focalBaseYr, with the focal winter overlaid as points colored by
# day-of-winter.  Purely a plotting function; returns NULL.
#   df            daily anomaly data frame (expects BaseYear.x, DOYsinceBase.y)
#   xvar, yvar    column names (strings) to plot
#   maxDOY        keep only DOYsinceBase.y <= maxDOY
#   focalBaseYr   winter highlighted as points on top of the bagplot
#   Focalpoints   draw the focal-year points?
#   PointsLegend  draw the day-of-winter color legend?
#   focalBag      also draw a (point-free) bagplot of the focal year?
#   xarrow/yarrow length-2 labels for direction arrows along the axes
BivariateWinterComp<- function(df=annual.anom.df,xvar,yvar,maxDOY=250,focalBaseYr=2010,
Focalpoints=T,PointsLegend=T,focalBag=F,
xlabb=NULL,
ylabb=NULL,
xarrow=c(NA,NA),yarrow=c(NA,NA),legpos="topleft")
{
require(aplpack)
df<-df[df$DOYsinceBase.y<=maxDOY,]
xx<-df[[xvar]];yy<-df[[yvar]]
# ss: rows belonging to the focal winter
ss<-df$BaseYear.x==focalBaseYr
# background bagplot of all non-focal winters
bagplot(x=xx[!ss],y=yy[!ss],na.rm=T, xlab=xlabb,ylab=ylabb,
show.outlier=T,show.bagpoints=F,show.looppoints=F,show.whiskers=F,
transparency=T,xlim=range(xx,na.rm=T),ylim=range(yy,na.rm=T),cex=0.4
,col.loophull=grey(.75)
,col.baghull=grey(.5),col.bagpoints=1
)
cat(length(which(ss))," observations in focal year\n")
cat(length(which(!ss))," observations in all other years focal year\n")
if(focalBag){bagplot(x=xx[ss],y=yy[ss],na.rm=T,
show.outlier=F,show.bagpoints=F,show.looppoints=F,show.whiskers=F,
transparency=T,xlim=range(xx,na.rm=T),ylim=range(yy,na.rm=T),cex=0,add=T)}
# color ramp over day-of-winter (truncated rainbow so it doesn't wrap)
rainbowcols<-rainbow(maxDOY*5/4)[1:(maxDOY+1)]
if(Focalpoints){
#points(xx[ss],yy[ss],pch=16,col=rainbow(df$DOYsinceBase.y[ss]/maxDOY))
points(xx[ss],yy[ss],pch=16,col=rainbowcols[df$DOYsinceBase.y[ss]+1])
#points(xx[ss],yy[ss])
}
# direction arrows + labels along the y axis (e.g. "dryer"/"wetter")
if(!is.na(yarrow[1])){
arrows(x0=min(xx,na.rm=T),y0=min(yy,na.rm=T)/3*1,y1=min(yy,na.rm=T)/3*2,length=.1)
mtext(side=2,line=-1,at=max(yy,na.rm=T)/2,text=yarrow[2])
arrows(x0=min(xx,na.rm=T),y0=max(yy,na.rm=T)/3*1,y1=max(yy,na.rm=T)/3*2,length=.1)
mtext(side=2,line=-1,at=min(yy,na.rm=T)/2,text=yarrow[1])
}
# direction arrows + labels along the x axis
if(!is.na(xarrow[1])){
arrows(y0=min(yy,na.rm=T),x0=min(xx,na.rm=T)/3*1,x1=min(xx,na.rm=T)/3*2,length=.1)
mtext(side=1,line=-1,at=max(xx,na.rm=T)/2,text=xarrow[2])
arrows(y0=min(yy,na.rm=T),x0=max(xx,na.rm=T)/3*1,x1=max(xx,na.rm=T)/3*2,length=.1)
mtext(side=1,line=-1,at=min(xx,na.rm=T)/2,text=xarrow[1])
}
#browser()
if(PointsLegend){
# legend at ~0/20/40/60/80/100% of the day range (nudged off the ends)
levs<-c(0,.2,.4,.6,.8,1)*maxDOY;levs[1]<-levs[1]+1;levs[length(levs)]<-levs[length(levs)]-1
# legend("topright",pch=16,title=paste("Days since 1 Nov ", focalBaseYr,sep=""),col=grey(levs/maxDOY)
#       ,legend=paste(round(levs)," days",sep=""),box.col=NULL,bty="n")
#points(pch=16,-4:0,rep(-0.005,length(levs)),col=rainbowcols[round(levs)])
legend(legpos,pch=16,title=paste("Days since 1 Nov ", focalBaseYr,sep=""),col=rainbowcols[round(levs)]
,legend=paste(round(levs)," days",sep=""),box.col=NULL,bty="n")
#legend("topright",pch=1,paste("Days since 1 Nov ", focalBaseYr,sep=""),
#       ,legend=paste(round(levs)," days",sep=""),bty="n",text.col=NA)
}
box();abline(h=0,lty=1,col="grey");abline(v=0,lty=1,col="grey")
return()
}
# Bivariate winter comparisons: each call opens a device and overlays one
# focal winter on the cloud of all other winters.
win.graph()
BivariateWinterComp(df=annual.anom.df,xvar="Te_anom",yvar="cumpre_anom",maxDOY=134,focalBaseYr=2010,
Focalpoints=T,PointsLegend=T,focalBag=F,
xlabb="Te anomaly (C)",ylabb="Cumulative precipitation anomaly (mm)"
,xarrow=c("colder","warmer")
,yarrow=c("dryer","wetter"))
win.graph()
BivariateWinterComp(df=annual.anom.df,xvar="smoothNDVI_anom",yvar="cumpre_anom",maxDOY=134,focalBaseYr=2010,
Focalpoints=T,PointsLegend=T,focalBag=F,
xlabb="NDVI anomaly",ylabb="Cumulative precipitation anomaly (mm)"
,xarrow=c("less productive","more productive")
,yarrow=c("dryer","wetter"))
win.graph()
BivariateWinterComp(df=annual.anom.df,xvar="smoothNDVI_anom",yvar="Te_anom",maxDOY=134,focalBaseYr=2010,
Focalpoints=T,PointsLegend=T,focalBag=F,
xlabb="NDVI anomaly",ylabb="Te anomaly (C)")
# win.graph()
# BivariateWinterComp(df=annual.anom.df,xvar="tmin_anom",yvar="cld_anom",maxDOY=134,focalBaseYr=2010,
#                     Focalpoints=T,PointsLegend=T,focalBag=F,
#                     xlabb="Tmin anomaly (C)",ylabb="Cloudiness anomaly (%)",
#                     xarrow=c("colder","warmer")
#                     ,yarrow=c("cloudier","clearer"))
# win.graph()
# BivariateWinterComp(df=annual.anom.df,xvar="tmin_anom",yvar="wnd_anom",maxDOY=134,focalBaseYr=2006,
#                     Focalpoints=T,PointsLegend=T,focalBag=F,
#                     xlabb="Tmin anomaly (C)",ylabb="Wind speed anomaly",legpos="topright",
#                     xarrow=c("colder","warmer")
#                     ,yarrow=c("more still","windier"))
win.graph()
BivariateWinterComp(df=annual.anom.df,xvar="smoothNDVI_anom",yvar="cumpre_anom",maxDOY=134,focalBaseYr=2009,
Focalpoints=T,PointsLegend=T,focalBag=F,
xlabb="NDVI anomaly",ylabb="Cumulative precipitation anomaly (mm)"
,xarrow=c("less productive","more productive")
,yarrow=c("dryer","wetter"))
# win.graph()
# BivariateWinterComp(df=annual.anom.df,xvar="tmin_anom",yvar="cld_anom",maxDOY=134,focalBaseYr=2009,
#                     Focalpoints=T,PointsLegend=T,focalBag=F,
#                     xlabb="Tmin anomaly (C)",ylabb="Cloudiness anomaly (%)",
#                     xarrow=c("colder","warmer")
#                     ,yarrow=c("cloudier","clearer"))
# win.graph()
# BivariateWinterComp(df=annual.anom.df,xvar="tmin_anom",yvar="cld_anom",maxDOY=134,focalBaseYr=2005,
#                     Focalpoints=T,PointsLegend=T,focalBag=F,
#                     xlabb="Tmin anomaly (C)",ylabb="Cloudiness anomaly (%)",
#                     xarrow=c("colder","warmer")
#                     ,yarrow=c("dryer","wetter"))
#
# win.graph()
# BivariateWinterComp(df=annual.anom.df,xvar="tmin_anom",yvar="cld_anom",maxDOY=134,focalBaseYr=2005,
#                     Focalpoints=T,PointsLegend=T,focalBag=F,
#                     xlabb="Tmin anomaly (C)",ylabb="Cloudiness anomaly (%)",
#                     xarrow=c("colder","warmer")
#                     ,yarrow=c("cloudier","clearer"))
# plot and summary statistics of daily NDVI anomalies+++++++++++++++++++####
# NOTE(review): smoothNDVIdf is built above with an "imdate" column;
# "allDate" does not appear among its columns as constructed -- confirm
# this block still runs (it also duplicates the Daily.anom.stats call above).
long.term.daily.mnNDVI <- aggregate(smoothNDVIdf,by=list(substr(smoothNDVIdf$allDate,6,10)),FUN=mean)
#convert the observations to anomalies from the long-term cell&day-level means
require(plyr)
df3 <- join(smoothNDVIdf,long.term.daily.mnNDVI,by="DOYsinceBase",type="left")
df3 <- df3[,(ncol(smoothNDVIdf)+1):ncol(df3)]
anomname <- "smoothNDVI_anom"
# anomaly = observed smoothed NDVI minus its long-term daily mean
anom <- smoothNDVIdf[["smoothNDVI"]]-df3[["smoothNDVI"]]
smoothNDVIdf[[anomname]]<-anom
################### older code ##########
#+++++++++++++++++++ load the Tmin Torpor d.f.+++++++++++++++++++####
# Loads Tmin_torpor.dat from an absolute Windows path (machine-specific).
load(file="C:\\Data\\WHRC\\Hummers\\Hummer_code\\P4_Extreme_events_Broad_tailed\\Results\\Torpor_df.rdata")
#Tmin_torpor.dat
Tmin_torpor.dat<-Tmin_torpor.dat[!duplicated(Tmin_torpor.dat),]
#+++++++++++ test Torpor days differences between years ++++++++++####
# restrict to winters 2001-2011 and the first 150 days after Oct 31
ss<-which((Tmin_torpor.dat$years.from.baseline >2000)&(Tmin_torpor.dat$days.sinceOct31 < 150)&(Tmin_torpor.dat$years.from.baseline <2012))
plot(Tmin_torpor.dat$years.from.baseline.fac[ss],Tmin_torpor.dat$allCFSR[ss],col="grey")
TukeyTminTorpor <- TukeyTable(classes=Tmin_torpor.dat$years.from.baseline.fac[ss],dependent=Tmin_torpor.dat$allCFSR[ss],
compnames=c('higher Tmin_stress','no diff.','lower Tmin_stress'))
TukeyTminTorpor
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++####
# days.since.base<-function(mydates,baseline="-01-01")
# {
#   my.years<-as.numeric(substr(mydates,1,4))
#   prev.baseline <- as.Date(paste(my.years-1,baseline,sep=""))
#   nr.of.days.in.year <- as.numeric(as.Date(paste(my.years-1,"-12-31",sep=""))-as.Date(paste(my.years-1,"-01-01",sep="")))+1
#   days.since <- as.numeric(mydates-prev.baseline)
#   my.years[days.since>=nr.of.days.in.year]<-my.years[days.since>=nr.of.days.in.year]+1
#   days.since <- days.since%%nr.of.days.in.year-1
#   return(cbind(days.since,my.years))
# }
|
# Simulate a small logistic-regression data set (n ages, Bernoulli outcomes).
library(tidyverse)   # NOTE(review): only base functions are used below
n <- 10              # number of observations
beta0 <- -1.6        # intercept on the logit scale
beta1 <- 0.03        # slope per year of age
x <- runif(n=n, min=18, max=60)   # ages drawn uniformly from 18-60
# inverse-logit: success probability for each age
pi_x <- exp(beta0 + beta1 * x) / (1 + exp(beta0 + beta1 * x))
y <- rbinom(n=length(x), size=1, prob=pi_x)   # 0/1 outcome per age
data <- data.frame(x, pi_x, y)
names(data) <- c("age", "pi", "y")
print(data)
# This will generate a list of faculties -- do it with replacement.
sample(c("Arts", "Science", "Business", "Fine Arts", "Health"), size = 10, replace = TRUE,
prob=c(0.35, 0.25, 0.15, 0.10, 0.15))
| /generate_data.R | no_license | sechilds/cirpa2017 | R | false | false | 478 | r | library(tidyverse)
n <- 10
beta0 <- -1.6
beta1 <- 0.03
x <- runif(n=n, min=18, max=60)
pi_x <- exp(beta0 + beta1 * x) / (1 + exp(beta0 + beta1 * x))
y <- rbinom(n=length(x), size=1, prob=pi_x)
data <- data.frame(x, pi_x, y)
names(data) <- c("age", "pi", "y")
print(data)
# This will generate a list of faculties -- do it with replacement.
sample(c("Arts", "Science", "Business", "Fine Arts", "Health"), size = 10, replace = TRUE,
prob=c(0.35, 0.25, 0.15, 0.10, 0.15))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotSizeCompsGG.Single.R
\name{plotSizeCompsGG.Single}
\alias{plotSizeCompsGG.Single}
\title{Plot size comps from a single year (possibly for multiple models).}
\usage{
plotSizeCompsGG.Single(n_xmsz = NULL, res = NULL,
component = "pop.quants", year = NULL, mdl = "", title = "",
showPlot = TRUE)
}
\arguments{
\item{n_xmsz}{- array (or list of arrays) dimensioned xmsz}
\item{res}{- TCSAM2015 results object (or list of such)}
\item{component}{- object name in res from which to extract n_xmsyz}
\item{year}{- year to extract size comps from component in res}
\item{mdl}{- model name (if single model is given)}
\item{title}{- title for plot}
\item{showPlot}{- flag to show plot immediately}
}
\value{
ggplot2 object
}
\description{
Function to plot size comps from a single year (possibly for multiple models).
}
| /man/plotSizeCompsGG.Single.Rd | permissive | wStockhausen/rTCSAM2015 | R | false | true | 904 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotSizeCompsGG.Single.R
\name{plotSizeCompsGG.Single}
\alias{plotSizeCompsGG.Single}
\title{Plot size comps from a single year (possibly for multiple models).}
\usage{
plotSizeCompsGG.Single(n_xmsz = NULL, res = NULL,
component = "pop.quants", year = NULL, mdl = "", title = "",
showPlot = TRUE)
}
\arguments{
\item{n_xmsz}{- array (or list of arrays) dimensioned xmsz}
\item{res}{- TCSAM2015 results object (or list of such)}
\item{component}{- object name in res from which to extract n_xmsyz}
\item{year}{- year to extract size comps from component in res}
\item{mdl}{- model name (if single model is given)}
\item{title}{- title for plot}
\item{showPlot}{- flag to show plot immediately}
}
\value{
ggplot2 object
}
\description{
Function to plot size comps from a single year (possibly for multiple models).
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rowGrpMeans.R
\name{.rowGrpMeans}
\alias{.rowGrpMeans}
\title{row group mean (main)}
\usage{
.rowGrpMeans(x, grp, na.replVa = NULL, na.rm = TRUE)
}
\arguments{
\item{x}{numeric matrix where replicates are organized into separate columns}
\item{grp}{(factor) defining which columns should be grouped (considered as replicates)}
\item{na.replVa}{(numeric) value to replace \code{NA} values}
\item{na.rm}{(logical) remove all \code{NA} values}
}
\value{
This function returns a matrix of mean values per row and group of replicates
}
\description{
This function calculates mean values for a matrix with multiple groups of data, i.e. one mean per row for each group of replicate columns.
}
\examples{
set.seed(2016); dat1 <- matrix(c(runif(200)+rep(1:10,20)),ncol=10)
grp1 <- gl(4,3,labels=LETTERS[1:4])[2:11]
head(.rowGrpMeans(dat1, grp1))
}
\seealso{
\code{\link{rowGrpCV}}, \code{\link{rowCVs}}, \code{\link{arrayCV}}, \code{\link{replPlateCV}}
}
| /man/dot-rowGrpMeans.Rd | no_license | cran/wrMisc | R | false | true | 993 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rowGrpMeans.R
\name{.rowGrpMeans}
\alias{.rowGrpMeans}
\title{row group mean (main)}
\usage{
.rowGrpMeans(x, grp, na.replVa = NULL, na.rm = TRUE)
}
\arguments{
\item{x}{numeric matrix where replicates are organized into separate columns}
\item{grp}{(factor) defining which columns should be grouped (considered as replicates)}
\item{na.replVa}{(numeric) value to replace \code{NA} values}
\item{na.rm}{(logical) remove all \code{NA} values}
}
\value{
This function returns a matrix of mean values per row and group of replicates
}
\description{
This function calculates mean values for a matrix with multiple groups of data, i.e. one mean per row for each group of replicate columns.
}
\examples{
set.seed(2016); dat1 <- matrix(c(runif(200)+rep(1:10,20)),ncol=10)
grp1 <- gl(4,3,labels=LETTERS[1:4])[2:11]
head(.rowGrpMeans(dat1, grp1))
}
\seealso{
\code{\link{rowGrpCV}}, \code{\link{rowCVs}}, \code{\link{arrayCV}}, \code{\link{replPlateCV}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mortality_rate.R
\docType{data}
\name{mortality_rate}
\alias{mortality_rate}
\title{Arabidopsis QTL data on gravitropism}
\format{
An object of class \code{spec_tbl_df} (inherits from \code{tbl_df}, \code{tbl}, \code{data.frame}) with 40 rows and 13 columns.
}
\source{
\href{https://phenome.jax.org/projects/Moore1b}{QTL Archive}
}
\usage{
data(mortality_rate)
}
\description{
Data from a QTL experiment on gravitropism in
Arabidopsis, with data on 162 recombinant inbred lines (Ler x
Cvi). The outcome is the root tip angle (in degrees) at two-minute
increments over eight hours.
}
\examples{
data(mortality_rate)
}
\references{
Moore et al. (2013) Genetics 195:1077-1086
(\href{https://www.ncbi.nlm.nih.gov/pubmed/23979570}{PubMed})
}
\keyword{datasets}
| /man/mortality_rate.Rd | permissive | lewishounkpevi/BeninStats | R | false | true | 835 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mortality_rate.R
\docType{data}
\name{mortality_rate}
\alias{mortality_rate}
\title{Arabidopsis QTL data on gravitropism}
\format{
An object of class \code{spec_tbl_df} (inherits from \code{tbl_df}, \code{tbl}, \code{data.frame}) with 40 rows and 13 columns.
}
\source{
\href{https://phenome.jax.org/projects/Moore1b}{QTL Archive}
}
\usage{
data(mortality_rate)
}
\description{
Data from a QTL experiment on gravitropism in
Arabidopsis, with data on 162 recombinant inbred lines (Ler x
Cvi). The outcome is the root tip angle (in degrees) at two-minute
increments over eight hours.
}
\examples{
data(mortality_rate)
}
\references{
Moore et al. (2013) Genetics 195:1077-1086
(\href{https://www.ncbi.nlm.nih.gov/pubmed/23979570}{PubMed})
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/private.R
\name{writeGenoFile}
\alias{writeGenoFile}
\title{Print the genotype file}
\usage{
writeGenoFile(object)
}
\description{
Print the genotype file
}
\examples{
my_simulation <- new("simulation")
writeGenoFile(my_simulation)
}
| /man/writeGenoFile.Rd | no_license | frederic-michaud/RQuantiNemo | R | false | true | 312 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/private.R
\name{writeGenoFile}
\alias{writeGenoFile}
\title{Print the genotype file}
\usage{
writeGenoFile(object)
}
\description{
Print the genotype file
}
\examples{
my_simulation <- new("simulation")
writeMainFile(my_simulation)
}
|
# Unit tests for the package's surv() survival function: for each named
# built-in distribution, surv(t, parameter_list, name) should match the
# corresponding upper-tail probability from stats/flexsurv.
test_that("test built-in distributions", {
print("~~~~~~ Testing built-in distributions ~~~~~~~~")
tt <- 2.5   # evaluation time shared by all survival curves
## ============================== weibull =================================
print("***** weibull ...")
distP <- list(scale = 1.2, shape = 1.16)
# convert the package's (scale, shape) parameterization to the
# stats::pweibull scale parameter before comparing
alpha <- exp(-log(distP[["scale"]]) / distP[["shape"]])
expect_equal(pweibull(q = tt, scale = alpha, shape = distP[["shape"]],
lower.tail = FALSE),
surv(tt, distP, "weibull"),
tolerance = 1e-3)
## =============================== gamma ===================================
print("***** gamma ...")
distP <- list(shape = 0.5, rate = 1.0 / 0.7)
expect_equal(pgamma(q = tt, rate = distP[["rate"]], shape = distP[["shape"]],
lower.tail = FALSE),
surv(tt, distP, "gamma"),
tolerance = 1e-3)
## =============================== gengamma ================================
print("***** generalized gamma ...")
distP <- list(mu = 0.5, sigma = 0.7, Q = 0.7)
# mu is negated here, presumably to match the package's internal sign
# convention for the generalized gamma -- TODO confirm against surv()'s docs
expect_equal(flexsurv::pgengamma(q = tt, mu = -distP[["mu"]],
sigma = distP[["sigma"]],
Q = distP[["Q"]],
lower.tail = FALSE),
surv(tt, distP, "gengamma"))
})
| /fuzzedpackages/Countr/tests/testthat/test-built_in_distributions.R | no_license | akhikolla/testpackages | R | false | false | 1,362 | r | test_that("test built-in distributions", {
print("~~~~~~ Testing built-in distributions ~~~~~~~~")
tt <- 2.5
## ============================== weibull =================================
print("***** weibull ...")
distP <- list(scale = 1.2, shape = 1.16)
alpha <- exp(-log(distP[["scale"]]) / distP[["shape"]])
expect_equal(pweibull(q = tt, scale = alpha, shape = distP[["shape"]],
lower.tail = FALSE),
surv(tt, distP, "weibull"),
tolerance = 1e-3)
## =============================== gamma ===================================
print("***** gamma ...")
distP <- list(shape = 0.5, rate = 1.0 / 0.7)
expect_equal(pgamma(q = tt, rate = distP[["rate"]], shape = distP[["shape"]],
lower.tail = FALSE),
surv(tt, distP, "gamma"),
tolerance = 1e-3)
## =============================== gengamma ================================
print("***** generalized gamma ...")
distP <- list(mu = 0.5, sigma = 0.7, Q = 0.7)
expect_equal(flexsurv::pgengamma(q = tt, mu = -distP[["mu"]],
sigma = distP[["sigma"]],
Q = distP[["Q"]],
lower.tail = FALSE),
surv(tt, distP, "gengamma"))
})
|
# "airport_short.csv" file is a csv file consisting of useful columns taken from the dataset which we are provided with.
# Reading the csv file into a dataframe called airport
airport = read.csv('airport_short.csv',stringsAsFactors = FALSE)
# Converting the airport_name and author_country into factors
airport$airport_name = as.factor(airport$airport_name)
airport$author_country = as.factor(airport$author_country)
# As the imputation should be logical hence we had split the airport dataframe
# on the basis of the airport_name and done median imputation on that.
# Splitting
X <- split(airport,airport$airport_name)
# Iterative loop for median imputation: for each airport, replace missing
# ratings with the median of that airport's non-missing ratings.
# FIX: was `for (i in 1:741)` -- the hard-coded airport count breaks
# silently if the input changes; seq_along(X) always matches the data.
for (i in seq_along(X))
{
X[[i]]$overall_rating[is.na(X[[i]]$overall_rating)] = median(X[[i]]$overall_rating[!is.na(X[[i]]$overall_rating)])
X[[i]]$queuing_rating[is.na(X[[i]]$queuing_rating)] = median(X[[i]]$queuing_rating[!is.na(X[[i]]$queuing_rating)])
X[[i]]$terminal_cleanliness_rating[is.na(X[[i]]$terminal_cleanliness_rating)] = median(X[[i]]$terminal_cleanliness_rating[!is.na(X[[i]]$terminal_cleanliness_rating)])
X[[i]]$airport_shopping_rating[is.na(X[[i]]$airport_shopping_rating)] = median(X[[i]]$airport_shopping_rating[!is.na(X[[i]]$airport_shopping_rating)])
}
# Recombining the dataframes with imputed value into a dataframe of the size equal to that of original
imputed1 = do.call("rbind", X)
# Some NA values remain: they correspond to airports with a single review
# where that review's value is also missing, so keeping such rows makes no sense.
# Removal of unnecessary rows
imputed2 = na.omit(imputed1)
# Our variables are categorical ratings, so we round to the nearest value
# with halves going AWAY from zero (base round() would round halves to even).
round <- function(x, n)
{
sgn <- sign(x)
scaled <- trunc(abs(x) * 10^n + 0.5)
sgn * scaled / 10^n
}
# Calling the round of function
# NOTE: `round` here is the half-away-from-zero version defined above,
# which shadows base::round in this script.
imputed2$overall_rating = as.factor(round(imputed2$overall_rating,0))
imputed2$queuing_rating = as.factor(round(imputed2$queuing_rating,0))
imputed2$terminal_cleanliness_rating = as.factor(round(imputed2$terminal_cleanliness_rating,0))
imputed2$airport_shopping_rating = as.factor(round(imputed2$airport_shopping_rating,0))
# Storing the file into a csv file.
write.csv(imputed2,"median_imputed.csv")
| /R Code/median_imputation.R | no_license | prerit1998jain/Aspect-Based-sentiment-analysis | R | false | false | 2,418 | r | # "airport_short.csv" file is a csv file consisting of useful columns taken from the dataset which we are provided with.
# Reading the csv file into a dataframe called airport
airport = read.csv('airport_short.csv',stringsAsFactors = FALSE)
# Converting the airport_name and author_country into factors
airport$airport_name = as.factor(airport$airport_name)
airport$author_country = as.factor(airport$author_country)
# As the imputation should be logical hence we had split the airport dataframe
# on the basis of the airport_name and done median imputation on that.
# Splitting
X <- split(airport,airport$airport_name)
# Iterative loop for median imputation: for each airport, replace missing
# ratings with the median of that airport's non-missing ratings.
# FIX: was `for (i in 1:741)` -- the hard-coded airport count breaks
# silently if the input changes; seq_along(X) always matches the data.
for (i in seq_along(X))
{
X[[i]]$overall_rating[is.na(X[[i]]$overall_rating)] = median(X[[i]]$overall_rating[!is.na(X[[i]]$overall_rating)])
X[[i]]$queuing_rating[is.na(X[[i]]$queuing_rating)] = median(X[[i]]$queuing_rating[!is.na(X[[i]]$queuing_rating)])
X[[i]]$terminal_cleanliness_rating[is.na(X[[i]]$terminal_cleanliness_rating)] = median(X[[i]]$terminal_cleanliness_rating[!is.na(X[[i]]$terminal_cleanliness_rating)])
X[[i]]$airport_shopping_rating[is.na(X[[i]]$airport_shopping_rating)] = median(X[[i]]$airport_shopping_rating[!is.na(X[[i]]$airport_shopping_rating)])
}
# Recombining the dataframes with imputed value into a dataframe of the size equal to that of original
imputed1 = do.call("rbind", X)
# Some NA values remain: they correspond to airports with a single review
# where that review's value is also missing, so keeping such rows makes no sense.
# Removal of unnecessary rows
imputed2 = na.omit(imputed1)
# Since our variable is categorical we don't want the variable to take decimal
# values and hence we round it off to nearest integer
# A round of function
# Round half away from zero (unlike base round(), which rounds half to even).
# NOTE: this deliberately masks base::round for the rating columns below,
# so that e.g. a 2.5 rating becomes 3 rather than 2.
#
# x: numeric vector to round
# n: number of decimal places to keep
# returns: x rounded to n decimals, with ties going away from zero
round <- function(x, n)
{
  posneg <- sign(x)      # remember the sign so we round magnitudes
  z <- abs(x) * 10^n     # shift the target decimal to the units place
  z <- trunc(z + 0.5)    # add 0.5 and truncate: halves round up (away from 0)
  z <- z / 10^n          # shift back
  z * posneg             # restore the sign
}
# Apply the custom half-away-from-zero round() and convert every rating
# column to a factor (the ratings are ordinal categories, not continuous).
for (rating_col in c("overall_rating", "queuing_rating",
                     "terminal_cleanliness_rating", "airport_shopping_rating")) {
  imputed2[[rating_col]] <- as.factor(round(imputed2[[rating_col]], 0))
}
# Write the imputed data set out to disk.
write.csv(imputed2, "median_imputed.csv")
|
## Combine all the running results
## Combine the OUT.LOSS matrices saved (one per .RData file) under RDPath
## into a single matrix, stacking them row-wise.
##
## RDPath: directory of .RData files, each of which must define OUT.LOSS.
## Returns: all OUT.LOSS objects row-bound in file-name order (NULL when the
##          directory is empty).
CombLOSS <- function(RDPath)
{
  RDS <- file.path(RDPath, dir(RDPath))
  # Load each file into a private environment so a file that fails to
  # define OUT.LOSS raises an error instead of silently reusing a stale
  # value; collecting the pieces first avoids the O(n^2) grow-by-rbind loop.
  pieces <- lapply(RDS, function(f) {
    e <- new.env(parent = emptyenv())
    load(f, envir = e)
    get("OUT.LOSS", envir = e)
  })
  do.call(rbind, pieces)
}
# Load the combined LOSS matrices for the two simulation settings.
# NOTE(review): paths are user-specific simulation output directories --
# confirm they exist before rerunning.
HwangRadiaLOSS <- CombLOSS("~/running/hwang/Results/radial")
HwangHarmonicLOSS <- CombLOSS("~/running/hwang/Results/harmonic")
## Make a box plot
# Fixed-knot models are filled gray; free-knot models unfilled (NA = no fill).
HwangFixedCol <- "gray"
HwangFreeCol <- NA
# Radial setting: first 3 columns are free-knot fits (10/20/40 knots),
# remaining 6 are fixed-knot fits (10..100 knots).
boxplot(HwangRadiaLOSS, col = c(rep(HwangFreeCol, 3), rep(HwangFixedCol, 6)),
# ylim = c(0, 0.012),
axes = TRUE, notch = TRUE,
main = "Radial",
ylab = "LOSS",
xlab = "No. of knots",
names = c(10, 20, 40, 10, 20, 40, 60, 80, 100))
# grid2() is a project-local helper (not base graphics) that adds grid lines.
grid2(y.at = seq(0.002, 0.010, 0.001))
legend(6.5, 0.0108, c("Free knots model", "Fixed knots model"),
fill = c(HwangFreeCol, HwangFixedCol),
bg = "white")
# Save the current device contents to an EPS file.
dev.copy2eps(file = "simul_radial.eps")
dev.new()
# Harmonic setting: same column layout, different y-axis scale.
boxplot(HwangHarmonicLOSS, col = c(rep(HwangFreeCol, 3), rep(HwangFixedCol, 6)),
# ylim = c(0, 0.012),
axes = TRUE, notch = TRUE,
main = "Harmonic",
ylab = "LOSS",
xlab = "No. of knots",
names = c(10, 20, 40, 10, 20, 40, 60, 80, 100))
grid2(y.at = seq(0.05, 0.55, 0.05))
legend(6.5, 0.58, c("Free knots model", "Fixed knots model"),
fill = c(HwangFreeCol, HwangFixedCol),
bg = "white")
dev.copy2eps(file = "simul_harmonic.eps")
| /inst/scripts/plots/PlotSimulHwang.R | no_license | feng-li/movingknots | R | false | false | 1,483 | r |
## Combine all the running results
## Combine the OUT.LOSS matrices saved (one per .RData file) under RDPath
## into a single matrix, stacking them row-wise.
##
## RDPath: directory of .RData files, each of which must define OUT.LOSS.
## Returns: all OUT.LOSS objects row-bound in file-name order (NULL when the
##          directory is empty).
CombLOSS <- function(RDPath)
{
  RDS <- file.path(RDPath, dir(RDPath))
  # Load each file into a private environment so a file that fails to
  # define OUT.LOSS raises an error instead of silently reusing a stale
  # value; collecting the pieces first avoids the O(n^2) grow-by-rbind loop.
  pieces <- lapply(RDS, function(f) {
    e <- new.env(parent = emptyenv())
    load(f, envir = e)
    get("OUT.LOSS", envir = e)
  })
  do.call(rbind, pieces)
}
# Load the combined LOSS matrices for the two simulation settings.
# NOTE(review): paths are user-specific simulation output directories --
# confirm they exist before rerunning.
HwangRadiaLOSS <- CombLOSS("~/running/hwang/Results/radial")
HwangHarmonicLOSS <- CombLOSS("~/running/hwang/Results/harmonic")
## Make a box plot
# Fixed-knot models are filled gray; free-knot models unfilled (NA = no fill).
HwangFixedCol <- "gray"
HwangFreeCol <- NA
# Radial setting: first 3 columns are free-knot fits (10/20/40 knots),
# remaining 6 are fixed-knot fits (10..100 knots).
boxplot(HwangRadiaLOSS, col = c(rep(HwangFreeCol, 3), rep(HwangFixedCol, 6)),
# ylim = c(0, 0.012),
axes = TRUE, notch = TRUE,
main = "Radial",
ylab = "LOSS",
xlab = "No. of knots",
names = c(10, 20, 40, 10, 20, 40, 60, 80, 100))
# grid2() is a project-local helper (not base graphics) that adds grid lines.
grid2(y.at = seq(0.002, 0.010, 0.001))
legend(6.5, 0.0108, c("Free knots model", "Fixed knots model"),
fill = c(HwangFreeCol, HwangFixedCol),
bg = "white")
# Save the current device contents to an EPS file.
dev.copy2eps(file = "simul_radial.eps")
dev.new()
# Harmonic setting: same column layout, different y-axis scale.
boxplot(HwangHarmonicLOSS, col = c(rep(HwangFreeCol, 3), rep(HwangFixedCol, 6)),
# ylim = c(0, 0.012),
axes = TRUE, notch = TRUE,
main = "Harmonic",
ylab = "LOSS",
xlab = "No. of knots",
names = c(10, 20, 40, 10, 20, 40, 60, 80, 100))
grid2(y.at = seq(0.05, 0.55, 0.05))
legend(6.5, 0.58, c("Free knots model", "Fixed knots model"),
fill = c(HwangFreeCol, HwangFixedCol),
bg = "white")
dev.copy2eps(file = "simul_harmonic.eps")
|
#' Fit a single layer neural network
#'
#' `lantern_mlp()` fits a model.
#'
#' @param x Depending on the context:
#'
#' * A __data frame__ of predictors.
#' * A __matrix__ of predictors.
#' * A __recipe__ specifying a set of preprocessing steps
#' created from [recipes::recipe()].
#'
#' The predictor data should be standardized (e.g. centered or scaled).
#'
#' @param y When `x` is a __data frame__ or __matrix__, `y` is the outcome
#' specified as:
#'
#' * A __data frame__ with 1 numeric column.
#' * A __matrix__ with 1 numeric column.
#' * A numeric __vector__.
#'
#' @param data When a __recipe__ or __formula__ is used, `data` is specified as:
#'
#' * A __data frame__ containing both the predictors and the outcome.
#'
#' @param formula A formula specifying the outcome terms on the left-hand side,
#' and the predictor terms on the right-hand side.
#'
#' @param epochs An integer for the number of epochs of training.
#' @param hidden_units An integer for the number of hidden units.
#' @param activation A string for the activation function. Possible values are
#' "relu", "elu", "tanh", and "linear".
#' @param penalty The amount of weight decay (i.e., L2 regularization).
#' @param dropout The proportion of parameters set to zero.
#' @param learn_rate A positive number (usually less than 0.1).
#' @param momentum A positive number on `[0, 1]` for the momentum parameter in
#' gradient decent.
#' @param validation The proportion of the data randomly assigned to a
#' validation set.
#' @param batch_size An integer for the number of training set points in each
#' batch.
#' @param conv_crit A non-negative number for convergence.
#' @param verbose A logical that prints out the iteration history.
#'
#' @param ... Not currently used, but required for extensibility.
#'
#' @details
#'
#' This function fits single layer, feed-forward neural network models for
#' regression (when the outcome is a number) or classification (a factor). For
#' regression, the mean squared error is optimized and cross-entropy is the loss
#' function for classification.
#'
#' The _predictors_ data should all be numeric and encoded in the same units (e.g.
#' standardized to the same range or distribution). If there are factor
#' predictors, use a recipe or formula to create indicator variables (or some
#' other method) to make them numeric.
#'
#' When the outcome is a number, the function internally standardizes the
#' outcome data to have mean zero and a standard deviation of one. The prediction
#' function creates predictions on the original scale.
#'
#' If `conv_crit` is used, it stops training when the difference in the loss
#' function is below `conv_crit` or if it gets worse. The default trains the
#' model over the specified number of epochs.
#'
#' @return
#'
#' A `lantern_mlp` object with elements:
#' * `models`: a list object of serialized models for each epoch.
#' * `loss`: A vector of loss values (MSE for regression, negative log-
#' likelihood for classification) at each epoch.
#' * `dim`: A list of data dimensions.
#' * `y_stats`: A list of summary statistics for numeric outcomes.
#' * `parameters`: A list of some tuning parameter values.
#' * `blueprint`: The `hardhat` blueprint data.
#'
#' @examples
#' \donttest{
#' if (torch::torch_is_installed()) {
#'
#' ## -----------------------------------------------------------------------------
#' # regression examples (increase # epochs to get better results)
#'
#' data(ames, package = "modeldata")
#'
#' ames$Sale_Price <- log10(ames$Sale_Price)
#'
#' set.seed(122)
#' in_train <- sample(1:nrow(ames), 2000)
#' ames_train <- ames[ in_train,]
#' ames_test <- ames[-in_train,]
#'
#'
#' # Using matrices
#' set.seed(1)
#' lantern_mlp(x = as.matrix(ames_train[, c("Longitude", "Latitude")]),
#' y = ames_train$Sale_Price,
#' penalty = 0.10, epochs = 20, batch_size = 32)
#'
#' # Using recipe
#' library(recipes)
#'
#' ames_rec <-
#' recipe(Sale_Price ~ Bldg_Type + Neighborhood + Year_Built + Gr_Liv_Area +
#' Full_Bath + Year_Sold + Lot_Area + Central_Air + Longitude + Latitude,
#' data = ames_train) %>%
#' # Transform some highly skewed predictors
#' step_BoxCox(Lot_Area, Gr_Liv_Area) %>%
#' # Lump some rarely occuring categories into "other"
#' step_other(Neighborhood, threshold = 0.05) %>%
#' # Encode categorical predictors as binary.
#' step_dummy(all_nominal(), one_hot = TRUE) %>%
#' # Add an interaction effect:
#' step_interact(~ starts_with("Central_Air"):Year_Built) %>%
#' step_zv(all_predictors()) %>%
#' step_normalize(all_predictors())
#'
#' set.seed(2)
#' fit <- lantern_mlp(ames_rec, data = ames_train, hidden_units = 20,
#' dropout = 0.05, epochs = 20, batch_size = 32)
#' fit
#'
#' autoplot(fit)
#'
#' library(ggplot2)
#'
#' predict(fit, ames_test) %>%
#' bind_cols(ames_test) %>%
#' ggplot(aes(x = .pred, y = Sale_Price)) +
#' geom_abline(col = "green") +
#' geom_point(alpha = .3) +
#' lims(x = c(4, 6), y = c(4, 6)) +
#' coord_fixed(ratio = 1)
#'
#' library(yardstick)
#' predict(fit, ames_test) %>%
#' bind_cols(ames_test) %>%
#' rmse(Sale_Price, .pred)
#' }
#'
#' }
#' @export
# S3 generic for fitting the single-layer network; dispatches on the class
# of `x` (data.frame, matrix, formula, recipe) to a lantern_mlp.* method.
lantern_mlp <- function(x, ...) {
UseMethod("lantern_mlp")
}
#' @export
#' @rdname lantern_mlp
lantern_mlp.default <- function(x, ...) {
  # Fallback method: any class without a dedicated lantern_mlp.* method gets
  # an informative error rather than a cryptic dispatch failure.
  msg <- sprintf("`lantern_mlp()` is not defined for a '%s'.", class(x)[1])
  stop(msg, call. = FALSE)
}
# XY method - data frame
#' @export
#' @rdname lantern_mlp
# XY interface for data frames: `x` holds the (numeric) predictors and `y`
# the outcome. mold() enforces the contract, the bridge does the real work.
lantern_mlp.data.frame <-
function(x,
y,
epochs = 100L,
hidden_units = 3L,
activation = "relu",
penalty = 0,
dropout = 0,
validation = 0.1,
learn_rate = 0.01,
momentum = 0.0,
batch_size = NULL,
conv_crit = -Inf,
verbose = FALSE,
...) {
processed <- hardhat::mold(x, y)
# Forward every tuning option by name to the shared fitting bridge.
lantern_mlp_bridge(
processed,
epochs = epochs,
hidden_units = hidden_units,
activation = activation,
learn_rate = learn_rate,
penalty = penalty,
dropout = dropout,
validation = validation,
momentum = momentum,
batch_size = batch_size,
conv_crit = conv_crit,
verbose = verbose,
...
)
}
# XY method - matrix
#' @export
#' @rdname lantern_mlp
# XY interface for matrices: same contract as the data-frame method, with a
# numeric predictor matrix `x`.
lantern_mlp.matrix <- function(x,
y,
epochs = 100L,
hidden_units = 3L,
activation = "relu",
penalty = 0,
dropout = 0,
validation = 0.1,
learn_rate = 0.01,
momentum = 0.0,
batch_size = NULL,
conv_crit = -Inf,
verbose = FALSE,
...) {
processed <- hardhat::mold(x, y)
# Forward every tuning option by name to the shared fitting bridge.
lantern_mlp_bridge(
processed,
epochs = epochs,
hidden_units = hidden_units,
activation = activation,
learn_rate = learn_rate,
momentum = momentum,
penalty = penalty,
dropout = dropout,
validation = validation,
batch_size = batch_size,
conv_crit = conv_crit,
verbose = verbose,
...
)
}
# Formula method
#' @export
#' @rdname lantern_mlp
# Formula interface: outcome ~ predictors evaluated against `data`; mold()
# builds the design matrix (factors become indicator columns).
lantern_mlp.formula <-
function(formula,
data,
epochs = 100L,
hidden_units = 3L,
activation = "relu",
penalty = 0,
dropout = 0,
validation = 0.1,
learn_rate = 0.01,
momentum = 0.0,
batch_size = NULL,
conv_crit = -Inf,
verbose = FALSE,
...) {
processed <- hardhat::mold(formula, data)
# Forward every tuning option by name to the shared fitting bridge.
lantern_mlp_bridge(
processed,
epochs = epochs,
hidden_units = hidden_units,
activation = activation,
learn_rate = learn_rate,
momentum = momentum,
penalty = penalty,
dropout = dropout,
validation = validation,
batch_size = batch_size,
conv_crit = conv_crit,
verbose = verbose,
...
)
}
# Recipe method
#' @export
#' @rdname lantern_mlp
# Recipe interface: `x` is an unprepped recipe; mold() preps it on `data`
# and returns the processed predictors/outcome for the bridge.
lantern_mlp.recipe <-
function(x,
data,
epochs = 100L,
hidden_units = 3L,
activation = "relu",
penalty = 0,
dropout = 0,
validation = 0.1,
learn_rate = 0.01,
momentum = 0.0,
batch_size = NULL,
conv_crit = -Inf,
verbose = FALSE,
...) {
processed <- hardhat::mold(x, data)
# Forward every tuning option by name to the shared fitting bridge.
lantern_mlp_bridge(
processed,
epochs = epochs,
hidden_units = hidden_units,
activation = activation,
learn_rate = learn_rate,
momentum = momentum,
penalty = penalty,
dropout = dropout,
validation = validation,
batch_size = batch_size,
conv_crit = conv_crit,
verbose = verbose,
...
)
}
# ------------------------------------------------------------------------------
# Bridge
# Common fitting bridge shared by every lantern_mlp() method: validates and
# coerces the tuning parameters, converts the molded predictors to a numeric
# matrix, runs the torch fitting engine, and wraps the result in a
# `lantern_mlp` object.
lantern_mlp_bridge <- function(processed, epochs, hidden_units, activation,
learn_rate, momentum, penalty, dropout,
validation, batch_size, conv_crit, verbose, ...) {
# Fail fast when the torch backend has never been installed.
if(!torch::torch_is_installed()) {
rlang::abort("The torch backend has not been installed; use `torch::install_torch()`.")
}
f_nm <- "lantern_mlp"
# check values of various argument values
# (check_* are project-local validators that abort with a message naming
# `f_nm` when a value is out of range)
if (is.numeric(epochs) & !is.integer(epochs)) {
epochs <- as.integer(epochs)
}
if (is.numeric(hidden_units) & !is.integer(hidden_units)) {
hidden_units <- as.integer(hidden_units)
}
check_integer(epochs, single = TRUE, 1, fn = f_nm)
# batch_size may be NULL (meaning full-batch); only validate when supplied
if (!is.null(batch_size)) {
if (is.numeric(batch_size) & !is.integer(batch_size)) {
batch_size <- as.integer(batch_size)
}
check_integer(batch_size, single = TRUE, 1, fn = f_nm)
}
check_integer(hidden_units, single = TRUE, 1, fn = f_nm)
check_double(penalty, single = TRUE, 0, incl = c(TRUE, TRUE), fn = f_nm)
check_double(dropout, single = TRUE, 0, 1, incl = c(TRUE, FALSE), fn = f_nm)
check_double(validation, single = TRUE, 0, 1, incl = c(TRUE, FALSE), fn = f_nm)
check_double(momentum, single = TRUE, 0, 1, incl = c(TRUE, TRUE), fn = f_nm)
check_double(learn_rate, single = TRUE, 0, incl = c(FALSE, TRUE), fn = f_nm)
check_logical(verbose, single = TRUE, fn = f_nm)
check_character(activation, single = TRUE, fn = f_nm)
## -----------------------------------------------------------------------------
# The engine needs a numeric matrix; a character result after as.matrix()
# means some predictor columns were non-numeric.
predictors <- processed$predictors
if (!is.matrix(predictors)) {
predictors <- as.matrix(predictors)
if (is.character(predictors)) {
rlang::abort(
paste(
"There were some non-numeric columns in the predictors.",
"Please use a formula or recipe to encode all of the predictors as numeric."
)
)
}
}
## -----------------------------------------------------------------------------
# Single-outcome model: take the first (only) outcome column.
outcome <- processed$outcomes[[1]]
## -----------------------------------------------------------------------------
fit <-
lantern_mlp_reg_fit_imp(
x = predictors,
y = outcome,
epochs = epochs,
hidden_units = hidden_units,
activation = activation,
learn_rate = learn_rate,
momentum = momentum,
penalty = penalty,
dropout = dropout,
validation = validation,
batch_size = batch_size,
conv_crit = conv_crit,
verbose = verbose
)
# Wrap the raw fit results plus the hardhat blueprint (needed at predict
# time) into the user-facing object.
new_lantern_mlp(
models = fit$models,
loss = fit$loss,
dims = fit$dims,
y_stats = fit$y_stats,
parameters = fit$parameters,
blueprint = processed$blueprint
)
}
# Internal constructor for `lantern_mlp` objects.
#
# Validates each component, then delegates to hardhat::new_model() so the
# result carries the blueprint required at predict time.
#
# models:     list of serialized (raw) torch models, one per epoch.
# loss:       numeric vector of per-epoch loss values.
# dims:       list of data dimensions (p, n, h, y).
# y_stats:    list of outcome summary statistics (mean/sd; NA for factors).
# parameters: list of tuning-parameter values.
# blueprint:  hardhat blueprint produced by mold().
new_lantern_mlp <- function( models, loss, dims, y_stats, parameters, blueprint) {
  if (!is.list(models)) {
    rlang::abort("'models' should be a list.")
  }
  if (!is.vector(loss) || !is.numeric(loss)) {
    rlang::abort("'loss' should be a numeric vector")
  }
  if (!is.list(dims)) {
    rlang::abort("'dims' should be a list")
  }
  # Previously unvalidated; checked here for consistency with the other
  # list components.
  if (!is.list(y_stats)) {
    rlang::abort("'y_stats' should be a list")
  }
  if (!is.list(parameters)) {
    rlang::abort("'parameters' should be a list")
  }
  if (!inherits(blueprint, "hardhat_blueprint")) {
    rlang::abort("'blueprint' should be a hardhat blueprint")
  }
  hardhat::new_model(models = models,
                     loss = loss,
                     dims = dims,
                     y_stats = y_stats,
                     parameters = parameters,
                     blueprint = blueprint,
                     class = "lantern_mlp")
}
## -----------------------------------------------------------------------------
# Fit code
# Internal engine that actually fits the single-layer network with SGD.
#
# x: numeric predictor matrix; y: numeric or factor outcome.
# The remaining arguments mirror the user-facing lantern_mlp() options.
# Returns a list with the serialized per-epoch models, per-epoch loss values,
# data dimensions, outcome statistics, and the tuning-parameter values.
lantern_mlp_reg_fit_imp <-
  function(x, y,
           epochs = 100L,
           batch_size = 32,
           hidden_units = 3L,
           penalty = 0,
           dropout = 0,
           validation = 0.1,
           learn_rate = 0.01,
           momentum = 0.0,
           activation = "relu",
           conv_crit = -Inf,
           verbose = FALSE,
           ...) {

    torch::torch_manual_seed(sample.int(10^5, 1))

    ## ---------------------------------------------------------------------------
    # General data checks:
    check_data_att(x, y)

    # Check missing values
    compl_data <- check_missing_data(x, y, "lantern_mlp", verbose)
    x <- compl_data$x
    y <- compl_data$y
    n <- length(y)
    p <- ncol(x)

    if (is.factor(y)) {
      y_dim <- length(levels(y))
      # the model outputs softmax values, so use negative log-likelihood
      # on the log of those probabilities (equivalent to cross-entropy).
      # nnf_* calls are namespace-qualified (the rest of this file uses
      # torch::; unqualified calls fail unless torch is attached).
      loss_fn <- function(input, target) {
        torch::nnf_nll_loss(
          input = torch::torch_log(input),
          target = target
        )
      }
    } else {
      y_dim <- 1
      loss_fn <- function(input, target) {
        torch::nnf_mse_loss(input, target$view(c(-1, 1)))
      }
    }

    # Hold out a random validation fraction before any outcome scaling.
    if (validation > 0) {
      in_val <- sample(seq_along(y), floor(n * validation))
      x_val <- x[in_val, , drop = FALSE]
      y_val <- y[in_val]
      x <- x[-in_val, , drop = FALSE]
      y <- y[-in_val]
    }

    # Numeric outcomes are standardized (mean 0 / sd 1); predictions are
    # rescaled back to the original units by the predict method.
    if (!is.factor(y)) {
      y_stats <- scale_stats(y)
      y <- scale_y(y, y_stats)
      if (validation > 0) {
        y_val <- scale_y(y_val, y_stats)
      }
      loss_label <- "\tLoss (scaled):"
    } else {
      y_stats <- list(mean = NA_real_, sd = NA_real_)
      loss_label <- "\tLoss:"
    }

    # NULL batch size means full-batch training.
    if (is.null(batch_size)) {
      batch_size <- nrow(x)
    } else {
      batch_size <- min(batch_size, nrow(x))
    }

    ## ---------------------------------------------------------------------------
    # Convert to index sampler and data loader
    ds <- lantern::matrix_to_dataset(x, y)
    dl <- torch::dataloader(ds, batch_size = batch_size)

    if (validation > 0) {
      ds_val <- lantern::matrix_to_dataset(x_val, y_val)
      dl_val <- torch::dataloader(ds_val)
    }

    ## ---------------------------------------------------------------------------
    # Initialize model and optimizer
    model <- mlp_module(ncol(x), hidden_units, activation, dropout, y_dim)
    optimizer <-
      torch::optim_sgd(model$parameters, lr = learn_rate,
                       weight_decay = penalty, momentum = momentum)

    ## ---------------------------------------------------------------------------
    loss_prev <- 10^38
    loss_vec <- rep(NA_real_, epochs)
    if (verbose) {
      epoch_chr <- format(1:epochs)
    }

    ## -----------------------------------------------------------------------------
    model_per_epoch <- list()

    # Optimize parameters
    for (epoch in 1:epochs) {

      # training loop over mini-batches
      for (batch in torch::enumerate(dl)) {
        pred <- model(batch$x)
        loss <- loss_fn(pred, batch$y)

        optimizer$zero_grad()
        loss$backward()
        optimizer$step()
      }

      # monitor loss on the full validation set when one exists, otherwise
      # on the full training set
      if (validation > 0) {
        pred <- model(dl_val$dataset$data$x)
        loss <- loss_fn(pred, dl_val$dataset$data$y)
      } else {
        pred <- model(dl$dataset$data$x)
        loss <- loss_fn(pred, dl$dataset$data$y)
      }

      loss_curr <- loss$item()
      loss_vec[epoch] <- loss_curr

      if (is.nan(loss_curr)) {
        # message typos fixed ("in NaN"/"wil")
        rlang::warn("Current loss is NaN. Training will be stopped.")
        break()
      }

      # relative improvement, used for the convergence check below
      loss_diff <- (loss_prev - loss_curr) / loss_prev
      loss_prev <- loss_curr

      # persist this epoch's serialized model (the original serialized the
      # model twice per epoch; the redundant second call was removed)
      model_per_epoch[[epoch]] <- model_to_raw(model)

      if (verbose) {
        rlang::inform(
          paste("epoch:", epoch_chr[epoch], loss_label, signif(loss_curr, 5))
        )
      }

      if (loss_diff <= conv_crit) {
        break()
      }
    }

    ## ---------------------------------------------------------------------------
    list(
      models = model_per_epoch,
      loss = loss_vec[!is.na(loss_vec)],
      dims = list(p = p, n = n, h = hidden_units, y = y_dim),
      y_stats = y_stats,
      # `stats` duplicates `y_stats`; kept for backward compatibility
      stats = y_stats,
      parameters = list(activation = activation, learn_rate = learn_rate,
                        penalty = penalty, dropout = dropout, validation = validation,
                        batch_size = batch_size, momentum = momentum)
    )
  }
# torch module for the single-hidden-layer network:
#   input -> linear -> activation -> (dropout) -> linear -> (softmax)
# Dropout and the output transform are the identity function when disabled,
# which keeps forward() a single unconditional pipeline.
mlp_module <-
torch::nn_module(
"mlp_module",
initialize = function(num_pred, hidden_units, act_type, dropout, y_dim) {
# input-to-hidden and hidden-to-output affine layers
self$x_to_h <- torch::nn_linear(num_pred, hidden_units)
self$h_to_y <- torch::nn_linear(hidden_units, y_dim)
if (dropout > 0) {
self$dropout <- torch::nn_dropout(p = dropout)
} else {
self$dropout <- identity
}
self$activation <- get_activation_fn(act_type)
# multi-class outcomes (y_dim > 1) get class probabilities via softmax
if (y_dim > 1) {
self$transform <- torch::nn_softmax(dim = 2)
} else {
self$transform <- identity
}
},
forward = function(x) {
x %>%
self$x_to_h() %>%
self$activation() %>%
self$dropout() %>%
self$h_to_y() %>%
self$transform()
}
)
## -----------------------------------------------------------------------------
# Total number of trainable coefficients in the fitted network: the product
# of the dimensions of each parameter tensor, summed over all tensors.
get_num_mlp_coef <- function(x) {
  params <- revive_model(x, 1)$parameters
  per_tensor <- vapply(params, function(p) prod(dim(p)), double(1))
  sum(per_tensor)
}
#' @export
# Print method for `lantern_mlp` objects: a compact summary of the data
# dimensions, network size, tuning parameters, and the final loss value.
# Returns `x` invisibly, as print methods should.
print.lantern_mlp <- function(x, ...) {
  cat("Multilayer perceptron\n\n")
  # use the full component name; the previous `x$param` relied on fragile
  # partial matching against `x$parameters`
  cat(x$parameters$activation, "activation\n")

  lvl <- get_levels(x)
  if (is.null(lvl)) {
    chr_y <- "numeric outcome"
  } else {
    chr_y <- paste(length(lvl), "classes")
  }
  cat(
    format(x$dims$n, big.mark = ","), "samples,",
    format(x$dims$p, big.mark = ","), "features,",
    chr_y, "\n"
  )
  cat(
    x$dims$h, "hidden units,",
    format(get_num_mlp_coef(x), big.mark = ","), "model parameters\n"
  )
  if (x$parameters$penalty > 0) {
    cat("weight decay:", x$parameters$penalty, "\n")
  }
  if (x$parameters$dropout > 0) {
    cat("dropout proportion:", x$parameters$dropout, "\n")
  }
  cat("batch size:", x$parameters$batch_size, "\n")

  if (!is.null(x$loss)) {
    # Build the label once instead of four near-identical cat() branches:
    # the loss source (validation vs training) and whether the outcome was
    # scaled vary independently. NULL pieces are dropped by c().
    label <- paste(
      c("final",
        if (!is.na(x$y_stats$mean)) "scaled",
        if (x$parameters$validation > 0) "validation" else "training set",
        "loss after"),
      collapse = " "
    )
    cat(label, length(x$loss), "epochs:",
        signif(x$loss[length(x$loss)]), "\n")
  }
  invisible(x)
}
# Extract the coefficients of the final epoch's network as plain R arrays
# (one array per parameter tensor, names preserved from torch).
# NOTE(review): no #' @export tag here, so coef() dispatch only works if the
# namespace registers this method elsewhere -- confirm this is intentional.
coef.lantern_mlp <- function(object, ...) {
# deserialize the last stored model (one serialized model per epoch)
module <- revive_model(object, epoch = length(object$models))
parameters <- module$parameters
lapply(parameters, as.array)
}
## -----------------------------------------------------------------------------
# Map an activation-name string to the corresponding torch module.
# Anything other than "relu", "elu", or "tanh" (e.g. "linear") falls
# through to the identity function. `...` is forwarded to the torch
# constructor; switch() evaluates lazily, so torch is only touched for
# the named activations.
get_activation_fn <- function(arg, ...) {
  switch(
    arg,
    relu = torch::nn_relu(...),
    elu  = torch::nn_elu(...),
    tanh = torch::nn_tanh(...),
    identity
  )
}
## -----------------------------------------------------------------------------
#' Plot model loss over epochs
#'
#' @param object A `lantern_mlp` object.
#' @param ... Not currently used.
#' @return A `ggplot` object tracing the loss at each training iteration.
#' @details The y-axis label records whether the loss was computed on the
#'   validation or the training set, and whether the outcome was scaled.
#' @export
autoplot.lantern_mlp <- function(object, ...) {
  x <- tibble::tibble(iteration = seq(along = object$loss), loss = object$loss)

  # Assemble the axis label from its two independent pieces rather than
  # enumerating all four combinations.
  data_src <- if (object$parameters$validation > 0) "validation set" else "training set"
  scaled <- if (is.na(object$y_stats$mean)) "" else ", scaled"
  lab <- paste0("loss (", data_src, scaled, ")")

  ggplot2::ggplot(x, ggplot2::aes(x = iteration, y = loss)) +
    ggplot2::geom_line() +
    ggplot2::labs(y = lab)
}
# Serialize a torch model into a raw vector, so it can be stored in an
# ordinary R list and survive saveRDS()/session restarts.
model_to_raw <- function(model) {
  con <- rawConnection(raw(), open = "wr")
  on.exit(close(con), add = TRUE)
  torch::torch_save(model, con)
  rawConnectionValue(con)
}
| /R/lantern_mlp-fit.R | permissive | EmilHvitfeldt/lantern | R | false | false | 21,551 | r | #' Fit a single layer neural network
#'
#' `lantern_mlp()` fits a model.
#'
#' @param x Depending on the context:
#'
#' * A __data frame__ of predictors.
#' * A __matrix__ of predictors.
#' * A __recipe__ specifying a set of preprocessing steps
#' created from [recipes::recipe()].
#'
#' The predictor data should be standardized (e.g. centered or scaled).
#'
#' @param y When `x` is a __data frame__ or __matrix__, `y` is the outcome
#' specified as:
#'
#' * A __data frame__ with 1 numeric column.
#' * A __matrix__ with 1 numeric column.
#' * A numeric __vector__.
#'
#' @param data When a __recipe__ or __formula__ is used, `data` is specified as:
#'
#' * A __data frame__ containing both the predictors and the outcome.
#'
#' @param formula A formula specifying the outcome terms on the left-hand side,
#' and the predictor terms on the right-hand side.
#'
#' @param epochs An integer for the number of epochs of training.
#' @param hidden_units An integer for the number of hidden units.
#' @param activation A string for the activation function. Possible values are
#' "relu", "elu", "tanh", and "linear".
#' @param penalty The amount of weight decay (i.e., L2 regularization).
#' @param dropout The proportion of parameters set to zero.
#' @param learn_rate A positive number (usually less than 0.1).
#' @param momentum A positive number on `[0, 1]` for the momentum parameter in
#' gradient decent.
#' @param validation The proportion of the data randomly assigned to a
#' validation set.
#' @param batch_size An integer for the number of training set points in each
#' batch.
#' @param conv_crit A non-negative number for convergence.
#' @param verbose A logical that prints out the iteration history.
#'
#' @param ... Not currently used, but required for extensibility.
#'
#' @details
#'
#' This function fits single layer, feed-forward neural network models for
#' regression (when the outcome is a number) or classification (a factor). For
#' regression, the mean squared error is optimized and cross-entropy is the loss
#' function for classification.
#'
#' The _predictors_ data should all be numeric and encoded in the same units (e.g.
#' standardized to the same range or distribution). If there are factor
#' predictors, use a recipe or formula to create indicator variables (or some
#' other method) to make them numeric.
#'
#' When the outcome is a number, the function internally standardizes the
#' outcome data to have mean zero and a standard deviation of one. The prediction
#' function creates predictions on the original scale.
#'
#' If `conv_crit` is used, it stops training when the difference in the loss
#' function is below `conv_crit` or if it gets worse. The default trains the
#' model over the specified number of epochs.
#'
#' @return
#'
#' A `lantern_mlp` object with elements:
#' * `models`: a list object of serialized models for each epoch.
#' * `loss`: A vector of loss values (MSE for regression, negative log-
#' likelihood for classification) at each epoch.
#' * `dim`: A list of data dimensions.
#' * `y_stats`: A list of summary statistics for numeric outcomes.
#' * `parameters`: A list of some tuning parameter values.
#' * `blueprint`: The `hardhat` blueprint data.
#'
#' @examples
#' \donttest{
#' if (torch::torch_is_installed()) {
#'
#' ## -----------------------------------------------------------------------------
#' # regression examples (increase # epochs to get better results)
#'
#' data(ames, package = "modeldata")
#'
#' ames$Sale_Price <- log10(ames$Sale_Price)
#'
#' set.seed(122)
#' in_train <- sample(1:nrow(ames), 2000)
#' ames_train <- ames[ in_train,]
#' ames_test <- ames[-in_train,]
#'
#'
#' # Using matrices
#' set.seed(1)
#' lantern_mlp(x = as.matrix(ames_train[, c("Longitude", "Latitude")]),
#' y = ames_train$Sale_Price,
#' penalty = 0.10, epochs = 20, batch_size = 32)
#'
#' # Using recipe
#' library(recipes)
#'
#' ames_rec <-
#' recipe(Sale_Price ~ Bldg_Type + Neighborhood + Year_Built + Gr_Liv_Area +
#' Full_Bath + Year_Sold + Lot_Area + Central_Air + Longitude + Latitude,
#' data = ames_train) %>%
#' # Transform some highly skewed predictors
#' step_BoxCox(Lot_Area, Gr_Liv_Area) %>%
#' # Lump some rarely occuring categories into "other"
#' step_other(Neighborhood, threshold = 0.05) %>%
#' # Encode categorical predictors as binary.
#' step_dummy(all_nominal(), one_hot = TRUE) %>%
#' # Add an interaction effect:
#' step_interact(~ starts_with("Central_Air"):Year_Built) %>%
#' step_zv(all_predictors()) %>%
#' step_normalize(all_predictors())
#'
#' set.seed(2)
#' fit <- lantern_mlp(ames_rec, data = ames_train, hidden_units = 20,
#' dropout = 0.05, epochs = 20, batch_size = 32)
#' fit
#'
#' autoplot(fit)
#'
#' library(ggplot2)
#'
#' predict(fit, ames_test) %>%
#' bind_cols(ames_test) %>%
#' ggplot(aes(x = .pred, y = Sale_Price)) +
#' geom_abline(col = "green") +
#' geom_point(alpha = .3) +
#' lims(x = c(4, 6), y = c(4, 6)) +
#' coord_fixed(ratio = 1)
#'
#' library(yardstick)
#' predict(fit, ames_test) %>%
#' bind_cols(ames_test) %>%
#' rmse(Sale_Price, .pred)
#' }
#'
#' }
#' @export
# S3 generic for fitting the single-layer network; dispatches on the class
# of `x` (data.frame, matrix, formula, recipe) to a lantern_mlp.* method.
lantern_mlp <- function(x, ...) {
UseMethod("lantern_mlp")
}
#' @export
#' @rdname lantern_mlp
lantern_mlp.default <- function(x, ...) {
  # Fallback method: any class without a dedicated lantern_mlp.* method gets
  # an informative error rather than a cryptic dispatch failure.
  msg <- sprintf("`lantern_mlp()` is not defined for a '%s'.", class(x)[1])
  stop(msg, call. = FALSE)
}
# XY method - data frame
#' @export
#' @rdname lantern_mlp
# XY interface for data frames: `x` holds the (numeric) predictors and `y`
# the outcome. mold() enforces the contract, the bridge does the real work.
lantern_mlp.data.frame <-
function(x,
y,
epochs = 100L,
hidden_units = 3L,
activation = "relu",
penalty = 0,
dropout = 0,
validation = 0.1,
learn_rate = 0.01,
momentum = 0.0,
batch_size = NULL,
conv_crit = -Inf,
verbose = FALSE,
...) {
processed <- hardhat::mold(x, y)
# Forward every tuning option by name to the shared fitting bridge.
lantern_mlp_bridge(
processed,
epochs = epochs,
hidden_units = hidden_units,
activation = activation,
learn_rate = learn_rate,
penalty = penalty,
dropout = dropout,
validation = validation,
momentum = momentum,
batch_size = batch_size,
conv_crit = conv_crit,
verbose = verbose,
...
)
}
# XY method - matrix
#' @export
#' @rdname lantern_mlp
# XY interface for matrices: same contract as the data-frame method, with a
# numeric predictor matrix `x`.
lantern_mlp.matrix <- function(x,
y,
epochs = 100L,
hidden_units = 3L,
activation = "relu",
penalty = 0,
dropout = 0,
validation = 0.1,
learn_rate = 0.01,
momentum = 0.0,
batch_size = NULL,
conv_crit = -Inf,
verbose = FALSE,
...) {
processed <- hardhat::mold(x, y)
# Forward every tuning option by name to the shared fitting bridge.
lantern_mlp_bridge(
processed,
epochs = epochs,
hidden_units = hidden_units,
activation = activation,
learn_rate = learn_rate,
momentum = momentum,
penalty = penalty,
dropout = dropout,
validation = validation,
batch_size = batch_size,
conv_crit = conv_crit,
verbose = verbose,
...
)
}
# Formula method
#' @export
#' @rdname lantern_mlp
# Formula interface: outcome ~ predictors evaluated against `data`; mold()
# builds the design matrix (factors become indicator columns).
lantern_mlp.formula <-
function(formula,
data,
epochs = 100L,
hidden_units = 3L,
activation = "relu",
penalty = 0,
dropout = 0,
validation = 0.1,
learn_rate = 0.01,
momentum = 0.0,
batch_size = NULL,
conv_crit = -Inf,
verbose = FALSE,
...) {
processed <- hardhat::mold(formula, data)
# Forward every tuning option by name to the shared fitting bridge.
lantern_mlp_bridge(
processed,
epochs = epochs,
hidden_units = hidden_units,
activation = activation,
learn_rate = learn_rate,
momentum = momentum,
penalty = penalty,
dropout = dropout,
validation = validation,
batch_size = batch_size,
conv_crit = conv_crit,
verbose = verbose,
...
)
}
# Recipe method
#' @export
#' @rdname lantern_mlp
# Recipe interface: `x` is an unprepped recipe; mold() preps it on `data`
# and returns the processed predictors/outcome for the bridge.
lantern_mlp.recipe <-
function(x,
data,
epochs = 100L,
hidden_units = 3L,
activation = "relu",
penalty = 0,
dropout = 0,
validation = 0.1,
learn_rate = 0.01,
momentum = 0.0,
batch_size = NULL,
conv_crit = -Inf,
verbose = FALSE,
...) {
processed <- hardhat::mold(x, data)
# Forward every tuning option by name to the shared fitting bridge.
lantern_mlp_bridge(
processed,
epochs = epochs,
hidden_units = hidden_units,
activation = activation,
learn_rate = learn_rate,
momentum = momentum,
penalty = penalty,
dropout = dropout,
validation = validation,
batch_size = batch_size,
conv_crit = conv_crit,
verbose = verbose,
...
)
}
# ------------------------------------------------------------------------------
# Bridge
# Common fitting bridge shared by every lantern_mlp() method: validates and
# coerces the tuning parameters, converts the molded predictors to a numeric
# matrix, runs the torch fitting engine, and wraps the result in a
# `lantern_mlp` object.
lantern_mlp_bridge <- function(processed, epochs, hidden_units, activation,
learn_rate, momentum, penalty, dropout,
validation, batch_size, conv_crit, verbose, ...) {
# Fail fast when the torch backend has never been installed.
if(!torch::torch_is_installed()) {
rlang::abort("The torch backend has not been installed; use `torch::install_torch()`.")
}
f_nm <- "lantern_mlp"
# check values of various argument values
# (check_* are project-local validators that abort with a message naming
# `f_nm` when a value is out of range)
if (is.numeric(epochs) & !is.integer(epochs)) {
epochs <- as.integer(epochs)
}
if (is.numeric(hidden_units) & !is.integer(hidden_units)) {
hidden_units <- as.integer(hidden_units)
}
check_integer(epochs, single = TRUE, 1, fn = f_nm)
# batch_size may be NULL (meaning full-batch); only validate when supplied
if (!is.null(batch_size)) {
if (is.numeric(batch_size) & !is.integer(batch_size)) {
batch_size <- as.integer(batch_size)
}
check_integer(batch_size, single = TRUE, 1, fn = f_nm)
}
check_integer(hidden_units, single = TRUE, 1, fn = f_nm)
check_double(penalty, single = TRUE, 0, incl = c(TRUE, TRUE), fn = f_nm)
check_double(dropout, single = TRUE, 0, 1, incl = c(TRUE, FALSE), fn = f_nm)
check_double(validation, single = TRUE, 0, 1, incl = c(TRUE, FALSE), fn = f_nm)
check_double(momentum, single = TRUE, 0, 1, incl = c(TRUE, TRUE), fn = f_nm)
check_double(learn_rate, single = TRUE, 0, incl = c(FALSE, TRUE), fn = f_nm)
check_logical(verbose, single = TRUE, fn = f_nm)
check_character(activation, single = TRUE, fn = f_nm)
## -----------------------------------------------------------------------------
# The engine needs a numeric matrix; a character result after as.matrix()
# means some predictor columns were non-numeric.
predictors <- processed$predictors
if (!is.matrix(predictors)) {
predictors <- as.matrix(predictors)
if (is.character(predictors)) {
rlang::abort(
paste(
"There were some non-numeric columns in the predictors.",
"Please use a formula or recipe to encode all of the predictors as numeric."
)
)
}
}
## -----------------------------------------------------------------------------
# Single-outcome model: take the first (only) outcome column.
outcome <- processed$outcomes[[1]]
## -----------------------------------------------------------------------------
fit <-
lantern_mlp_reg_fit_imp(
x = predictors,
y = outcome,
epochs = epochs,
hidden_units = hidden_units,
activation = activation,
learn_rate = learn_rate,
momentum = momentum,
penalty = penalty,
dropout = dropout,
validation = validation,
batch_size = batch_size,
conv_crit = conv_crit,
verbose = verbose
)
# Wrap the raw fit results plus the hardhat blueprint (needed at predict
# time) into the user-facing object.
new_lantern_mlp(
models = fit$models,
loss = fit$loss,
dims = fit$dims,
y_stats = fit$y_stats,
parameters = fit$parameters,
blueprint = processed$blueprint
)
}
# Internal constructor for `lantern_mlp` objects.
#
# Validates each component, then delegates to hardhat::new_model() so the
# result carries the blueprint required at predict time.
#
# models:     list of serialized (raw) torch models, one per epoch.
# loss:       numeric vector of per-epoch loss values.
# dims:       list of data dimensions (p, n, h, y).
# y_stats:    list of outcome summary statistics (mean/sd; NA for factors).
# parameters: list of tuning-parameter values.
# blueprint:  hardhat blueprint produced by mold().
new_lantern_mlp <- function( models, loss, dims, y_stats, parameters, blueprint) {
  if (!is.list(models)) {
    rlang::abort("'models' should be a list.")
  }
  if (!is.vector(loss) || !is.numeric(loss)) {
    rlang::abort("'loss' should be a numeric vector")
  }
  if (!is.list(dims)) {
    rlang::abort("'dims' should be a list")
  }
  # Previously unvalidated; checked here for consistency with the other
  # list components.
  if (!is.list(y_stats)) {
    rlang::abort("'y_stats' should be a list")
  }
  if (!is.list(parameters)) {
    rlang::abort("'parameters' should be a list")
  }
  if (!inherits(blueprint, "hardhat_blueprint")) {
    rlang::abort("'blueprint' should be a hardhat blueprint")
  }
  hardhat::new_model(models = models,
                     loss = loss,
                     dims = dims,
                     y_stats = y_stats,
                     parameters = parameters,
                     blueprint = blueprint,
                     class = "lantern_mlp")
}
## -----------------------------------------------------------------------------
# Fit code
# Internal fitting engine for `lantern_mlp` (regression or classification).
# Splits off an optional validation set, scales numeric outcomes, builds the
# torch dataloaders/module, and runs SGD for up to `epochs` passes,
# serializing the model after every epoch so any epoch can be revived later.
#
# Fixes relative to the previous revision:
#   * the per-epoch model snapshot was saved twice per loop iteration; the
#     redundant second `model_per_epoch[[epoch]]` assignment is removed
#     (the model does not change between the two assignments).
#   * typos in the NaN warning message.
lantern_mlp_reg_fit_imp <-
  function(x, y,
           epochs = 100L,
           batch_size = 32,
           hidden_units = 3L,
           penalty = 0,
           dropout = 0,
           validation = 0.1,
           learn_rate = 0.01,
           momentum = 0.0,
           activation = "relu",
           conv_crit = -Inf,
           verbose = FALSE,
           ...) {
    torch::torch_manual_seed(sample.int(10^5, 1))
    ## -------------------------------------------------------------------------
    # General data checks:
    check_data_att(x, y)
    # Check missing values
    compl_data <- check_missing_data(x, y, "lantern_mlp", verbose)
    x <- compl_data$x
    y <- compl_data$y
    n <- length(y)
    p <- ncol(x)
    if (is.factor(y)) {
      y_dim <- length(levels(y))
      # The model outputs softmax values, so use negative log-likelihood
      # loss on the log of the softmax.
      loss_fn <- function(input, target) {
        nnf_nll_loss(
          input = torch::torch_log(input),
          target = target
        )
      }
    } else {
      y_dim <- 1
      loss_fn <- function(input, target) {
        nnf_mse_loss(input, target$view(c(-1, 1)))
      }
    }
    # Hold out a random validation fraction before any outcome scaling.
    if (validation > 0) {
      in_val <- sample(seq_along(y), floor(n * validation))
      x_val <- x[in_val, , drop = FALSE]
      y_val <- y[in_val]
      x <- x[-in_val, , drop = FALSE]
      y <- y[-in_val]
    }
    # Center/scale numeric outcomes using training-set statistics only.
    if (!is.factor(y)) {
      y_stats <- scale_stats(y)
      y <- scale_y(y, y_stats)
      if (validation > 0) {
        y_val <- scale_y(y_val, y_stats)
      }
      loss_label <- "\tLoss (scaled):"
    } else {
      y_stats <- list(mean = NA_real_, sd = NA_real_)
      loss_label <- "\tLoss:"
    }
    # A NULL batch size means full-batch training; otherwise cap at n rows.
    if (is.null(batch_size)) {
      batch_size <- nrow(x)
    } else {
      batch_size <- min(batch_size, nrow(x))
    }
    ## -------------------------------------------------------------------------
    # Convert to index sampler and data loader
    ds <- lantern::matrix_to_dataset(x, y)
    dl <- torch::dataloader(ds, batch_size = batch_size)
    if (validation > 0) {
      ds_val <- lantern::matrix_to_dataset(x_val, y_val)
      dl_val <- torch::dataloader(ds_val)
    }
    ## -------------------------------------------------------------------------
    # Initialize model and optimizer
    model <- mlp_module(ncol(x), hidden_units, activation, dropout, y_dim)
    optimizer <-
      torch::optim_sgd(model$parameters, lr = learn_rate,
                       weight_decay = penalty, momentum = momentum)
    ## -------------------------------------------------------------------------
    loss_prev <- 10^38   # sentinel so the first relative-change test passes
    loss_vec <- rep(NA_real_, epochs)
    if (verbose) {
      epoch_chr <- format(1:epochs)
    }
    ## -------------------------------------------------------------------------
    model_per_epoch <- list()
    # Optimize parameters
    for (epoch in 1:epochs) {
      # training loop over mini-batches
      for (batch in torch::enumerate(dl)) {
        pred <- model(batch$x)
        loss <- loss_fn(pred, batch$y)
        optimizer$zero_grad()
        loss$backward()
        optimizer$step()
      }
      # calculate loss on the full validation or training data set
      if (validation > 0) {
        pred <- model(dl_val$dataset$data$x)
        loss <- loss_fn(pred, dl_val$dataset$data$y)
      } else {
        pred <- model(dl$dataset$data$x)
        loss <- loss_fn(pred, dl$dataset$data$y)
      }
      loss_curr <- loss$item()
      loss_vec[epoch] <- loss_curr
      if (is.nan(loss_curr)) {
        rlang::warn("Current loss is NaN. Training will be stopped.")
        break
      }
      # Relative improvement; training stops once it drops to conv_crit.
      loss_diff <- (loss_prev - loss_curr) / loss_prev
      loss_prev <- loss_curr
      # Persist this epoch's model, serialized to raw so no live torch
      # handles are retained.
      model_per_epoch[[epoch]] <- model_to_raw(model)
      if (verbose) {
        rlang::inform(
          paste("epoch:", epoch_chr[epoch], loss_label, signif(loss_curr, 5))
        )
      }
      if (loss_diff <= conv_crit) {
        break
      }
    }
    ## -------------------------------------------------------------------------
    # `stats` duplicates `y_stats` for backward compatibility with callers
    # that use either name.
    list(
      models = model_per_epoch,
      loss = loss_vec[!is.na(loss_vec)],
      dims = list(p = p, n = n, h = hidden_units, y = y_dim),
      y_stats = y_stats,
      stats = y_stats,
      parameters = list(activation = activation, learn_rate = learn_rate,
                        penalty = penalty, dropout = dropout, validation = validation,
                        batch_size = batch_size, momentum = momentum)
    )
  }
# A single-hidden-layer perceptron:
#   input -> linear -> activation -> (dropout) -> linear -> (softmax)
# For classification (y_dim > 1) the output is a softmax over classes; for
# regression (y_dim == 1) it is the raw linear output. When dropout = 0 the
# dropout slot is the identity function so forward() can pipe unconditionally.
mlp_module <-
  torch::nn_module(
    "mlp_module",
    initialize = function(num_pred, hidden_units, act_type, dropout, y_dim) {
      # Affine maps: predictors -> hidden layer, hidden layer -> output
      self$x_to_h <- torch::nn_linear(num_pred, hidden_units)
      self$h_to_y <- torch::nn_linear(hidden_units, y_dim)
      if (dropout > 0) {
        self$dropout <- torch::nn_dropout(p = dropout)
      } else {
        self$dropout <- identity
      }
      # Activation module chosen by name (relu/elu/tanh, else identity)
      self$activation <- get_activation_fn(act_type)
      if (y_dim > 1) {
        # Softmax over the class dimension of the (batch x class) output
        self$transform <- torch::nn_softmax(dim = 2)
      } else {
        self$transform <- identity
      }
    },
    forward = function(x) {
      x %>%
        self$x_to_h() %>%
        self$activation() %>%
        self$dropout() %>%
        self$h_to_y() %>%
        self$transform()
    }
  )
## -----------------------------------------------------------------------------
# Total number of trainable parameters in the network: the sum over all
# weight/bias tensors (of the revived epoch-1 model) of each tensor's size.
get_num_mlp_coef <- function(x) {
  tensors <- revive_model(x, 1)$parameters
  n_per_tensor <- vapply(tensors, function(p) prod(dim(p)), double(1))
  sum(n_per_tensor)
}
#' @export
print.lantern_mlp <- function(x, ...) {
  cat("Multilayer perceptron\n\n")
  # Use the full component name; the previous `x$param` relied on R's
  # partial `$` matching to resolve to `x$parameters`, which is fragile.
  cat(x$parameters$activation, "activation\n")
  lvl <- get_levels(x)
  if (is.null(lvl)) {
    chr_y <- "numeric outcome"
  } else {
    chr_y <- paste(length(lvl), "classes")
  }
  cat(
    format(x$dims$n, big.mark = ","), "samples,",
    format(x$dims$p, big.mark = ","), "features,",
    chr_y, "\n"
  )
  cat(
    x$dims$h, "hidden units,",
    format(get_num_mlp_coef(x), big.mark = ","), "model parameters\n"
  )
  if (x$parameters$penalty > 0) {
    cat("weight decay:", x$parameters$penalty, "\n")
  }
  if (x$parameters$dropout > 0) {
    cat("dropout proportion:", x$parameters$dropout, "\n")
  }
  cat("batch size:", x$parameters$batch_size, "\n")
  # Report the last recorded loss, labeled by data set and scaling.
  if (!is.null(x$loss)) {
    if (x$parameters$validation > 0) {
      if (is.na(x$y_stats$mean)) {
        cat("final validation loss after", length(x$loss), "epochs:",
            signif(x$loss[length(x$loss)]), "\n")
      } else {
        cat("final scaled validation loss after", length(x$loss), "epochs:",
            signif(x$loss[length(x$loss)]), "\n")
      }
    } else {
      if (is.na(x$y_stats$mean)) {
        cat("final training set loss after", length(x$loss), "epochs:",
            signif(x$loss[length(x$loss)]), "\n")
      } else {
        cat("final scaled training set loss after", length(x$loss), "epochs:",
            signif(x$loss[length(x$loss)]), "\n")
      }
    }
  }
  invisible(x)
}
# Extract the final epoch's parameter tensors as plain R arrays.
coef.lantern_mlp <- function(object, ...) {
  last_epoch <- length(object$models)
  module <- revive_model(object, epoch = last_epoch)
  lapply(module$parameters, as.array)
}
## -----------------------------------------------------------------------------
# Map an activation name to the corresponding torch activation module.
# Unrecognized names fall back to the identity function (linear activation),
# matching the previous if/else chain.
get_activation_fn <- function(arg, ...) {
  switch(
    arg,
    relu = torch::nn_relu(...),
    elu = torch::nn_elu(...),
    tanh = torch::nn_tanh(...),
    identity
  )
}
## -----------------------------------------------------------------------------
#' Plot model loss over epochs
#'
#' @param object A `lantern_mlp` object.
#' @param ... Not currently used
#' @return A `ggplot` object.
#' @details This function plots the loss function across the available epochs.
#' @export
autoplot.lantern_mlp <- function(object, ...) {
  history <- tibble::tibble(
    iteration = seq_along(object$loss),
    loss = object$loss
  )
  # Build the y-axis label from which data set was tracked and whether the
  # outcome was scaled before fitting.
  data_set <- if (object$parameters$validation > 0) "validation set" else "training set"
  scaling <- if (is.na(object$y_stats$mean)) "" else ", scaled"
  y_lab <- paste0("loss (", data_set, scaling, ")")
  ggplot2::ggplot(history, ggplot2::aes(x = iteration, y = loss)) +
    ggplot2::geom_line() +
    ggplot2::labs(y = y_lab)
}
# Serialize a torch model to a raw vector so it can be stored per epoch
# without keeping live torch handles.
model_to_raw <- function(model) {
  con <- rawConnection(raw(), open = "wr")
  # Register cleanup immediately after opening: the previous revision
  # registered on.exit() only *after* torch_save(), leaking the connection
  # if the save failed.
  on.exit(close(con), add = TRUE)
  torch::torch_save(model, con)
  rawConnectionValue(con)
}
|
#' Distance Over Time
#'
#' @param player_profile player profile from player_profile function
#' @param split_time Time elapsed from a standing (zero-velocity) start
#'
#' @return The distance traveled over a given amount of time from zero velocity
#'
distance_time <- function(player_profile, split_time) {
  v_max <- player_profile[[1]]   # maximal sprinting speed
  tau <- player_profile[[3]]     # acceleration time constant
  # Closed-form integral of the exponential sprint model
  # v(t) = v_max * (1 - exp(-t / tau)) from 0 to split_time.
  v_max * (split_time + tau * exp(-split_time / tau)) - v_max * tau
}
| /R/distance_time.R | no_license | aadler/midsprint | R | false | false | 480 | r | #' Distance Over Time
#'
#' @param player_profile player profile from player_profile function
#' @param split_time Time
#'
#' @return The distance traveled over a given amount of time from zero velocity
#'
distance_time <- function(player_profile, split_time) {
max_speed <- player_profile[[1]]
player_tau <- player_profile[[3]]
pos_time <- max_speed * (split_time + player_tau * exp(-split_time/player_tau)) - max_speed * player_tau
return(pos_time)
}
|
# Build a Graphviz "dot" diagram with DiagrammeR::grViz(): grey filled
# circles a-g colored by fill attribute, with a subgraph grouping a, b, c
# and grey edges forming a small binary tree.
subgraph_rank <- grViz("
digraph subgraph_rank {
node [shape = circle,
style = filled,
color = grey]
node [fillcolor = red]
a
node [fillcolor = green]
b c
node [fillcolor = orange]
d e f g
subgraph abc {a; b; c}
edge [color = grey]
a -> b a -> c
b -> d b -> e
c -> f c -> g
}
")
print(subgraph_rank) | /Graphviz/scripts/subgraph_rank.R | permissive | jonmcalder/DiagrammeR | R | false | false | 412 | r | subgraph_rank <- grViz("
digraph subgraph_rank {
node [shape = circle,
style = filled,
color = grey]
node [fillcolor = red]
a
node [fillcolor = green]
b c
node [fillcolor = orange]
d e f g
subgraph abc {a; b; c}
edge [color = grey]
a -> b a -> c
b -> d b -> e
c -> f c -> g
}
")
print(subgraph_rank) |
##-----------------------------------------------------------##
## ##
## WBP Demographic Model ##
## Libby Pansing ##
##-----------------------------------------------------------##
# Note... having issues with masking of dplyr functions when loading
# plyr later in the script. IF you want to re-source the other
# scripts, you'll need to unload dplyr and plyr and reload plyr.
# Or restart R.
library(dplyr)
library(popbio)
library(tidyr)
## Import relevant parameter estimates calculated in other scripts.
## Required files:
## 1) GRIN parameter estimates.R
## 2) GRIN WBP Survival Estimates RMark Nest.R
## 3) 2017 YNP Data.Rda
## 4) survival.Rda
source("/Users/elizabethpansing/Box Sync/PhD/Code/WBP Demographic Model Master/WBP Model Active/GRIN parameter estimates.R")
## Write function for determining leaf area index (LAI) as a function
## of stage specific dbh and the number of trees on the landscape
##***********************************************************##
## Define LAI ##
##***********************************************************##
# Mean dbh (cm) used for each life stage's leaf-area contribution.
# Pre-sapling stages carry no dbh, so their values are fixed at zero.
d1 <- function() 0      # SEED1
d2 <- function() 0      # SEED2
d3 <- function() 0      # CS
d4 <- function() 0      # SD1
d5 <- function() 6.69   # SAP
# MA: mean dbh of trees cored on HM in 2015; the average dbh of trees
# > 12.5 cm dbh (the sapling cutoff).
d6 <- function() 30.2
## Leaf area coefficients describing the leaf-area ~ diameter relationship,
## assumed to follow the general form y = a * x^b; estimated via MLE.
alpha1 <- function() 0.456
alpha2 <- function() 0.0736
alpha3 <- function() 2.070
# Per-capita leaf area by stage (same ordering as the population vector):
# SEED1/SEED2/CS contribute nothing; SD contributes a fixed per-plant leaf
# area (alpha1, no dbh); SAP and MA use the allometry a * dbh^b.
# NOTE(review): d4() (SD1 dbh) is defined above but never used here.
l <- function(){
  c(d1(), # SEED1 do not contribute to LAI
    d2(), # SEED2 do not contribute to LAI
    d3(), # CS do not contribute to LAI
    alpha1(), # SD1 don't have DBH but do contribute to LAI
    alpha2() * d5() ^ alpha3(),
    alpha2() * d6() ^ alpha3())
}
LAIb <- function(){ # Background leaf area index. This is where competition can be incorporated...
  return(0)
}
# Leaf area index of the study area: per-capita leaf areas dotted with the
# stage abundances x, per 10,000 m^2, plus the background LAI.
LAI <- function(x) {
  l <- l()
  return((t(l) %*% x)/10000 + LAIb())
}
# NOTE(review): `n` is not defined until the projection section near the
# bottom of this script, so this check errors if the file is sourced
# top-to-bottom; run it interactively after defining n.
LAI(n)
## Now define the distributions from which survival and
## transition rates will be drawn to create stochastic demographic rates
##-----------------------------------------------------------##
## Empirically derived survival ##
## & transition distributions ##
##-----------------------------------------------------------##
##-----------------------------------------------------------##
## SEED1 ##
##-----------------------------------------------------------##
# SEED1: first-year seed pool. Seeds never remain SEED1 across years, so the
# stage has no survival-in-place term.
s_SEED1 <- 0 # Assume seeds either transition to SEED2,
             # transition to CS (first year seedling)
             # or die
# P(SEED1 -> SEED2): overwinter seed survival, Beta-distributed. Shape
# parameters come from the sourced "GRIN parameter estimates.R".
t1_SEED1 <- function(size = 1){ # survival probability of seeds
  rbeta(n = size,                     # Drawn from a beta to give a
        shape1 = SEED1_survive_alpha, # probability of seeds transitioning
        shape2 = SEED1_survive_beta)  # to SEED2 stage
}
# P(SEED1 -> CS): first-year germination probability, Beta-distributed.
t2_SEED1 <- function(size = 1){ # Germination probability of seeds
  rbeta(n = size,                   # Drawn from a beta to give a
        shape1 = SEED1_germ_alpha,  # probability of seeds germinating
        shape2 = SEED1_germ_beta)   # (transitioning to CS)
}
# SEED2: second-year seed pool.
s_SEED2 <- 0 # Assume seeds either transition to
             # CS (first year seedling) or die
# P(SEED2 -> CS): second-year germination probability, Beta-distributed.
t3_SEED2 <- function(size = 1){
  # Germination probability of seeds
  rbeta(n = size,                   # Drawn from a beta to give a
        shape1 = SEED2_germ_alpha,  # probability of seeds transitioning
        shape2 = SEED2_germ_beta)   # from SEED2 to CS stage (i.e., germination)
}
# CS: cotyledon (first-year) seedlings.
s_CS <- 0 # Assume first-year seedlings transition to SD or die
# P(CS -> SD): first-year seedling survival, Beta-distributed.
t_CS <- function(size = 1){ # Survival probability of first
  rbeta(n = size,                  # year seedlings (cotyledon seedlings)
        shape1 = CS_survive_alpha, # Drawn from a beta to give prob
        shape2 = CS_survive_beta)  # of transitioning to SD stage
}
##-----------------------------------------------------------##
## SD ##
##-----------------------------------------------------------##
# Annual survival of established seedlings (SD), Beta-distributed; shape
# parameters come from the sourced parameter-estimate script.
s_SD <- function(size = 1){ # survival probability of seedlings
  rbeta(n = size,                  # Drawn from a beta to give prob
        shape1 = SD_survive_alpha, # of surviving any given year
        shape2 = SD_survive_beta)
}
# Annual sapling survival. Fixed constant for now (no distribution has been
# identified yet).
s_SAP <- function(){ # Survival probability of saplings
  return(0.8)
}
# Annual survival of reproductively mature adults. Assumed constant and
# high: limited death from senescence because whitebark pine is long lived
# (lifespan up to 1200 yrs).
s_MA <- function(){
  0.99
}
# Stage-ordered survival vector for the multi-year stages (SD, SAP, MA).
# The seed/CS stages are handled by the transition functions above and are
# therefore not included here. Called with size = 1 in this script.
survival_vector <- function(size = 1){ # survival vector
  c(
    s_SD(size = size),
    s_SAP(),
    s_MA())
}
# Quick visual check of one random draw.
survival_vector()
##-----------------------------------------------------------##
## DEFINE ##
## RESIDENCE TIME ##
## VECTOR ##
##-----------------------------------------------------------##
# Expected residence time (years) in each stage. Seed and CS stages last one
# year by definition; adults remain in MA until death (Inf).
residence_SEED1 <- 1   # years as seed (SEED1)
residence_SEED2 <- 1   # years as seed (SEED2)
residence_CS <- 1      # years as first-year seedling (CS)
residence_SD <- 28     # years as seedling (SD)
residence_SAP <- 20    # years as sapling (SAP)
residence_MA <- Inf    # years as reproductively mature
# Residence times for the multi-year stages (SD, SAP, MA); pairs
# element-wise with survival_vector() inside si()/ti() below. The one-year
# seed/CS constants above are not needed in this vector.
residence_vector <-
  c(residence_SD,
    residence_SAP,
    residence_MA)
residence_vector
##-----------------------------------------------------------##
## GET MATRIX ELEMENTS ##
##-----------------------------------------------------------##
# P(survive AND remain in the current stage) for the multi-year stages
# (SD, SAP, MA): s * (1 - 1/T), where T is the stage residence time.
si <- function(size = 1){
  (1 - (1/residence_vector)) *
    survival_vector(size = size)
}
# P(survive AND advance to the next stage): s * (1/T).
ti <- function(size = 1) {
  (1/residence_vector) *
    survival_vector(size = size)
}
# One random draw of the annual transition matrix (stages: SEED1, SEED2,
# CS, SD, SAP, MA). Germination rates ([3,1] & [3,2]) and fecundity ([1,6])
# are density/light dependent, so they are added separately in project()
# rather than included here.
# NOTE(review): the argument `t` is never used inside the body; each call
# simply draws fresh stochastic rates.
S <- function(t){
  matrix(c( 0,           0, 0,       0,        0,        0,
            t1_SEED1(1), 0, 0,       0,        0,        0,
            0,           0, 0,       0,        0,        0,
            0,           0, t_CS(1), si(1)[1], 0,        0,
            0,           0, 0,       ti(1)[1], si(1)[2], 0,
            0,           0, 0,       0,        ti(1)[2], si(1)[3]),
         byrow = T, nrow = 6,
         dimnames = list(c("SEED1", "SEED2", "CS", "SD", "SAP", "MA"),
                         c("SEED1", "SEED2", "CS", "SD", "SAP", "MA")))
}
# Quick visual check of one random draw of the matrix.
t <- 1
S(t)
##-----------------------------------------------------------##
## FERTILITY ##
##-----------------------------------------------------------##
No_seeds_per_cone <- 45
# Annual cone production per tree. Seed production in whitebark pine is
# quasi-periodic (masting roughly every ~4 years), modeled as a cosine in
# time plus Gaussian noise and truncated below at zero; maximum and
# expected values from IGBST cone monitoring since 1980.
# Fix: the previous revision grew `result` one element at a time inside a
# loop and wrote the negative case as `value - value`; this vectorized form
# draws the same RNG stream (`size` sequential normal deviates) and clamps
# with pmax, so results are identical for a given seed.
No_cones <- function(t, size = 1){
  raw <- 12.5 * cos(1.5 * t) + 14 + rnorm(size, sd = 3.5)
  pmax(raw, 0)  # negative cone counts are impossible
}
# Unit vector selecting the SEED1 (row 1) entry of the population vector.
e1 <- matrix(c(1,0,0,0,0,0))
# New-seed input to the SEED1 pool: cones per tree * seeds per cone * the
# number of mature adults x[6], placed in row 1 via e1.
# NOTE(review): the `size` argument is ignored; No_cones() is always called
# with size = 1 here.
No_seeds <- function(t, size = 1, x){
  No_cones(t,1) * No_seeds_per_cone * x[6] * e1
}
##-----------------------------------------------------------##
## Germination ##
##-----------------------------------------------------------##
##-----------------------------------------------------------##
## Define variables assumed fixed & known ##
##-----------------------------------------------------------##
Pfind <- 0.55 # Proportion of seeds found by nutcrackers
Pcons <- 0.3  # Proportion of seeds consumed by nutcrackers (prior to caching?)
nBirds <- 3   # No. Clark's nutcrackers in the theoretical population
SpC <- 3.7    # No. seeds per cache
# Seeds available to each bird, given the current SEED1 pool x[1].
SpB <- function(x){
  x[1]/nBirds
}
## Reduction factors:
## 1) rALS decreases germination as light availability decreases
## 2) rCache (currently disabled, see below) would increase caching
##    propensity as seed availability increases
# Logistic light penalty: ~1 in open stands, approaching 0 once LAI
# exceeds ~3.
rALS <- function(x){
  1/(1 + exp(2*(LAI(x)-3)))
}
# rCache <- function(x){
#   0.73/(1+ exp((31000-SpB(x))/3000))
# }
# Unit vector selecting the CS (row 3) entry of the population vector.
e3 <- matrix(c(0,0,1,0,0,0))
## Define germination probability.
## First try: the caching reduction factor is omitted, so the proportion of
## seeds cached is assumed constant and does not vary with cone production.
# Expected new germinants (CS stage) from the current year's seed crop:
# seeds produced -> uneaten fraction -> caches (3.7 seeds/cache) -> caches
# not retrieved by nutcrackers -> light-limited, Beta-distributed
# germination draw, mapped onto the CS row of the population vector via e3.
germ1st <- function(t, size = 1, x){
  # FIX: the previous revision passed the *global* starting vector `n` to
  # No_seeds() instead of the current population `x`, so germination always
  # reflected the initial population rather than the simulated one.
  # `size` is retained for interface compatibility but is unused (a single
  # Beta draw is made).
  as.vector(No_seeds(t = t, size = 1, x = x)[1]) * ((1 - Pcons)/3.7) * (1 - Pfind) *
    as.vector(rALS(x)) *
    rbeta(n = 1, shape1 = SEED1_germ_alpha, shape2 = SEED1_germ_beta) * e3
}
# Variant of germ1st that would include the caching reduction factor.
# NOTE(review): this calls rCache() — which is commented out above and was
# defined to take an argument — so the function errors if called; it is not
# used by project(). It also passes the global `n` (not `x`) to No_seeds(),
# mirroring the same issue as germ1st. Confirm intent before re-enabling.
germ1st_rcache <- function(t, size = 1, x){
  as.vector(No_seeds(t = t, size = 1, x = n)[1])*((1-Pcons)*rCache()/3.7) * (1-Pfind) * as.vector(rALS(x)) * rbeta(n = 1, shape1 = SEED1_germ_alpha, shape2= SEED1_germ_beta) * e3
}
# Germinants from the two-year-old seed pool (SEED2, x[2]): caches (3.7
# seeds per cache) times the light reduction factor and a Beta-distributed
# second-year germination draw, mapped onto the CS row via e3.
# `t` and `size` are accepted for interface parity with germ1st but unused.
germ2nd <- function(t, size = 1, x){
  as.vector(x[2]/3.7 * rALS(x)) * rbeta(n = 1, shape1 = SEED2_germ_alpha, shape2 = SEED2_germ_beta) * e3
}
##-----------------------------------------------------------##
## Function that projects pop ##
## sizes and incorporates fire ##
##-----------------------------------------------------------##
library(plyr)
# Stochastic projection: tracks stage-based population sizes over
# `projection_time` years for `reps` replicate simulations, optionally with
# stand-replacing fires whose waiting times are drawn from a negative
# binomial (mean fire return interval 230 yr).
#
# Returns a list with:
#   pop_sizes      - long data frame of per-iteration, per-year stage counts
#   fire_intervals - the fire years for each iteration
#   LAI_track      - matrix of (iteration, year, LAI)
#
# NOTE(review):
#   * `pops <- matrix(0, ..., ncol = length(n))` below uses the *global* `n`
#     (defined near the bottom of the script), not the `n0` argument —
#     presumably they always have the same length (6); confirm.
#   * `results` is first created as an array (also aliased to
#     `stochastic_matrixes`, which is never used) and later overwritten by
#     the returned list.
#   * `fire = T` uses the abbreviation `T`; prefer `TRUE`.
project <- function(projection_time, n0, reps = 100, fire = T){ # stochastic projection function
  # that tracks stage based pop sizes
  # over time for reps number of iterations
  results <- stochastic_matrixes <-                            # holds stage-based
    array(0, dim = c(projection_time, length(n0) + 1, reps))   # population sizes
  for(j in 1:reps){ # Iterate through i years (projection_time) of population growth j times (iterations)
    # Incorporate FIRE: accumulators are created once and carried across
    # iterations (the j != 1 branch is a no-op re-assignment).
    if(j == 1){
      intervals <- NULL # fire intervals for each iteration
      iteration <- NULL # the iteration during which each fire occurred
      LAI_tracker <- matrix(c(rep(1:reps, each = projection_time),rep(0, projection_time*reps*2)),
                            nrow = projection_time * reps, ncol = 3, byrow = F)
    } else if(j != 1){
      intervals <- intervals
      iteration <- iteration
      LAI_tracker <- LAI_tracker
    }
    if(fire == TRUE){
      # Waiting times between fires -> cumulative fire years for this run
      interval <- rnbinom(n = 4, size = 1, mu = 230) %>%
        cumsum(.)
      # trim to only include fire years within the projection time
      interval <- interval[-which(interval > projection_time)]
      ## Track the fire years for each iteration.
      intervals <- append(intervals, interval, after = length(intervals))
    } else if(fire == FALSE){
      intervals <- NULL
      interval <- NULL
    }
    iteration <- append(iteration, rep(j, length(interval)),
                        after = length(iteration))
    # Empty matrix to hold this iteration's yearly population vectors.
    # NOTE(review): uses the global `n`, not `n0` (see header note).
    pops <- matrix(0, nrow = projection_time, ncol = length(n))
    for(i in 1:projection_time){ # get population
      if (i == 1){
        n <- n0
      }else if(i != 1){
        n <- n
      }
      t <- i # time counter
      tSinceFire <- ifelse(i == 1, 1, tSinceFire)
      # Does a stand-replacing fire occur this year?
      fire_current <- ifelse(t %in% interval, TRUE, FALSE)
      # LAI_tracker_each_iteraton <- matrix(0, nrow = projection_time, ncol = 2)
      # Record (year, LAI) in this iteration's rows of the tracker.
      LAI_tracker[j*projection_time - (projection_time)+i,2:3] <- c(i, LAI(n))
      # Three possibilities:
      # 1) Fire this year: kills the population (stand-replacing burn over
      #    the whole population; no regeneration the year of the fire).
      # 2) The year after a fire: assume seed input from an outside
      #    population on a similar masting schedule (system not fully
      #    closed).
      # 3) No fire and > 1 year post-fire: normal matrix projection plus
      #    germination and new seed input.
      if(fire_current == T){
        tSinceFire <- 0
        pops[i,] <- c(0, 0, 0, 0, 0, 0) # Assuming stand replacing burn with no survival and no regeneration.
        n <- pops[i,]                   # Most fires go out with first snow, e.g. Romme 1982
      } else if(fire_current == F & tSinceFire == 1 & t != 1){
        tSinceFire <- tSinceFire + 1
        pops[i,] <- t(No_seeds(size = 1, t = t, x = n))
        n <- pops[i,]
      } else if((fire_current == F & tSinceFire != 1) |
                (fire_current == F & tSinceFire == 1 & t == 1)){
        tSinceFire <- tSinceFire + 1
        mat <- S(t = t)
        pops[i,] <- t(mat%*%n + germ1st(t = t, size = 1, x = n) + germ2nd(t = t, size = 1, x = n) + No_seeds(t = t, size = 1, x = n)) # Defines the intermediate population size
        n <- as.matrix(pops[i,], nrow = length(pops[i,]), ncol = 1)
        # } else if(fire == F & tSinceFire == 1 & t == 2){
        #   tSinceFire <- tSinceFire + 1
        #
        #   pops[i,] <- c(500000, 0, 0, 0, 0, 0)
        #   n <- as.matrix(pops[i,], nrow = length(pops[i,]), ncol = 1)
      }
    }
    pops <- cbind(pops, rep(1:projection_time)) # Append year index for this iteration
    results[, ,j] <- pops                       # Stack iterations into a 3-d array
  }
  pop_sizes <- plyr::adply(results, 3) # Array -> data frame for easier manipulation
  colnames(pop_sizes) <- c("Iteration", "SEED1", "SEED2", "CS", "SD", "SAP", "MA", "t")
  fire_intervals <- cbind(iteration, intervals) # Fire return intervals per iteration
  results <- list(pop_sizes = pop_sizes, fire_intervals = fire_intervals, LAI_track = LAI_tracker)
  return(results)
}
##-----------------------------------------------------------##
## Population projection ##
##-----------------------------------------------------------##
# Starting population vector (SEED1, SEED2, CS, SD, SAP, MA); arbitrary.
n <- c(62, 580 + 38, 79, 65, 91, 353)
projection <- project(projection_time = 500, n0 = n, reps = 100, fire = FALSE)
# NOTE(review): `pops` below is never used again (the next pipeline redoes
# the same gather()).
pops <- gather(projection$pop_sizes, Stage, Count, - Iteration, - t)
# Total living trees (post-germination stages only) per iteration and year;
# seed stages are excluded since most readers would not count seeds as part
# of the population. Density is per m^2 over the 10,000 x 10,000 study area.
# NOTE(review): funs() is deprecated in current dplyr; `ggplot()` below also
# requires ggplot2, which is not attached by the library() calls at the top
# of this script — presumably loaded by the sourced parameter script; verify.
pop_sizes <- gather(projection$pop_sizes, Stage, Count, -Iteration, -t) %>%
  filter(., !Stage == "SEED1") %>%
  filter(., !Stage == "SEED2") %>%
  group_by(., Iteration, t) %>%
  summarise_at(., vars(Count), funs(sum)) %>%
  ungroup(.) %>%
  mutate(., Density = Count/(10000*10000))
fire_intervals <- as.data.frame(projection$fire_intervals)
# Density trajectories for all iterations.
ggplot(data = pop_sizes, aes(x = t, y = Density, col = Iteration)) +
  geom_line(lwd = 1) +
  theme(axis.title.x=element_text( size=18, vjust=0)) +
  theme(axis.text.x=element_text(size=18)) +
  theme(axis.title.y=element_text( size=18, vjust=2.75, face = "bold")) +
  theme(axis.text.y=element_text(size = 18)) +
  labs(x = "Years", y = expression(paste("Density (no./",m^2,")"))) #+
# theme(legend.position="none")
## Plot of projection iteration 1
projection1 <- pop_sizes %>%
  filter(., Iteration == 1)
ggplot(data = projection1, aes(x = t, y = Count)) +
  geom_line(lwd = 1) +
  theme(legend.position="none") +
  theme(axis.title.x=element_text( size=18, vjust=0)) +
  theme(axis.text.x=element_text(size=18)) +
  theme(axis.title.y=element_text( size=18, vjust=2.75, face = "bold")) +
  theme(axis.text.y=element_text(size = 18))
# Mean and maximum peak densities across iterations.
(mean_density <- pop_sizes %>%
    group_by(., Iteration) %>%
    filter(., Count == max(Count)) %>%
    ungroup(.) %>%
    summarise(., mean(Density)))
(max_density <- pop_sizes %>%
    group_by(., Iteration) %>%
    filter(., Count == max(Count)) %>%
    ungroup(.) %>%
    summarise(., max(Density)))
## Plot of projection iteration 1 for the first years (t < 3)
projection1t30 <- projection1 %>%
  filter(., t < 3)
ggplot(data = projection1t30, aes(x = t, y = Count)) +
  geom_point() +
  geom_line(lwd = 1) +
  theme(legend.position="none") +
  theme(axis.title.x=element_text( size=18, vjust=0)) +
  theme(axis.text.x=element_text(size=18)) +
  theme(axis.title.y=element_text( size=18, vjust=2.75, face = "bold")) +
  theme(axis.text.y=element_text(size = 18))
# NOTE(review): `projection` is a list (pop_sizes/fire_intervals/LAI_track)
# with no `Count` or `t` elements, so this hist() call fails as written —
# `pop_sizes$Count[pop_sizes$t == 500]` was probably intended.
hist(projection$Count[projection$t == 500], breaks = 20,
     main = "", xlab = "Population size at time = 500 years")
##-----------------------------------------------------------##
## LAI Diagnostics ##
##-----------------------------------------------------------##
# Visual check of the leaf-area ~ dbh allometry at representative diameters.
LAI_data <- data.frame(DBH = c(0, 0, 0, 2.05, 12.5, 37),
                       LA = c(0, 0, alpha1(), alpha2()*2.05^alpha3(), alpha2()*12.5^alpha3(), alpha2()*37^alpha3()))
ggplot(data = LAI_data, aes(x = DBH, y = LA))+
  geom_line()+
  geom_point()
# Mean/min/max LAI over time across iterations, from the LAI tracker
# (projection[[3]]: columns V1 = iteration, V2 = year, V3 = LAI).
# NOTE(review): funs() is deprecated in current dplyr.
LAI_values <- as.data.frame(projection[[3]]) %>%
  dplyr::select(., Iteration = V1, Time = V2, LAI = V3) %>%
  group_by(., Time) %>%
  summarise_at(., vars(LAI), funs(mean, min, max)) %>%
  dplyr::select(.,Time, LAI = mean, min, max)
ggplot(data = LAI_values, aes(x = Time, y = LAI))+
  geom_line() +
  geom_ribbon(data = LAI_values, aes(ymin = min, ymax = max), alpha = 0.5)
# ggplot(predictions, aes(x = time, y = prob)) +
#   # geom_point() +
#   geom_ribbon(aes(ymin = lcl, ymax = ucl), alpha = 0.5, fill = "#ef8a62") +
#   geom_line(size = 0.5) +
#   xlab("Year") +
#   ylab("Annual survival rate") +
#   scale_y_continuous(limits = c(0.75,1)) +
#   scale_x_continuous(limits = c(1990, 2017))
##-----------------------------------------------------------##
## Get stochastic lambda and ##
## elasticities ##
##-----------------------------------------------------------##
## Create a list of 10,000 matrixes to use in stochastic lambda
## and elasticity analyses
## Build a list of 10,000 random transition matrices for popbio's stochastic
## lambda and elasticity analyses (S() ignores its argument; each call is an
## independent draw of the vital rates).
reps <- 10000
stochastic_matrixes <- array(0, dim = c(6,6,reps))
for(i in 1:reps){
  stochastic_matrixes[, , i] <- S(i)
}
# Convert the array into the list-of-matrices form popbio expects.
# (The initial list(NULL) element is overwritten on the first pass.)
A <- list(NULL)
for(i in 1:reps){
  mat <- stochastic_matrixes[,,i]
  A[[i]] <- mat
}
rm(stochastic_matrixes)
## Stochastic elasticities (popbio::stoch.sens)
stoch.elast<- stoch.sens(A, tlimit = 500)
stoch.elast
## Stochastic growth rate; exp() of the log-growth approximation gives
## lambda_s on the natural scale.
sgr <- stoch.growth.rate(A)
sgr_real <- exp(sgr$approx)
| /IAVS Density Dependent effects.R | no_license | erpansing/whitebark-pine-demographic-model-master | R | false | false | 22,923 | r | ##-----------------------------------------------------------##
## ##
## WBP Demographic Model ##
## Libby Pansing ##
##-----------------------------------------------------------##
# Note... having issues with masking of dplyr functions when loading
# plyr later in the script. IF you want to re-source the other
# scripts, you'll need to unload dplyr and plyr and reload plyr.
# Or restart R.
library(dplyr)
library(popbio)
library(tidyr)
## Import relevant parameter estimates calculated in other scripts.
## Required files:
## 1) GRIN parameter estimates.R
## 2) GRIN WBP Survival Estimates RMark Nest.R
## 3) 2017 YNP Data.Rda
## 4) survival.Rda
source("/Users/elizabethpansing/Box Sync/PhD/Code/WBP Demographic Model Master/WBP Model Active/GRIN parameter estimates.R")
## Write function for determining leaf area index (LAI) as a function
## of stage specific dbh and the number of trees on the landscape
##***********************************************************##
## Define LAI ##
##***********************************************************##
#mean dbh for each stage
# Stage-specific mean diameter at breast height (dbh, cm).
# Seed and seedling stages (SEED1, SEED2, CS, SD1) have no measurable stem,
# so their dbh is 0.
d1 <- function() 0      # dbh, SEED1 (first-year seed)
d2 <- function() 0      # dbh, SEED2 (second-year seed)
d3 <- function() 0      # dbh, CS (cotyledon seedling)
d4 <- function() 0      # dbh, SD1 (seedling)
d5 <- function() 6.69   # dbh, SAP (sapling)
# Mean dbh of reproductively mature adults (MA); per the original note, the
# average dbh of trees > 12.5 cm cored on HM in 2015.
d6 <- function() 30.2
## Leaf area coefficients. Define the relationship between leaf area and diameter.
## Estimated via MLE assuming the general form y = ax^b
# Leaf-area allometry coefficients for the relationship y = a * x^b between
# leaf area and diameter (estimated via MLE, per the note above).
# alpha1 is used directly as the fixed per-capita leaf area of the SD1 stage;
# alpha2 and alpha3 are the multiplier a and exponent b applied to dbh.
alpha1 <- function() 0.456
alpha2 <- function() 0.0736
alpha3 <- function() 2.070
# Per-capita leaf area for each of the six life stages, in stage order
# (SEED1, SEED2, CS, SD1, SAP, MA). SAP and MA use the allometry
# a * dbh^b with a = alpha2() and b = alpha3(); SD1 has no dbh, so its leaf
# area is the constant alpha1(). Seed and first-year stages contribute 0.
# Units are presumably m^2 per individual (TODO confirm; LAI() divides the
# total by 10000).
l <- function(){
c(d1(), # SEED1 does not contribute to LAI (d1() returns 0)
d2(), # SEED2 does not contribute to LAI (d2() returns 0)
d3(), # CS does not contribute to LAI (d3() returns 0)
alpha1(), # SD1 has no dbh but contributes a fixed leaf area
alpha2() * d5() ^ alpha3(), # SAP: allometric leaf area from mean dbh
alpha2() * d6() ^ alpha3()) # MA: allometric leaf area from mean dbh
}
# Background leaf area index added on top of the modeled stand. This is the
# hook for incorporating competition later; currently there is no background
# contribution.
LAIb <- function() {
0
}
# Leaf area index of the study area: per-capita stage leaf areas (l()) dotted
# with the stage abundance vector x, scaled by 1/10000 (presumably converting
# summed leaf area to a per-hectare basis, 10,000 m^2 -- TODO confirm), plus
# the background LAI. Returns a 1x1 matrix.
LAI <- function(x) { # LAI of the study area
l <- l()
return((t(l) %*% x)/10000 + LAIb())
}
# NOTE(review): `n` is not defined until the "Population projection" section
# much further down, so this sanity check only works when re-running the
# script after a first pass (or interactively with `n` already in scope).
LAI(n)
## Now define the distributions from which survival and
## transition rates will be drawn to create stochastic demographic rates
##-----------------------------------------------------------##
## Empirically derived survival ##
## & transition distributions ##
##-----------------------------------------------------------##
##-----------------------------------------------------------##
## SEED1 ##
##-----------------------------------------------------------##
s_SEED1 <- 0 # Assume seeds either transition to SEED2,
# transition to CS (first year seedling)
# or die
# SEED1 survival draw: probability that a first-year seed remains viable in
# the seed bank, i.e. transitions to the SEED2 stage. Beta parameters
# SEED1_survive_alpha/_beta come from the sourced "GRIN parameter estimates.R".
# Returns `size` independent draws.
t1_SEED1 <- function(size = 1){ # survival probability of seeds
rbeta(n = size, # one beta draw per requested replicate
shape1 = SEED1_survive_alpha,
shape2 = SEED1_survive_beta)
}
# SEED1 germination draw: probability that a first-year seed germinates into
# a CS (cotyledon seedling). Beta parameters come from the sourced script.
t2_SEED1 <- function(size = 1){ # Germination probability of seeds
rbeta(n = size,
shape1 = SEED1_germ_alpha,
shape2 = SEED1_germ_beta)
}
##-----------------------------------------------------------##
## SEED2 ##
##-----------------------------------------------------------##
s_SEED2 <- 0 # Assume seeds either transition to
# CS (first year seedling) or die
# SEED2 germination draw: probability that a second-year seed germinates,
# i.e. transitions from SEED2 to the CS stage. Beta parameters
# SEED2_germ_alpha/_beta come from the sourced "GRIN parameter estimates.R".
t3_SEED2 <- function(size = 1){
# Germination probability of seeds, `size` independent draws
rbeta(n = size,
shape1 = SEED2_germ_alpha,
shape2 = SEED2_germ_beta) # SEED2 -> CS transition (germination)
}
##-----------------------------------------------------------##
## CS ##
## First Year Seedling ##
##-----------------------------------------------------------##
s_CS <- 0 # Assume seeds transition to
# SD or die
# CS survival draw: probability that a first-year (cotyledon) seedling
# survives, i.e. transitions to the SD stage. Beta parameters
# CS_survive_alpha/_beta come from the sourced "GRIN parameter estimates.R".
t_CS <- function(size = 1){ # Survival probability of first-year seedlings
rbeta(n = size,
shape1 = CS_survive_alpha,
shape2 = CS_survive_beta) # probability of transitioning to the SD stage
}
##-----------------------------------------------------------##
## SD ##
##-----------------------------------------------------------##
# SD annual survival draw: probability a seedling survives any given year.
# Beta parameters SD_survive_alpha/_beta come from the sourced parameter
# script. Returns `size` independent draws.
s_SD <- function(size = 1){ # survival probability of seedlings
rbeta(n = size,
shape1 = SD_survive_alpha,
shape2 = SD_survive_beta)
}
##-----------------------------------------------------------##
## SAP ##
##-----------------------------------------------------------##
# Annual sapling (SAP) survival. No empirical distribution is available yet,
# so a fixed rate is used for now.
s_SAP <- function() {
0.8
}
##-----------------------------------------------------------##
##                          MA                               ##
##-----------------------------------------------------------##
# Annual survival of reproductively mature adults (MA). Held constant and
# near 1: whitebark pine is long lived (lifespan up to ~1200 yr), so
# mortality from senescence is assumed negligible.
s_MA <- function() {
0.99
}
##-----------------------------------------------------------##
## DEFINE ##
## SURVIVAL ##
## VECTOR ##
##-----------------------------------------------------------##
# Annual survival rates for the three post-germination stages, in the order
# (SD, SAP, MA). `size` is forwarded to the stochastic SD draw; SAP and MA
# are currently constants, so the result has length size + 2.
# NOTE(review): for size > 1 the result is no longer a 3-vector aligned with
# residence_vector -- downstream si()/ti() only ever call this via size = 1.
survival_vector <- function(size = 1){ #survival vector
c(
s_SD(size = size),
s_SAP(),
s_MA())
}
# Sanity check: one stochastic draw of the (SD, SAP, MA) survival rates.
survival_vector()
##-----------------------------------------------------------##
## DEFINE ##
## RESIDENCE TIME ##
## VECTOR ##
##-----------------------------------------------------------##
# Expected number of years an individual spends in each life stage.
residence_SEED1 <- 1   # years as a first-year seed (SEED1)
residence_SEED2 <- 1   # years as a second-year seed (SEED2)
residence_CS    <- 1   # years as a cotyledon seedling (CS)
residence_SD    <- 28  # years as a seedling (SD)
residence_SAP   <- 20  # years as a sapling (SAP)
residence_MA    <- Inf # mature adults never leave the stage
##-----------------------------------------------------------##
##                        DEFINE                             ##
##                    RESIDENCE TIME                         ##
##                        VECTOR                             ##
##-----------------------------------------------------------##
# Residence times for the three stages used by si()/ti(): (SD, SAP, MA).
residence_vector <- c(residence_SD, residence_SAP, residence_MA)
residence_vector
##-----------------------------------------------------------##
## GET MATRIX ELEMENTS ##
##-----------------------------------------------------------##
# P(survive AND remain in the same stage), elementwise over (SD, SAP, MA):
# stay with probability 1 - 1/residence_time, conditional on annual survival.
# For MA, 1 - 1/Inf = 1, so staying reduces to s_MA().
si <- function(size = 1){
(1 - (1/residence_vector)) *
survival_vector(size = size)
}
# P(survive AND advance to the next stage), elementwise over (SD, SAP, MA):
# advance with probability 1/residence_time, conditional on annual survival.
# For MA, 1/Inf = 0, so mature adults never transition out.
ti <- function(size = 1) {
(1/residence_vector) *
survival_vector(size = size)
}
# One stochastic draw of the 6x6 annual transition matrix for stages
# (SEED1, SEED2, CS, SD, SAP, MA). Germination ([3,1] & [3,2]) and fecundity
# ([1,6]) are intentionally left at zero here; they are added later as
# non-linear terms (germ1st/germ2nd/No_seeds) inside project().
# NOTE(review): the argument t is not used inside the matrix -- each call is
# simply an independent random draw.
S <- function(t){
# rows = destination stage, columns = origin stage (byrow fill below)
matrix(c( 0, 0, 0, 0, 0, 0,
t1_SEED1(1), 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, t_CS(1), si(1)[1], 0, 0,
0, 0, 0, ti(1)[1], si(1)[2], 0,
0, 0, 0, 0, ti(1)[2], si(1)[3]),
byrow = T, nrow = 6,
dimnames = list(c("SEED1", "SEED2", "CS", "SD", "SAP", "MA"),
c("SEED1", "SEED2", "CS", "SD", "SAP", "MA")))
}
# Sanity check: one random draw of the transition matrix.
t <- 1
S(t)
##-----------------------------------------------------------##
## FERTILITY ##
##-----------------------------------------------------------##
No_seeds_per_cone <- 45
# Annual cone production per mature tree. Whitebark pine cone production is
# periodic (masting roughly every ~4 years), modeled here as a cosine in time
# t with Gaussian noise; maxima and expected values are patterned on IGBST
# cone monitoring since 1980. Negative draws are truncated to 0 cones (the
# original loop set them to `value - value`, i.e. 0).
#
# t:    scalar time step (year index).
# size: number of independent draws to return.
# Returns a numeric vector of length `size`, all values >= 0.
No_cones <- function(t, size = 1){
# Vectorized draw. rnorm(size) consumes the RNG stream exactly like `size`
# sequential calls to rnorm(1), so results match the old element-wise loop
# for a given seed; pmax() performs the same truncation as the old branches.
pmax(12.5 * cos(1.5 * t) + 14 + rnorm(size, sd = 3.5), 0)
}
# Standard basis column vector selecting the SEED1 slot of the population
# vector (used to place the new seed crop in row 1).
e1 <- matrix(c(1,0,0,0,0,0))
# Seed input for year t: cones per tree times seeds per cone times the number
# of reproductively mature adults (x[6]), placed in the SEED1 slot via e1.
# Returns a 6x1 matrix.
# NOTE(review): the `size` argument is accepted but ignored -- No_cones() is
# called with a hard-coded size of 1.
No_seeds <- function(t, size = 1, x){
No_cones(t,1) * No_seeds_per_cone * x[6] * e1
}
##-----------------------------------------------------------##
## Germination ##
##-----------------------------------------------------------##
##-----------------------------------------------------------##
## Define variables assumed fixed & known ##
##-----------------------------------------------------------##
# Fixed, assumed-known caching parameters (see the Germination section).
Pfind <- 0.55 # Proportion of seeds found by nutcrackers
Pcons <- 0.3 # Proportion of seeds consumed by nutcrackers (prior to caching?)
nBirds <- 3 # No. Clark's nutcrackers in the theoretical population
SpC <- 3.7 # No. seeds per cache
##-----------------------------------------------------------##
## Define variables dependent on time vars ##
##-----------------------------------------------------------##
# Seeds available per bird: the first-year seed pool (x[1]) split evenly
# among the nBirds nutcrackers. Appears to be referenced only by the
# commented-out rCache() below.
SpB <- function(x){ # Number of seeds available to each bird
x[1]/nBirds
}
## Define reduction factors. These variables reduce
## 1) rALS decreases germination as light availability decreases
## 2) rCache increases caching propensity as seed availability increases
# Light-availability reduction factor: a decreasing logistic in LAI centered
# at LAI = 3, so germination success falls toward 0 as the canopy closes and
# toward 1 as light availability increases.
rALS <- function(x){
1/(1 + exp(2*(LAI(x)-3)))
}
# rCache <- function(x){
# 0.73/(1+ exp((31000-SpB(x))/3000))
# }
e3 <- matrix(c(0,0,1,0,0,0))
## Define germination probability
## First try, do not include the reduction factor that reduces caching as seed availability decreases.
## So we assume the proportion of seeds cached is constant and does not vary as a function of cone production
# First-year germination input to the CS stage (placed in slot 3 via e3):
# this year's seed crop, minus the consumed fraction, divided into caches of
# 3.7 seeds, discounted by the proportion retrieved by birds (1 - Pfind),
# shaded by light availability (rALS), times a stochastic SEED1 germination
# draw. Returns a 6x1 matrix.
# BUG FIX: the seed count previously read the *global* `n` instead of the
# population-vector argument `x`, so first-year germination ignored the
# current population state inside project().
germ1st <- function(t, size = 1, x){
as.vector(No_seeds(t = t, size = 1, x = x)[1])*((1-Pcons)/3.7) * (1-Pfind) * as.vector(rALS(x)) * rbeta(n = 1, shape1 = SEED1_germ_alpha, shape2= SEED1_germ_beta) * e3
}
# Variant of germ1st() that additionally scales caching propensity by
# rCache().
# NOTE(review): rCache() is commented out above (and was defined to take an
# argument), so calling this function currently errors; it is not called
# anywhere in this script.
# BUG FIX: uses the `x` argument instead of the global `n` for the seed
# count, matching the fix applied to germ1st().
germ1st_rcache <- function(t, size = 1, x){
as.vector(No_seeds(t = t, size = 1, x = x)[1])*((1-Pcons)*rCache()/3.7) * (1-Pfind) * as.vector(rALS(x)) * rbeta(n = 1, shape1 = SEED1_germ_alpha, shape2= SEED1_germ_beta) * e3
}
# Second-year germination input to the CS stage: the SEED2 pool (x[2]) in
# caches of 3.7 seeds, shaded by light availability (rALS), times a
# stochastic SEED2 germination draw; placed in the CS slot via e3.
# Returns a 6x1 matrix. The argument t is unused.
germ2nd <- function(t, size = 1, x){
as.vector(x[2]/3.7 * rALS(x)) * rbeta(n = 1, shape1 = SEED2_germ_alpha, shape2 = SEED2_germ_beta) * e3
}
##-----------------------------------------------------------##
## Function that projects pop ##
## sizes and incorporates fire ##
##-----------------------------------------------------------##
library(plyr)
# Stochastic population projection with optional stand-replacing fire.
#
# projection_time: number of years to simulate per iteration.
# n0:              starting stage-abundance vector (length 6).
# reps:            number of independent iterations.
# fire:            if TRUE, fire years are drawn from a negative binomial
#                  waiting-time distribution (mean return interval ~230 yr).
#
# Returns a list with:
#   pop_sizes      - long data frame of stage abundances by iteration and year
#   fire_intervals - (iteration, fire year) pairs across all iterations
#   LAI_track      - matrix of (iteration, year, LAI) rows
project <- function(projection_time, n0, reps = 100, fire = T){ # stochastic projection function
# that tracks stage based pop sizes
# over time for reps number of iterations
results <- stochastic_matrixes <- # NOTE(review): both names bind the same array; only `results` is used below
array(0, dim = c(projection_time, length(n0) + 1, reps))
for(j in 1:reps){ # one pass per iteration
# Incorporate FIRE
if(j == 1){
intervals <- NULL # fire years accumulated across all iterations
iteration <- NULL # iteration index matching each entry of `intervals`
LAI_tracker <- matrix(c(rep(1:reps, each = projection_time),rep(0, projection_time*reps*2)),
nrow = projection_time * reps, ncol = 3, byrow = F)
} else if(j != 1){
# self-assignments: carry the accumulators forward unchanged (no-ops)
intervals <- intervals
iteration <- iteration
LAI_tracker <- LAI_tracker
}
if(fire == TRUE){
interval <- rnbinom(n = 4, size = 1, mu = 230) %>% # waiting times between fires
cumsum(.) # cumulative sum = calendar years at which fires occur
# cumsum gives the time = t years during which fires should occur in projection
interval <- interval[-which(interval > projection_time)] #trim to only include those within the projection time
# NOTE(review): if NO drawn fire year exceeds projection_time, which()
# is empty and x[-integer(0)] selects nothing, silently discarding ALL
# fires for this iteration -- TODO confirm/fix.
intervals <- append(intervals, interval, after = length(intervals))
} else if(fire == FALSE){
intervals <- NULL
interval <- NULL
}
iteration <- append(iteration, rep(j, length(interval)),
after = length(iteration))
pops <- matrix(0, nrow = projection_time, ncol = length(n)) # NOTE(review): ncol uses the *global* `n` (length 6), not n0
for(i in 1:projection_time){ # get population
if (i == 1){
n <- n0
}else if(i != 1){
n <- n
}
t <- i # time counter
tSinceFire <- ifelse(i == 1, 1, tSinceFire)
fire_current <- ifelse(t %in% interval, TRUE, FALSE) # is this a fire year?
# LAI_tracker_each_iteraton <- matrix(0, nrow = projection_time, ncol = 2)
LAI_tracker[j*projection_time - (projection_time)+i,2:3] <- c(i, LAI(n)) # row (j-1)*projection_time + i
# Three cases:
# 1) Fire year: stand-replacing burn kills the whole population; no
#    regeneration occurs the year of the fire.
# 2) First year after a fire: intended to model seed input from outside
#    the (not entirely closed) system on the same masting schedule.
#    NOTE(review): n is all zeros after a fire, so No_seeds() (which
#    multiplies by x[6]) returns 0 here -- the intended outside seed
#    input never materializes; the commented-out branch below looks
#    like an attempted fix.
# 3) Otherwise: ordinary matrix projection plus germination/fecundity.
if(fire_current == T){
tSinceFire <- 0
pops[i,] <- c(0, 0, 0, 0, 0, 0) # stand-replacing burn, no survivors
n <- pops[i,] # Most fires go out with first snow. e.g., Romme 1982
} else if(fire_current == F & tSinceFire == 1 & t != 1){
tSinceFire <- tSinceFire + 1
pops[i,] <- t(No_seeds(size = 1, t = t, x = n))
n <- pops[i,]
} else if((fire_current == F & tSinceFire != 1) |
(fire_current == F & tSinceFire == 1 & t == 1)){
tSinceFire <- tSinceFire + 1
mat <- S(t = t)
pops[i,] <- t(mat%*%n + germ1st(t = t, size = 1, x = n) + germ2nd(t = t, size = 1, x = n) + No_seeds(t = t, size = 1, x = n)) # next population vector
n <- as.matrix(pops[i,], nrow = length(pops[i,]), ncol = 1)
# } else if(fire == F & tSinceFire == 1 & t == 2){
# tSinceFire <- tSinceFire + 1
#
# pops[i,] <- c(500000, 0, 0, 0, 0, 0)
# n <- as.matrix(pops[i,], nrow = length(pops[i,]), ncol = 1)
}
}
pops <- cbind(pops, rep(1:projection_time)) # append the year column
results[, ,j] <- pops # stack this iteration into the 3-D array
}
pop_sizes <- plyr::adply(results, 3) # 3-D array -> long data frame
colnames(pop_sizes) <- c("Iteration", "SEED1", "SEED2", "CS", "SD", "SAP", "MA", "t")
fire_intervals <- cbind(iteration, intervals) # fire-year log across iterations
results <- list(pop_sizes = pop_sizes, fire_intervals = fire_intervals, LAI_track = LAI_tracker)
return(results)
}
##-----------------------------------------------------------##
## Population projection ##
##-----------------------------------------------------------##
n <- c(62, 580 + 38, 79, 65, 91, 353) # Arbitrary starting pop size vectors
projection <- project(projection_time = 500, n0 = n, reps = 100, fire = FALSE)
pops <- gather(projection$pop_sizes, Stage, Count, - Iteration, - t)
pop_sizes <- gather(projection$pop_sizes, Stage, Count, -Iteration, -t) %>%
filter(., !Stage == "SEED1") %>% # Pop sizes in dataframe format
filter(., !Stage == "SEED2") %>% # and excluding seed numbers (most don't think of seeds)
group_by(., Iteration, t) %>% # as a part of the population, so presenting numbers as
summarise_at(., vars(Count), funs(sum)) %>% # number of living trees (i.e., post germination) is more
ungroup(.) %>% # intuitive
mutate(., Density = Count/(10000*10000))
fire_intervals <- as.data.frame(projection$fire_intervals)
ggplot(data = pop_sizes, aes(x = t, y = Density, col = Iteration)) + # plot pop sizes for all iterations.
geom_line(lwd = 1) +
theme(axis.title.x=element_text( size=18, vjust=0)) +
theme(axis.text.x=element_text(size=18)) +
theme(axis.title.y=element_text( size=18, vjust=2.75, face = "bold")) +
theme(axis.text.y=element_text(size = 18)) +
labs(x = "Years", y = expression(paste("Density (no./",m^2,")"))) #+
# theme(legend.position="none")
## Plot of projection iteration 1
projection1 <- pop_sizes %>%
filter(., Iteration == 1)
ggplot(data = projection1, aes(x = t, y = Count)) + # plot pop sizes for all iterations.
geom_line(lwd = 1) +
theme(legend.position="none") +
theme(axis.title.x=element_text( size=18, vjust=0)) +
theme(axis.text.x=element_text(size=18)) +
theme(axis.title.y=element_text( size=18, vjust=2.75, face = "bold")) +
theme(axis.text.y=element_text(size = 18))
(mean_density <- pop_sizes %>%
group_by(., Iteration) %>%
filter(., Count == max(Count)) %>%
ungroup(.) %>%
summarise(., mean(Density)))
(max_density <- pop_sizes %>%
group_by(., Iteration) %>%
filter(., Count == max(Count)) %>%
ungroup(.) %>%
summarise(., max(Density)))
## Plot of projection iteration 1 for t = 30 years
projection1t30 <- projection1 %>%
filter(., t < 3)
ggplot(data = projection1t30, aes(x = t, y = Count)) + # plot pop sizes for all iterations.
geom_point() +
geom_line(lwd = 1) +
theme(legend.position="none") +
theme(axis.title.x=element_text( size=18, vjust=0)) +
theme(axis.text.x=element_text(size=18)) +
theme(axis.title.y=element_text( size=18, vjust=2.75, face = "bold")) +
theme(axis.text.y=element_text(size = 18))
hist(projection$Count[projection$t == 500], breaks = 20,
main = "", xlab = "Population size at time = 500 years")
##-----------------------------------------------------------##
## LAI Diagnostics ##
##-----------------------------------------------------------##
LAI_data <- data.frame(DBH = c(0, 0, 0, 2.05, 12.5, 37),
LA = c(0, 0, alpha1(), alpha2()*2.05^alpha3(), alpha2()*12.5^alpha3(), alpha2()*37^alpha3()))
ggplot(data = LAI_data, aes(x = DBH, y = LA))+
geom_line()+
geom_point()
LAI_values <- as.data.frame(projection[[3]]) %>%
dplyr::select(., Iteration = V1, Time = V2, LAI = V3) %>%
group_by(., Time) %>%
summarise_at(., vars(LAI), funs(mean, min, max)) %>%
dplyr::select(.,Time, LAI = mean, min, max)
ggplot(data = LAI_values, aes(x = Time, y = LAI))+
geom_line() +
geom_ribbon(data = LAI_values, aes(ymin = min, ymax = max), alpha = 0.5)
# ggplot(predictions, aes(x = time, y = prob)) +
# # geom_point() +
# geom_ribbon(aes(ymin = lcl, ymax = ucl), alpha = 0.5, fill = "#ef8a62") +
# geom_line(size = 0.5) +
# xlab("Year") +
# ylab("Annual survival rate") +
# scale_y_continuous(limits = c(0.75,1)) +
# scale_x_continuous(limits = c(1990, 2017))
##-----------------------------------------------------------##
## Get stochastic lambda and ##
## elasticities ##
##-----------------------------------------------------------##
## Create a list of 10,000 matrices to use in stochastic lambda
## and elasticity analyses
reps <- 10000
## Build the list of `reps` stochastic transition matrices directly, one
## random draw of S() per element. (This replaces the previous version that
## filled a 6 x 6 x reps array, copied every slice into a list element, and
## then deleted the array -- same result, one pass, no temporaries.)
## Note: S(i) ignores its argument for the matrix entries, so each call is
## simply an independent stochastic draw.
A <- lapply(seq_len(reps), function(i) S(i))
## Estimate elasticities
stoch.elast<- stoch.sens(A, tlimit = 500)
stoch.elast
## Estimate stochastic lambda
sgr <- stoch.growth.rate(A)
sgr_real <- exp(sgr$approx)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.iot_operations.R
\name{delete_certificate}
\alias{delete_certificate}
\title{Deletes the specified certificate}
\usage{
delete_certificate(certificateId, forceDelete = NULL)
}
\arguments{
\item{certificateId}{[required] The ID of the certificate. (The last part of the certificate ARN contains the certificate ID.)}
\item{forceDelete}{Forces a certificate request to be deleted.}
}
\description{
Deletes the specified certificate.
}
\details{
A certificate cannot be deleted if it has a policy attached to it or if its status is set to ACTIVE. To delete a certificate, first use the DetachPrincipalPolicy API to detach all policies. Next, use the UpdateCertificate API to set the certificate to the INACTIVE status.
}
\section{Accepted Parameters}{
\preformatted{delete_certificate(
certificateId = "string",
forceDelete = TRUE|FALSE
)
}
}
| /service/paws.iot/man/delete_certificate.Rd | permissive | CR-Mercado/paws | R | false | true | 929 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.iot_operations.R
\name{delete_certificate}
\alias{delete_certificate}
\title{Deletes the specified certificate}
\usage{
delete_certificate(certificateId, forceDelete = NULL)
}
\arguments{
\item{certificateId}{[required] The ID of the certificate. (The last part of the certificate ARN contains the certificate ID.)}
\item{forceDelete}{Forces a certificate request to be deleted.}
}
\description{
Deletes the specified certificate.
}
\details{
A certificate cannot be deleted if it has a policy attached to it or if its status is set to ACTIVE. To delete a certificate, first use the DetachPrincipalPolicy API to detach all policies. Next, use the UpdateCertificate API to set the certificate to the INACTIVE status.
}
\section{Accepted Parameters}{
\preformatted{delete_certificate(
certificateId = "string",
forceDelete = TRUE|FALSE
)
}
}
|
# Plot 1 of the exploratory-data-analysis assignment: histogram of household
# global active power, saved as a 480x480 PNG ("plot1.png") in the working
# directory.
# Assumes `df` (with a numeric Global_active_power column) already exists in
# the calling environment -- this function does not load the data itself.
plot1 <- function() {
hist(df$Global_active_power, main = paste("Global Active Power"), col="red", xlab="Global Active Power (kilowatts)")
# dev.copy() re-renders the on-screen plot into the png device before closing it
dev.copy(png, file="plot1.png", width=480, height=480)
dev.off()
cat("Plot1.png has been saved in", getwd())
}
plot1() | /plot1.R | no_license | Saurabh2388/Exploratory-Data-Analysis-pt1 | R | false | false | 295 | r | plot1 <- function() {
hist(df$Global_active_power, main = paste("Global Active Power"), col="red", xlab="Global Active Power (kilowatts)")
dev.copy(png, file="plot1.png", width=480, height=480)
dev.off()
cat("Plot1.png has been saved in", getwd())
}
plot1() |
#10222015--------------------------------------------------------------------------------
library(readr)
library(xgboost)
set.seed(28071993)
cat("reading the train and test data\n")
train <- read_csv("D:/kaggle/Forecasting/DATA/train.csv")
test <- read_csv("D:/kaggle/Forecasting/DATA/test.csv")
store <- read_csv("D:/kaggle/Forecasting/DATA/store.csv")
# removing the date column (since elements are extracted) and also StateHoliday which has a lot of NAs (may add it back in later)
train <- merge(train,store)
test <- merge(test,store)
# There are some NAs in the integer columns so conversion to zero
train[is.na(train)] <- -1
test[is.na(test)] <- -1
names(train)
str(train)
summary(train)
names(test)
str(test)
summary(test)
# looking at only stores that were open in the train set
# may change this later
train <- train[ which(train$Open=='1'),]
train <- train[ which(train$Sales!='0'),]
# separating out the elements of the date column for the train set
train$month <- as.integer(format(train$Date, "%m"))
train$year <- as.integer(format(train$Date, "%y"))
train$day <- as.integer(format(train$Date, "%d"))
# removing the date column (since elements are extracted) and also StateHoliday which has a lot of NAs (may add it back in later)
train <- train[,-c(3,8)]
# separating out the elements of the date column for the test set
test$month <- as.integer(format(test$Date, "%m"))
test$year <- as.integer(format(test$Date, "%y"))
test$day <- as.integer(format(test$Date, "%d"))
# removing the date column (since elements are extracted) and also StateHoliday which has a lot of NAs (may add it back in later)
test <- test[,-c(4,7)]
feature.names <- names(train)[c(1,2,5:19)]
feature.names
cat("assuming text variables are categorical & replacing them with numeric ids\n")
for (f in feature.names) {
if (class(train[[f]])=="character") {
levels <- unique(c(train[[f]], test[[f]]))
train[[f]] <- as.integer(factor(train[[f]], levels=levels))
test[[f]] <- as.integer(factor(test[[f]], levels=levels))
}
}
# Split off the response and hold out 10% of rows for validation; xgboost
# trains on the log1p-scaled Sales, matching the RMPSE metric below.
# NOTE(review): caret (createDataPartition) is never library()'d in this
# script -- load it before running.
response <- train$Sales
train$Sales <- NULL
# BUG FIX: createDataPartition() was previously called on the undefined name
# `train_target`; the stratification variable is `response` (as in the
# 10232015_1 section further down).
split <- createDataPartition(response, p = 0.9, list = FALSE)
response_val <- response[-split]
response_train <- response[split]
tmp <- train[,feature.names]
dval <- xgb.DMatrix(data=data.matrix(tmp[-split, ]), label = log(response_val+1))
dtrain <- xgb.DMatrix(data=data.matrix(tmp[split, ]), label = log(response_train+1))
watchlist <- list(val = dval, train = dtrain)
# Custom xgboost evaluation metric: root mean squared percentage error
# computed on the original sales scale (labels and predictions arrive
# log1p-transformed, so both are back-transformed with exp(.) - 1 first).
RMPSE <- function(preds, dtrain) {
obs <- exp(as.numeric(getinfo(dtrain, "label"))) - 1
fit <- exp(as.numeric(preds)) - 1
rmspe <- sqrt(mean((fit / obs - 1)^2))
return(list(metric = "RMPSE", value = rmspe))
}
param <- list( objective = "reg:linear",
booster = "gbtree",
eta = 0.01,
max_depth = 20,
subsample = 0.7,
colsample_bytree = 0.7
# alpha = 0.0001,
# lambda = 1
)
cl <- makeCluster(2); registerDoParallel(cl)
clf <- xgb.train( params = param,
data = dtrain,
nrounds = 1000, #300, #280, #125, #250, # changed from 300
verbose = 1,
nthread = 2,
watchlist = watchlist,
maximize = FALSE,
feval=RMPSE
)
pred <- exp(predict(clf, data.matrix(test[,feature.names]))) -1
submission <- data.frame(Id=test$Id, Sales=pred)
write_csv(submission, "D:/kaggle/Forecasting/submission/1022015.csv")
######################################################################################
#10232015------------------------------------------------------------------------------
library(data.table)
library(h2o)
train <- fread("../input/train.csv",stringsAsFactors = T)
test <- fread("../input/test.csv",stringsAsFactors = T)
store <- fread("../input/store.csv",stringsAsFactors = T)
train <- train[Sales > 0,] ## We are not judged on 0 sales records in test set
train <- merge(train,store,by="Store")
test <- merge(test,store,by="Store")
train[,Date:=as.Date(Date)]
test[,Date:=as.Date(Date)]
# seperating out the elements of the date column for the train set
train[,month:=as.integer(format(Date, "%m"))]
train[,year:=as.integer(format(Date, "%y"))]
train[,Store:=as.factor(as.numeric(Store))]
test[,month:=as.integer(format(Date, "%m"))]
test[,year:=as.integer(format(Date, "%y"))]
test[,Store:=as.factor(as.numeric(Store))]
train[,logSales:=log1p(Sales)]
h2o.init(nthreads=-1,max_mem_size='6G')
trainHex<-as.h2o(train)
features<-colnames(train)[!(colnames(train) %in% c("Id","Date","Sales","logSales","Customers"))]
rfHex <- h2o.randomForest(x=features,
y="logSales",
ntrees = 100,
max_depth = 30,
nbins_cats = 1115, ## allow it to fit store ID
training_frame=trainHex)
testHex<-as.h2o(test)
predictions<-as.data.frame(h2o.predict(rfHex,testHex))
pred <- expm1(predictions[,1])
submission <- data.frame(Id=test$Id, Sales=pred)
write_csv(submission, "D:/kaggle/Forecasting/submission/1023015.csv")
# Root mean squared percentage error on log1p-scale inputs: y and yhat are
# back-transformed with exp(.) - 1, then the error is
# sqrt(mean(((y - yhat) / y)^2)).
# Vectorized; replaces the previous element-wise accumulation loop (same
# result, no scalar iteration).
metric <- function(y, yhat) {
y <- exp(y) - 1
yhat <- exp(yhat) - 1
sqrt(mean(((y - yhat) / y)^2))
}
#######################################################################################
#10232015_1-----------------------------------------------------------------------------
library(readr); library(xgboost); library(data.table); require(sqldf)
set.seed(28071993)
train <- fread("D:/kaggle/Forecasting/DATA/train.csv")
store <- fread("D:/kaggle/Forecasting/DATA/store.csv")
test <- fread("D:/kaggle/Forecasting/DATA/test.csv")
train <- merge(train,store, by = "Store")
test <- merge(test,store, by = "Store")
setdiff(names(train), names(test))
test[, `:=`(Sales = "NA", Customers = "NA" )]
train[, `:=`(Id = 1:nrow(train) )]
tmp <- rbind(train, test)
tmp[, Date := as.Date(Date)]
tmp[, `:=`( month = as.integer(format(Date, "%m")),
year = as.integer(format(Date, "%y")),
day = as.integer(format(Date, "%d"))
)]
factors <- names(tmp)[!(names(tmp) %in% c("Date", "Sales", "Customers",
"CompetitionDistance", "Id"))]
tmp <- data.frame(tmp)
tmp_original <- tmp
for( i in factors){
tmp[, i] <- as.factor(tmp[, i])
print(paste(i, ":", length(table(tmp[i]))))
}
##########################################################################################
#using owen`s Amazon code approach
# Two-way count feature (Owen Zhang's Amazon-competition approach): for each
# row of th2, the number of rows (restricted to filter == TRUE) sharing the
# same (vn1, vn2) value pair. Pairs absent from the filtered data get 0.
# th2: data frame; vn1, vn2: column names; filter: logical mask (recycled).
# Returns an integer vector with one count per row of th2.
# Requires the sqldf package (loaded above via require(sqldf)).
my.f2cnt <- function(th2, vn1, vn2, filter=TRUE) {
df <- data.frame(f1=th2[,vn1], f2=th2[,vn2], filter=filter)
sum1 <- sqldf("select f1, f2, count(*) as cnt
from df
where filter=1
group by 1,2")
tmp <- sqldf("select b.cnt
from df a left join sum1 b
on a.f1=b.f1 and a.f2=b.f2")
tmp$cnt[is.na(tmp$cnt)] <- 0
return(tmp$cnt)
}
#3 way count
# Three-way count feature, analogous to my.f2cnt(): for each row of th2, the
# number of rows (restricted to filter == TRUE) sharing the same
# (vn1, vn2, vn3) value triple; missing triples get 0.
# Returns an integer vector with one count per row of th2 (requires sqldf).
my.f3cnt<-function(th2, vn1, vn2, vn3, filter=TRUE) {
df<-data.frame(f1=th2[,vn1], f2=th2[,vn2], f3=th2[, vn3], filter=filter)
sum1<-sqldf("select f1, f2, f3, count(*) as cnt
from df
where filter=1
group by 1,2, 3")
tmp<-sqldf("select b.cnt
from df a left join sum1 b
on a.f1=b.f1 and a.f2=b.f2 and a.f3=b.f3")
tmp$cnt[is.na(tmp$cnt)]<-0
return(tmp$cnt)
}
#####################################################################################################
#2 way count--------------------------------------------------------------------
nms <- combn(factors, 2)
dim(nms)
nms_df <- data.frame(nms)
len = length(names(nms_df))
for (i in 1:len) {
nms_df[, i] <- as.character(nms_df[, i])
}
tmp_count <- data.frame(id = 1:dim(tmp)[1])
for(i in 1:dim(nms_df)[2]){
print(((i / dim(nms_df)[2]) * 100 ))
tmp_count[, paste(i, "_two", sep="")] <- my.f2cnt(th2 = tmp,
vn1 = nms_df[1,i],
vn2 = nms_df[2,i] )
}
#3 way count--------------------------------------------------------------------
start <- Sys.time()
nms <- combn(factors, 3)
dim(nms)
nms_df <- data.frame(nms)
len = length(names(nms_df))
for (i in 1:len) {
print(paste0(( i / len) *100, "%"))
nms_df[, i] <- as.character(nms_df[, i])
}
for(i in 1:dim(nms_df)[2]){
print((i / dim(nms_df)[2]) * 100)
tmp_count[, paste(i, "_three", sep="")] <- my.f3cnt(th2 = tmp,
vn1 = nms_df[1,i],
vn2 = nms_df[2,i],
vn3 = nms_df[3,i])
}
time_taken <- Sys.time() - start
tmp_new = cbind.data.frame(tmp_original, tmp_count)
###############################################################################
train <- tmp_new[1:1017209, ]
test <- tmp_new[(1017209+1) : nrow(tmp_new), ]
rm(nms); rm(nms_df); rm(store); rm(tmp); rm(factor_col)
rm(factors); rm(i); rm(len); rm(removecols); rm(tmp_count); rm(tmp_original)
rm(tmp_new)
gc()
removecols <- c("Id","Date","Sales","Customers")
feature.names <- colnames(train)[!(colnames(train) %in% removecols)]
train = setDT(train)
train <- train[Open == 1]
train <- train[Sales != 0]
train <- as.data.frame(train)
#train <- train[ which(train$Open=='1'),]
#train <- train[ which(train$Sales!='0'),]
train[is.na(train)] <- 0
test[is.na(test)] <- 1
for (f in names(train)) {
if (class(train[[f]])=="character") {
levels <- unique(c(train[[f]], test[[f]]))
train[[f]] <- as.integer(factor(train[[f]], levels=levels))
test[[f]] <- as.integer(factor(test[[f]], levels=levels))
}
}
response <- train$Sales
split = createDataPartition(response, p = 0.9, list = F)
response_val <- response[-split]
response_train <- response[split]
tmp <- train[,feature.names]
dval <- xgb.DMatrix(data=data.matrix(tmp[-split, ]), label = (response_val))
dtrain <- xgb.DMatrix(data=data.matrix(tmp[split, ]), label = (response_train))
watchlist<-list(val=dval,train=dtrain)
param <- list( objective = "reg:linear",
booster = "gbtree",
eta = 0.02, # 0.06, #0.01,
max_depth = 10, #changed from default of 8
subsample = 0.9, # 0.7
colsample_bytree = 0.7 # 0.7
#num_parallel_tree = 2
# alpha = 0.0001,
# lambda = 1
)
# Custom xgboost evaluation metric: root mean squared percentage error
# computed on the original sales scale (labels and predictions arrive
# log-transformed, so both are back-transformed with exp(.) - 1 first).
RMPSE <- function(preds, dtrain) {
obs <- exp(as.numeric(getinfo(dtrain, "label"))) - 1
fit <- exp(as.numeric(preds)) - 1
rmspe <- sqrt(mean((fit / obs - 1)^2))
return(list(metric = "RMPSE", value = rmspe))
}
gc()
library(doParallel)
cl <- makeCluster(2); registerDoParallel(cl)
clf <- xgb.train( params = param,
data = dtrain,
nrounds = 3000, #300, #280, #125, #250, # changed from 300
verbose = 2,
early.stop.round = 600,
watchlist = watchlist,
maximize = T,
feval=RMPSE
)
pred1 <- exp(predict(clf, data.matrix(test[,feature.names]))) -1
submission <- data.frame(Id=test$Id, Sales=pred1)
write_csv(submission, "10252015.csv")
#10252015_1-------------------------------------------------------------------------------
# changed n_rounds to 15000, LB score decreased | /Forecasting/working_file.R | no_license | ronroc/kaggle | R | false | false | 12,884 | r | #10222015--------------------------------------------------------------------------------
library(readr)
library(xgboost)
set.seed(28071993)
cat("reading the train and test data\n")
train <- read_csv("D:/kaggle/Forecasting/DATA/train.csv")
test <- read_csv("D:/kaggle/Forecasting/DATA/test.csv")
store <- read_csv("D:/kaggle/Forecasting/DATA/store.csv")
# removing the date column (since elements are extracted) and also StateHoliday which has a lot of NAs (may add it back in later)
train <- merge(train,store)
test <- merge(test,store)
# There are some NAs in the integer columns so conversion to zero
train[is.na(train)] <- -1
test[is.na(test)] <- -1
names(train)
str(train)
summary(train)
names(test)
str(test)
summary(test)
# looking at only stores that were open in the train set
# may change this later
train <- train[ which(train$Open=='1'),]
train <- train[ which(train$Sales!='0'),]
# seperating out the elements of the date column for the train set
train$month <- as.integer(format(train$Date, "%m"))
train$year <- as.integer(format(train$Date, "%y"))
train$day <- as.integer(format(train$Date, "%d"))
# removing the date column (since elements are extracted) and also StateHoliday which has a lot of NAs (may add it back in later)
train <- train[,-c(3,8)]
# seperating out the elements of the date column for the test set
test$month <- as.integer(format(test$Date, "%m"))
test$year <- as.integer(format(test$Date, "%y"))
test$day <- as.integer(format(test$Date, "%d"))
# removing the date column (since elements are extracted) and also StateHoliday which has a lot of NAs (may add it back in later)
test <- test[,-c(4,7)]
feature.names <- names(train)[c(1,2,5:19)]
feature.names
cat("assuming text variables are categorical & replacing them with numeric ids\n")
for (f in feature.names) {
if (class(train[[f]])=="character") {
levels <- unique(c(train[[f]], test[[f]]))
train[[f]] <- as.integer(factor(train[[f]], levels=levels))
test[[f]] <- as.integer(factor(test[[f]], levels=levels))
}
}
response <- train$Sales
train$Sales <- NULL
split = createDataPartition(train_target, p = 0.9, list = F)
response_val <- response[-split]
response_train <- response[split]
tmp <- train[,feature.names]
dval <- xgb.DMatrix(data=data.matrix(tmp[-split, ]), label = log(response_val+1))
dtrain <- xgb.DMatrix(data=data.matrix(tmp[split, ]), label = log(response_train+1))
watchlist<-list(val=dval,train=dtrain)
RMPSE<- function(preds, dtrain) {
labels <- getinfo(dtrain, "label")
elab <- exp(as.numeric(labels))-1
epreds <- exp(as.numeric(preds))-1
err <- sqrt(mean((epreds/elab-1)^2))
return(list(metric = "RMPSE", value = err))
}
param <- list( objective = "reg:linear",
booster = "gbtree",
eta = 0.01,
max_depth = 20,
subsample = 0.7,
colsample_bytree = 0.7
# alpha = 0.0001,
# lambda = 1
)
cl <- makeCluster(2); registerDoParallel(cl)
clf <- xgb.train( params = param,
data = dtrain,
nrounds = 1000, #300, #280, #125, #250, # changed from 300
verbose = 1,
nthread = 2,
watchlist = watchlist,
maximize = FALSE,
feval=RMPSE
)
pred <- exp(predict(clf, data.matrix(test[,feature.names]))) -1
submission <- data.frame(Id=test$Id, Sales=pred)
write_csv(submission, "D:/kaggle/Forecasting/submission/1022015.csv")
######################################################################################
#10232015------------------------------------------------------------------------------
library(data.table)
library(h2o)
train <- fread("../input/train.csv",stringsAsFactors = T)
test <- fread("../input/test.csv",stringsAsFactors = T)
store <- fread("../input/store.csv",stringsAsFactors = T)
train <- train[Sales > 0,] ## We are not judged on 0 sales records in test set
train <- merge(train,store,by="Store")
test <- merge(test,store,by="Store")
train[,Date:=as.Date(Date)]
test[,Date:=as.Date(Date)]
# seperating out the elements of the date column for the train set
train[,month:=as.integer(format(Date, "%m"))]
train[,year:=as.integer(format(Date, "%y"))]
train[,Store:=as.factor(as.numeric(Store))]
test[,month:=as.integer(format(Date, "%m"))]
test[,year:=as.integer(format(Date, "%y"))]
test[,Store:=as.factor(as.numeric(Store))]
train[,logSales:=log1p(Sales)]
h2o.init(nthreads=-1,max_mem_size='6G')
trainHex<-as.h2o(train)
features<-colnames(train)[!(colnames(train) %in% c("Id","Date","Sales","logSales","Customers"))]
rfHex <- h2o.randomForest(x=features,
y="logSales",
ntrees = 100,
max_depth = 30,
nbins_cats = 1115, ## allow it to fit store ID
training_frame=trainHex)
testHex<-as.h2o(test)
predictions<-as.data.frame(h2o.predict(rfHex,testHex))
pred <- expm1(predictions[,1])
submission <- data.frame(Id=test$Id, Sales=pred)
write_csv(submission, "D:/kaggle/Forecasting/submission/1023015.csv")
metric=function(y,yhat)
{
y=exp(y)-1
yhat=exp(yhat)-1
sum_squared=0
for( i in c(1:length(yhat)))
{
prop_squared=((y[i]-yhat[i])/y[i])^2
sum_squared=sum_squared+prop_squared
}
err=sqrt((1/length(yhat))*sum_squared)
return(err)
}
#######################################################################################
#10232015_1-----------------------------------------------------------------------------
library(readr); library(xgboost); library(data.table); require(sqldf)
set.seed(28071993)
train <- fread("D:/kaggle/Forecasting/DATA/train.csv")
store <- fread("D:/kaggle/Forecasting/DATA/store.csv")
test <- fread("D:/kaggle/Forecasting/DATA/test.csv")
train <- merge(train,store, by = "Store")
test <- merge(test,store, by = "Store")
setdiff(names(train), names(test))
test[, `:=`(Sales = "NA", Customers = "NA" )]
train[, `:=`(Id = 1:nrow(train) )]
tmp <- rbind(train, test)
tmp[, Date := as.Date(Date)]
tmp[, `:=`( month = as.integer(format(Date, "%m")),
year = as.integer(format(Date, "%y")),
day = as.integer(format(Date, "%d"))
)]
factors <- names(tmp)[!(names(tmp) %in% c("Date", "Sales", "Customers",
"CompetitionDistance", "Id"))]
tmp <- data.frame(tmp)
tmp_original <- tmp
for( i in factors){
tmp[, i] <- as.factor(tmp[, i])
print(paste(i, ":", length(table(tmp[i]))))
}
##########################################################################################
#using owen`s Amazon code approach
my.f2cnt <- function(th2, vn1, vn2, filter=TRUE) {
df <- data.frame(f1=th2[,vn1], f2=th2[,vn2], filter=filter)
sum1 <- sqldf("select f1, f2, count(*) as cnt
from df
where filter=1
group by 1,2")
tmp <- sqldf("select b.cnt
from df a left join sum1 b
on a.f1=b.f1 and a.f2=b.f2")
tmp$cnt[is.na(tmp$cnt)] <- 0
return(tmp$cnt)
}
#3 way count
my.f3cnt<-function(th2, vn1, vn2, vn3, filter=TRUE) {
df<-data.frame(f1=th2[,vn1], f2=th2[,vn2], f3=th2[, vn3], filter=filter)
sum1<-sqldf("select f1, f2, f3, count(*) as cnt
from df
where filter=1
group by 1,2, 3")
tmp<-sqldf("select b.cnt
from df a left join sum1 b
on a.f1=b.f1 and a.f2=b.f2 and a.f3=b.f3")
tmp$cnt[is.na(tmp$cnt)]<-0
return(tmp$cnt)
}
#####################################################################################################
#2 way count--------------------------------------------------------------------
nms <- combn(factors, 2)
dim(nms)
nms_df <- data.frame(nms)
len = length(names(nms_df))
for (i in 1:len) {
nms_df[, i] <- as.character(nms_df[, i])
}
tmp_count <- data.frame(id = 1:dim(tmp)[1])
for(i in 1:dim(nms_df)[2]){
print(((i / dim(nms_df)[2]) * 100 ))
tmp_count[, paste(i, "_two", sep="")] <- my.f2cnt(th2 = tmp,
vn1 = nms_df[1,i],
vn2 = nms_df[2,i] )
}
#3 way count--------------------------------------------------------------------
start <- Sys.time()
nms <- combn(factors, 3)
dim(nms)
nms_df <- data.frame(nms)
len = length(names(nms_df))
for (i in 1:len) {
print(paste0(( i / len) *100, "%"))
nms_df[, i] <- as.character(nms_df[, i])
}
for(i in 1:dim(nms_df)[2]){
print((i / dim(nms_df)[2]) * 100)
tmp_count[, paste(i, "_three", sep="")] <- my.f3cnt(th2 = tmp,
vn1 = nms_df[1,i],
vn2 = nms_df[2,i],
vn3 = nms_df[3,i])
}
time_taken <- Sys.time() - start
tmp_new = cbind.data.frame(tmp_original, tmp_count)
###############################################################################
train <- tmp_new[1:1017209, ]
test <- tmp_new[(1017209+1) : nrow(tmp_new), ]
rm(nms); rm(nms_df); rm(store); rm(tmp); rm(factor_col)
rm(factors); rm(i); rm(len); rm(removecols); rm(tmp_count); rm(tmp_original)
rm(tmp_new)
gc()
removecols <- c("Id","Date","Sales","Customers")
feature.names <- colnames(train)[!(colnames(train) %in% removecols)]
train = setDT(train)
train <- train[Open == 1]
train <- train[Sales != 0]
train <- as.data.frame(train)
#train <- train[ which(train$Open=='1'),]
#train <- train[ which(train$Sales!='0'),]
train[is.na(train)] <- 0
test[is.na(test)] <- 1
for (f in names(train)) {
if (class(train[[f]])=="character") {
levels <- unique(c(train[[f]], test[[f]]))
train[[f]] <- as.integer(factor(train[[f]], levels=levels))
test[[f]] <- as.integer(factor(test[[f]], levels=levels))
}
}
response <- train$Sales
split = createDataPartition(response, p = 0.9, list = F)
response_val <- response[-split]
response_train <- response[split]
tmp <- train[,feature.names]
dval <- xgb.DMatrix(data=data.matrix(tmp[-split, ]), label = (response_val))
dtrain <- xgb.DMatrix(data=data.matrix(tmp[split, ]), label = (response_train))
watchlist<-list(val=dval,train=dtrain)
param <- list( objective = "reg:linear",
booster = "gbtree",
eta = 0.02, # 0.06, #0.01,
max_depth = 10, #changed from default of 8
subsample = 0.9, # 0.7
colsample_bytree = 0.7 # 0.7
#num_parallel_tree = 2
# alpha = 0.0001,
# lambda = 1
)
RMPSE<- function(preds, dtrain) {
labels <- getinfo(dtrain, "label")
elab <- exp(as.numeric(labels))-1
epreds <- exp(as.numeric(preds))-1
err <- sqrt(mean((epreds/elab-1)^2))
return(list(metric = "RMPSE", value = err))
}
gc()
library(doParallel)
cl <- makeCluster(2); registerDoParallel(cl)
clf <- xgb.train( params = param,
data = dtrain,
nrounds = 3000, #300, #280, #125, #250, # changed from 300
verbose = 2,
early.stop.round = 600,
watchlist = watchlist,
maximize = T,
feval=RMPSE
)
pred1 <- exp(predict(clf, data.matrix(test[,feature.names]))) -1
submission <- data.frame(Id=test$Id, Sales=pred1)
write_csv(submission, "10252015.csv")
#10252015_1-------------------------------------------------------------------------------
# changed n_rounds to 15000, LB score decreased |
##1/11/18
library(RColorBrewer)
library(gplots)
data <- read.table("data/scaled_difference_urine5_sorted2.txt", header=TRUE)
rnames <- data[,1] # assign labels in column 1 to "rnames"
mat_data <- data.matrix(data[,2:ncol(data)]) # transform column 2-5 into a matrix
rownames(mat_data) <- rnames ##attach row names to dataset
col_breaks = c(seq(-1,0,length=100), # for red
seq(0.001,0.08,length=100), # for yellow
seq(0.81,1,length=100))
my_palette <- colorRampPalette(c("red", "yellow", "green", "blue", "violet"))(n = 999)
pdf(file=sprintf("data/heatmap_urine.pdf"), width=9)
heatmap.2(mat_data, Rowv=FALSE, dendrogram = "none", col=my_palette, trace="none", density.info=c("none"), offsetRow=-45, key.title=NA, lhei=c(1.5,5), margins=c(5,5), adjCol=c(0.5, 1), srtCol=360)
mtext("Taxa", at=0.02, line=-4)
mtext("USD", side=2, at=1.05, line=1.5)
mtext("Healthy", side=4, at=1.05, line=-31)
dev.off()
data <- read.table("data/scaled_difference_stool5_sorted2.txt", header=TRUE)
rnames <- data[,1] # assign labels in column 1 to "rnames"
mat_data <- data.matrix(data[,2:ncol(data)]) # transform column 2-5 into a matrix
rownames(mat_data) <- rnames ##attach row names to dataset
col_breaks = c(seq(-1,0,length=100), # for red
seq(0.001,0.08,length=100), # for yellow
seq(0.81,1,length=100))
my_palette <- colorRampPalette(c("red", "yellow", "green", "blue", "violet"))(n = 999)
pdf(file=sprintf("data/heatmap_stool.pdf"), width=9)
heatmap.2(mat_data, Rowv=FALSE, dendrogram = "none", col=my_palette, trace="none", density.info=c("none"), offsetRow=-45, key.title=NA, lhei=c(1.5,5), margins=c(5,5), adjCol=c(0.5, 1), srtCol=360)
mtext("Taxa", at=0.02, line=-4)
mtext("USD", side=2, at=1.05, line=1.5)
mtext("Healthy", side=4, at=1.05, line=-31)
dev.off()
data <- read.table("data/stone_relabun_heatmap3.txt", header=TRUE)
rnames <- data[,1] # assign labels in column 1 to "rnames"
mat_data <- data.matrix(data[,2:ncol(data)]) # transform column 2-5 into a matrix
rownames(mat_data) <- rnames ##attach row names to dataset
col_breaks = c(seq(-1,0,length=100), # for red
seq(0.001,0.08,length=100), # for yellow
seq(0.81,1,length=100))
my_palette <- colorRampPalette(c("red", "yellow", "green", "blue", "violet"))(n = 999)
mat_data <- cbind(mat_data, mat_data)
pdf(file=sprintf("data/heatmap_stone.pdf"), width=9)
heatmap.2(mat_data, Rowv=FALSE, dendrogram = "none", col=my_palette, trace="none", density.info=c("none"), offsetRow=-45, key.title=NA, lhei=c(1.5,5), margins=c(5,5), adjCol=c(0.5, 1), srtCol=360, labCol="")
mtext("OTU", at=0.02, line=-4)
mtext("Less Abundant", side=2, at=1.01, line=1.5)
mtext("More Abundant", side=4, at=1.01, line=-31)
dev.off()
| /scripts/heatmap.R | no_license | amill017/USD_metaanalysis_2020 | R | false | false | 2,799 | r | ##1/11/18
library(RColorBrewer)
library(gplots)
data <- read.table("data/scaled_difference_urine5_sorted2.txt", header=TRUE)
rnames <- data[,1] # assign labels in column 1 to "rnames"
mat_data <- data.matrix(data[,2:ncol(data)]) # transform column 2-5 into a matrix
rownames(mat_data) <- rnames ##attach row names to dataset
col_breaks = c(seq(-1,0,length=100), # for red
seq(0.001,0.08,length=100), # for yellow
seq(0.81,1,length=100))
my_palette <- colorRampPalette(c("red", "yellow", "green", "blue", "violet"))(n = 999)
pdf(file=sprintf("data/heatmap_urine.pdf"), width=9)
heatmap.2(mat_data, Rowv=FALSE, dendrogram = "none", col=my_palette, trace="none", density.info=c("none"), offsetRow=-45, key.title=NA, lhei=c(1.5,5), margins=c(5,5), adjCol=c(0.5, 1), srtCol=360)
mtext("Taxa", at=0.02, line=-4)
mtext("USD", side=2, at=1.05, line=1.5)
mtext("Healthy", side=4, at=1.05, line=-31)
dev.off()
data <- read.table("data/scaled_difference_stool5_sorted2.txt", header=TRUE)
rnames <- data[,1] # assign labels in column 1 to "rnames"
mat_data <- data.matrix(data[,2:ncol(data)]) # transform column 2-5 into a matrix
rownames(mat_data) <- rnames ##attach row names to dataset
col_breaks = c(seq(-1,0,length=100), # for red
seq(0.001,0.08,length=100), # for yellow
seq(0.81,1,length=100))
my_palette <- colorRampPalette(c("red", "yellow", "green", "blue", "violet"))(n = 999)
pdf(file=sprintf("data/heatmap_stool.pdf"), width=9)
heatmap.2(mat_data, Rowv=FALSE, dendrogram = "none", col=my_palette, trace="none", density.info=c("none"), offsetRow=-45, key.title=NA, lhei=c(1.5,5), margins=c(5,5), adjCol=c(0.5, 1), srtCol=360)
mtext("Taxa", at=0.02, line=-4)
mtext("USD", side=2, at=1.05, line=1.5)
mtext("Healthy", side=4, at=1.05, line=-31)
dev.off()
data <- read.table("data/stone_relabun_heatmap3.txt", header=TRUE)
rnames <- data[,1] # assign labels in column 1 to "rnames"
mat_data <- data.matrix(data[,2:ncol(data)]) # transform column 2-5 into a matrix
rownames(mat_data) <- rnames ##attach row names to dataset
col_breaks = c(seq(-1,0,length=100), # for red
seq(0.001,0.08,length=100), # for yellow
seq(0.81,1,length=100))
my_palette <- colorRampPalette(c("red", "yellow", "green", "blue", "violet"))(n = 999)
mat_data <- cbind(mat_data, mat_data)
pdf(file=sprintf("data/heatmap_stone.pdf"), width=9)
heatmap.2(mat_data, Rowv=FALSE, dendrogram = "none", col=my_palette, trace="none", density.info=c("none"), offsetRow=-45, key.title=NA, lhei=c(1.5,5), margins=c(5,5), adjCol=c(0.5, 1), srtCol=360, labCol="")
mtext("OTU", at=0.02, line=-4)
mtext("Less Abundant", side=2, at=1.01, line=1.5)
mtext("More Abundant", side=4, at=1.01, line=-31)
dev.off()
|
\name{robCompositions-package}
\alias{robCompositions-package}
\alias{robCompositions}
\docType{package}
\title{
Robust Estimation for Compositional Data.
}
\description{
The package contains methods for imputation
of compositional data including robust methods, (robust) outlier detection for compositional data,
(robust) principal component
analysis for compositional data, (robust) factor analysis for compositional data, (robust) discriminant analysis
(Fisher rule) and (robust)
Anderson-Darling normality tests
for compositional data as well as popular log-ratio transformations (alr, clr, ilr, and their inverse transformations).
}
\details{
\tabular{ll}{
Package: \tab robCompositions\cr
Type: \tab Package\cr
Version: \tab 1.3.3\cr
Date: \tab 2009-11-28\cr
License: \tab GPL 2\cr
LazyLoad: \tab yes\cr
}
}
\author{
Matthias Templ, Peter Filzmoser, Karel Hron,
Maintainer: Matthias Templ <templ@tuwien.ac.at>
}
\references{
Aitchison, J. (1986) \emph{The Statistical Analysis of Compositional
Data} Monographs on Statistics and Applied Probability. Chapman \&
Hall Ltd., London (UK). 416p. \\
Filzmoser, P., and Hron, K. (2008)
Outlier detection for compositional data using robust methods. \emph{Math. Geosciences}, \bold{40} 233-248.
Filzmoser, P., Hron, K., Reimann, C. (2009)
Principal Component Analysis for Compositional Data with Outliers. \emph{Environmetrics}, \bold{20} (6), 621--632.
P. Filzmoser, K. Hron, C. Reimann, R. Garrett (2009): Robust Factor Analysis for Compositional Data.
\emph{Computers and Geosciences}, \bold{35} (9), 1854--1861.
Hron, K. and Templ, M. and Filzmoser, P. (2010) Imputation of missing values for compositional data using classical and robust methods
\emph{Computational Statistics and Data Analysis}, \bold{54} (12), 3095--3107.
C. Reimann, P. Filzmoser, R.G. Garrett, and R. Dutter (2008):
Statistical Data Analysis Explained.
\emph{Applied Environmental Statistics with R}.
John Wiley and Sons, Chichester, 2008.
}
\keyword{ package }
\examples{
## k nearest neighbor imputation
data(expenditures)
expenditures[1,3]
expenditures[1,3] <- NA
impKNNa(expenditures)$xImp[1,3]
## iterative model based imputation
data(expenditures)
x <- expenditures
x[1,3]
x[1,3] <- NA
xi <- impCoda(x)$xImp
xi[1,3]
s1 <- sum(x[1,-3])
impS <- sum(xi[1,-3])
xi[,3] * s1/impS
xi <- impKNNa(expenditures)
xi
summary(xi)
plot(xi, which=1)
plot(xi, which=2)
plot(xi, which=3)
## pca
data(expenditures)
p1 <- pcaCoDa(expenditures)
p1
plot(p1)
## outlier detection
data(expenditures)
oD <- outCoDa(expenditures)
oD
plot(oD)
## transformations
data(arcticLake)
x <- arcticLake
x.alr <- addLR(x, 2)
y <- addLRinv(x.alr)
addLRinv(addLR(x, 3))
data(expenditures)
x <- expenditures
y <- addLRinv(addLR(x, 5))
head(x)
head(y)
addLRinv(x.alr, ivar=2, useClassInfo=FALSE)
data(expenditures)
eclr <- cenLR(expenditures)
inveclr <- cenLRinv(eclr)
head(expenditures)
head(inveclr)
head(cenLRinv(eclr$x.clr))
require(MASS)
Sigma <- matrix(c(5.05,4.95,4.95,5.05), ncol=2, byrow=TRUE)
z <- isomLRinv(mvrnorm(100, mu=c(0,2), Sigma=Sigma))
}
| /man/robCompositions-package.Rd | no_license | Hasil-Sharma/robCompositions | R | false | false | 3,155 | rd | \name{robCompositions-package}
\alias{robCompositions-package}
\alias{robCompositions}
\docType{package}
\title{
Robust Estimation for Compositional Data.
}
\description{
The package contains methods for imputation
of compositional data including robust methods, (robust) outlier detection for compositional data,
(robust) principal component
analysis for compositional data, (robust) factor analysis for compositional data, (robust) discriminant analysis
(Fisher rule) and (robust)
Anderson-Darling normality tests
for compositional data as well as popular log-ratio transformations (alr, clr, ilr, and their inverse transformations).
}
\details{
\tabular{ll}{
Package: \tab robCompositions\cr
Type: \tab Package\cr
Version: \tab 1.3.3\cr
Date: \tab 2009-11-28\cr
License: \tab GPL 2\cr
LazyLoad: \tab yes\cr
}
}
\author{
Matthias Templ, Peter Filzmoser, Karel Hron,
Maintainer: Matthias Templ <templ@tuwien.ac.at>
}
\references{
Aitchison, J. (1986) \emph{The Statistical Analysis of Compositional
Data} Monographs on Statistics and Applied Probability. Chapman \&
Hall Ltd., London (UK). 416p. \\
Filzmoser, P., and Hron, K. (2008)
Outlier detection for compositional data using robust methods. \emph{Math. Geosciences}, \bold{40} 233-248.
Filzmoser, P., Hron, K., Reimann, C. (2009)
Principal Component Analysis for Compositional Data with Outliers. \emph{Environmetrics}, \bold{20} (6), 621--632.
P. Filzmoser, K. Hron, C. Reimann, R. Garrett (2009): Robust Factor Analysis for Compositional Data.
\emph{Computers and Geosciences}, \bold{35} (9), 1854--1861.
Hron, K. and Templ, M. and Filzmoser, P. (2010) Imputation of missing values for compositional data using classical and robust methods
\emph{Computational Statistics and Data Analysis}, \bold{54} (12), 3095--3107.
C. Reimann, P. Filzmoser, R.G. Garrett, and R. Dutter (2008):
Statistical Data Analysis Explained.
\emph{Applied Environmental Statistics with R}.
John Wiley and Sons, Chichester, 2008.
}
\keyword{ package }
\examples{
## k nearest neighbor imputation
data(expenditures)
expenditures[1,3]
expenditures[1,3] <- NA
impKNNa(expenditures)$xImp[1,3]
## iterative model based imputation
data(expenditures)
x <- expenditures
x[1,3]
x[1,3] <- NA
xi <- impCoda(x)$xImp
xi[1,3]
s1 <- sum(x[1,-3])
impS <- sum(xi[1,-3])
xi[,3] * s1/impS
xi <- impKNNa(expenditures)
xi
summary(xi)
plot(xi, which=1)
plot(xi, which=2)
plot(xi, which=3)
## pca
data(expenditures)
p1 <- pcaCoDa(expenditures)
p1
plot(p1)
## outlier detection
data(expenditures)
oD <- outCoDa(expenditures)
oD
plot(oD)
## transformations
data(arcticLake)
x <- arcticLake
x.alr <- addLR(x, 2)
y <- addLRinv(x.alr)
addLRinv(addLR(x, 3))
data(expenditures)
x <- expenditures
y <- addLRinv(addLR(x, 5))
head(x)
head(y)
addLRinv(x.alr, ivar=2, useClassInfo=FALSE)
data(expenditures)
eclr <- cenLR(expenditures)
inveclr <- cenLRinv(eclr)
head(expenditures)
head(inveclr)
head(cenLRinv(eclr$x.clr))
require(MASS)
Sigma <- matrix(c(5.05,4.95,4.95,5.05), ncol=2, byrow=TRUE)
z <- isomLRinv(mvrnorm(100, mu=c(0,2), Sigma=Sigma))
}
|
## name: res_eftu.r
## date: 11/18/2016
## Here I collect the residue name, number, membership of ras
load("/Users/hyangl/project/ras/results/2016/0715_ras_eftu/alignment/ali_raseftu.RData")
membership_eftu <- as.numeric(ali_raseftu["membership_eftu",ali_raseftu["membership_eftu",]!="0"])
community_eftu <- c("a1/b1-b3", "PL", "SI", "SII", "b4-b6", "a3", "a4", "a5", "L8","D2","D3")
for (i in 1:length(community_eftu)) { names(membership_eftu)[membership_eftu==i] <- community_eftu[i]}
# numeric vector and aa as name (e.g. "M" 72)
resno_eftu <- as.numeric(ali_raseftu["resno_eftu",ali_raseftu["membership_eftu",]!="0"])
names(resno_eftu) <- ali_raseftu["1TTT_A",ali_raseftu["membership_eftu",]!="0"]
# residue name + number format (e.g. "M72")
resnano_eftu <- paste0(names(resno_eftu), resno_eftu)
save(resno_eftu, resnano_eftu,
membership_eftu, community_eftu,
file = "res_eftu.RData")
| /info/res_eftu.r | no_license | Hongyang449/2016 | R | false | false | 910 | r | ## name: res_eftu.r
## date: 11/18/2016
## Here I collect the residue name, number, membership of ras
load("/Users/hyangl/project/ras/results/2016/0715_ras_eftu/alignment/ali_raseftu.RData")
membership_eftu <- as.numeric(ali_raseftu["membership_eftu",ali_raseftu["membership_eftu",]!="0"])
community_eftu <- c("a1/b1-b3", "PL", "SI", "SII", "b4-b6", "a3", "a4", "a5", "L8","D2","D3")
for (i in 1:length(community_eftu)) { names(membership_eftu)[membership_eftu==i] <- community_eftu[i]}
# numeric vector and aa as name (e.g. "M" 72)
resno_eftu <- as.numeric(ali_raseftu["resno_eftu",ali_raseftu["membership_eftu",]!="0"])
names(resno_eftu) <- ali_raseftu["1TTT_A",ali_raseftu["membership_eftu",]!="0"]
# residue name + number format (e.g. "M72")
resnano_eftu <- paste0(names(resno_eftu), resno_eftu)
save(resno_eftu, resnano_eftu,
membership_eftu, community_eftu,
file = "res_eftu.RData")
|
library(rockchalk)
set.seed(12345)
x1 <- rnorm(100)
x2 <- rnorm(100)
x3 <- rnorm(100)
x4 <- rnorm(100)
y <- rnorm(100)
y2 <- 0.03 + 0.1*x1 + 0.1*x2 + 0.25*x1*x2 + 0.4*x3 -0.1*x4 + 1*rnorm(100)
dat <- data.frame(x1,x2,x3,x4,y, y2)
rm(x1, x2, x3, x4, y, y2)
## linear ordinary regression
m1 <- lm(y ~ x1 + x2 +x3 + x4, data = dat)
plotPlane(m1, plotx1 = "x3", plotx2 = "x4")
plotPlane(m1, plotx1 = "x3", plotx2 = "x4", drawArrows = TRUE)
plotPlane(m1, plotx1 = "x1", plotx2 = "x4", drawArrows = TRUE)
plotPlane(m1, plotx1 = "x1", plotx2 = "x2", drawArrows = TRUE, npp = 10)
plotPlane(m1, plotx1 = "x3", plotx2 = "x2", drawArrows = TRUE, npp = 40)
plotPlane(m1, plotx1 = "x3", plotx2 = "x2", drawArrows = FALSE, npp = 5, ticktype = "detailed")
## regression with interaction
m2 <- lm(y ~ x1 * x2 +x3 + x4, data = dat)
plotPlane(m2, plotx1 = "x1", plotx2 = "x2", drawArrows = TRUE)
plotPlane(m2, plotx1 = "x1", plotx2 = "x4", drawArrows = TRUE)
plotPlane(m2, plotx1 = "x1", plotx2 = "x3", drawArrows = TRUE)
plotPlane(m2, plotx1 = "x1", plotx2 = "x2", drawArrows = TRUE, phi = 10, theta = 30)
## regression with quadratic;
## Required some fancy footwork in plotPlane, so be happy
dat$y3 <- 0 + 1 * dat$x1 + 2 * dat$x1^2 + 1 * dat$x2 + 0.4*dat$x3 + 8 * rnorm(100)
m3 <- lm(y3 ~ poly(x1,2) + x2 +x3 + x4, data = dat)
summary(m3)
plotPlane(m3, plotx1 = "x1", plotx2 = "x2", drawArrows = TRUE, x1lab = "my great predictor", x2lab = "a so-so predictor", ylab = "Most awesomest DV ever")
plotPlane(m3, plotx1 = "x1", plotx2 = "x2", drawArrows = TRUE, x1lab = "my great predictor", x2lab = "a so-so predictor", ylab = "Most awesomest DV ever", phi=-20)
plotPlane(m3, plotx1 = "x1", plotx2 = "x2", drawArrows = TRUE, phi = 10, theta = 30)
plotPlane(m3, plotx1 = "x1", plotx2 = "x4", drawArrows = TRUE, ticktype = "detailed")
plotPlane(m3, plotx1 = "x1", plotx2 = "x3", drawArrows = TRUE)
plotPlane(m3, plotx1 = "x1", plotx2 = "x2", drawArrows = TRUE, phi = 10, theta = 30)
m4 <- lm(y ~ sin(x1) + x2*x3 +x3 + x4, data = dat)
summary(m4)
plotPlane(m4, plotx1 = "x1", plotx2 = "x2", drawArrows = TRUE)
plotPlane(m4, plotx1 = "x1", plotx2 = "x3", drawArrows = TRUE)
eta3 <- 1.1 + .9*dat$x1 - .6*dat$x2 + .5*dat$x3
dat$y4 <- rbinom(100, size = 1, prob = exp( eta3)/(1+exp(eta3)))
gm1 <- glm(y4 ~ x1 + x2 + x3, data = dat, family = binomial(logit))
summary(gm1)
plotPlane(gm1, plotx1 = "x1", plotx2 = "x2")
plotPlane(gm1, plotx1 = "x1", plotx2 = "x2", phi = -10)
plotPlane(gm1, plotx1 = "x1", plotx2 = "x2", ticktype = "detailed")
plotPlane(gm1, plotx1 = "x1", plotx2 = "x2", ticktype = "detailed", npp = 30, theta = 30)
plotPlane(gm1, plotx1 = "x1", plotx2 = "x3", ticktype = "detailed", npp = 70, theta = 60)
plotPlane(gm1, plotx1 = "x1", plotx2 = "x2", ticktype = c("detailed"), npp = 50, theta = 40)
dat$x2 <- 5 * dat$x2
dat$x4 <- 10 * dat$x4
eta4 <- 0.1 + .15*dat$x1 - 0.1*dat$x2 + .25*dat$x3 + 0.1*dat$x4
dat$y4 <- rbinom(100, size = 1, prob = exp( eta4)/(1+exp(eta4)))
gm2 <- glm(y4 ~ x1 + x2 + x3 + x4, data = dat, family = binomial(logit))
summary(gm2)
plotPlane(gm2, plotx1 = "x1", plotx2 = "x2")
plotPlane(gm2, plotx1 = "x2", plotx2 = "x1")
plotPlane(gm2, plotx1 = "x1", plotx2 = "x2", phi = -10)
plotPlane(gm2, plotx1 = "x1", plotx2 = "x2", phi = 5, theta = 70, npp = 40)
plotPlane(gm2, plotx1 = "x1", plotx2 = "x2", ticktype = "detailed")
plotPlane(gm2, plotx1 = "x1", plotx2 = "x2", ticktype = "detailed", npp = 30, theta = -30)
plotPlane(gm2, plotx1 = "x1", plotx2 = "x3", ticktype = "detailed", npp = 70, theta = 60)
plotPlane(gm2, plotx1 = "x4", plotx2 = "x3", ticktype = "detailed", npp = 50, theta = 10)
plotPlane(gm2, plotx1 = "x1", plotx2 = "x2", ticktype = c("detailed"))
| /packages/rockchalk/rockchalk/inst/examples/plotPlane-ex.R | no_license | rgroup-crmda/rgroup-crmda | R | false | false | 3,713 | r | library(rockchalk)
set.seed(12345)
x1 <- rnorm(100)
x2 <- rnorm(100)
x3 <- rnorm(100)
x4 <- rnorm(100)
y <- rnorm(100)
y2 <- 0.03 + 0.1*x1 + 0.1*x2 + 0.25*x1*x2 + 0.4*x3 -0.1*x4 + 1*rnorm(100)
dat <- data.frame(x1,x2,x3,x4,y, y2)
rm(x1, x2, x3, x4, y, y2)
## linear ordinary regression
m1 <- lm(y ~ x1 + x2 +x3 + x4, data = dat)
plotPlane(m1, plotx1 = "x3", plotx2 = "x4")
plotPlane(m1, plotx1 = "x3", plotx2 = "x4", drawArrows = TRUE)
plotPlane(m1, plotx1 = "x1", plotx2 = "x4", drawArrows = TRUE)
plotPlane(m1, plotx1 = "x1", plotx2 = "x2", drawArrows = TRUE, npp = 10)
plotPlane(m1, plotx1 = "x3", plotx2 = "x2", drawArrows = TRUE, npp = 40)
plotPlane(m1, plotx1 = "x3", plotx2 = "x2", drawArrows = FALSE, npp = 5, ticktype = "detailed")
## regression with interaction
m2 <- lm(y ~ x1 * x2 +x3 + x4, data = dat)
plotPlane(m2, plotx1 = "x1", plotx2 = "x2", drawArrows = TRUE)
plotPlane(m2, plotx1 = "x1", plotx2 = "x4", drawArrows = TRUE)
plotPlane(m2, plotx1 = "x1", plotx2 = "x3", drawArrows = TRUE)
plotPlane(m2, plotx1 = "x1", plotx2 = "x2", drawArrows = TRUE, phi = 10, theta = 30)
## regression with quadratic;
## Required some fancy footwork in plotPlane, so be happy
dat$y3 <- 0 + 1 * dat$x1 + 2 * dat$x1^2 + 1 * dat$x2 + 0.4*dat$x3 + 8 * rnorm(100)
m3 <- lm(y3 ~ poly(x1,2) + x2 +x3 + x4, data = dat)
summary(m3)
plotPlane(m3, plotx1 = "x1", plotx2 = "x2", drawArrows = TRUE, x1lab = "my great predictor", x2lab = "a so-so predictor", ylab = "Most awesomest DV ever")
plotPlane(m3, plotx1 = "x1", plotx2 = "x2", drawArrows = TRUE, x1lab = "my great predictor", x2lab = "a so-so predictor", ylab = "Most awesomest DV ever", phi=-20)
plotPlane(m3, plotx1 = "x1", plotx2 = "x2", drawArrows = TRUE, phi = 10, theta = 30)
plotPlane(m3, plotx1 = "x1", plotx2 = "x4", drawArrows = TRUE, ticktype = "detailed")
plotPlane(m3, plotx1 = "x1", plotx2 = "x3", drawArrows = TRUE)
plotPlane(m3, plotx1 = "x1", plotx2 = "x2", drawArrows = TRUE, phi = 10, theta = 30)
m4 <- lm(y ~ sin(x1) + x2*x3 +x3 + x4, data = dat)
summary(m4)
plotPlane(m4, plotx1 = "x1", plotx2 = "x2", drawArrows = TRUE)
plotPlane(m4, plotx1 = "x1", plotx2 = "x3", drawArrows = TRUE)
eta3 <- 1.1 + .9*dat$x1 - .6*dat$x2 + .5*dat$x3
dat$y4 <- rbinom(100, size = 1, prob = exp( eta3)/(1+exp(eta3)))
gm1 <- glm(y4 ~ x1 + x2 + x3, data = dat, family = binomial(logit))
summary(gm1)
plotPlane(gm1, plotx1 = "x1", plotx2 = "x2")
plotPlane(gm1, plotx1 = "x1", plotx2 = "x2", phi = -10)
plotPlane(gm1, plotx1 = "x1", plotx2 = "x2", ticktype = "detailed")
plotPlane(gm1, plotx1 = "x1", plotx2 = "x2", ticktype = "detailed", npp = 30, theta = 30)
plotPlane(gm1, plotx1 = "x1", plotx2 = "x3", ticktype = "detailed", npp = 70, theta = 60)
plotPlane(gm1, plotx1 = "x1", plotx2 = "x2", ticktype = c("detailed"), npp = 50, theta = 40)
dat$x2 <- 5 * dat$x2
dat$x4 <- 10 * dat$x4
eta4 <- 0.1 + .15*dat$x1 - 0.1*dat$x2 + .25*dat$x3 + 0.1*dat$x4
dat$y4 <- rbinom(100, size = 1, prob = exp( eta4)/(1+exp(eta4)))
gm2 <- glm(y4 ~ x1 + x2 + x3 + x4, data = dat, family = binomial(logit))
summary(gm2)
plotPlane(gm2, plotx1 = "x1", plotx2 = "x2")
plotPlane(gm2, plotx1 = "x2", plotx2 = "x1")
plotPlane(gm2, plotx1 = "x1", plotx2 = "x2", phi = -10)
plotPlane(gm2, plotx1 = "x1", plotx2 = "x2", phi = 5, theta = 70, npp = 40)
plotPlane(gm2, plotx1 = "x1", plotx2 = "x2", ticktype = "detailed")
plotPlane(gm2, plotx1 = "x1", plotx2 = "x2", ticktype = "detailed", npp = 30, theta = -30)
plotPlane(gm2, plotx1 = "x1", plotx2 = "x3", ticktype = "detailed", npp = 70, theta = 60)
plotPlane(gm2, plotx1 = "x4", plotx2 = "x3", ticktype = "detailed", npp = 50, theta = 10)
plotPlane(gm2, plotx1 = "x1", plotx2 = "x2", ticktype = c("detailed"))
|
# Install and load packages
# NOTE: this file is a generated template -- tokens such as .year., .yy.,
# .FYC. are placeholders substituted by a code generator before the script
# is run; do not "fix" them here.
package_names <- c("survey","dplyr","foreign","devtools")
# Install any missing package, then attach them all.
# NOTE(review): require() + T instead of library() + TRUE -- template style.
lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x))
lapply(package_names, require, character.only=T)
# MEPS helper package is installed from GitHub (needs devtools).
install_github("e-mitchell/meps_r_pkg/MEPS")
library(MEPS)
# survey-package option: handle strata containing a single PSU instead of
# erroring during variance estimation.
options(survey.lonely.psu="adjust")
# Load FYC file
# Full-Year Consolidated (FYC) person-level file, SAS transport format.
FYC <- read.xport('C:/MEPS/.FYC..ssp');
year <- .year.
# Harmonize variable names across MEPS panel years:
if(year <= 2001) FYC <- FYC %>% mutate(VARPSU = VARPSU.yy., VARSTR=VARSTR.yy.)
if(year <= 1998) FYC <- FYC %>% rename(PERWT.yy.F = WTDPER.yy.)
if(year == 1996) FYC <- FYC %>% mutate(AGE42X = AGE2X, AGE31X = AGE1X)
# Recode negative age codes to NA, then take the last available age in the
# year (end-of-year, round 4/2, round 3/1 in that order of preference).
FYC <- FYC %>%
mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>%
mutate(AGELAST = coalesce(AGE.yy.X, AGE42X, AGE31X))
# Constant indicator used as an "all persons" grouping variable downstream.
FYC$ind = 1
# Poverty status
# 1996 names the poverty-category variable without a year suffix.
if(year == 1996)
FYC <- FYC %>% rename(POVCAT96 = POVCAT)
# Map POVCAT codes 1-5 to labelled income categories; any other or missing
# value becomes "Missing".
FYC <- FYC %>%
mutate(poverty = recode_factor(POVCAT.yy., .default = "Missing", .missing = "Missing",
"1" = "Negative or poor",
"2" = "Near-poor",
"3" = "Low income",
"4" = "Middle income",
"5" = "High income"))
# Marital status
# 1996 uses an extended code set; fold codes above 6 back onto 1-6.
if(year == 1996){
FYC <- FYC %>%
mutate(MARRY42X = ifelse(MARRY2X <= 6, MARRY2X, MARRY2X-6),
MARRY31X = ifelse(MARRY1X <= 6, MARRY1X, MARRY1X-6))
}
# Negative codes -> NA, take the last available round's value, then label.
FYC <- FYC %>%
mutate_at(vars(starts_with("MARRY")), funs(replace(., .< 0, NA))) %>%
mutate(married = coalesce(MARRY.yy.X, MARRY42X, MARRY31X)) %>%
mutate(married = recode_factor(married, .default = "Missing", .missing = "Missing",
"1" = "Married",
"2" = "Widowed",
"3" = "Divorced",
"4" = "Separated",
"5" = "Never married",
"6" = "Inapplicable (age < 16)"))
# Keep only needed variables from FYC
FYCsub <- FYC %>% select(married,poverty,ind, DUPERSID, PERWT.yy.F, VARSTR, VARPSU)
# Load event files
# Standard MEPS event files: RX = prescribed medicines, DV = dental,
# IP = inpatient, ER = emergency room, OP = outpatient, OB = office-based,
# HH = home health.
RX <- read.xport('C:/MEPS/.RX..ssp')
DVT <- read.xport('C:/MEPS/.DV..ssp')
IPT <- read.xport('C:/MEPS/.IP..ssp')
ERT <- read.xport('C:/MEPS/.ER..ssp')
OPT <- read.xport('C:/MEPS/.OP..ssp')
OBV <- read.xport('C:/MEPS/.OB..ssp')
HHT <- read.xport('C:/MEPS/.HH..ssp')
# Define sub-levels for office-based and outpatient
# To compute estimates for these sub-events, replace 'event' with 'event_v2X'
# in the 'svyby' statement below, when applicable
OBV <- OBV %>%
mutate(event_v2X = recode_factor(
SEEDOC, .default = 'Missing', .missing = "Missing", '1' = 'OBD', '2' = 'OBO'))
OPT <- OPT %>%
mutate(event_v2X = recode_factor(
SEEDOC, .default = 'Missing', .missing = "Missing", '1' = 'OPY', '2' = 'OPZ'))
# Stack events
# stack_events() (MEPS package) row-binds the event files and records the
# source in a 'data' column, copied to 'event' below.
stacked_events <- stack_events(RX, DVT, IPT, ERT, OPT, OBV, HHT,
keep.vars = c('SEEDOC','event_v2X'))
# Derive combined payment sources: PR = private + Tricare, OZ = all other
# sources summed.
stacked_events <- stacked_events %>%
mutate(event = data,
PR.yy.X = PV.yy.X + TR.yy.X,
OZ.yy.X = OF.yy.X + SL.yy.X + OT.yy.X + OR.yy.X + OU.yy.X + WC.yy.X + VA.yy.X) %>%
select(DUPERSID, event, event_v2X, SEEDOC,
XP.yy.X, SF.yy.X, MR.yy.X, MD.yy.X, PR.yy.X, OZ.yy.X)
# Attach person-level covariates and survey-design variables to each event.
EVENTS <- stacked_events %>% full_join(FYCsub, by='DUPERSID')
# Complex survey design: PSUs nested within strata, person-level weights.
EVNTdsgn <- svydesign(
id = ~VARPSU,
strata = ~VARSTR,
weights = ~PERWT.yy.F,
data = EVENTS,
nest = TRUE)
# Weighted total number of events (XP >= 0) by marital status x poverty.
results <- svyby(~(XP.yy.X >= 0), FUN=svytotal, by = ~married + poverty, design = subset(EVNTdsgn, XP.yy.X >= 0))
print(results)
| /mepstrends/hc_use/json/code/r/totEVT__married__poverty__.r | permissive | RandomCriticalAnalysis/MEPS-summary-tables | R | false | false | 3,370 | r | # Install and load packages
# NOTE: this file is a generated template -- tokens such as .year., .yy.,
# .FYC. are placeholders substituted by a code generator before the script
# is run; do not "fix" them here.
package_names <- c("survey","dplyr","foreign","devtools")
# Install any missing package, then attach them all.
# NOTE(review): require() + T instead of library() + TRUE -- template style.
lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x))
lapply(package_names, require, character.only=T)
# MEPS helper package is installed from GitHub (needs devtools).
install_github("e-mitchell/meps_r_pkg/MEPS")
library(MEPS)
# survey-package option: handle strata containing a single PSU instead of
# erroring during variance estimation.
options(survey.lonely.psu="adjust")
# Load FYC file
# Full-Year Consolidated (FYC) person-level file, SAS transport format.
FYC <- read.xport('C:/MEPS/.FYC..ssp');
year <- .year.
# Harmonize variable names across MEPS panel years:
if(year <= 2001) FYC <- FYC %>% mutate(VARPSU = VARPSU.yy., VARSTR=VARSTR.yy.)
if(year <= 1998) FYC <- FYC %>% rename(PERWT.yy.F = WTDPER.yy.)
if(year == 1996) FYC <- FYC %>% mutate(AGE42X = AGE2X, AGE31X = AGE1X)
# Recode negative age codes to NA, then take the last available age in the
# year (end-of-year, round 4/2, round 3/1 in that order of preference).
FYC <- FYC %>%
mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>%
mutate(AGELAST = coalesce(AGE.yy.X, AGE42X, AGE31X))
# Constant indicator used as an "all persons" grouping variable downstream.
FYC$ind = 1
# Poverty status
# 1996 names the poverty-category variable without a year suffix.
if(year == 1996)
FYC <- FYC %>% rename(POVCAT96 = POVCAT)
# Map POVCAT codes 1-5 to labelled income categories; any other or missing
# value becomes "Missing".
FYC <- FYC %>%
mutate(poverty = recode_factor(POVCAT.yy., .default = "Missing", .missing = "Missing",
"1" = "Negative or poor",
"2" = "Near-poor",
"3" = "Low income",
"4" = "Middle income",
"5" = "High income"))
# Marital status
# 1996 uses an extended code set; fold codes above 6 back onto 1-6.
if(year == 1996){
FYC <- FYC %>%
mutate(MARRY42X = ifelse(MARRY2X <= 6, MARRY2X, MARRY2X-6),
MARRY31X = ifelse(MARRY1X <= 6, MARRY1X, MARRY1X-6))
}
# Negative codes -> NA, take the last available round's value, then label.
FYC <- FYC %>%
mutate_at(vars(starts_with("MARRY")), funs(replace(., .< 0, NA))) %>%
mutate(married = coalesce(MARRY.yy.X, MARRY42X, MARRY31X)) %>%
mutate(married = recode_factor(married, .default = "Missing", .missing = "Missing",
"1" = "Married",
"2" = "Widowed",
"3" = "Divorced",
"4" = "Separated",
"5" = "Never married",
"6" = "Inapplicable (age < 16)"))
# Keep only needed variables from FYC
FYCsub <- FYC %>% select(married,poverty,ind, DUPERSID, PERWT.yy.F, VARSTR, VARPSU)
# Load event files
# Standard MEPS event files: RX = prescribed medicines, DV = dental,
# IP = inpatient, ER = emergency room, OP = outpatient, OB = office-based,
# HH = home health.
RX <- read.xport('C:/MEPS/.RX..ssp')
DVT <- read.xport('C:/MEPS/.DV..ssp')
IPT <- read.xport('C:/MEPS/.IP..ssp')
ERT <- read.xport('C:/MEPS/.ER..ssp')
OPT <- read.xport('C:/MEPS/.OP..ssp')
OBV <- read.xport('C:/MEPS/.OB..ssp')
HHT <- read.xport('C:/MEPS/.HH..ssp')
# Define sub-levels for office-based and outpatient
# To compute estimates for these sub-events, replace 'event' with 'event_v2X'
# in the 'svyby' statement below, when applicable
OBV <- OBV %>%
mutate(event_v2X = recode_factor(
SEEDOC, .default = 'Missing', .missing = "Missing", '1' = 'OBD', '2' = 'OBO'))
OPT <- OPT %>%
mutate(event_v2X = recode_factor(
SEEDOC, .default = 'Missing', .missing = "Missing", '1' = 'OPY', '2' = 'OPZ'))
# Stack events
# stack_events() (MEPS package) row-binds the event files and records the
# source in a 'data' column, copied to 'event' below.
stacked_events <- stack_events(RX, DVT, IPT, ERT, OPT, OBV, HHT,
keep.vars = c('SEEDOC','event_v2X'))
# Derive combined payment sources: PR = private + Tricare, OZ = all other
# sources summed.
stacked_events <- stacked_events %>%
mutate(event = data,
PR.yy.X = PV.yy.X + TR.yy.X,
OZ.yy.X = OF.yy.X + SL.yy.X + OT.yy.X + OR.yy.X + OU.yy.X + WC.yy.X + VA.yy.X) %>%
select(DUPERSID, event, event_v2X, SEEDOC,
XP.yy.X, SF.yy.X, MR.yy.X, MD.yy.X, PR.yy.X, OZ.yy.X)
# Attach person-level covariates and survey-design variables to each event.
EVENTS <- stacked_events %>% full_join(FYCsub, by='DUPERSID')
# Complex survey design: PSUs nested within strata, person-level weights.
EVNTdsgn <- svydesign(
id = ~VARPSU,
strata = ~VARSTR,
weights = ~PERWT.yy.F,
data = EVENTS,
nest = TRUE)
# Weighted total number of events (XP >= 0) by marital status x poverty.
results <- svyby(~(XP.yy.X >= 0), FUN=svytotal, by = ~married + poverty, design = subset(EVNTdsgn, XP.yy.X >= 0))
print(results)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DIDparams.R
\name{DIDparams}
\alias{DIDparams}
\title{DIDparams}
\usage{
DIDparams(
yname,
tname,
idname = NULL,
first.treat.name,
xformla = NULL,
data,
control.group,
weightsname = NULL,
alp = 0.05,
bstrap = T,
biters = 1000,
clustervars = NULL,
cband = T,
printdetails = TRUE,
pl = FALSE,
cores = 1,
estMethod = "dr",
panel = TRUE,
n = NULL,
nG = NULL,
nT = NULL,
tlist = NULL,
glist = NULL
)
}
\arguments{
\item{yname}{The name of the outcome variable}
\item{tname}{The name of the column containing the time periods}
\item{idname}{The individual (cross-sectional unit) id name}
\item{first.treat.name}{The name of the variable in \code{data} that
contains the first period when a particular observation is treated.
This should be a positive number for all observations in treated groups.
It defines which "group" a unit belongs to. It should be 0 for units
in the untreated group.}
\item{xformla}{A formula for the covariates to include in the
model. It should be of the form \code{~ X1 + X2}. Default
is NULL which is equivalent to \code{xformla=~1}. This is
used to create a matrix of covariates which is then passed
to the 2x2 DID estimator chosen in \code{estMethod}.}
\item{data}{The name of the data.frame that contains the data}
\item{control.group}{Which units to use as the control group.
The default is "nevertreated" which sets the control group
to be the group of units that never participate in the
treatment. This group does not change across groups or
time periods. The other option is to set
\code{group="notyettreated"}. In this case, the control group
is set to the group of units that have not yet participated
in the treatment in that time period. This includes all
never treated units, but it includes additional units that
eventually participate in the treatment, but have not
participated yet.}
\item{weightsname}{The name of the column containing the sampling weights.
If not set, all observations have same weight.}
\item{alp}{the significance level, default is 0.05}
\item{bstrap}{Boolean for whether or not to compute standard errors using
the multiplier bootstrap. If standard errors are clustered, then one
must set \code{bstrap=TRUE}. Default is \code{FALSE} and analytical
standard errors are reported.}
\item{biters}{The number of bootstrap iterations to use. The default is 1000,
and this is only applicable if \code{bstrap=TRUE}.}
\item{clustervars}{A vector of variables to cluster on. At most, there
can be two variables (otherwise will throw an error) and one of these
must be the same as idname which allows for clustering at the individual
level.}
\item{cband}{Boolean for whether or not to compute a uniform confidence
band that covers all of the group-time average treatment effects
with fixed probability \code{1-alp}. In order to compute uniform confidence
bands, \code{bstrap} must also be set to \code{TRUE}. The default is
\code{FALSE} and the resulting standard errors will be pointwise.}
\item{printdetails}{Boolean for showing detailed results or not}
\item{pl}{Boolean for whether or not to use parallel processing
(not implemented yet)}
\item{cores}{The number of cores to use for parallel processing
(not implemented yet)}
\item{estMethod}{the method to compute group-time average treatment effects. The default is "dr" which uses the doubly robust
approach in the \code{DRDID} package. Other built-in methods
include "ipw" for inverse probability weighting and "reg" for
first step regression estimators. The user can also pass their
own function for estimating group time average treatment
effects. This should be a function
\code{f(Y1,Y0,treat,covariates)} where \code{Y1} is an
\code{n} x \code{1} vector of outcomes in the post-treatment
outcomes, \code{Y0} is an \code{n} x \code{1} vector of
pre-treatment outcomes, \code{treat} is a vector indicating
whether or not an individual participates in the treatment,
and \code{covariates} is an \code{n} x \code{k} matrix of
covariates. The function should return a list that includes
\code{ATT} (an estimated average treatment effect), and
\code{inf.func} (an \code{n} x \code{1} influence function).
The function can return other things as well, but these are
the only two that are required. \code{estMethod} is only used
if covariates are included.}
\item{panel}{Whether or not the data is a panel dataset.
The panel dataset should be provided in long format -- that
is, where each row corresponds to a unit observed at a
particular point in time. The default is TRUE. When
is using a panel dataset, the variable \code{idname} must
be set. When \code{panel=FALSE}, the data is treated
as repeated cross sections.}
\item{n}{The number of observations. This is equal to the
number of units (which may be different from the number
of rows in a panel dataset).}
\item{nG}{The number of groups}
\item{nT}{The number of time periods}
\item{tlist}{a vector containing each time period}
\item{glist}{a vector containing each group}
}
\description{
object to hold did parameters
}
| /man/DIDparams.Rd | no_license | Lingbing/did | R | false | true | 5,149 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DIDparams.R
\name{DIDparams}
\alias{DIDparams}
\title{DIDparams}
\usage{
DIDparams(
yname,
tname,
idname = NULL,
first.treat.name,
xformla = NULL,
data,
control.group,
weightsname = NULL,
alp = 0.05,
bstrap = T,
biters = 1000,
clustervars = NULL,
cband = T,
printdetails = TRUE,
pl = FALSE,
cores = 1,
estMethod = "dr",
panel = TRUE,
n = NULL,
nG = NULL,
nT = NULL,
tlist = NULL,
glist = NULL
)
}
\arguments{
\item{yname}{The name of the outcome variable}
\item{tname}{The name of the column containing the time periods}
\item{idname}{The individual (cross-sectional unit) id name}
\item{first.treat.name}{The name of the variable in \code{data} that
contains the first period when a particular observation is treated.
This should be a positive number for all observations in treated groups.
It defines which "group" a unit belongs to. It should be 0 for units
in the untreated group.}
\item{xformla}{A formula for the covariates to include in the
model. It should be of the form \code{~ X1 + X2}. Default
is NULL which is equivalent to \code{xformla=~1}. This is
used to create a matrix of covariates which is then passed
to the 2x2 DID estimator chosen in \code{estMethod}.}
\item{data}{The name of the data.frame that contains the data}
\item{control.group}{Which units to use as the control group.
The default is "nevertreated" which sets the control group
to be the group of units that never participate in the
treatment. This group does not change across groups or
time periods. The other option is to set
\code{group="notyettreated"}. In this case, the control group
is set to the group of units that have not yet participated
in the treatment in that time period. This includes all
never treated units, but it includes additional units that
eventually participate in the treatment, but have not
participated yet.}
\item{weightsname}{The name of the column containing the sampling weights.
If not set, all observations have same weight.}
\item{alp}{the significance level, default is 0.05}
\item{bstrap}{Boolean for whether or not to compute standard errors using
the multiplier bootstrap. If standard errors are clustered, then one
must set \code{bstrap=TRUE}. Default is \code{FALSE} and analytical
standard errors are reported.}
\item{biters}{The number of bootstrap iterations to use. The default is 1000,
and this is only applicable if \code{bstrap=TRUE}.}
\item{clustervars}{A vector of variables to cluster on. At most, there
can be two variables (otherwise will throw an error) and one of these
must be the same as idname which allows for clustering at the individual
level.}
\item{cband}{Boolean for whether or not to compute a uniform confidence
band that covers all of the group-time average treatment effects
with fixed probability \code{1-alp}. In order to compute uniform confidence
bands, \code{bstrap} must also be set to \code{TRUE}. The default is
\code{FALSE} and the resulting standard errors will be pointwise.}
\item{printdetails}{Boolean for showing detailed results or not}
\item{pl}{Boolean for whether or not to use parallel processing
(not implemented yet)}
\item{cores}{The number of cores to use for parallel processing
(not implemented yet)}
\item{estMethod}{the method to compute group-time average treatment effects. The default is "dr" which uses the doubly robust
approach in the \code{DRDID} package. Other built-in methods
include "ipw" for inverse probability weighting and "reg" for
first step regression estimators. The user can also pass their
own function for estimating group time average treatment
effects. This should be a function
\code{f(Y1,Y0,treat,covariates)} where \code{Y1} is an
\code{n} x \code{1} vector of outcomes in the post-treatment
outcomes, \code{Y0} is an \code{n} x \code{1} vector of
pre-treatment outcomes, \code{treat} is a vector indicating
whether or not an individual participates in the treatment,
and \code{covariates} is an \code{n} x \code{k} matrix of
covariates. The function should return a list that includes
\code{ATT} (an estimated average treatment effect), and
\code{inf.func} (an \code{n} x \code{1} influence function).
The function can return other things as well, but these are
the only two that are required. \code{estMethod} is only used
if covariates are included.}
\item{panel}{Whether or not the data is a panel dataset.
The panel dataset should be provided in long format -- that
is, where each row corresponds to a unit observed at a
particular point in time. The default is TRUE. When
is using a panel dataset, the variable \code{idname} must
be set. When \code{panel=FALSE}, the data is treated
as repeated cross sections.}
\item{n}{The number of observations. This is equal to the
number of units (which may be different from the number
of rows in a panel dataset).}
\item{nG}{The number of groups}
\item{nT}{The number of time periods}
\item{tlist}{a vector containing each time period}
\item{glist}{a vector containing each group}
}
\description{
object to hold did parameters
}
|
#!/usr/bin/Rscript
# Pipeline step 14f5: plot cumulative departure curves for the "all down"
# ratio statistics (observed TADs vs. permutations) and collect AUC values
# at a configurable quantile threshold.
# Usage: Rscript 14f5_cumulAllDown_limited_AUC_flexQuant.R <setting_file.R>
options(scipen=100)
startTime <- Sys.time()
################ USE THE FOLLOWING FILES FROM PREVIOUS STEPS
# - script0: rna_geneList.Rdata
# - script0: pipeline_geneList.Rdata
# - script1: DE_topTable.Rdata
# - script1: DE_geneList.Rdata
# - script1: DE_rnaseqDT.Rdata
# - script8: all_obs_ratioDown.Rdata
# - script9: emp_pval_meanLogFC.Rdata
# - script10: emp_pval_meanCorr.Rdata
################################################################################
################ OUTPUT
# - emp_pval_combined.Rdata + tables
################################################################################
# SSHFS toggles development paths when the project is mounted remotely.
SSHFS <- F
setDir <- ifelse(SSHFS, "/media/electron", "")
# Single mandatory argument: the analysis-specific setting file.
args <- commandArgs(trailingOnly = TRUE)
stopifnot(length(args) == 1)
settingF <- args[1]
stopifnot(file.exists(settingF))
pipScriptDir <- paste0(setDir, "/mnt/ed4/marie/scripts/TAD_DE_pipeline_v2")
script0_name <- "0_prepGeneData"
script1_name <- "1_runGeneDE"
script8_name <- "8c_runAllDown"
script_name <- "14f5_cumulAllDown_limited_AUC_flexQuant"
stopifnot(file.exists(paste0(pipScriptDir, "/", script_name, ".R")))
cat(paste0("> START ", script_name, "\n"))
# main_settings.R and the setting file define the globals used below
# (pipOutFold, nCpu, allDown_limited, cond1/cond2, sample*_file, ...).
source("main_settings.R")
source(settingF)
source(paste0(pipScriptDir, "/", "TAD_DE_utils.R"))
suppressPackageStartupMessages(library(foreach, warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE)) # error bar
suppressPackageStartupMessages(library(doMC, warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE)) # error bar
suppressPackageStartupMessages(library(flux, warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE)) # error bar
registerDoMC(ifelse(SSHFS,2, nCpu)) # loaded from main_settings.R
# create the directories
curr_outFold <- paste0(pipOutFold, "/", script_name)
system(paste0("mkdir -p ", curr_outFold))
# Per-run log file, timestamped; any previous log with the same name removed.
pipLogFile <- paste0(pipOutFold, "/", format(Sys.time(), "%Y%d%m%H%M%S"),"_", script_name, "_logFile.txt")
system(paste0("rm -f ", pipLogFile))
# ADDED 27.11.2018 to check using other files
# Record the key input settings in the log for reproducibility.
txt <- paste0("inputDataType\t=\t", inputDataType, "\n")
printAndLog(txt, pipLogFile)
txt <- paste0("gene2tadDT_file\t=\t", gene2tadDT_file, "\n")
printAndLog(txt, pipLogFile)
txt <- paste0("TADpos_file\t=\t", TADpos_file, "\n")
printAndLog(txt, pipLogFile)
txt <- paste0("settingF\t=\t", settingF, "\n")
printAndLog(txt, pipLogFile)
# Quantile(s) of the permutation curves at which the AUC is evaluated.
all_aucQuantThresh <- c(0.95)
# 14f: leg_xpos <- ifelse(observed_auc <= max(density_permut$x), observed_auc, max(density_permut$x))
# 14f2: leg_xpos <- observed_auc
################################****************************************************************************************
####################################################### PREPARE INPUT
################################****************************************************************************************
# INPUT DATA
# Gene-to-TAD assignment table (one row per gene).
gene2tadDT <- read.delim(gene2tadDT_file, header=F, col.names = c("entrezID", "chromo", "start", "end", "region"), stringsAsFactors = F)
gene2tadDT$entrezID <- as.character(gene2tadDT$entrezID)
# UPDATE SELECT THE GENES ACCORDING TO THE SETTINGS PREPARED IN 0_PREPGENEDATA
initList <- eval(parse(text = load(paste0(pipOutFold, "/", script0_name, "/rna_geneList.Rdata"))))
geneList <- eval(parse(text = load(paste0(pipOutFold, "/", script0_name, "/pipeline_geneList.Rdata"))))
txt <- paste0(toupper(script_name), "> Start with # genes: ", length(geneList), "/", length(initList), "\n")
printAndLog(txt, pipLogFile)
stopifnot(!any(duplicated(names(geneList))))
# Restrict the gene-to-TAD table to the pipeline genes; count genes per TAD.
gene2tadDT <- gene2tadDT[gene2tadDT$entrezID %in% geneList,]
geneNbr <- setNames(as.numeric(table(gene2tadDT$region)), names(table(gene2tadDT$region)))
### SET OUTPUT
# Figure device and dimensions (svg by default; png uses pixel sizes).
plotType <- "svg"
myHeight <- ifelse(plotType == "png", 480 , 7)
myWidth <- ifelse(plotType == "png", 600, 10)
# if plotSeparated == TRUE => 1 plot per ratio, otherwise all on the same figure (#x2 plots)
plotSeparated <- F
# "permThresh" the quantile of permutations to take is loaded from main_settings.R
###############
##### retrieve the direction of up/down
###############
# Helper: load a single object from an .Rdata file and return it.
# Replaces the eval(parse(text = load(...))) anti-pattern used elsewhere in
# the pipeline; assumes each .Rdata file stores exactly one object (pipeline
# convention -- TODO confirm for non-standard inputs).
loadRdata <- function(rdataFile) {
  get(load(rdataFile))
}
# DE results from step 1: topTable (logFC per gene), the DE gene list and the
# expression matrix used for the DE analysis; sample IDs for each condition.
DE_topTable <- loadRdata(paste0(pipOutFold, "/", script1_name, "/DE_topTable.Rdata"))
DE_geneList <- loadRdata(paste0(pipOutFold, "/", script1_name, "/DE_geneList.Rdata"))
exprDT <- loadRdata(paste0(pipOutFold, "/", script1_name, "/DE_rnaseqDT.Rdata"))
# samp1 <- loadRdata(paste0(setDir, "/", sample1_file))
# samp2 <- loadRdata(paste0(setDir, "/", sample2_file))
samp1 <- loadRdata(paste0(sample1_file))
samp2 <- loadRdata(paste0(sample2_file))
# Restrict the DE table to the pipeline genes and sanity-check consistency
# between the DE table, the gene list and the expression matrix.
DE_topTable <- DE_topTable[DE_topTable$genes %in% names(DE_geneList),]
stopifnot(all(DE_topTable$genes %in% names(DE_geneList)))
stopifnot(!any(duplicated(names(DE_geneList))))
stopifnot(all(colnames(exprDT) %in% c(samp1, samp2)))
stopifnot(all(samp1 %in% colnames(exprDT)))
stopifnot(all(samp2 %in% colnames(exprDT)))
# Use the most down-regulated gene (smallest logFC) to infer which condition
# "down" refers to: the condition with higher mean expression for that gene
# is the one genes are down-regulated from.
maxDownGene <- DE_topTable$genes[which.min(DE_topTable$logFC)]
stopifnot(maxDownGene %in% rownames(exprDT))
mean_expr1 <- mean(unlist(c(exprDT[maxDownGene, samp1])), na.rm = TRUE)
mean_expr2 <- mean(unlist(c(exprDT[maxDownGene, samp2])), na.rm = TRUE)
if (mean_expr1 > mean_expr2) {
  subtitDir <- paste0("down: ", toupper(cond1), " > ", toupper(cond2))
} else {
  subtitDir <- paste0("down: ", toupper(cond2), " > ", toupper(cond1))
}
# When all ratios go on a single figure, open one multi-panel device up front
# (one panel per ratio type, laid out on 2 columns).
if (!plotSeparated) {
  nColPlot <- 2
  # nRowPlot <- length(allDown_limited)*2/nColPlot
  nRowPlot <- length(allDown_limited) * 1 / nColPlot
  outFile <- paste0(curr_outFold, "/allRatios_cumsum_obs_permut.", plotType)
  do.call(plotType, list(outFile, height = myHeight * nRowPlot, width = myWidth * nColPlot))
  par(mfrow = c(nRowPlot, nColPlot))
}
################################****************************************************************************************
####################################################### ITERATE OVER RATIOS TO PLOT
################################****************************************************************************************
# For each ratio statistic: load observed per-TAD values and the permutation
# matrix, put them on a common "departure" scale, plot the cumulative curves
# and collect the AUC values returned by plot_cumsumDiff05_withLines().
all_ratios_all_auc_values <- list()
for(curr_ratio_type in allDown_limited) {
cat(paste0("*** START ", curr_ratio_type, "\n"))
# Observed values (named by TAD) and TAD x permutation matrix from step 8.
obs_curr_down <- eval(parse(text = load(paste0(pipOutFold, "/", script8_name, "/all_obs_", curr_ratio_type, ".Rdata"))))
permut_currDown <- eval(parse(text = load(paste0(pipOutFold, "/", script8_name, "/", curr_ratio_type, "_permDT.Rdata"))))
# ensure I used the same set of TADs for the permutation and for the calculation
# (NB: would also be possible to filter the obs_curr_down, but not the permut_currDown)
stopifnot(all(names(obs_curr_down) %in% rownames(permut_currDown)))
stopifnot(all(rownames(permut_currDown) %in% names(obs_curr_down)))
interReg <- intersect(names(obs_curr_down),rownames(permut_currDown) )
############################################################
############################################################ # filter the TADs and sort
############################################################
# Keep only the common TADs; sort observed values and each permutation
# column in decreasing order so the cumulative curves are comparable.
filter_obs_curr_down <- sort(obs_curr_down[interReg], decreasing = T)
filter_permut_currDown_unsort <- permut_currDown[interReg,]
stopifnot(length(filter_obs_curr_down) == nrow(filter_permut_currDown_unsort))
filter_permut_currDown <- apply(filter_permut_currDown_unsort, 2, sort, decreasing=T)
rownames(filter_permut_currDown) <- NULL
# NOTE(review): duplicate of the check two lines above -- presumably meant
# to check nrow(filter_permut_currDown) after the apply(); confirm.
stopifnot(length(filter_obs_curr_down) == nrow(filter_permut_currDown_unsort))
# FOR ratioDown => plot ratioConcord, departure from 0.5
if(curr_ratio_type == "ratioDown") {
my_stat_curr_ratio <- "ratioDown_Concord"
departureFromValue <- 0.5
# => Concord, departure 0.5
# Transform ratioDown -> ratioConcord
# change so that the ratioDown ranges between 0.5 and 1 (-> e.g. treats 0.1 as 0.9)
# transf. ratioDown -> ratioConcord
filter_obs_curr_down_half <- abs(filter_obs_curr_down - 0.5) + 0.5
filter_permut_currDown_half <- abs(filter_permut_currDown - 0.5) + 0.5
} else if(curr_ratio_type == "rescWeightedQQ" | curr_ratio_type == "rescWeighted" ) {
my_stat_curr_ratio <- paste0(curr_ratio_type, "_Concord")
departureFromValue <- 0.5
# => Concord, departure 0.5
# Transform rescWeightedQQ -> rescWeightedQQConcord
# change so that the ratioDown ranges between 0.5 and 1 (-> e.g. treats 0.1 as 0.9)
# transf. ratioDown -> ratioConcord
filter_obs_curr_down_half <- abs(filter_obs_curr_down - 0.5) + 0.5
filter_permut_currDown_half <- abs(filter_permut_currDown - 0.5) + 0.5
} else if(curr_ratio_type == "prodSignedRatio") {
my_stat_curr_ratio <- "prodSignedRatio"
departureFromValue <- 0
# => raw (departure 0)
# prodSignedRatio -> does not need to be transformed
filter_obs_curr_down_half <- filter_obs_curr_down
filter_permut_currDown_half <- filter_permut_currDown
}
# PLOT THE 1ST PLOT
# In separated mode, each ratio gets its own device; otherwise the panel
# device opened before the loop is reused.
if(plotSeparated) {
outFile <- paste0(curr_outFold, "/", curr_ratio_type, "_departure05_cumsum_obs_permut.", plotType)
do.call(plotType, list(outFile, height=myHeight, width=myWidth))
}
# Draws the cumulative observed-vs-permutation curves and returns the AUC
# values at the requested quantile threshold(s).
all_auc_values <- plot_cumsumDiff05_withLines(observ_vect = filter_obs_curr_down_half,
permut_DT = filter_permut_currDown_half,
all_quantThresh = all_aucQuantThresh,
my_stat = my_stat_curr_ratio,
departureValue = departureFromValue,
drawline=TRUE)
mtext(subtitDir, font=3)
if(plotSeparated) {
foo <- dev.off()
cat(paste0("... written: ", outFile, "\n"))
}
# # PLOT THE 2ND PLOT
# if(plotSeparated){
# outFile <- paste0(curr_outFold, "/", curr_ratio_type, "_departure05_cumsum_obs_permut_AUC.", plotType)
# do.call(plotType, list(outFile, height=myHeight, width=myWidth))
# }
# plot_cumsumDiff05_AUC2(filter_obs_curr_down_half,
# filter_permut_currDown_half,
# my_stat = my_stat_curr_ratio,
# departureValue = departureFromValue)
# mtext(subtitDir, font=3)
# if(plotSeparated){
# foo <- dev.off()
# cat(paste0("... written: ", outFile, "\n"))
# Accumulate this ratio's AUC values into the result list.
all_ratios_all_auc_values[[curr_ratio_type]] <- all_auc_values
# }
}
# Close the shared multi-panel device (assignment to foo silences dev.off()).
if(!plotSeparated){
foo <- dev.off()
cat(paste0("... written: ", outFile, "\n"))
}
# Persist the collected AUC values and log the run time.
outFile <- file.path(curr_outFold, "all_ratios_all_auc_values.Rdata")
save(all_ratios_all_auc_values, file = outFile)
cat(paste0("... written: ", outFile, "\n"))
txt <- paste0(startTime, "\n", Sys.time(), "\n")
printAndLog(txt, pipLogFile)
cat(paste0("*** DONE: ", script_name, "\n"))
| /14f5_cumulAllDown_limited_AUC_flexQuant.R | no_license | marzuf/TAD_DE_pipeline_v2 | R | false | false | 11,108 | r | #!/usr/bin/Rscript
# Pipeline step 14f5: plot cumulative departure curves for the "all down"
# ratio statistics (observed TADs vs. permutations) and collect AUC values
# at a configurable quantile threshold.
# Usage: Rscript 14f5_cumulAllDown_limited_AUC_flexQuant.R <setting_file.R>
options(scipen=100)
startTime <- Sys.time()
################ USE THE FOLLOWING FILES FROM PREVIOUS STEPS
# - script0: rna_geneList.Rdata
# - script0: pipeline_geneList.Rdata
# - script1: DE_topTable.Rdata
# - script1: DE_geneList.Rdata
# - script1: DE_rnaseqDT.Rdata
# - script8: all_obs_ratioDown.Rdata
# - script9: emp_pval_meanLogFC.Rdata
# - script10: emp_pval_meanCorr.Rdata
################################################################################
################ OUTPUT
# - emp_pval_combined.Rdata + tables
################################################################################
# SSHFS toggles development paths when the project is mounted remotely.
SSHFS <- F
setDir <- ifelse(SSHFS, "/media/electron", "")
# Single mandatory argument: the analysis-specific setting file.
args <- commandArgs(trailingOnly = TRUE)
stopifnot(length(args) == 1)
settingF <- args[1]
stopifnot(file.exists(settingF))
pipScriptDir <- paste0(setDir, "/mnt/ed4/marie/scripts/TAD_DE_pipeline_v2")
script0_name <- "0_prepGeneData"
script1_name <- "1_runGeneDE"
script8_name <- "8c_runAllDown"
script_name <- "14f5_cumulAllDown_limited_AUC_flexQuant"
stopifnot(file.exists(paste0(pipScriptDir, "/", script_name, ".R")))
cat(paste0("> START ", script_name, "\n"))
# main_settings.R and the setting file define the globals used below
# (pipOutFold, nCpu, allDown_limited, cond1/cond2, sample*_file, ...).
source("main_settings.R")
source(settingF)
source(paste0(pipScriptDir, "/", "TAD_DE_utils.R"))
suppressPackageStartupMessages(library(foreach, warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE)) # error bar
suppressPackageStartupMessages(library(doMC, warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE)) # error bar
suppressPackageStartupMessages(library(flux, warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE)) # error bar
registerDoMC(ifelse(SSHFS,2, nCpu)) # loaded from main_settings.R
# create the directories
curr_outFold <- paste0(pipOutFold, "/", script_name)
system(paste0("mkdir -p ", curr_outFold))
# Per-run log file, timestamped; any previous log with the same name removed.
pipLogFile <- paste0(pipOutFold, "/", format(Sys.time(), "%Y%d%m%H%M%S"),"_", script_name, "_logFile.txt")
system(paste0("rm -f ", pipLogFile))
# ADDED 27.11.2018 to check using other files
# Record the key input settings in the log for reproducibility.
txt <- paste0("inputDataType\t=\t", inputDataType, "\n")
printAndLog(txt, pipLogFile)
txt <- paste0("gene2tadDT_file\t=\t", gene2tadDT_file, "\n")
printAndLog(txt, pipLogFile)
txt <- paste0("TADpos_file\t=\t", TADpos_file, "\n")
printAndLog(txt, pipLogFile)
txt <- paste0("settingF\t=\t", settingF, "\n")
printAndLog(txt, pipLogFile)
# Quantile(s) of the permutation curves at which the AUC is evaluated.
all_aucQuantThresh <- c(0.95)
# 14f: leg_xpos <- ifelse(observed_auc <= max(density_permut$x), observed_auc, max(density_permut$x))
# 14f2: leg_xpos <- observed_auc
################################****************************************************************************************
####################################################### PREPARE INPUT
################################****************************************************************************************
# INPUT DATA
# Gene-to-TAD assignment table (one row per gene).
gene2tadDT <- read.delim(gene2tadDT_file, header=F, col.names = c("entrezID", "chromo", "start", "end", "region"), stringsAsFactors = F)
gene2tadDT$entrezID <- as.character(gene2tadDT$entrezID)
# UPDATE SELECT THE GENES ACCORDING TO THE SETTINGS PREPARED IN 0_PREPGENEDATA
initList <- eval(parse(text = load(paste0(pipOutFold, "/", script0_name, "/rna_geneList.Rdata"))))
geneList <- eval(parse(text = load(paste0(pipOutFold, "/", script0_name, "/pipeline_geneList.Rdata"))))
txt <- paste0(toupper(script_name), "> Start with # genes: ", length(geneList), "/", length(initList), "\n")
printAndLog(txt, pipLogFile)
stopifnot(!any(duplicated(names(geneList))))
# Restrict the gene-to-TAD table to the pipeline genes; count genes per TAD.
gene2tadDT <- gene2tadDT[gene2tadDT$entrezID %in% geneList,]
geneNbr <- setNames(as.numeric(table(gene2tadDT$region)), names(table(gene2tadDT$region)))
### SET OUTPUT
# Figure device and dimensions (svg by default; png uses pixel sizes).
plotType <- "svg"
myHeight <- ifelse(plotType == "png", 480 , 7)
myWidth <- ifelse(plotType == "png", 600, 10)
# if plotSeparated == TRUE => 1 plot per ratio, otherwise all on the same figure (#x2 plots)
plotSeparated <- F
# "permThresh" the quantile of permutations to take is loaded from main_settings.R
###############
##### retrieve the direction of up/down
###############
# Helper: load a single object from an .Rdata file and return it.
# Replaces the eval(parse(text = load(...))) anti-pattern used elsewhere in
# the pipeline; assumes each .Rdata file stores exactly one object (pipeline
# convention -- TODO confirm for non-standard inputs).
loadRdata <- function(rdataFile) {
  get(load(rdataFile))
}
# DE results from step 1: topTable (logFC per gene), the DE gene list and the
# expression matrix used for the DE analysis; sample IDs for each condition.
DE_topTable <- loadRdata(paste0(pipOutFold, "/", script1_name, "/DE_topTable.Rdata"))
DE_geneList <- loadRdata(paste0(pipOutFold, "/", script1_name, "/DE_geneList.Rdata"))
exprDT <- loadRdata(paste0(pipOutFold, "/", script1_name, "/DE_rnaseqDT.Rdata"))
# samp1 <- loadRdata(paste0(setDir, "/", sample1_file))
# samp2 <- loadRdata(paste0(setDir, "/", sample2_file))
samp1 <- loadRdata(paste0(sample1_file))
samp2 <- loadRdata(paste0(sample2_file))
# Restrict the DE table to the pipeline genes and sanity-check consistency
# between the DE table, the gene list and the expression matrix.
DE_topTable <- DE_topTable[DE_topTable$genes %in% names(DE_geneList),]
stopifnot(all(DE_topTable$genes %in% names(DE_geneList)))
stopifnot(!any(duplicated(names(DE_geneList))))
stopifnot(all(colnames(exprDT) %in% c(samp1, samp2)))
stopifnot(all(samp1 %in% colnames(exprDT)))
stopifnot(all(samp2 %in% colnames(exprDT)))
# Use the most down-regulated gene (smallest logFC) to infer which condition
# "down" refers to: the condition with higher mean expression for that gene
# is the one genes are down-regulated from.
maxDownGene <- DE_topTable$genes[which.min(DE_topTable$logFC)]
stopifnot(maxDownGene %in% rownames(exprDT))
mean_expr1 <- mean(unlist(c(exprDT[maxDownGene, samp1])), na.rm = TRUE)
mean_expr2 <- mean(unlist(c(exprDT[maxDownGene, samp2])), na.rm = TRUE)
if (mean_expr1 > mean_expr2) {
  subtitDir <- paste0("down: ", toupper(cond1), " > ", toupper(cond2))
} else {
  subtitDir <- paste0("down: ", toupper(cond2), " > ", toupper(cond1))
}
# When all ratios go on a single figure, open one multi-panel device up front
# (one panel per ratio type, laid out on 2 columns).
if (!plotSeparated) {
  nColPlot <- 2
  # nRowPlot <- length(allDown_limited)*2/nColPlot
  nRowPlot <- length(allDown_limited) * 1 / nColPlot
  outFile <- paste0(curr_outFold, "/allRatios_cumsum_obs_permut.", plotType)
  do.call(plotType, list(outFile, height = myHeight * nRowPlot, width = myWidth * nColPlot))
  par(mfrow = c(nRowPlot, nColPlot))
}
################################****************************************************************************************
####################################################### ITERATE OVER RATIOS TO PLOT
################################****************************************************************************************
# Result accumulator: one entry of AUC values per ratio statistic.
all_ratios_all_auc_values <- list()
for(curr_ratio_type in allDown_limited) {
cat(paste0("*** START ", curr_ratio_type, "\n"))
obs_curr_down <- eval(parse(text = load(paste0(pipOutFold, "/", script8_name, "/all_obs_", curr_ratio_type, ".Rdata"))))
permut_currDown <- eval(parse(text = load(paste0(pipOutFold, "/", script8_name, "/", curr_ratio_type, "_permDT.Rdata"))))
# ensure I used the same set of TADs for the permutation and for the calculation
# (NB: would also be possible to filter the obs_curr_down, but not the permut_currDown)
stopifnot(all(names(obs_curr_down) %in% rownames(permut_currDown)))
stopifnot(all(rownames(permut_currDown) %in% names(obs_curr_down)))
interReg <- intersect(names(obs_curr_down),rownames(permut_currDown) )
############################################################
############################################################ # filter the TADs and sort
############################################################
filter_obs_curr_down <- sort(obs_curr_down[interReg], decreasing = T)
filter_permut_currDown_unsort <- permut_currDown[interReg,]
stopifnot(length(filter_obs_curr_down) == nrow(filter_permut_currDown_unsort))
filter_permut_currDown <- apply(filter_permut_currDown_unsort, 2, sort, decreasing=T)
rownames(filter_permut_currDown) <- NULL
stopifnot(length(filter_obs_curr_down) == nrow(filter_permut_currDown_unsort))
# FOR ratioDown => plot ratioConcord, departure from 0.5
if(curr_ratio_type == "ratioDown") {
my_stat_curr_ratio <- "ratioDown_Concord"
departureFromValue <- 0.5
# => Concord, departure 0.5
# Transform ratioDown -> ratioConcord
# change so that the ratioDown ranges between 0.5 and 1 (-> e.g. treats 0.1 as 0.9)
# transf. ratioDown -> ratioConcord
filter_obs_curr_down_half <- abs(filter_obs_curr_down - 0.5) + 0.5
filter_permut_currDown_half <- abs(filter_permut_currDown - 0.5) + 0.5
} else if(curr_ratio_type == "rescWeightedQQ" | curr_ratio_type == "rescWeighted" ) {
my_stat_curr_ratio <- paste0(curr_ratio_type, "_Concord")
departureFromValue <- 0.5
# => Concord, departure 0.5
# Transform rescWeightedQQ -> rescWeightedQQConcord
# change so that the ratioDown ranges between 0.5 and 1 (-> e.g. treats 0.1 as 0.9)
# transf. ratioDown -> ratioConcord
filter_obs_curr_down_half <- abs(filter_obs_curr_down - 0.5) + 0.5
filter_permut_currDown_half <- abs(filter_permut_currDown - 0.5) + 0.5
} else if(curr_ratio_type == "prodSignedRatio") {
my_stat_curr_ratio <- "prodSignedRatio"
departureFromValue <- 0
# => raw (departure 0)
# prodSignedRatio -> does not need to be transformed
filter_obs_curr_down_half <- filter_obs_curr_down
filter_permut_currDown_half <- filter_permut_currDown
}
# PLOT THE 1ST PLOT
if(plotSeparated) {
outFile <- paste0(curr_outFold, "/", curr_ratio_type, "_departure05_cumsum_obs_permut.", plotType)
do.call(plotType, list(outFile, height=myHeight, width=myWidth))
}
all_auc_values <- plot_cumsumDiff05_withLines(observ_vect = filter_obs_curr_down_half,
permut_DT = filter_permut_currDown_half,
all_quantThresh = all_aucQuantThresh,
my_stat = my_stat_curr_ratio,
departureValue = departureFromValue,
drawline=TRUE)
mtext(subtitDir, font=3)
if(plotSeparated) {
foo <- dev.off()
cat(paste0("... written: ", outFile, "\n"))
}
# # PLOT THE 2ND PLOT
# if(plotSeparated){
# outFile <- paste0(curr_outFold, "/", curr_ratio_type, "_departure05_cumsum_obs_permut_AUC.", plotType)
# do.call(plotType, list(outFile, height=myHeight, width=myWidth))
# }
# plot_cumsumDiff05_AUC2(filter_obs_curr_down_half,
# filter_permut_currDown_half,
# my_stat = my_stat_curr_ratio,
# departureValue = departureFromValue)
# mtext(subtitDir, font=3)
# if(plotSeparated){
# foo <- dev.off()
# cat(paste0("... written: ", outFile, "\n"))
all_ratios_all_auc_values[[curr_ratio_type]] <- all_auc_values
# }
}
# Close the shared device opened before the loop (per-ratio devices were
# already closed inside the loop).
if(!plotSeparated){
foo <- dev.off()
cat(paste0("... written: ", outFile, "\n"))
}
# Persist the collected AUC values for all ratio types.
outFile <- file.path(curr_outFold, "all_ratios_all_auc_values.Rdata")
save(all_ratios_all_auc_values, file = outFile)
cat(paste0("... written: ", outFile, "\n"))
# Log the start/end timestamps and signal completion of this pipeline step.
txt <- paste0(startTime, "\n", Sys.time(), "\n")
printAndLog(txt, pipLogFile)
cat(paste0("*** DONE: ", script_name, "\n"))
|
E001TM53.R GISS Model E gas 06/00
WARNING: The boundary conditions used here may not be what you want
and no tuning has yet been done.
Please check and see before running
E001TM53: 53 layer 4x5 model sample tracers
Air mass, SF6, RN222, CO2, 14CO2, CFC-11, CH4, N2O, linearizedO3
Preprocessor Options
#define TRACERS_ON ! include tracers code
#define TRACERS_SPECIAL_Lerner
End Preprocessor Options
Object modules: (in order of decreasing priority)
RES_M53 ! horiz/vert resolution
MODEL_COM GEOM_B IORSF ! model variables and geometry
MODELE ! Main and model overhead
PARAM PARSER ! parameter database
ATMDYN_COM ATMDYN MOMEN2ND ! atmospheric dynamics
STRATDYN STRAT_DIAG ! stratospheric dynamics (incl. gw drag)
QUS_COM QUSDEF QUS_DRV ! advection of tracers
TQUS_DRV ! advection of Q
TRACER_COM TRACERS_DRV ! configurable tracer code
TRACERS ! generic tracer code
TRDIAG_COM TRACER_PRT ! tracer diagnostic printout
! use next line if #define TRACERS_SPECIAL_Lerner
TRACER_SPECIAL_Lerner ! routines called when TRACERS_SPECIAL_Lerner is activated
CLOUDS CLOUDS_DRV CLOUDS_COM ! clouds modules
SURFACE FLUXES ! surface calculation and fluxes
GHY_COM GHY_DRV GHY ! land surface and soils
PBL_COM PBL_DRV PBL ! atmospheric pbl
! pick exactly one of the next 2 choices: ATURB or DRYCNV
! ATURB ! turbulence in whole atmosphere
DRYCNV ! drycnv
LAKES_COM LAKES ! lake modules
SEAICE SEAICE_DRV ! seaice modules
LANDICE LANDICE_DRV ! land ice modules
OCEAN OCNML ! ocean modules
ICEDYN_DRV ICEDYN ! ice dynamics modules
SNOW_DRV SNOW ! snow model
RAD_COM RAD_DRV RADIATION ! radiation modules
DIAG_COM DIAG DEFACC DIAG_PRT ! diagnostics
CONST FFT72 UTILDBL SYSTEM ! utilities
POUT ! post-processing output
Data input files:
AIC=AIC.RES_M53.D771201
GIC=GIC.rsfB357M12.1DEC1956.1.ext ! initial conditions (ground)
! OHT=OTSPEC.RB399AM12.M250D ! not needed if KOCEAN=0
OCNML=Z1O.B4X5.cor ! needed only for postprocessing
OSST=OST4X5.B.1946-55avg.Hadl1.1 SICE=SICE4X5.B.1946-55avg.Hadl1.1 ! ocn
CDN=CD4X500S VEG=V72X46.1.cor2
SOIL=S4X50093 TOPO=Z72X46N.cor4 ! bdy.cond
REG=REG4X5 ! special regions-diag
RVR=RD4X525.RVR ! river direction file
ZVAR=ZVAR4X5 ! topographic variation for gwdrag
RADN1=sgpgxg.table8 ! rad.tables
RADN2=kdist33.tautab8
RADN3=miescatpar.abcdv
RADN4=o3Prather1979-80.London1957-70
RADN5=TROAER.1875-1990.Jun2002
RADN6=dust8.tau9x8x13
RADN7=STRATAER.VOL.1850-1999.Apr02
RADN8=cloud.epsilon4.72x46
! RADN9=solar.lean99.uvflux ! need KSOLAR<2
RADN9=solar.lean02.ann.uvflux ! need KSOLAR=2
RADNA=O3.1850-2050.depl.rec ! with recovery of O3 after 2000
! RADNA=O3.1850-2050.depl.con ! O3 'constant' after 2000
RADNB=o3WangJacob.1890.1979
RADNE=topcld.trscat8
GHG=GHG.1850-2050.Mar2002
dH2O=dH2O_by_CH4_monthly
TOP_INDEX=top_index_72x46.ij
CO2_IC=CO2ijl_IC_Jan1_scale334_M23 !wofsy+B140TQaM9
CO2_FOS_FUEL=CO2_sources/gcm_data/CO2FOS_MRL_4X5
CO2_FERT=CO2_sources/gcm_data/CO2fert01_4X5
CO2_REGROWTH=CO2_sources/gcm_data/CO2_Nforest_4X5
CO2_LAND_USE=CO2_sources/gcm_data/CO2DEF_HOU_4X5
CO2_VEG=CO2_sources/gcm_data/CO2VEG_MON_4X5 ! Monthly source
CO2_OCEAN=CO2_sources/gcm_data/CO2_4X5_Ocean_flux02 ! Monthly source
14CO2_IC_DATA=workshop.14co2 ! for 14CO2 Oct. 1963
LINOZ_TABLE=chemtab_solfb_31aug01.txt ! linoz O3 coefficients
N2O_TABLE=N2Oloss.table ! Stratosphere tracer forcing
CFC11_TABLE=F11loss.table ! Stratosphere tracer forcing
CH4_TABLE=CH4chem.table ! Stratosphere tracer forcing
CH4_TROP_FRQ=CLIM.RUN.OHCH4.FRQ !tropo loss frequency table (9 layers, n-grid)
N2O_IC=N2O_Shindell_Jan9293_M23 !initial conditions
CH4_IC=Wofsy_data_CH4 !wofsy jl initial conditions
CH4_ANIMALS=methane/gcm_data/CH4ANIMLS_4X5 ! Annual
CH4_COALMINE=methane/gcm_data/CH4COAL_4X5 ! Annual
CH4_GASLEAK=methane/gcm_data/CH4GASLEAK_4X5 ! Annual
CH4_GASVENT=methane/gcm_data/CH4GASVENT_4X5 ! Annual
CH4_CITYDUMP=methane/gcm_data/CH4MSW_4X5 ! Annual
CH4_SOIL_ABS=methane/gcm_data/CH4SOILABS_4X5 ! Annual
CH4_TERMITES=methane/gcm_data/CH4TRMITE_4X5 ! Annual
CH4_COALBURN=methane/gcm_data/COAL_BURN_BY_POP84_4X5 ! Annual
CH4_BURN=methane/gcm_data/CH4BURN_4X5 ! Monthly
CH4_RICE=methane/gcm_data/CH4RICEC_4X5 ! Monthly
CH4_WETL=methane/gcm_data/CH4WETL+TUNDRA_4X5 ! Monthly
Label and Namelist:
E001TM53 (4x5, 53 layer model, sample tracers)
R=00BG/B
&&PARAMETERS
X_SDRAG=.0005,.00005 ! used for lin. sdrag above P_SDRAG mb
C_SDRAG=0. ! no constant sdrag
P_SDRAG=.1 ! lin. sdrag above .1mb (top 2 layers) except near poles
PP_SDRAG=1. ! lin. sdrag above 1.mb near poles (top 4 layers)
ANG_SDRAG=1 ! if =1: sdrag conserves ang mom.
KOCEAN=0
U00ice=.85 ! tune this first to get reas.alb/cldcvr (range: .4-.6), then
HRMAX=400. ! tune this to get rad.equilibrium (range: 100.-1500. meters)
H2ObyCH4=1. ! activates strat.H2O generated by CH4
KVEGA6=3 ! 6-band albedo (Schramm)
KSOLAR=2
LMCM=26 ! max level of moist convection
XCDNST=300.,10000. ! strat. gw drag parameters
DT=180., ! from default: DTsrc=3600.,
dt_UVfilter=180.,
NIsurf=4, ! number of surface time steps
NSUBDD=0 ! saving sub-daily diags
Kvflxo=0 ! saving VFLXO (daily)
KCOPY=2 ! saving acc + rsf
isccp_diags=0
DEFTHRESH=0.00003 ! deformation threshold (default = 15d-6)
PBREAK=200. ! p level for breaking gravity waves
CDEF=3. ! parameter for GW DEF drag
CMTN=.5 ! parameter for GW MTN drag
CMC=0.0000002 ! parameter for GW Moist Convective drag
to_volume_MixRat=1,1,1,1,1,1,1,1,1 ! for tracer printout
&&END_PARAMETERS
&INPUTZ
YEARI=1950,MONTHI=1,DATEI=1,HOURI=0, ! from default: IYEAR1=YEARI
YEARE=1956,MONTHE=1,DATEE=1,HOURE=0, KDIAG=0,2,2,9*0,
YEARE=1950,MONTHE=2,
ISTART=2,IRANDI=0, YEARE=1950,MONTHE=1,HOURE=1,IWRITE=1,JWRITE=1,
&END
| /source/model/E001TM53.R | permissive | Climostatistics/giss_model_e | R | false | false | 6,556 | r | E001TM53.R GISS Model E gas 06/00
WARNING: The boundary conditions used here may not be what you want
and no tuning has yet been done.
Please check and see before running
E001TM53: 53 layer 4x5 model sample tracers
Air mass, SF6, RN222, CO2, 14CO2, CFC-11, CH4, N2O, linearizedO3
Preprocessor Options
#define TRACERS_ON ! include tracers code
#define TRACERS_SPECIAL_Lerner
End Preprocessor Options
Object modules: (in order of decreasing priority)
RES_M53 ! horiz/vert resolution
MODEL_COM GEOM_B IORSF ! model variables and geometry
MODELE ! Main and model overhead
PARAM PARSER ! parameter database
ATMDYN_COM ATMDYN MOMEN2ND ! atmospheric dynamics
STRATDYN STRAT_DIAG ! strospheric dynamics (incl. gw drag)
QUS_COM QUSDEF QUS_DRV ! advection of tracers
TQUS_DRV ! advection of Q
TRACER_COM TRACERS_DRV ! configurable tracer code
TRACERS ! generic tracer code
TRDIAG_COM TRACER_PRT ! tracer diagnostic printout
! use next line if #define TRACERS_SPECIAL_Lerner
TRACER_SPECIAL_Lerner ! routines called when TRACERS_SPECIAL_Lerner is activated
CLOUDS CLOUDS_DRV CLOUDS_COM ! clouds modules
SURFACE FLUXES ! surface calculation and fluxes
GHY_COM GHY_DRV GHY ! land surface and soils
PBL_COM PBL_DRV PBL ! atmospheric pbl
! pick exactly one of the next 2 choices: ATURB or DRYCNV
! ATURB ! turbulence in whole atmosphere
DRYCNV ! drycnv
LAKES_COM LAKES ! lake modules
SEAICE SEAICE_DRV ! seaice modules
LANDICE LANDICE_DRV ! land ice modules
OCEAN OCNML ! ocean modules
ICEDYN_DRV ICEDYN ! ice dynamics modules
SNOW_DRV SNOW ! snow model
RAD_COM RAD_DRV RADIATION ! radiation modules
DIAG_COM DIAG DEFACC DIAG_PRT ! diagnostics
CONST FFT72 UTILDBL SYSTEM ! utilities
POUT ! post-processing output
Data input files:
AIC=AIC.RES_M53.D771201
GIC=GIC.rsfB357M12.1DEC1956.1.ext ! initial conditions (ground)
! OHT=OTSPEC.RB399AM12.M250D ! not needed if KOCEAN=0
OCNML=Z1O.B4X5.cor ! needed only for postprocessing
OSST=OST4X5.B.1946-55avg.Hadl1.1 SICE=SICE4X5.B.1946-55avg.Hadl1.1 ! ocn
CDN=CD4X500S VEG=V72X46.1.cor2
SOIL=S4X50093 TOPO=Z72X46N.cor4 ! bdy.cond
REG=REG4X5 ! special regions-diag
RVR=RD4X525.RVR ! river direction file
ZVAR=ZVAR4X5 ! topographic variation for gwdrag
RADN1=sgpgxg.table8 ! rad.tables
RADN2=kdist33.tautab8
RADN3=miescatpar.abcdv
RADN4=o3Prather1979-80.London1957-70
RADN5=TROAER.1875-1990.Jun2002
RADN6=dust8.tau9x8x13
RADN7=STRATAER.VOL.1850-1999.Apr02
RADN8=cloud.epsilon4.72x46
! RADN9=solar.lean99.uvflux ! need KSOLAR<2
RADN9=solar.lean02.ann.uvflux ! need KSOLAR=2
RADNA=O3.1850-2050.depl.rec ! with recovery of O3 after 2000
! RADNA=O3.1850-2050.depl.con ! O3 'constant' after 2000
RADNB=o3WangJacob.1890.1979
RADNE=topcld.trscat8
GHG=GHG.1850-2050.Mar2002
dH2O=dH2O_by_CH4_monthly
TOP_INDEX=top_index_72x46.ij
CO2_IC=CO2ijl_IC_Jan1_scale334_M23 !wofsy+B140TQaM9
CO2_FOS_FUEL=CO2_sources/gcm_data/CO2FOS_MRL_4X5
CO2_FERT=CO2_sources/gcm_data/CO2fert01_4X5
CO2_REGROWTH=CO2_sources/gcm_data/CO2_Nforest_4X5
CO2_LAND_USE=CO2_sources/gcm_data/CO2DEF_HOU_4X5
CO2_VEG=CO2_sources/gcm_data/CO2VEG_MON_4X5 ! Monthly source
CO2_OCEAN=CO2_sources/gcm_data/CO2_4X5_Ocean_flux02 ! Monthly source
14CO2_IC_DATA=workshop.14co2 ! for 14CO2 Oct. 1963
LINOZ_TABLE=chemtab_solfb_31aug01.txt ! linoz O3 coefficients
N2O_TABLE=N2Oloss.table ! Stratosphere tracer forcing
CFC11_TABLE=F11loss.table ! Stratosphere tracer forcing
CH4_TABLE=CH4chem.table ! Stratosphere tracer forcing
CH4_TROP_FRQ=CLIM.RUN.OHCH4.FRQ !tropo loss frequency table (9 layers, n-grid)
N2O_IC=N2O_Shindell_Jan9293_M23 !initial conditions
CH4_IC=Wofsy_data_CH4 !wofsy jl initial conditions
CH4_ANIMALS=methane/gcm_data/CH4ANIMLS_4X5 ! Annual
CH4_COALMINE=methane/gcm_data/CH4COAL_4X5 ! Annual
CH4_GASLEAK=methane/gcm_data/CH4GASLEAK_4X5 ! Annual
CH4_GASVENT=methane/gcm_data/CH4GASVENT_4X5 ! Annual
CH4_CITYDUMP=methane/gcm_data/CH4MSW_4X5 ! Annual
CH4_SOIL_ABS=methane/gcm_data/CH4SOILABS_4X5 ! Annual
CH4_TERMITES=methane/gcm_data/CH4TRMITE_4X5 ! Annual
CH4_COALBURN=methane/gcm_data/COAL_BURN_BY_POP84_4X5 ! Annual
CH4_BURN=methane/gcm_data/CH4BURN_4X5 ! Monthly
CH4_RICE=methane/gcm_data/CH4RICEC_4X5 ! Monthly
CH4_WETL=methane/gcm_data/CH4WETL+TUNDRA_4X5 ! Monthly
Label and Namelist:
E001TM53 (4x5, 53 layer model, sample tracers)
R=00BG/B
&&PARAMETERS
X_SDRAG=.0005,.00005 ! used for lin. sdrag above P_SDRAG mb
C_SDRAG=0. ! no constant sdrag
P_SDRAG=.1 ! lin. sdrag above .1mb (top 2 layers) except near poles
PP_SDRAG=1. ! lin. sdrag above 1.mb near poles (top 4 layers)
ANG_SDRAG=1 ! if =1: sdrag conserves ang mom.
KOCEAN=0
U00ice=.85 ! tune this first to get reas.alb/cldcvr (range: .4-.6), then
HRMAX=400. ! tune this to get rad.equilibrium (range: 100.-1500. meters)
H2ObyCH4=1. ! activates strat.H2O generated by CH4
KVEGA6=3 ! 6-band albedo (Schramm)
KSOLAR=2
LMCM=26 ! max level of moist convection
XCDNST=300.,10000. ! strat. gw drag parameters
DT=180., ! from default: DTsrc=3600.,
dt_UVfilter=180.,
NIsurf=4, ! number of surface time steps
NSUBDD=0 ! saving sub-daily diags
Kvflxo=0 ! saving VFLXO (daily)
KCOPY=2 ! saving acc + rsf
isccp_diags=0
DEFTHRESH=0.00003 ! deformation threshold (default = 15d-6)
PBREAK=200. ! p level for breaking gravity waves
CDEF=3. ! parameter for GW DEF drag
CMTN=.5 ! parameter for GW MTN drag
CMC=0.0000002 ! parameter for GW Moist Convective drag
to_volume_MixRat=1,1,1,1,1,1,1,1,1 ! for tracer printout
&&END_PARAMETERS
&INPUTZ
YEARI=1950,MONTHI=1,DATEI=1,HOURI=0, ! from default: IYEAR1=YEARI
YEARE=1956,MONTHE=1,DATEE=1,HOURE=0, KDIAG=0,2,2,9*0,
YEARE=1950,MONTHE=2,
ISTART=2,IRANDI=0, YEARE=1950,MONTHE=1,HOURE=1,IWRITE=1,JWRITE=1,
&END
|
# Drop NULL elements from a list, keeping the names of the surviving entries.
dc_compact <- function(l) l[!vapply(l, is.null, logical(1))]
# Host name of the DataCite Solr search endpoint.
dc_base <- function() {
  "search.datacite.org"
}

# Base URL of the DataCite OAI-PMH endpoint.
dc_oai_base <- function() {
  "https://oai.datacite.org/oai"
}
# Extract element `name` from every element of `x`.
# Without a `type` template a list is returned; with one, a typed vector
# (via vapply, which also validates each element against the template).
pluck <- function(x, name, type) {
  if (missing(type)) {
    lapply(x, function(item) item[[name]])
  } else {
    vapply(x, function(item) item[[name]], FUN.VALUE = type)
  }
}
# Return the final element of a vector or list (unnamed, via [[ extraction).
last <- function(x) {
  x[[length(x)]]
}
# Return the first match of `pattern` within each element of `str`;
# elements with no match are dropped (regmatches/regexpr semantics).
strextract <- function(str, pattern) {
  match_pos <- regexpr(pattern, str)
  regmatches(str, match_pos)
}
# Build a solrium client pointed at the DataCite Solr API over HTTPS.
# `proxy` is forwarded unchanged to SolrClient$new() (may be NULL).
make_dc_conn <- function(proxy) {
  solrium::SolrClient$new(
    host = dc_base(),
    path = "api",
    scheme = "https",
    port = NULL,
    errors = "complete",
    proxy = proxy
  )
}
| /R/zzz.r | permissive | shivam11/rdatacite | R | false | false | 585 | r | dc_compact <- function(l) Filter(Negate(is.null), l)
dc_base <- function() "search.datacite.org"
dc_oai_base <- function() "https://oai.datacite.org/oai"
pluck <- function(x, name, type) {
if (missing(type)) {
lapply(x, "[[", name)
} else {
vapply(x, "[[", name, FUN.VALUE = type)
}
}
last <- function(x) x[length(x)][[1]]
strextract <- function(str, pattern) regmatches(str, regexpr(pattern, str))
make_dc_conn <- function(proxy) {
solrium::SolrClient$new(host = dc_base(),
path = "api", scheme = "https", port = NULL, errors = "complete",
proxy = proxy)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/timespans.r, R/intervals.r
\docType{methods}
\name{time_length}
\alias{time_length}
\alias{time_length,Interval-method}
\title{Compute the exact length of a time span}
\usage{
time_length(x, unit = "second")
\S4method{time_length}{Interval}(x, unit = "second")
}
\arguments{
\item{x}{a duration, period, difftime or interval}
\item{unit}{a character string that specifies which time units to use}
}
\value{
the length of the interval in the specified unit. A negative number
connotes a negative interval or duration
}
\description{
Compute the exact length of a time span
}
\details{
When \code{x} is an \linkS4class{Interval} object and
\code{unit} are years or months, \code{time_length()} takes into account
the fact that all months and years don't have the same number of days.
When \code{x} is a \linkS4class{Duration}, \linkS4class{Period}
or \code{\link[=difftime]{difftime()}} object, length in months or years is based on their
most common lengths in seconds (see \code{\link[=timespan]{timespan()}}).
}
\examples{
int <- interval(ymd("1980-01-01"), ymd("2014-09-18"))
time_length(int, "week")
# Exact age
time_length(int, "year")
# Age at last anniversary
trunc(time_length(int, "year"))
# Example of difference between intervals and durations
int <- interval(ymd("1900-01-01"), ymd("1999-12-31"))
time_length(int, "year")
time_length(as.duration(int), "year")
}
\seealso{
\code{\link[=timespan]{timespan()}}
}
\keyword{chron}
\keyword{math}
\keyword{methods}
\keyword{period}
| /man/time_length.Rd | no_license | lizl90/lubridate | R | false | true | 1,570 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/timespans.r, R/intervals.r
\docType{methods}
\name{time_length}
\alias{time_length}
\alias{time_length,Interval-method}
\title{Compute the exact length of a time span}
\usage{
time_length(x, unit = "second")
\S4method{time_length}{Interval}(x, unit = "second")
}
\arguments{
\item{x}{a duration, period, difftime or interval}
\item{unit}{a character string that specifies with time units to use}
}
\value{
the length of the interval in the specified unit. A negative number
connotes a negative interval or duration
}
\description{
Compute the exact length of a time span
}
\details{
When \code{x} is an \linkS4class{Interval} object and
\code{unit} are years or months, \code{time_length()} takes into account
the fact that all months and years don't have the same number of days.
When \code{x} is a \linkS4class{Duration}, \linkS4class{Period}
or \code{\link[=difftime]{difftime()}} object, length in months or years is based on their
most common lengths in seconds (see \code{\link[=timespan]{timespan()}}).
}
\examples{
int <- interval(ymd("1980-01-01"), ymd("2014-09-18"))
time_length(int, "week")
# Exact age
time_length(int, "year")
# Age at last anniversary
trunc(time_length(int, "year"))
# Example of difference between intervals and durations
int <- interval(ymd("1900-01-01"), ymd("1999-12-31"))
time_length(int, "year")
time_length(as.duration(int), "year")
}
\seealso{
\code{\link[=timespan]{timespan()}}
}
\keyword{chron}
\keyword{math}
\keyword{methods}
\keyword{period}
|
# User Interface
# Corsica Team App
# Last edited 1-26-2016
# Manny

# UI definition: a one-tab navbar page showing a filterable team-stats table.
# All widget IDs ("strength", "venue", "score", ...) are read by the matching
# server code, which also renders the uiOutput placeholders ("s1", "s2") and
# the data table ("t1").
# Fix: use the literal FALSE instead of the reassignable alias F.
shinyUI(navbarPage("Teams", id = "tab", inverse = FALSE, windowTitle = "Corsica | Teams",
  tabPanel("Team Stats", value = "stats",
    # Formatting: page-wide fonts/colors, a right-align helper class,
    # suppression of Shiny error output, and the navbar/table color theme.
    tags$head(tags$style(".container-fluid {font-size: 13px; color: #2B547E; background-color: #E8E8E8;}")),
    tags$head(tags$style(".rightAlign{float:right;}")),
    tags$style(type = "text/css", ".shiny-output-error {visibility: hidden;}", ".shiny-output-error:before { visibility: hidden;}"),
    tags$style(".navbar-default {background-color: #4863A0; border-color: #ffffff;}"),
    tags$style(".navbar-default .navbar-nav li a {background-color: #4863A0; color: #ffffff;}"),
    tags$style(".navbar-default .navbar-nav .active a {background-color: #ffffff; color: #4863A0;}"),
    tags$style(".navbar-default .navbar-brand {background-color: #4863A0; color: #ffffff;}"),
    tags$style(".dataTable thead tr {background-color: #4863A0; color: #ffffff;}"),
    # Header text: page title plus a link back to the main Corsica site.
    fluidRow(
      column(6, h2("Team Stats")),
      column(6, tags$div(class = "rightAlign", checked = NA, tags$a(href = "http://www.corsica.hockey/", target = "_parent", tags$h2("Corsica ↩", style = "color: #2B547E;"))))
    ),
    # Help text
    fluidRow(
      column(6, helpText("Loading the data may take a few seconds. Thanks for your patience.")),
      column(6, helpText("", class = "rightAlign"))
    ),
    # Input row 1: server-rendered season selector plus the main filters.
    fluidRow(
      column(2, uiOutput("s1")),
      column(2, selectInput("strength", "Strength State", choices = c("All", "5v5", "5v4", "4v5", "4v4", "5v3", "3v5", "3v3", "4v3", "3v4"), selected = "5v5")),
      column(2, selectInput("venue", "Venue", choices = c("Any", "Home", "Away"), selected = "Any")),
      column(3, selectInput("adjust", "Adjustment", choices = c("None", "Score and Venue", "Score, Zone and Venue"), selected = "None")),
      column(2, selectInput("type", "Season Type", choices = c("Regular", "Playoffs", "Both"), selected = "Regular"))
    ),
    # Input row 2: secondary filters, download button, aggregation toggle.
    fluidRow(
      column(2, uiOutput("s2")),
      column(2, selectInput("score", "Score State", choices = c("Any", "Leading", "Trailing", "Even", "+3", "+2", "+1", "-1", "-2", "-3"), selected = "Any")),
      column(2, selectInput("report", "Report", choices = c("On-Ice", "Context", "Counts"), selected = "On-Ice")),
      column(2, downloadButton("dl", "Download File")),
      column(3, checkboxInput("aggregate", "Aggregate Seasons", value = TRUE))
    ),
    # Table output
    DT::dataTableOutput("t1")
  )
))
)) | /corsicateam/ui.R | no_license | NlIceD/shiny-server | R | false | false | 3,651 | r | # User Interface
# Corsica Team App
# Last edited 1-26-2016
# Manny
shinyUI(navbarPage("Teams", id = "tab", inverse = F, windowTitle = "Corsica | Teams",
tabPanel("Team Stats", value = "stats",
# Formatting
tags$head(tags$style(".container-fluid {font-size: 13px; color: #2B547E; background-color: #E8E8E8;}")),
tags$head(tags$style(".rightAlign{float:right;}")),
tags$style(type = "text/css", ".shiny-output-error {visibility: hidden;}", ".shiny-output-error:before { visibility: hidden;}"),
tags$style(".navbar-default {background-color: #4863A0; border-color: #ffffff;}"),
tags$style(".navbar-default .navbar-nav li a {background-color: #4863A0; color: #ffffff;}"),
tags$style(".navbar-default .navbar-nav .active a {background-color: #ffffff; color: #4863A0;}"),
tags$style(".navbar-default .navbar-brand {background-color: #4863A0; color: #ffffff;}"),
tags$style(".dataTable thead tr {background-color: #4863A0; color: #ffffff;}"),
# Header text
fluidRow(
column(6, h2("Team Stats")),
column(6, tags$div(class = "rightAlign", checked = NA, tags$a(href = "http://www.corsica.hockey/", target = "_parent", tags$h2("Corsica ↩", style = "color: #2B547E;"))))
),
# Help text
fluidRow(
column(6, helpText("Loading the data may take a few seconds. Thanks for your patience.")),
column(6, helpText("", class = "rightAlign"))
),
# Input row 1
fluidRow(
column(2, uiOutput("s1")),
column(2, selectInput("strength", "Strength State", choices = c("All", "5v5", "5v4", "4v5", "4v4", "5v3", "3v5", "3v3", "4v3", "3v4"), selected = "5v5")),
column(2, selectInput("venue", "Venue", choices = c("Any", "Home", "Away"), selected = "Any")),
column(3, selectInput("adjust", "Adjustment", choices = c("None", "Score and Venue", "Score, Zone and Venue"), selected = "None")),
column(2, selectInput("type", "Season Type", choices = c("Regular", "Playoffs", "Both"), selected = "Regular"))
),
fluidRow(
column(2, uiOutput("s2")),
column(2, selectInput("score", "Score State", choices = c("Any", "Leading", "Trailing", "Even", "+3", "+2", "+1", "-1", "-2", "-3"), selected = "Any")),
column(2, selectInput("report", "Report", choices = c("On-Ice", "Context", "Counts"), selected = "On-Ice")),
column(2, downloadButton("dl", "Download File")),
column(3, checkboxInput("aggregate", "Aggregate Seasons", value = TRUE))
),
# Table output
DT::dataTableOutput("t1")
)
)) |
#' Causal Mediation Analysis for Multiple Outcome/Treatment/Mediator
#' Combinations
#'
#' 'mediations' can be used to process a set of outcome/treatment/mediator
#' combinations through the \code{\link{mediate}} function to produce a series
#' of causal mediation analysis results.
#'
#' @details This function processes multiple treatment/mediators/outcome
#' variable combinations to produce a collected set of output ready for
#' analysis or graphing. In principle, this is a function designed to
#' facilitate running causal mediation analyses on multiple models that share
#' the same basic specification (i.e. the types of parametric models and the
#' set of pre-treatment covariates) except the treatment, mediator and outcome
#' variables can differ across specifications. The function works by looping
#' over a set of data frames that are pre-loaded into the workspace. Each one
#' of these data frames has a specific treatment variable that is used for
#' analysis with that data frame. Then the code runs causal mediation analysis
#' via \code{\link{mediate}} on every combination of the treatment, mediator,
#' and outcomes specified in these arguments. This allows the users to explore
#' whether different mediators transmit the effect of the treatment variable
#' on a variety of outcome variables. A single set of pre-treatment control
#' variables can be specified in 'covariates', which will be used throughout.
#'
#' The 'mediations' function can be used with either multiple mediators and a
#' single outcome, a single mediator and multiple outcomes, or multiple
#' mediators and outcomes. For example, with three different treatments, user
#' will create three different data frames, each containing a treatment
#' variable. In addition, if there are also four different mediators, each of
#' these will be contained in each data frame, along with the outcome
#' variable. The function will estimate all of the combinations of treatment
#' variables and mediators instead of separate lines of code being written for
#' each one.
#'
#' Individual elements of the output list (see "Value") may be passed through
#' \code{\link[=summary.mediate]{summary}} and
#' \code{\link[=plot.mediate]{plot}} for tabular and graphical summaries of
#' the results. Alternatively, the entire output may be directly passed to
#' \code{\link[=summary.mediations]{summary}} or
#' \code{\link[=plot.mediations]{plot}} for all results to be inspected.
#'
#' The default value of 'covariates' is 'NULL' and no covariate will be
#' included in either mediator or outcome models without a custom value. It
#' should be noted that users typically should have pre-treatment covariates
#' to make the sequential ignorability assumption more plausible.
#'
#' There are several limitations to the code. First, it works only with a
#' subset of the model types that will be accommodated if 'mediate' is used
#' individually (see the 'families' argument above for details). Second, one
#' cannot specify separate sets of covariates for different
#' treatment/mediator/outcome combinations. Users should use 'mediate'
#' separately for individual models if more flexibility is required in their
#' specific applications.
#'
#' @param datasets a named list of data frames. Each data frame has a separate
#' treatment variable. The names of each data frame must begin with the exact
#' name of the treatment variable that is contained in that dataset (see
#' example below).
#' @param treatment a vector of character strings indicating the names of the
#' treatment variables, with length equal to the length of 'datasets'. Each
#' treatment variable must be included in the data frame listed in the same
#' position of list 'datasets' and its name must match the first part of the
#' corresponding data frame.
#' @param mediators a vector of character strings indicating the names of the
#' mediators contained within each data frame. All of the mediators will be
#' used with each treatment variable and hence must be included in each data
#' frame of 'datasets'.
#' @param outcome a vector of character strings indicating the names of the
#' outcome variables contained within each data frame. All of the outcomes
#' will be used with each treatment variable and must be in each data frame.
#' @param covariates a character string representing the set of pre-treatment
#' covariate names (as they appear in the data frame) to be included in each
#' model. The value must take the form of standard model formula, with each
#' additive component separated by "+", etc. (see example below). All
#' covariates must be in each data frame. Default is 'NULL'.
#' @param families a vector of length two specifying the types of the mediator
#' and outcome models. Currently only supports "gaussian" (for linear
#' regression), "binomial" (for binary probit), "oprobit" (for ordered probit)
#' and "quantile" (for quantile regression, see 'tau'). For the outcome the
#' tobit model ("tobit") is also available in addition to the mediator model
#' options.
#' @param tau.m a numeric value specifying the quantile to be used for a
#' quantile regression for the mediator model. Only relevant if the first
#' element of 'families' is "quantile". See \code{rq}.
#' @param tau.y a numeric value specifying the quantile to be used for a
#' quantile regression for the outcome model. Only relevant if the second
#' element of 'families' is "quantile". See \code{rq}.
#' @param LowerY a numeric value indicating the lower bound for the tobit
#' outcome model. See \code{tobit}.
#' @param UpperY a numeric value indicating the upper bound for the tobit
#' outcome model. See \code{tobit}.
#' @param interaction a logical value indicating whether the treatment and
#' mediator variables should be interacted. This will apply to applications of
#' \code{\link{mediate}} to all the treatment/mediator/outcome combinations.
#' @param conf.level confidence level used in each application of the
#' \code{\link{mediate}} function.
#' @param sims an integer indicating the desired number of simulations for
#' inference. This will apply to all applications of 'mediate' to all the
#' treatment/mediator/outcome combinations.
#' @param boot a logical value, indicating whether or not nonparametric
#' bootstrap should be used in each \code{\link{mediate}} application.
#' @param weights a single valued vector of a character string indicating a
#' weight variable to be used in all model fitting.
#' @param ... other arguments passed to \code{\link{mediate}}, such as
#' 'robustSE', 'dropobs', etc.
#'
#' @return An object of class "mediations" (or "mediations.order" if the outcome
#' model is ordered probit), a list of "mediate" ("mediate.order") objects
#' produced by applications of \code{\link{mediate}} for the specified
#' treatment/mediator/outcome combinations. The elements are named based on
#' the names of the outcome, treatment, and mediator variables, each separated
#' by a "." (see example below).
#'
#' @author Dustin Tingley, Harvard University,
#' \email{dtingley@@gov.harvard.edu}; Teppei Yamamoto, Massachusetts Institute
#' of Technology, \email{teppei@@mit.edu}.
#'
#' @seealso \code{\link{mediate}}, \code{\link{summary.mediations}},
#' \code{\link{plot.mediations}}, \code{rq}, \code{tobit}.
#'
#' @export
#' @examples
#'
#' \dontrun{
#' # Hypothetical example
#'
#' datasets <- list(T1 = T1, T2 = T2)
#' # List of data frames corresponding to the two different treatment variables
#' #"T1vsCont" and "T2vsCont".
#' # Each data set has its respective treatment variable.
#'
#' mediators <- c("M1", "M2")
#' # Vector of mediator names, all included in each data frame.
#'
#' outcome <- c("Ycont1","Ycont2")
#' # Vector of outcome variable names, again all included in each data frame.
#'
#' treatment <- c("T1vsCont", "T2vsCont")
#' # Vector of treatment variables names; must begin with identical strings with dataset
#' # names in 'datasets'.
#'
#' covariates <- c("X1 + X2")
#' # Set of covariates (in each data set), entered using the standard model formula format.
#'
#' x <- mediations(datasets, treatment, mediators, outcome, covariates,
#' families=c("gaussian","gaussian"), interaction=FALSE,
#' conf.level=.90, sims=50)
#' # Runs 'mediate' iteratively for each variable combinations, with 'lm' on both mediator
#' # and outcome model.
#'
#' summary(x) # tabular summary of results for all model combinations
#' plot(x) # graphical summary of results for all model combinations at once
#'
#' plot(x$Ycont1.T1vsCont.M1)
#' # Individual 'mediate' outputs are stored as list elements and can be
#' # accessed using the usual "$" operator.
#' }
#'
mediations <- function(datasets, treatment, mediators, outcome,
                       covariates=NULL, families=c("gaussian", "gaussian"),
                       tau.m=.5, tau.y=.5, LowerY=NULL, UpperY=NULL,
                       interaction=FALSE, conf.level=.95, sims=500,
                       boot=FALSE, weights=NULL, ...) {
  # Run mediate() for every treatment x outcome x mediator combination.
  # Each element of 'datasets' supplies the data frame for the treatment
  # variable in the same position of 'treatment'.  Output elements are
  # named "<outcome>.<treatment>.<mediator>".
  data <- names(datasets)
  labels <- c()
  out <- list()
  count <- 1
  for (i in seq_along(treatment)) {
    # Direct list indexing instead of the old eval(parse(text = ...)) idiom.
    dataarg <- datasets[[data[i]]]
    for (o in seq_along(outcome)) {
      for (j in seq_along(mediators)) {
        # Build the mediator (f1) and outcome (f2) model formulas.
        if (is.null(covariates)) {
          f1 <- sprintf("%s ~ %s ", mediators[j], treatment[i])
          if (interaction) {
            f2 <- sprintf("%s ~ %s * %s", outcome[o], treatment[i], mediators[j])
          } else {
            f2 <- sprintf("%s ~ %s + %s", outcome[o], treatment[i], mediators[j])
          }
        } else {
          f1 <- sprintf("%s ~ %s + %s", mediators[j], treatment[i], covariates)
          if (interaction) {
            f2 <- sprintf("%s ~ %s * %s + %s", outcome[o], treatment[i],
                          mediators[j], covariates)
          } else {
            f2 <- sprintf("%s ~ %s + %s + %s", outcome[o], treatment[i],
                          mediators[j], covariates)
          }
        }
        # Resolve the model weights: the named column of the current data
        # set, or unit weights when no weight variable was given.
        if (!is.null(weights)) {
          # BUG FIX: extract the column as a plain vector; the old code
          # wrapped it in as.data.frame(), which is not what the 'weights'
          # argument of glm()/polr() expects.
          weight <- dataarg[[weights]]
        } else {
          dataarg$weight <- weight <- rep(1, nrow(dataarg))
        }
        # Fit the mediator model.
        if (families[1] == "binomial") {
          result1 <- glm(f1, family = binomial("probit"), weights = weight,
                         data = dataarg)
        } else if (families[1] == "quantile") {
          if (!is.null(weights)) {
            stop("Weights not supported with quantile regression")
          }
          result1 <- quantreg::rq(f1, data = dataarg, tau = tau.m)
        } else if (families[1] == "oprobit") {
          result1 <- polr(f1, method = "probit", weights = weight,
                          data = dataarg, Hess = TRUE)
        } else if (families[1] == "gaussian") {
          result1 <- glm(f1, family = "gaussian", weights = weight,
                         data = dataarg)
        } else {
          stop("mediations does not support this model for the mediator")
        }
        # Fit the outcome model.
        if (families[2] == "binomial") {
          result2 <- glm(f2, family = binomial("probit"), weights = weight,
                         data = dataarg)
        } else if (families[2] == "quantile") {
          if (!is.null(weights)) {
            stop("Weights not supported with quantile regression")
          }
          result2 <- quantreg::rq(f2, data = dataarg, tau = tau.y)
        } else if (families[2] == "tobit") {
          result2 <- VGAM::vglm(f2, VGAM::tobit(Lower = LowerY, Upper = UpperY),
                                weights = weight, data = dataarg, model = TRUE)
        } else if (families[2] == "oprobit") {
          result2 <- polr(f2, method = "probit", weights = weight,
                          data = dataarg, Hess = TRUE)
        } else if (families[2] == "gaussian") {
          result2 <- glm(f2, family = "gaussian", weights = weight,
                         data = dataarg)
        } else {
          # BUG FIX: this branch used print(), so execution continued and
          # mediate() was later called with an undefined (or stale) outcome
          # model.  Fail fast, consistent with the mediator branch above.
          stop("mediations does not support this model for the outcome")
        }
        # The old code branched on whether a weight variable was supplied,
        # but both branches called mediate() identically and the 'weights'
        # restoration was a no-op; a single call is equivalent.
        out[[count]] <- mediate(result1, result2, sims = sims,
                                treat = treatment[i], mediator = mediators[j],
                                conf.level = conf.level, boot = boot, ...)
        labels[count] <- sprintf("%s.%s.%s", outcome[o], treatment[i],
                                 mediators[j])
        count <- count + 1
      }
    }
  }
  names(out) <- labels
  # Ordered-probit outcomes get their own class for method dispatch.
  if (families[2] == "oprobit") {
    class(out) <- "mediations.order"
  } else {
    class(out) <- "mediations"
  }
  out
}
#' Plotting Indirect, Direct, and Total Effects from Multiple Mediation Analyses
#'
#' Function to plot results from multiple causal mediation analyses conducted
#' via the \code{\link{mediations}} function. Output is a series of plots
#' generated via \code{\link{plot.mediate}} for each treatment/mediator/outcome
#' combination specified in the input 'mediations' object.
#'
#' @aliases plot.mediations plot.mediations.order
#'
#' @param x output from the mediations function.
#' @param which subset of names(x), indicating which model combinations to be
#' plotted. Default is to plot all.
#' @param ask logical. If 'TRUE', the user is asked for input before a new
#' figure is plotted. Default is to ask only if the number of plots on
#' current screen is fewer than the number implied by 'which'.
#' @param ... arguments passed to the \code{\link{plot.mediate}} function for
#' individual plots.
#'
#' @return \code{mediations} returns an object of class \code{mediations}. The
#' function \code{summary} is used to obtain a table of the results. The plot
#' function instead plots these quantities. All additional parameters desired
#' for the plotting of an output from \code{mediate} can be passed through.
#'
#' @author Dustin Tingley, Harvard University,
#' \email{dtingley@@gov.harvard.edu}; Teppei Yamamoto, Massachusetts Institute
#' of Technology, \email{teppei@@mit.edu}.
#'
#' @seealso \code{\link{mediations}}, \code{\link{plot.mediate}},
#' \code{\link{plot}}.
#'
#' @export
plot.mediations <- function(x, which = names(x),
    ask = prod(par("mfcol")) < length(which) && dev.interactive(), ...){
  # Plot each requested 'mediate' result stored in a 'mediations' object.
  #
  # Args:
  #   x: a 'mediations' object (named list of 'mediate' results).
  #   which: names of the elements of 'x' to plot (default: all).
  #   ask: prompt before each new page when more plots are requested than
  #     fit on the current device layout.
  if (ask) {
    oask <- devAskNewPage(TRUE)
    on.exit(devAskNewPage(oask))  # restore previous prompting state
  }
  # BUG FIX: index by name rather than position -- with a partial 'which'
  # (e.g. which = names(x)[3]) the old code plotted x[[1]] under the wrong
  # label.  seq_along() also handles an empty 'which' (1:length(which)
  # yielded c(1, 0)).
  for (i in seq_along(which)) {
    plot.mediate(x[[which[i]]], xlab = which[i], ...)
  }
  invisible(x)
}
#' @export
plot.mediations.order <- function(x, which = names(x),
    ask = prod(par("mfcol")) < length(which) && dev.interactive(), ...){
  # Plot each requested ordered-outcome 'mediate' result stored in a
  # 'mediations.order' object.
  #
  # Args:
  #   x: a 'mediations.order' object (named list of 'mediate.order' results).
  #   which: names of the elements of 'x' to plot (default: all).
  #   ask: prompt before each new page when more plots are requested than
  #     fit on the current device layout.
  if (ask) {
    oask <- devAskNewPage(TRUE)
    on.exit(devAskNewPage(oask))  # restore previous prompting state
  }
  # BUG FIX: index by name rather than position -- with a partial 'which'
  # the old code plotted x[[1]] under the wrong label.  seq_along() also
  # handles an empty 'which' (1:length(which) yielded c(1, 0)).
  for (i in seq_along(which)) {
    plot.mediate.order(x[[which[i]]], xlab = which[i], ...)
  }
  invisible(x)
}
#' Summarizing Output from Multiple Mediation Analyses
#'
#' The 'summary.mediations' function produces a summary of results from multiple
#' causal analyses conducted via \code{\link{mediations}}. Output is a series
#' of \code{\link{summary.mediate}} outputs for all the
#' treatment/mediator/outcome combinations used in the input 'mediations'
#' object.
#'
#' @aliases summary.mediations summary.mediations.order print.summary.mediations
#' print.summary.mediations.order
#'
#' @param object output from mediations function.
#' @param x output from summary.mediations function.
#' @param ... additional arguments affecting the summary produced.
#'
#' @author Dustin Tingley, Harvard University,
#' \email{dtingley@@gov.harvard.edu}; Teppei Yamamoto, Massachusetts Institute
#' of Technology, \email{teppei@@mit.edu}.
#'
#' @seealso \code{\link{mediations}}, \code{\link{summary.mediate}},
#' \code{\link{summary}}.
#'
#' @export
summary.mediations <- function(object, ...){
  # Tag the object so print.summary.mediations() is dispatched; the
  # underlying list of 'mediate' results is left untouched.
  class(object) <- c("summary.mediations", class(object))
  object
}
#' @rdname summary.mediations
#' @export
print.summary.mediations <- function(x, ...){
  # Print a summary.mediate() table for every model specification stored in
  # the object, each labelled by its "<outcome>.<treatment>.<mediator>" name.
  name.list <- names(x)
  # seq_along() is safe when there are no specifications; 1:length() would
  # iterate over c(1, 0) and fail on x[[1]].
  for (i in seq_along(name.list)) {
    cat("Specification", name.list[i], "\n")
    print(summary.mediate(x[[i]]))
  }
  # Print methods conventionally return their argument invisibly.
  invisible(x)
}
#' @export
summary.mediations.order <- function(object, ...){
  # Tag the object so print.summary.mediations.order() is dispatched; the
  # underlying list of 'mediate.order' results is left untouched.
  class(object) <- c("summary.mediations.order", class(object))
  object
}
#' @export
print.summary.mediations.order <- function(x, ...){
  # Print a summary.mediate.order() table for every model specification
  # stored in the object, each labelled by its
  # "<outcome>.<treatment>.<mediator>" name.
  name.list <- names(x)
  # seq_along() is safe when there are no specifications; 1:length() would
  # iterate over c(1, 0) and fail on x[[1]].
  for (i in seq_along(name.list)) {
    cat("Specification", name.list[i], "\n")
    print(summary.mediate.order(x[[i]]))
  }
  # Print methods conventionally return their argument invisibly.
  invisible(x)
}
| /R/mediations.R | no_license | kosukeimai/mediation | R | false | false | 18,078 | r | #' Causal Mediation Analysis for Multiple Outcome/Treatment/Mediator
#' Combinations
#'
#' 'mediations' can be used to process a set of outcome/treatment/mediator
#' combinations through the \code{\link{mediate}} function to produce a series
#' of causal mediation analysis results.
#'
#' @details This function processes multiple treatment/mediators/outcome
#' variable combinations to produce a collected set of output ready for
#' analysis or graphing. In principle, this is a function designed to
#' facilitate running causal mediation analyses on multiple models that share
#' the same basic specification (i.e. the types of parametric models and the
#' set of pre-treatment covariates) except the treatment, mediator and outcome
#' variables can differ across specifications. The function works by looping
#' over a set of data frames that are pre-loaded into the workspace. Each one
#' of these data frames has a specific treatment variable that is used for
#' analysis with that data frame. Then the code runs causal mediation analysis
#' via \code{\link{mediate}} on every combination of the treatment, mediator,
#' and outcomes specified in these arguments. This allows the users to explore
#' whether different mediators transmit the effect of the treatment variable
#' on a variety of outcome variables. A single set of pre-treatment control
#' variables can be specified in 'covariates', which will be used throughout.
#'
#' The 'mediations' function can be used with either multiple mediators and a
#' single outcome, a single mediator and multiple outcomes, or multiple
#' mediators and outcomes. For example, with three different treatments, user
#' will create three different data frames, each containing a treatment
#' variable. In addition, if there are also four different mediators, each of
#' these will be contained in each data frame, along with the outcome
#' variable. The function will estimate all of the combinations of treatment
#' variables and mediators instead of separate lines of code being written for
#' each one.
#'
#' Individual elements of the output list (see "Value") may be passed through
#' \code{\link[=summary.mediate]{summary}} and
#' \code{\link[=plot.mediate]{plot}} for tabular and graphical summaries of
#' the results. Alternatively, the entire output may be directly passed to
#' \code{\link[=summary.mediations]{summary}} or
#' \code{\link[=plot.mediations]{plot}} for all results to be inspected.
#'
#' The default value of 'covariates' is 'NULL' and no covariate will be
#' included in either mediator or outcome models without a custom value. It
#' should be noted that users typically should have pre-treatment covariates
#' to make the sequential ignorability assumption more plausible.
#'
#' There are several limitations to the code. First, it works only with a
#' subset of the model types that will be accommodated if 'mediate' is used
#' individually (see the 'families' argument above for details). Second, one
#' cannot specify separate sets of covariates for different
#' treatment/mediator/outcome combinations. Users should use 'mediate'
#' separately for individual models if more flexibility is required in their
#' specific applications.
#'
#' @param datasets a named list of data frames. Each data frame has a separate
#' treatment variable. The names of each data frame must begin with the exact
#' name of the treatment variable that is contained in that dataset (see
#' example below).
#' @param treatment a vector of character strings indicating the names of the
#' treatment variables, with length equal to the length of 'datasets'. Each
#' treatment variable must be included in the data frame listed in the same
#' position of list 'datasets' and its name must match the first part of the
#' corresponding data frame.
#' @param mediators a vector of character strings indicating the names of the
#' mediators contained within each data frame. All of the mediators will be
#' used with each treatment variable and hence must be included in each data
#' frame of 'datasets'.
#' @param outcome a vector of character strings indicating the names of the
#' outcome variables contained within each data frame. All of the outcomes
#' will be used with each treatment variable and must be in each data frame.
#' @param covariates a character string representing the set of pre-treatment
#' covariate names (as they appear in the data frame) to be included in each
#' model. The value must take the form of standard model formula, with each
#' additive component separated by "+", etc. (see example below). All
#' covariates must be in each data frame. Default is 'NULL'.
#' @param families a vector of length two specifying the types of the mediator
#' and outcome models. Currently only supports "gaussian" (for linear
#' regression), "binomial" (for binary probit), "oprobit" (for ordered probit)
#' and "quantile" (for quantile regression, see 'tau'). For the outcome the
#' tobit model ("tobit") is also available in addition to the mediator model
#' options.
#' @param tau.m a numeric value specifying the quantile to be used for a
#' quantile regression for the mediator model. Only relevant if the first
#' element of 'families' is "quantile". See \code{rq}.
#' @param tau.y a numeric value specifying the quantile to be used for a
#' quantile regression for the outcome model. Only relevant if the second
#' element of 'families' is "quantile". See \code{rq}.
#' @param LowerY a numeric value indicating the lower bound for the tobit
#' outcome model. See \code{tobit}.
#' @param UpperY a numeric value indicating the upper bound for the tobit
#' outcome model. See \code{tobit}.
#' @param interaction a logical value indicating whether the treatment and
#' mediator variables should be interacted. This will apply to applications of
#' \code{\link{mediate}} to all the treatment/mediator/outcome combinations.
#' @param conf.level confidence level used in each application of the
#' \code{\link{mediate}} function.
#' @param sims an integer indicating the desired number of simulations for
#' inference. This will apply to all applications of 'mediate' to all the
#' treatment/mediator/outcome combinations.
#' @param boot a logical value, indicating whether or not nonparametric
#' bootstrap should be used in each \code{\link{mediate}} application.
#' @param weights a character string indicating the name of a weight
#'   variable to be used in all model fitting.
#' @param ... other arguments passed to \code{\link{mediate}}, such as
#' 'robustSE', 'dropobs', etc.
#'
#' @return An object of class "mediations" (or "mediations.order" if the outcome
#' model is ordered probit), a list of "mediate" ("mediate.order") objects
#' produced by applications of \code{\link{mediate}} for the specified
#' treatment/mediator/outcome combinations. The elements are named based on
#' the names of the outcome, treatment, and mediator variables, each separated
#' by a "." (see example below).
#'
#' @author Dustin Tingley, Harvard University,
#' \email{dtingley@@gov.harvard.edu}; Teppei Yamamoto, Massachusetts Institute
#' of Technology, \email{teppei@@mit.edu}.
#'
#' @seealso \code{\link{mediate}}, \code{\link{summary.mediations}},
#' \code{\link{plot.mediations}}, \code{rq}, \code{tobit}.
#'
#' @export
#' @examples
#'
#' \dontrun{
#' # Hypothetical example
#'
#' datasets <- list(T1 = T1, T2 = T2)
#' # List of data frames corresponding to the two different treatment variables
#' #"T1vsCont" and "T2vsCont".
#' # Each data set has its respective treatment variable.
#'
#' mediators <- c("M1", "M2")
#' # Vector of mediator names, all included in each data frame.
#'
#' outcome <- c("Ycont1","Ycont2")
#' # Vector of outcome variable names, again all included in each data frame.
#'
#' treatment <- c("T1vsCont", "T2vsCont")
#' # Vector of treatment variables names; must begin with identical strings with dataset
#' # names in 'datasets'.
#'
#' covariates <- c("X1 + X2")
#' # Set of covariates (in each data set), entered using the standard model formula format.
#'
#' x <- mediations(datasets, treatment, mediators, outcome, covariates,
#' families=c("gaussian","gaussian"), interaction=FALSE,
#' conf.level=.90, sims=50)
#' # Runs 'mediate' iteratively for each variable combinations, with 'lm' on both mediator
#' # and outcome model.
#'
#' summary(x) # tabular summary of results for all model combinations
#' plot(x) # graphical summary of results for all model combinations at once
#'
#' plot(x$Ycont1.T1vsCont.M1)
#' # Individual 'mediate' outputs are stored as list elements and can be
#' # accessed using the usual "$" operator.
#' }
#'
mediations <- function(datasets, treatment, mediators, outcome,
                       covariates=NULL, families=c("gaussian", "gaussian"),
                       tau.m=.5, tau.y=.5, LowerY=NULL, UpperY=NULL,
                       interaction=FALSE, conf.level=.95, sims=500,
                       boot=FALSE, weights=NULL, ...) {
  # Run mediate() for every treatment x outcome x mediator combination.
  # Each element of 'datasets' supplies the data frame for the treatment
  # variable in the same position of 'treatment'.  Output elements are
  # named "<outcome>.<treatment>.<mediator>".
  data <- names(datasets)
  labels <- c()
  out <- list()
  count <- 1
  for (i in seq_along(treatment)) {
    # Direct list indexing instead of the old eval(parse(text = ...)) idiom.
    dataarg <- datasets[[data[i]]]
    for (o in seq_along(outcome)) {
      for (j in seq_along(mediators)) {
        # Build the mediator (f1) and outcome (f2) model formulas.
        if (is.null(covariates)) {
          f1 <- sprintf("%s ~ %s ", mediators[j], treatment[i])
          if (interaction) {
            f2 <- sprintf("%s ~ %s * %s", outcome[o], treatment[i], mediators[j])
          } else {
            f2 <- sprintf("%s ~ %s + %s", outcome[o], treatment[i], mediators[j])
          }
        } else {
          f1 <- sprintf("%s ~ %s + %s", mediators[j], treatment[i], covariates)
          if (interaction) {
            f2 <- sprintf("%s ~ %s * %s + %s", outcome[o], treatment[i],
                          mediators[j], covariates)
          } else {
            f2 <- sprintf("%s ~ %s + %s + %s", outcome[o], treatment[i],
                          mediators[j], covariates)
          }
        }
        # Resolve the model weights: the named column of the current data
        # set, or unit weights when no weight variable was given.
        if (!is.null(weights)) {
          # BUG FIX: extract the column as a plain vector; the old code
          # wrapped it in as.data.frame(), which is not what the 'weights'
          # argument of glm()/polr() expects.
          weight <- dataarg[[weights]]
        } else {
          dataarg$weight <- weight <- rep(1, nrow(dataarg))
        }
        # Fit the mediator model.
        if (families[1] == "binomial") {
          result1 <- glm(f1, family = binomial("probit"), weights = weight,
                         data = dataarg)
        } else if (families[1] == "quantile") {
          if (!is.null(weights)) {
            stop("Weights not supported with quantile regression")
          }
          result1 <- quantreg::rq(f1, data = dataarg, tau = tau.m)
        } else if (families[1] == "oprobit") {
          result1 <- polr(f1, method = "probit", weights = weight,
                          data = dataarg, Hess = TRUE)
        } else if (families[1] == "gaussian") {
          result1 <- glm(f1, family = "gaussian", weights = weight,
                         data = dataarg)
        } else {
          stop("mediations does not support this model for the mediator")
        }
        # Fit the outcome model.
        if (families[2] == "binomial") {
          result2 <- glm(f2, family = binomial("probit"), weights = weight,
                         data = dataarg)
        } else if (families[2] == "quantile") {
          if (!is.null(weights)) {
            stop("Weights not supported with quantile regression")
          }
          result2 <- quantreg::rq(f2, data = dataarg, tau = tau.y)
        } else if (families[2] == "tobit") {
          result2 <- VGAM::vglm(f2, VGAM::tobit(Lower = LowerY, Upper = UpperY),
                                weights = weight, data = dataarg, model = TRUE)
        } else if (families[2] == "oprobit") {
          result2 <- polr(f2, method = "probit", weights = weight,
                          data = dataarg, Hess = TRUE)
        } else if (families[2] == "gaussian") {
          result2 <- glm(f2, family = "gaussian", weights = weight,
                         data = dataarg)
        } else {
          # BUG FIX: this branch used print(), so execution continued and
          # mediate() was later called with an undefined (or stale) outcome
          # model.  Fail fast, consistent with the mediator branch above.
          stop("mediations does not support this model for the outcome")
        }
        # The old code branched on whether a weight variable was supplied,
        # but both branches called mediate() identically and the 'weights'
        # restoration was a no-op; a single call is equivalent.
        out[[count]] <- mediate(result1, result2, sims = sims,
                                treat = treatment[i], mediator = mediators[j],
                                conf.level = conf.level, boot = boot, ...)
        labels[count] <- sprintf("%s.%s.%s", outcome[o], treatment[i],
                                 mediators[j])
        count <- count + 1
      }
    }
  }
  names(out) <- labels
  # Ordered-probit outcomes get their own class for method dispatch.
  if (families[2] == "oprobit") {
    class(out) <- "mediations.order"
  } else {
    class(out) <- "mediations"
  }
  out
}
#' Plotting Indirect, Direct, and Total Effects from Multiple Mediation Analyses
#'
#' Function to plot results from multiple causal mediation analyses conducted
#' via the \code{\link{mediations}} function. Output is a series of plots
#' generated via \code{\link{plot.mediate}} for each treatment/mediator/outcome
#' combination specified in the input 'mediations' object.
#'
#' @aliases plot.mediations plot.mediations.order
#'
#' @param x output from the mediations function.
#' @param which subset of names(x), indicating which model combinations to be
#' plotted. Default is to plot all.
#' @param ask logical. If 'TRUE', the user is asked for input before a new
#' figure is plotted. Default is to ask only if the number of plots on
#' current screen is fewer than the number implied by 'which'.
#' @param ... arguments passed to the \code{\link{plot.mediate}} function for
#' individual plots.
#'
#' @return \code{mediations} returns an object of class \code{mediations}. The
#' function \code{summary} is used to obtain a table of the results. The plot
#' function instead plots these quantities. All additional parameters desired
#' for the plotting of an output from \code{mediate} can be passed through.
#'
#' @author Dustin Tingley, Harvard University,
#' \email{dtingley@@gov.harvard.edu}; Teppei Yamamoto, Massachusetts Institute
#' of Technology, \email{teppei@@mit.edu}.
#'
#' @seealso \code{\link{mediations}}, \code{\link{plot.mediate}},
#' \code{\link{plot}}.
#'
#' @export
plot.mediations <- function(x, which = names(x),
    ask = prod(par("mfcol")) < length(which) && dev.interactive(), ...){
  # Plot each requested 'mediate' result stored in a 'mediations' object.
  #
  # Args:
  #   x: a 'mediations' object (named list of 'mediate' results).
  #   which: names of the elements of 'x' to plot (default: all).
  #   ask: prompt before each new page when more plots are requested than
  #     fit on the current device layout.
  if (ask) {
    oask <- devAskNewPage(TRUE)
    on.exit(devAskNewPage(oask))  # restore previous prompting state
  }
  # BUG FIX: index by name rather than position -- with a partial 'which'
  # (e.g. which = names(x)[3]) the old code plotted x[[1]] under the wrong
  # label.  seq_along() also handles an empty 'which' (1:length(which)
  # yielded c(1, 0)).
  for (i in seq_along(which)) {
    plot.mediate(x[[which[i]]], xlab = which[i], ...)
  }
  invisible(x)
}
#' @export
plot.mediations.order <- function(x, which = names(x),
    ask = prod(par("mfcol")) < length(which) && dev.interactive(), ...){
  # Plot each requested ordered-outcome 'mediate' result stored in a
  # 'mediations.order' object.
  #
  # Args:
  #   x: a 'mediations.order' object (named list of 'mediate.order' results).
  #   which: names of the elements of 'x' to plot (default: all).
  #   ask: prompt before each new page when more plots are requested than
  #     fit on the current device layout.
  if (ask) {
    oask <- devAskNewPage(TRUE)
    on.exit(devAskNewPage(oask))  # restore previous prompting state
  }
  # BUG FIX: index by name rather than position -- with a partial 'which'
  # the old code plotted x[[1]] under the wrong label.  seq_along() also
  # handles an empty 'which' (1:length(which) yielded c(1, 0)).
  for (i in seq_along(which)) {
    plot.mediate.order(x[[which[i]]], xlab = which[i], ...)
  }
  invisible(x)
}
#' Summarizing Output from Multiple Mediation Analyses
#'
#' The 'summary.mediations' function produces a summary of results from multiple
#' causal analyses conducted via \code{\link{mediations}}. Output is a series
#' of \code{\link{summary.mediate}} outputs for all the
#' treatment/mediator/outcome combinations used in the input 'mediations'
#' object.
#'
#' @aliases summary.mediations summary.mediations.order print.summary.mediations
#' print.summary.mediations.order
#'
#' @param object output from mediations function.
#' @param x output from summary.mediations function.
#' @param ... additional arguments affecting the summary produced.
#'
#' @author Dustin Tingley, Harvard University,
#' \email{dtingley@@gov.harvard.edu}; Teppei Yamamoto, Massachusetts Institute
#' of Technology, \email{teppei@@mit.edu}.
#'
#' @seealso \code{\link{mediations}}, \code{\link{summary.mediate}},
#' \code{\link{summary}}.
#'
#' @export
summary.mediations <- function(object, ...){
  # Tag the object so print.summary.mediations() is dispatched; the
  # underlying list of 'mediate' results is left untouched.
  class(object) <- c("summary.mediations", class(object))
  object
}
#' @rdname summary.mediations
#' @export
print.summary.mediations <- function(x, ...){
  # Print a summary.mediate() table for every model specification stored in
  # the object, each labelled by its "<outcome>.<treatment>.<mediator>" name.
  name.list <- names(x)
  # seq_along() is safe when there are no specifications; 1:length() would
  # iterate over c(1, 0) and fail on x[[1]].
  for (i in seq_along(name.list)) {
    cat("Specification", name.list[i], "\n")
    print(summary.mediate(x[[i]]))
  }
  # Print methods conventionally return their argument invisibly.
  invisible(x)
}
#' @export
summary.mediations.order <- function(object, ...){
  # Tag the object so print.summary.mediations.order() is dispatched; the
  # underlying list of 'mediate.order' results is left untouched.
  class(object) <- c("summary.mediations.order", class(object))
  object
}
#' @export
print.summary.mediations.order <- function(x, ...){
  # Print a summary.mediate.order() table for every model specification
  # stored in the object, each labelled by its
  # "<outcome>.<treatment>.<mediator>" name.
  name.list <- names(x)
  # seq_along() is safe when there are no specifications; 1:length() would
  # iterate over c(1, 0) and fail on x[[1]].
  for (i in seq_along(name.list)) {
    cat("Specification", name.list[i], "\n")
    print(summary.mediate.order(x[[i]]))
  }
  # Print methods conventionally return their argument invisibly.
  invisible(x)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/response-type.R
\name{http_type}
\alias{http_type}
\title{Extract the content type of a response}
\usage{
http_type(x)
}
\arguments{
\item{x}{A response}
}
\value{
A string giving the complete mime type, with all parameters
stripped off.
}
\description{
Extract the content type of a response
}
\examples{
\dontrun{
r1 <- GET("http://httpbin.org/image/png")
http_type(r1)
headers(r1)[["Content-Type"]]
r2 <- GET("http://httpbin.org/ip")
http_type(r2)
headers(r2)[["Content-Type"]]
}
}
| /man/http_type.Rd | permissive | r-lib/httr | R | false | true | 564 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/response-type.R
\name{http_type}
\alias{http_type}
\title{Extract the content type of a response}
\usage{
http_type(x)
}
\arguments{
\item{x}{A response}
}
\value{
A string giving the complete mime type, with all parameters
stripped off.
}
\description{
Extract the content type of a response
}
\examples{
\dontrun{
r1 <- GET("http://httpbin.org/image/png")
http_type(r1)
headers(r1)[["Content-Type"]]
r2 <- GET("http://httpbin.org/ip")
http_type(r2)
headers(r2)[["Content-Type"]]
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/jaccardAndPValue.R
\name{jaccardAndPValue}
\alias{jaccardAndPValue}
\title{Ranks bed file similarities with significance}
\usage{
jaccardAndPValue(n, bed1, genome, folder_dir)
}
\arguments{
\item{n}{The number of background files generated (the greater the n, the more reliable the p-value, default n of 100)}
\item{bed1}{The file path string of a query bed file to be compared to the database files.}
\item{genome}{The file path of a genome file, which should be tab delimited and structured as follows: <chromName><TAB><chromSize>}
\item{folder_dir}{The directory of a folder containing separate database files to be used for comparison with the query file.}
}
\value{
A dataframe with 12 columns, ranked by jaccard index. Contains the name of the database file, the respective jaccard index, the pi score, the significance value (p-value) of the similarity, the proportion of intersection similarity with bed1, the proportion of intersection similarity with the database file, and a five-number summary of the background files generated from the query file.
}
\description{
The function compares a given query bed file with multiple bed files, and ranks the relative similarity between each file pairing, computed using jaccard indexes, where 0 has no similarities and 1 has an identical file. Will provide significance values (p-values) of each jaccard index, but will run slower.
}
| /man/jaccardAndPValue.Rd | no_license | ADotDong/RankBindingSimilarity | R | false | true | 1,467 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/jaccardAndPValue.R
\name{jaccardAndPValue}
\alias{jaccardAndPValue}
\title{Ranks bed file similarities with significance}
\usage{
jaccardAndPValue(n, bed1, genome, folder_dir)
}
\arguments{
\item{n}{The number of background files generated (the greater the n, the more reliable the p-value, default n of 100)}
\item{bed1}{The file path string of a query bed file to be compared to the database files.}
\item{genome}{The file path of a genome file, which should be tab delimited and structured as follows: <chromName><TAB><chromSize>}
\item{folder_dir}{The directory of a folder containing separate database files to be used for comparison with the query file.}
}
\value{
A dataframe with 12 columns, ranked by jaccard index. Contains the name of the database file, the respective jaccard index, the pi score, the significance value (p-value) of the similarity, the proportion of intersection similarity with bed1, the proportion of intersection similarity with the database file, and a five-number summary of the background files generated from the query file.
}
\description{
The function compares a given query bed file with multiple bed files, and ranks the relative similarity between each file pairing, computed using jaccard indexes, where 0 has no similarities and 1 has an identical file. Will provide significance values (p-values) of each jaccard index, but will run slower.
}
|
# Extracted example for the 'rtime' dataset (censored response time data)
# from the 'smdata' package help page.
library(smdata)
### Name: rtime
### Title: Censored response time data
### Aliases: rtime
### Keywords: datasets
### ** Examples
# Load the 'rtime' data set from the installed smdata package into the
# global environment.
data("rtime", package="smdata")
| /data/genthat_extracted_code/smdata/examples/rtime.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 168 | r | library(smdata)
### Name: rtime
### Title: Censored response time data
### Aliases: rtime
### Keywords: datasets
### ** Examples
# Load the 'rtime' data set (censored response time data) from the
# installed smdata package; assumes library(smdata) was attached above.
data("rtime", package="smdata")
# Calibration analysis for the Circular Economy NetLogo model: plots the
# Pareto front (total waste vs. relative cost) of an NSGA2 run, for several
# minimum-sample thresholds.
library(dplyr)
library(ggplot2)
# Paths are resolved from the CS_HOME / CN_HOME environment variables;
# plots.R provides 'stdtheme' used below.  NOTE(review): setwd() makes the
# script non-portable -- confirm this is intentional for this repo layout.
setwd(paste0(Sys.getenv('CS_HOME'),'/CircularEconomy/Models/NetLogo/netlogo6'))
source(paste0(Sys.getenv("CN_HOME"),'/Models/Utils/R/plots.R'))
# Earlier run kept for reference:
#resdirpref='20180615_1623_NSGA2_SYNTHETIC_TRCOST1.0_DISTRIBSD0.1'
#res <- as.tbl(read.csv(paste0('explo/',resdirpref,'/population879.csv')))
# Result-directory prefix and the NSGA2 generation whose population to load.
resdirpref='20180722_1631_NSGA2_SYNTHETIC_TRCOST3_DISTRIBSD0.01'
generation='50000'
res <- as.tbl(read.csv(paste0('explo/',resdirpref,'/population',generation,'.csv')))
# Output directory for the figures (created if missing).
resdir=paste0(Sys.getenv('CS_HOME'),'/CircularEconomy/Results/Calibration/',resdirpref);dir.create(resdir)
#minsamples = 5
#minsamples = 1
#minsamples = 0
# One figure per minimum number of evolution samples required to keep a point.
for(minsamples in c(0,1,5)){
sres = res[res$evolution.samples>minsamples,]
g=ggplot(sres,aes(x=totalWaste,y=relativeCost,size=gravityDecay/2,color=overlapThreshold))
# NOTE(review): inside a for loop this expression is not auto-printed, and
# ggsave() below saves last_plot() -- verify the saved figures show the
# plot built in this iteration.
g+geom_point()+xlab("Total waste")+ylab("Relative cost")+
scale_color_continuous(name=expression(T[0]))+scale_size_continuous(name=expression(d[0]))+stdtheme
ggsave(file=paste0(resdir,'/pareto-waste-cost_minsamples',minsamples,'_gen',generation,'.png'),width=18,height = 15,units = 'cm')
}
| /Models/Analysis/calibration.R | no_license | SFICSSS16-CircularEconomy/CircularEconomy | R | false | false | 1,108 | r | library(dplyr)
library(ggplot2)
# Calibration analysis for the Circular Economy NetLogo model: plots the
# Pareto front (total waste vs. relative cost) of an NSGA2 run, for several
# minimum-sample thresholds.  Paths are resolved from the CS_HOME / CN_HOME
# environment variables; plots.R provides 'stdtheme' used below.
# NOTE(review): setwd() makes the script non-portable -- confirm intentional.
setwd(paste0(Sys.getenv('CS_HOME'),'/CircularEconomy/Models/NetLogo/netlogo6'))
source(paste0(Sys.getenv("CN_HOME"),'/Models/Utils/R/plots.R'))
# Earlier run kept for reference:
#resdirpref='20180615_1623_NSGA2_SYNTHETIC_TRCOST1.0_DISTRIBSD0.1'
#res <- as.tbl(read.csv(paste0('explo/',resdirpref,'/population879.csv')))
# Result-directory prefix and the NSGA2 generation whose population to load.
resdirpref='20180722_1631_NSGA2_SYNTHETIC_TRCOST3_DISTRIBSD0.01'
generation='50000'
res <- as.tbl(read.csv(paste0('explo/',resdirpref,'/population',generation,'.csv')))
# Output directory for the figures (created if missing).
resdir=paste0(Sys.getenv('CS_HOME'),'/CircularEconomy/Results/Calibration/',resdirpref);dir.create(resdir)
#minsamples = 5
#minsamples = 1
#minsamples = 0
# One figure per minimum number of evolution samples required to keep a point.
for(minsamples in c(0,1,5)){
sres = res[res$evolution.samples>minsamples,]
g=ggplot(sres,aes(x=totalWaste,y=relativeCost,size=gravityDecay/2,color=overlapThreshold))
# NOTE(review): inside a for loop this expression is not auto-printed, and
# ggsave() below saves last_plot() -- verify the saved figures show the
# plot built in this iteration.
g+geom_point()+xlab("Total waste")+ylab("Relative cost")+
scale_color_continuous(name=expression(T[0]))+scale_size_continuous(name=expression(d[0]))+stdtheme
ggsave(file=paste0(resdir,'/pareto-waste-cost_minsamples',minsamples,'_gen',generation,'.png'),width=18,height = 15,units = 'cm')
}
|
# ECMO dashboard section: severe COVID-19 patient status tallied by the
# Japan COVID-19 ECMOnet (data and wording mirrored from
# https://covid19.jsicm.org/). All user-facing text goes through i18n$t().
fluidRow(
  boxPlus(
    width = 12, closable = F,
    # Box header plus a paragraph linking back to the original data source.
    title = tagList(icon("chart-line"), i18n$t("COVID-19 重症患者状況 日本COVID-19対策ECMOnet集計")),
    tags$p(i18n$t("このページは、"),
           tags$a(
             icon("external-link-alt"),
             i18n$t("COVID-19 重症患者状況 日本COVID-19対策ECMOnet集計"),
             href = "https://covid19.jsicm.org/"),
           i18n$t("のデータ(文言を含む)を一覧できるように、若干異なる可視化方法でデータを表現しています。")
    ),
    # Verbatim disclaimer from ECMOnet about the CRISIS database coverage.
    blockQuote(
      tags$small(
        i18n$t("このグラフ群は横断的ICU情報探索システム(CRoss Icu Searchable Information System, 略称CRISIS, 非公開) に蓄積されたデータベースを視覚化したものです。このCRISISには日本集中治療医学会専門医認定施設、日本救急医学会救急科専門医指定施設を中心に日本全国570以上の施設が参加されており、それら施設の総ICUベッド数は5500にのぼり、日本全体のICUベッド(6500ベッドほど)の80%をカバーしております。本事業は各病院担当者の方々の善意により忙しい合間を縫って任意に手入力いただいているものです。そのため精度はかなり高いと存じますが完璧なものではないことをご理解いただければ幸いです。当初我々はECMOに関するデータを中心に集めて参りました。しかしながら人工呼吸器を必要とする重症の方々のデータも必要であると改めて認識し、データの精度を高めるよう努力しております。少しずつではございますが改善させ、このコロナ禍を乗り切った暁には重要な資産として次の世代に遺せるものを目指しております。 日本COVID-19対策ECMOnet 2020/5/1記載"))
    ),
    # Three collapsible panels: ventilator counts (open by default),
    # ECMO counts, and cumulative ECMO treatment outcomes.
    accordion(
      accordionItem(
        id = 11,
        title = tagList(icon("first-aid"), i18n$t("COVID-19重症者における人工呼吸器装着数の推移")),
        collapsed = F,
        uiOutput("artificialRespirators") %>% withSpinner()
      ),
      accordionItem(
        id = 12,
        title = tagList(icon("heartbeat"), i18n$t("COVID-19重症者におけるECMO装着数の推移")),
        collapsed = T,
        uiOutput("ecmoUsing") %>% withSpinner()
      ),
      accordionItem(
        id = 13,
        title = tagList(icon("file-medical"), i18n$t("国内のCOVID-19に対するECMO治療の成績累計")),
        collapsed = T,
        # Left column: caveats text; right column: cumulative-outcome chart.
        fluidRow(
          column(
            width = 4,
            tags$br(),
            tags$b(icon("exclamation-circle"), i18n$t("注意事項")),
            blockQuote(
              tags$small(
                i18n$t("この図はCRISISに申告のあった症例と、それ以外に我々のネットワークで集めたECMO症例の推移をあわらしたものです。あとから判明した症例も多くありますので、過去にさかのぼって日々数が変異しております。したがって上の図の数とここに表す数にも若干の齟齬が生じますのでご了承ください。人工呼吸が必要な患者さんのほぼ5人に1人がECMOも必要と判断されます。ECMOからの生還例ではおおよそ10日間から2週間のECMO装着が必要となります。ーー2020/5/1記載")
              )
            )
          ),
          column(
            width = 8,
            echarts4rOutput("ecmo") %>% withSpinner()
          )
        )
      )
    )
  )
)
| /04_Pages/ECMO/ECMO.ui.R | permissive | takewiki/2019-ncov-japan | R | false | false | 3,626 | r | fluidRow(
  # ECMO dashboard section: severe COVID-19 patient status tallied by the
  # Japan COVID-19 ECMOnet (data and wording mirrored from
  # https://covid19.jsicm.org/). All user-facing text goes through i18n$t().
  boxPlus(
    width = 12, closable = F,
    # Box header plus a paragraph linking back to the original data source.
    title = tagList(icon("chart-line"), i18n$t("COVID-19 重症患者状況 日本COVID-19対策ECMOnet集計")),
    tags$p(i18n$t("このページは、"),
           tags$a(
             icon("external-link-alt"),
             i18n$t("COVID-19 重症患者状況 日本COVID-19対策ECMOnet集計"),
             href = "https://covid19.jsicm.org/"),
           i18n$t("のデータ(文言を含む)を一覧できるように、若干異なる可視化方法でデータを表現しています。")
    ),
    # Verbatim disclaimer from ECMOnet about the CRISIS database coverage.
    blockQuote(
      tags$small(
        i18n$t("このグラフ群は横断的ICU情報探索システム(CRoss Icu Searchable Information System, 略称CRISIS, 非公開) に蓄積されたデータベースを視覚化したものです。このCRISISには日本集中治療医学会専門医認定施設、日本救急医学会救急科専門医指定施設を中心に日本全国570以上の施設が参加されており、それら施設の総ICUベッド数は5500にのぼり、日本全体のICUベッド(6500ベッドほど)の80%をカバーしております。本事業は各病院担当者の方々の善意により忙しい合間を縫って任意に手入力いただいているものです。そのため精度はかなり高いと存じますが完璧なものではないことをご理解いただければ幸いです。当初我々はECMOに関するデータを中心に集めて参りました。しかしながら人工呼吸器を必要とする重症の方々のデータも必要であると改めて認識し、データの精度を高めるよう努力しております。少しずつではございますが改善させ、このコロナ禍を乗り切った暁には重要な資産として次の世代に遺せるものを目指しております。 日本COVID-19対策ECMOnet 2020/5/1記載"))
    ),
    # Three collapsible panels: ventilator counts (open by default),
    # ECMO counts, and cumulative ECMO treatment outcomes.
    accordion(
      accordionItem(
        id = 11,
        title = tagList(icon("first-aid"), i18n$t("COVID-19重症者における人工呼吸器装着数の推移")),
        collapsed = F,
        uiOutput("artificialRespirators") %>% withSpinner()
      ),
      accordionItem(
        id = 12,
        title = tagList(icon("heartbeat"), i18n$t("COVID-19重症者におけるECMO装着数の推移")),
        collapsed = T,
        uiOutput("ecmoUsing") %>% withSpinner()
      ),
      accordionItem(
        id = 13,
        title = tagList(icon("file-medical"), i18n$t("国内のCOVID-19に対するECMO治療の成績累計")),
        collapsed = T,
        # Left column: caveats text; right column: cumulative-outcome chart.
        fluidRow(
          column(
            width = 4,
            tags$br(),
            tags$b(icon("exclamation-circle"), i18n$t("注意事項")),
            blockQuote(
              tags$small(
                i18n$t("この図はCRISISに申告のあった症例と、それ以外に我々のネットワークで集めたECMO症例の推移をあわらしたものです。あとから判明した症例も多くありますので、過去にさかのぼって日々数が変異しております。したがって上の図の数とここに表す数にも若干の齟齬が生じますのでご了承ください。人工呼吸が必要な患者さんのほぼ5人に1人がECMOも必要と判断されます。ECMOからの生還例ではおおよそ10日間から2週間のECMO装着が必要となります。ーー2020/5/1記載")
              )
            )
          ),
          column(
            width = 8,
            echarts4rOutput("ecmo") %>% withSpinner()
          )
        )
      )
    )
  )
)
#' Join two site-level data sets on shared annotations
#'
#' Stacks the sample tables of `df1` and `df2`, keeps only the annotations
#' present in both inputs (inner join on `ann_ID`), and restricts each
#' intensity table to those shared annotations before stacking them.
#'
#' @param df1,df2 Lists with elements `sampleData`, `annData` (must contain
#'   an `ann_ID` column) and `annIntData`.
#' @return A list with elements `sampleData`, `annData` and `annIntData`.
join_df <- function(df1, df2) {
  # Scalar condition: use short-circuit `||` rather than the vectorized `|`.
  if (is.null(df1$annData) || is.null(df2$annData))
    stop("Merge sites first to get annData")
  sampleData <- bind_rows(df1$sampleData, df2$sampleData)
  # Keep only annotations observed in both data sets.
  annData <- inner_join(df1$annData, df2$annData, by="ann_ID")
  # Drop intensity rows whose annotation did not survive the inner join.
  annIntData <- bind_rows(semi_join(df1$annIntData, annData),
                          semi_join(df2$annIntData, annData))
  df <- list(sampleData = sampleData,
             annData = annData,
             annIntData = annIntData)
  return(df)
}
#' Join two protein-level data sets on shared peaks
#'
#' Stacks the sample tables, inner-joins the peak tables on `peak_ID`, and
#' keeps only intensity rows whose peak occurs in both inputs.
#'
#' @param df1,df2 Lists with elements `sampleData`, `peakData` (containing a
#'   `peak_ID` column) and `intData`.
#' @return A list with elements `sampleData`, `peakData` and `intData`.
join_df_proteins <- function(df1, df2) {
  merged_samples <- bind_rows(df1$sampleData, df2$sampleData)
  # Only peaks present in both inputs survive the inner join.
  shared_peaks <- inner_join(df1$peakData, df2$peakData, by = "peak_ID")
  # Restrict each intensity table to the shared peaks, then stack them.
  kept_intensities <- bind_rows(semi_join(df1$intData, shared_peaks),
                                semi_join(df2$intData, shared_peaks))
  return(list(sampleData = merged_samples,
              peakData = shared_peaks,
              intData = kept_intensities))
}
if(is.null(df1$annData) | is.null(df2$annData))
stop("Merge sites first to get annData")
sampleData <- bind_rows(df1$sampleData, df2$sampleData)
annData <- inner_join(df1$annData, df2$annData, by="ann_ID")
annIntData <- bind_rows(semi_join(df1$annIntData, annData),
semi_join(df2$annIntData, annData))
df <- list(sampleData = sampleData,
annData = annData,
annIntData = annIntData)
return(df)
}
#' Join two protein-level data sets on shared peaks
#'
#' Stacks the sample tables, inner-joins the peak tables on `peak_ID`, and
#' keeps only intensity rows whose peak occurs in both inputs.
#'
#' @param df1,df2 Lists with elements `sampleData`, `peakData` (containing a
#'   `peak_ID` column) and `intData`.
#' @return A list with elements `sampleData`, `peakData` and `intData`.
join_df_proteins <- function(df1, df2) {
  merged_samples <- bind_rows(df1$sampleData, df2$sampleData)
  # Only peaks present in both inputs survive the inner join.
  shared_peaks <- inner_join(df1$peakData, df2$peakData, by = "peak_ID")
  # Restrict each intensity table to the shared peaks, then stack them.
  kept_intensities <- bind_rows(semi_join(df1$intData, shared_peaks),
                                semi_join(df2$intData, shared_peaks))
  return(list(sampleData = merged_samples,
              peakData = shared_peaks,
              intData = kept_intensities))
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ec2_operations.R
\name{ec2_register_transit_gateway_multicast_group_members}
\alias{ec2_register_transit_gateway_multicast_group_members}
\title{Registers members (network interfaces) with the transit gateway
multicast group}
\usage{
ec2_register_transit_gateway_multicast_group_members(
TransitGatewayMulticastDomainId, GroupIpAddress, NetworkInterfaceIds,
DryRun)
}
\arguments{
\item{TransitGatewayMulticastDomainId}{The ID of the transit gateway multicast domain.}
\item{GroupIpAddress}{The IP address assigned to the transit gateway multicast group.}
\item{NetworkInterfaceIds}{The group members\' network interface IDs to register with the transit
gateway multicast group.}
\item{DryRun}{Checks whether you have the required permissions for the action, without
actually making the request, and provides an error response. If you have
the required permissions, the error response is \code{DryRunOperation}.
Otherwise, it is \code{UnauthorizedOperation}.}
}
\description{
Registers members (network interfaces) with the transit gateway
multicast group. A member is a network interface associated with a
supported EC2 instance that receives multicast traffic. For information
about supported instances, see \href{https://docs.aws.amazon.com/vpc/latest/tgw/transit-gateway-limits.html#multicast-limits}{Multicast Consideration}
in \emph{Amazon VPC Transit Gateways}.
}
\details{
After you add the members, use
\href{https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_SearchTransitGatewayMulticastGroups.html}{SearchTransitGatewayMulticastGroups}
to verify that the members were added to the transit gateway multicast
group.
}
\section{Request syntax}{
\preformatted{svc$register_transit_gateway_multicast_group_members(
TransitGatewayMulticastDomainId = "string",
GroupIpAddress = "string",
NetworkInterfaceIds = list(
"string"
),
DryRun = TRUE|FALSE
)
}
}
\keyword{internal}
| /paws/man/ec2_register_transit_gateway_multicast_group_members.Rd | permissive | johnnytommy/paws | R | false | true | 1,985 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ec2_operations.R
\name{ec2_register_transit_gateway_multicast_group_members}
\alias{ec2_register_transit_gateway_multicast_group_members}
\title{Registers members (network interfaces) with the transit gateway
multicast group}
\usage{
ec2_register_transit_gateway_multicast_group_members(
TransitGatewayMulticastDomainId, GroupIpAddress, NetworkInterfaceIds,
DryRun)
}
\arguments{
\item{TransitGatewayMulticastDomainId}{The ID of the transit gateway multicast domain.}
\item{GroupIpAddress}{The IP address assigned to the transit gateway multicast group.}
\item{NetworkInterfaceIds}{The group members\' network interface IDs to register with the transit
gateway multicast group.}
\item{DryRun}{Checks whether you have the required permissions for the action, without
actually making the request, and provides an error response. If you have
the required permissions, the error response is \code{DryRunOperation}.
Otherwise, it is \code{UnauthorizedOperation}.}
}
\description{
Registers members (network interfaces) with the transit gateway
multicast group. A member is a network interface associated with a
supported EC2 instance that receives multicast traffic. For information
about supported instances, see \href{https://docs.aws.amazon.com/vpc/latest/tgw/transit-gateway-limits.html#multicast-limits}{Multicast Consideration}
in \emph{Amazon VPC Transit Gateways}.
}
\details{
After you add the members, use
\href{https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_SearchTransitGatewayMulticastGroups.html}{SearchTransitGatewayMulticastGroups}
to verify that the members were added to the transit gateway multicast
group.
}
\section{Request syntax}{
\preformatted{svc$register_transit_gateway_multicast_group_members(
TransitGatewayMulticastDomainId = "string",
GroupIpAddress = "string",
NetworkInterfaceIds = list(
"string"
),
DryRun = TRUE|FALSE
)
}
}
\keyword{internal}
|
# Profile-(penalized-)likelihood computation for a single coefficient of a
# logistic regression fitted by the logistf package; thin R wrapper around
# the compiled C routine "logistpl".
#
# Args:
#   x, y      : design matrix and binary (0/1) response.
#   init      : starting values for the coefficients (defaults to zeros).
#   i         : index of the coefficient being profiled.
#   LL.0      : reference log-likelihood level for the profile.
#   firth     : logical; apply Firth's penalized likelihood.
#   which     : passed through to the C routine (profiling direction,
#               -1 by default) -- TODO confirm exact semantics.
#   offset    : per-observation offsets (defaults to zeros).
#   weight    : per-observation weights (defaults to ones).
#   plcontrol : iteration controls from logistpl.control().
#
# Returns a list with elements beta, betahist, loglik, iter and conv.
logistpl <-
function(x, y, init=NULL, i, LL.0, firth, which = -1,
 offset=rep(0, length(y)), weight=rep(1,length(y)), plcontrol) {
 n<-nrow(x)
 k<-ncol(x)
 # Fill in defaults for starting values, offsets, weights and controls.
 if (is.null(init)) init<-rep(0,k)
 beta<-init
 if (is.null(offset)) offset=rep(0,n)
 if (is.null(weight)) weight=rep(1,n)
 if (missing(plcontrol)) plcontrol<-logistpl.control()
 maxit<-plcontrol$maxit
 maxstep<-plcontrol$maxstep
 maxhs<-plcontrol$maxhs
 xconv<-plcontrol$xconv
 lconv<-plcontrol$lconv
 # Encode the Firth flag as 0/1 and preallocate the outputs the C routine
 # fills in (coefficient history, log-likelihood, convergence diagnostics).
 firth <- if(firth) 1 else 0
 loglik <- iter <- 0
 conv <- double(2)
 betahist <- matrix(double(k * maxit), maxit)
 # The .C interface passes raw memory, so every argument must carry the
 # exact C storage mode (double / integer) before the call.
 mode(x) <- mode(weight) <- mode(beta) <- mode(offset) <- mode(LL.0) <- "double"
 mode(y) <- mode(firth) <- mode(n) <- mode(k) <- "integer"
 mode(maxstep) <- mode(lconv) <- mode(xconv) <- mode(loglik) <- "double"
 mode(maxit) <- mode(maxhs) <- mode(i) <- mode(which) <- mode(iter) <- "integer"
 # Arguments are positional; the named ones (beta=, betahist=, loglik=,
 # iter=, conv=) are modified in C and read back from the result list.
 res <- .C("logistpl", x, y, n, k, weight, offset, beta=beta, i, which, LL.0, firth, maxit,
      maxstep, maxhs, lconv, xconv, betahist=betahist, loglik=loglik, iter=iter, conv=conv,
      PACKAGE="logistf")
 res <- res[c("beta", "betahist", "loglik", "iter", "conv")]
 # Truncate the history to the iterations actually performed and report
 # only the profiled coefficient.
 res$betahist <- head(res$betahist, res$iter)
 res$beta <- res$beta[i]
 res
}
| /R/logistpl.R | no_license | ezgicn/logistf | R | false | false | 1,235 | r | logistpl <-
# Profile-(penalized-)likelihood computation for a single coefficient of a
# logistic regression (logistf package); thin wrapper around the compiled
# C routine "logistpl". Returns list(beta, betahist, loglik, iter, conv).
function(x, y, init=NULL, i, LL.0, firth, which = -1,
 offset=rep(0, length(y)), weight=rep(1,length(y)), plcontrol) {
 n<-nrow(x)
 k<-ncol(x)
 # Fill in defaults for starting values, offsets, weights and controls.
 if (is.null(init)) init<-rep(0,k)
 beta<-init
 if (is.null(offset)) offset=rep(0,n)
 if (is.null(weight)) weight=rep(1,n)
 if (missing(plcontrol)) plcontrol<-logistpl.control()
 maxit<-plcontrol$maxit
 maxstep<-plcontrol$maxstep
 maxhs<-plcontrol$maxhs
 xconv<-plcontrol$xconv
 lconv<-plcontrol$lconv
 # Encode the Firth flag as 0/1 and preallocate outputs the C routine fills.
 firth <- if(firth) 1 else 0
 loglik <- iter <- 0
 conv <- double(2)
 betahist <- matrix(double(k * maxit), maxit)
 # The .C interface passes raw memory, so every argument must carry the
 # exact C storage mode (double / integer) before the call.
 mode(x) <- mode(weight) <- mode(beta) <- mode(offset) <- mode(LL.0) <- "double"
 mode(y) <- mode(firth) <- mode(n) <- mode(k) <- "integer"
 mode(maxstep) <- mode(lconv) <- mode(xconv) <- mode(loglik) <- "double"
 mode(maxit) <- mode(maxhs) <- mode(i) <- mode(which) <- mode(iter) <- "integer"
 # Named arguments (beta=, betahist=, ...) are modified in C and read back.
 res <- .C("logistpl", x, y, n, k, weight, offset, beta=beta, i, which, LL.0, firth, maxit,
      maxstep, maxhs, lconv, xconv, betahist=betahist, loglik=loglik, iter=iter, conv=conv,
      PACKAGE="logistf")
 res <- res[c("beta", "betahist", "loglik", "iter", "conv")]
 # Truncate history to iterations performed; report the profiled coefficient.
 res$betahist <- head(res$betahist, res$iter)
 res$beta <- res$beta[i]
 res
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/method_structure.R
\docType{methods}
\name{CSanalysis,matrix,matrix,character-method}
\alias{CSanalysis,matrix,matrix,character-method}
\title{Connectivity Score Analysis.}
\usage{
\S4method{CSanalysis}{matrix,matrix,character}(querMat, refMat, type, ...)
}
\arguments{
\item{querMat}{Query matrix (Rows = genes and columns = compounds)}
\item{refMat}{Reference matrix}
\item{type}{Type of Factor Analysis or Zhang & Gant ( \code{"CSfabia"}, \code{"CSmfa"}, \code{"CSpca"}, \code{"CSsmfa"} or \code{"CSzhang"})}
\item{...}{Additional parameters for analysis}
}
\value{
An object of the S4 Class \code{\link{CSresult-class}}.
}
\description{
Doing a CS analysis, interactively generating graphs. See specific type for additional parameters.\cr
Types:\cr
\itemize{
\item \code{\link[=CSanalysis,matrix,matrix,CSzhang-method]{Zhang and Gant}}
\item \code{\link[=CSanalysis,matrix,matrix,CSmfa-method]{MFA}}
\item \code{\link[=CSanalysis,matrix,matrix,CSpca-method]{PCA}}
\item \code{\link[=CSanalysis,matrix,matrix,CSsmfa-method]{Sparse MFA}}
\item \code{\link[=CSanalysis,matrix,matrix,CSfabia-method]{FABIA}}
}
}
\examples{
\dontshow{
data("dataSIM",package="CSFA")
Mat1 <- dataSIM[,c(1:6)]
Mat2 <- dataSIM[,-c(1:6)]
ZHANG_analysis <- CSanalysis(Mat1,Mat2,"CSzhang")
}
\donttest{
data("dataSIM",package="CSFA")
Mat1 <- dataSIM[,c(1:6)]
Mat2 <- dataSIM[,-c(1:6)]
MFA_analysis <- CSanalysis(Mat1,Mat2,"CSmfa")
FABIA_analysis <- CSanalysis(Mat1,Mat2,"CSfabia")
ZHANG_analysis <- CSanalysis(Mat1,Mat2,"CSzhang")
}
}
| /man/CSanalysis-matrix-matrix-character-method.Rd | no_license | cran/CSFA | R | false | true | 1,643 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/method_structure.R
\docType{methods}
\name{CSanalysis,matrix,matrix,character-method}
\alias{CSanalysis,matrix,matrix,character-method}
\title{Connectivity Score Analysis.}
\usage{
\S4method{CSanalysis}{matrix,matrix,character}(querMat, refMat, type, ...)
}
\arguments{
\item{querMat}{Query matrix (Rows = genes and columns = compounds)}
\item{refMat}{Reference matrix}
\item{type}{Type of Factor Analysis or Zhang & Gant ( \code{"CSfabia"}, \code{"CSmfa"}, \code{"CSpca"}, \code{"CSsmfa"} or \code{"CSzhang"})}
\item{...}{Additional parameters for analysis}
}
\value{
An object of the S4 Class \code{\link{CSresult-class}}.
}
\description{
Doing a CS analysis, interactively generating graphs. See specific type for additional parameters.\cr
Types:\cr
\itemize{
\item \code{\link[=CSanalysis,matrix,matrix,CSzhang-method]{Zhang and Gant}}
\item \code{\link[=CSanalysis,matrix,matrix,CSmfa-method]{MFA}}
\item \code{\link[=CSanalysis,matrix,matrix,CSpca-method]{PCA}}
\item \code{\link[=CSanalysis,matrix,matrix,CSsmfa-method]{Sparse MFA}}
\item \code{\link[=CSanalysis,matrix,matrix,CSfabia-method]{FABIA}}
}
}
\examples{
\dontshow{
data("dataSIM",package="CSFA")
Mat1 <- dataSIM[,c(1:6)]
Mat2 <- dataSIM[,-c(1:6)]
ZHANG_analysis <- CSanalysis(Mat1,Mat2,"CSzhang")
}
\donttest{
data("dataSIM",package="CSFA")
Mat1 <- dataSIM[,c(1:6)]
Mat2 <- dataSIM[,-c(1:6)]
MFA_analysis <- CSanalysis(Mat1,Mat2,"CSmfa")
FABIA_analysis <- CSanalysis(Mat1,Mat2,"CSfabia")
ZHANG_analysis <- CSanalysis(Mat1,Mat2,"CSzhang")
}
}
|
#' Convert a Seurat Object to a Monocle Cell Data Set
#'
#' Builds a monocle3 cell_data_set from the counts, cell metadata and
#' dimensionality reductions stored in a Seurat object, then learns a
#' trajectory graph using the Seurat clustering at the given resolution.
#'
#' @param seu A Seurat object; if an "integrated" assay is present it is
#'   used as the default assay, otherwise "RNA".
#' @param resolution Clustering resolution whose Seurat cluster assignments
#'   are transferred to the cds (see `learn_graph_by_resolution()`).
#'
#' @return A monocle3 cell_data_set with UMAP/PCA embeddings copied from
#'   the Seurat object and a learned principal graph.
#' @export
#'
#' @examples
convert_seu_to_cds <- function(seu, resolution = 1) {
  ### Building the necessary parts for a basic cds
  # part two, counts sparse matrix
  if ("integrated" %in% names(seu@assays)) {
    default_assay = "integrated"
  } else {
    default_assay = "RNA"
  }
  DefaultAssay(seu) <- default_assay
  expression_matrix <- Seurat::GetAssayData(seu, slot = "data", assay = default_assay)
  count_matrix <- Seurat::GetAssayData(seu, slot = "counts", assay = "RNA")
  # Align counts to the genes of the default assay and drop zero-count
  # cells (they would break size-factor estimation downstream).
  count_matrix <- count_matrix[row.names(expression_matrix),]
  count_matrix <- count_matrix[,Matrix::colSums(count_matrix) != 0]
  # part three, gene annotations
  gene_annotation <- data.frame(gene_short_name = rownames(count_matrix),
                                row.names = rownames(count_matrix))
  # part one, cell information
  cell_metadata <- seu[[]][colnames(count_matrix),]
  # Keep the Seurat object and the cds cell sets in sync after filtering.
  seu <- seu[,colnames(count_matrix)]
  ### Construct the basic cds object
  cds_from_seurat <- monocle3::new_cell_data_set(expression_data = count_matrix,
                                                 cell_metadata = cell_metadata,
                                                 gene_metadata = gene_annotation)
  cds_from_seurat <- cds_from_seurat[,colnames(seu)]
  # estimate size factors
  cds_from_seurat <- cds_from_seurat[, colSums(as.matrix(monocle3::exprs(cds_from_seurat))) != 0]
  cds_from_seurat <- monocle3::estimate_size_factors(cds_from_seurat)
  ### Construct and assign the made up partition
  # A single dummy partition covering all cells -- presumably required by
  # monocle3::learn_graph(); TODO confirm.
  recreate.partition <- c(rep(1, length(cds_from_seurat@colData@rownames)))
  names(recreate.partition) <- cds_from_seurat@colData@rownames
  recreate.partition <- as.factor(recreate.partition)
  cds_from_seurat@clusters@listData[["UMAP"]][["partitions"]] <- recreate.partition
  ### Could be a space-holder, but essentially fills out louvain parameters
  cds_from_seurat@clusters@listData[["UMAP"]][["louvain_res"]] <- "NA"
  # cds_from_seurat <- monocle3::preprocess_cds(cds_from_seurat)
  # cds_from_seurat <- monocle3::reduce_dimension(cds_from_seurat, "UMAP")
  #
  # reducedDim(cds_from_seurat, "PCA") <- Embeddings(seu, "pca")
  # reducedDim(cds_from_seurat, "UMAP") <- Embeddings(seu, "umap")
  # Reuse Seurat's UMAP/PCA embeddings instead of recomputing in monocle3.
  cds_from_seurat@reducedDims@listData[["UMAP"]] <- Embeddings(seu, "umap")
  cds_from_seurat@reducedDims@listData[["PCA"]] <- Embeddings(seu, "pca")
  cds_from_seurat@preprocess_aux$gene_loadings <- Loadings(seu, "pca")
  cds_from_seurat <- learn_graph_by_resolution(cds_from_seurat, seu, resolution = resolution)
  return(cds_from_seurat)
}
#' Assign Clusters to CDS
#'
#' Stores a cluster assignment vector in the UMAP slot of a monocle3
#' cell_data_set, reordered to the cds cell order and renamed with the
#' cds row names.
#'
#' @param cds A monocle3 cell_data_set.
#' @param clusters A vector of cluster assignments named by cell id.
#'
#' @return The cell_data_set with clusters stored under
#'   `cds@clusters@listData[["UMAP"]][["clusters"]]`.
#' @export
#'
#' @examples
assign_clusters_to_cds <- function(cds, clusters){
  # Reorder the assignments to match the cds cell ordering, then relabel
  # them with the cds row names before storing in the UMAP slot.
  ordered_clusters <- clusters[colnames(cds)]
  names(ordered_clusters) <- cds@colData@rownames
  cds@clusters@listData[["UMAP"]][["clusters"]] <- ordered_clusters
  return(cds)
}
#' Learn Monocle Graph by Resolution
#'
#' Clusters the cds, replaces monocle3's cluster assignments with the
#' Seurat clustering at the requested resolution, then learns the
#' principal graph (trajectory).
#'
#' @param cds A monocle3 cell_data_set.
#' @param seu The Seurat object holding `<assay>_snn_res.<resolution>`
#'   cluster columns in its metadata.
#' @param resolution Clustering resolution used to pick the Seurat
#'   cluster column.
#'
#' @return The cell_data_set with clusters assigned and a learned graph.
#' @export
#'
#' @examples
learn_graph_by_resolution <- function(cds, seu, resolution = 1){
  ### Assign the cluster info
  # Pick the assay whose Seurat clustering columns should be used.
  if (any(grepl("integrated", names(cds@colData)))){
    default_assay = "integrated"
  } else {
    default_assay = "RNA"
  }
  cds <- monocle3::cluster_cells(cds)
  # Overwrite monocle's clustering with the Seurat assignments taken from
  # the metadata column "<assay>_snn_res.<resolution>".
  clusters <- seu[[paste0(default_assay, '_snn_res.', resolution)]]
  clusters <- purrr::set_names(clusters[[1]], rownames(clusters))
  cds <- assign_clusters_to_cds(cds, clusters = clusters)
  print("Learning graph, which can take a while depends on the sample")
  cds <- monocle3::learn_graph(cds, use_partition = T)
  return(cds)
}
#' Plot a Monocle Cell Data Set
#'
#' Renders the 2-D embedding of `cds` via the local `plot_cells()` wrapper
#' and converts the result into an interactive plotly widget (WebGL).
#'
#' @param cds A monocle3 cell_data_set.
#' @param color_cells_by Column of `colData(cds)` (or "cluster",
#'   "partition", "pseudotime") used to color cells; forwarded to
#'   `plot_cells()`.
#' @param genes Optional gene (or gene-set) selection forwarded to
#'   `plot_cells()`.
#'
#' @return A plotly htmlwidget, returned invisibly.
#' @export
#'
#' @examples
plot_cds <- function(cds, color_cells_by = NULL, genes = NULL){
  # Stash a numeric key and the cell id in colData so they reach the plot
  # data (they are forwarded into the point aesthetics below).
  key <- seq(1, length(colnames(cds)))
  cellid <- colnames(cds)
  cds[['key']] = key
  cds[['cellid']] = cellid
  # Dead code removed: a stray standalone `NULL` statement and an unused
  # `default_assay` computation had no effect here.
  cds_plot <- plot_cells(cds,
                         genes = genes,
                         label_cell_groups = FALSE,
                         label_groups_by_cluster = FALSE,
                         label_leaves = FALSE,
                         label_branch_points = FALSE,
                         color_cells_by = color_cells_by, key = key, cellid = cellid,
                         cell_size = 0.75)
  # Convert to an interactive widget; WebGL keeps large point clouds fast.
  cds_plot <-
    cds_plot %>%
    plotly::ggplotly(height = 400) %>%
    plotly_settings() %>%
    plotly::toWebGL() %>%
    identity()
  # Match the original invisible return (last expression was an assignment).
  invisible(cds_plot)
}
#' Plot pseudotime over a Monocle Cell Data Set
#'
#' Draws the embedding of `cds` together with the learned principal graph
#' (trajectory) and returns an interactive plotly widget (WebGL).
#'
#' @param cds A monocle3 cell_data_set with a learned graph.
#' @param resolution Clustering resolution; currently unused in this body
#'   but kept for interface compatibility with existing callers.
#' @param color_cells_by Cell coloring forwarded to
#'   `monocle3::plot_cells()` (e.g. "pseudotime").
#' @param genes Optional gene selection forwarded to
#'   `monocle3::plot_cells()`.
#'
#' @return A plotly htmlwidget, returned invisibly.
#' @export
#'
#' @examples
plot_pseudotime <- function(cds, resolution, color_cells_by = NULL, genes = NULL){
  # Stash a numeric key and the cell id in colData for point identification.
  key <- seq(1, length(colnames(cds)))
  cellid <- colnames(cds)
  cds[['key']] = key
  cds[['cellid']] = cellid
  cds_plot <- monocle3::plot_cells(cds,
                                   show_trajectory_graph = TRUE,
                                   genes = genes,
                                   label_cell_groups = FALSE,
                                   label_groups_by_cluster = FALSE,
                                   label_leaves = FALSE,
                                   label_branch_points = FALSE,
                                   color_cells_by = color_cells_by,
                                   cell_size = 0.75)
  # Single ggplotly() conversion is sufficient; the previously duplicated
  # call was redundant. WebGL keeps large point clouds responsive.
  cds_plot <-
    cds_plot %>%
    plotly::ggplotly(height = 400) %>%
    plotly_settings() %>%
    plotly::toWebGL() %>%
    identity()
  invisible(cds_plot)
}
#' Plot feature expression over a Monocle Cell Data Set
#'
#' Renders per-gene expression on the embedding of `cds` via the local
#' `plot_cells()` wrapper and returns an interactive plotly widget (WebGL).
#'
#' @param cds A monocle3 cell_data_set.
#' @param resolution Clustering resolution; currently unused in this body
#'   but kept for interface compatibility with existing callers.
#' @param genes Gene (or gene-set) selection forwarded to `plot_cells()`.
#' @param ... Currently unused; kept for interface compatibility.
#'
#' @return A plotly htmlwidget, returned invisibly.
#' @export
#'
#' @examples
plot_monocle_features <- function(cds, resolution, genes = NULL, ...){
  # Stash a numeric key and the cell id in colData for point identification.
  key <- seq(1, length(colnames(cds)))
  cellid <- colnames(cds)
  cds[['key']] = key
  cds[['cellid']] = cellid
  cds_plot <- plot_cells(cds,
                         genes = genes,
                         label_cell_groups = FALSE,
                         label_groups_by_cluster = FALSE,
                         label_leaves = FALSE,
                         label_branch_points = FALSE,
                         cell_size = 0.75)
  # Single ggplotly() conversion is sufficient; the previously duplicated
  # call was redundant. WebGL keeps large point clouds responsive.
  cds_plot <-
    cds_plot %>%
    plotly::ggplotly(height = 400) %>%
    plotly_settings() %>%
    plotly::toWebGL() %>%
    identity()
  invisible(cds_plot)
}
#' Title
#'
#' @param cds
#' @param x
#' @param y
#' @param reduction_method
#' @param color_cells_by
#' @param group_cells_by
#' @param genes
#' @param show_trajectory_graph
#' @param trajectory_graph_color
#' @param trajectory_graph_segment_size
#' @param norm_method
#' @param label_cell_groups
#' @param label_groups_by_cluster
#' @param group_label_size
#' @param labels_per_group
#' @param label_branch_points
#' @param label_roots
#' @param label_leaves
#' @param graph_label_size
#' @param cell_size
#' @param cell_stroke
#' @param alpha
#' @param min_expr
#' @param rasterize
#' @param scale_to_range
#' @param ...
#'
#' @return
#' @export
#'
#' @examples
plot_cells <- function(cds, x = 1, y = 2, reduction_method = c("UMAP", "tSNE",
"PCA", "LSI", "Aligned"), color_cells_by = "cluster", group_cells_by = c("cluster",
"partition"), genes = NULL, show_trajectory_graph = TRUE,
trajectory_graph_color = "grey28", trajectory_graph_segment_size = 0.75,
norm_method = c("log", "size_only"), label_cell_groups = TRUE,
label_groups_by_cluster = TRUE, group_label_size = 2, labels_per_group = 1,
label_branch_points = TRUE, label_roots = TRUE, label_leaves = TRUE,
graph_label_size = 2, cell_size = 0.35, cell_stroke = I(cell_size/2),
alpha = 1, min_expr = 0.1, rasterize = FALSE, scale_to_range = FALSE, ...)
{
reduction_method <- match.arg(reduction_method)
assertthat::assert_that(methods::is(cds, "cell_data_set"))
assertthat::assert_that(!is.null(reducedDims(cds)[[reduction_method]]),
msg = paste("No dimensionality reduction for", reduction_method,
"calculated.", "Please run reduce_dimensions with",
"reduction_method =", reduction_method, "before attempting to plot."))
low_dim_coords <- reducedDims(cds)[[reduction_method]]
assertthat::assert_that(ncol(low_dim_coords) >= max(x, y),
msg = paste("x and/or y is too large. x and y must",
"be dimensions in reduced dimension", "space."))
if (!is.null(color_cells_by)) {
assertthat::assert_that(color_cells_by %in% c("cluster",
"partition", "pseudotime") | color_cells_by %in%
names(colData(cds)), msg = paste("color_cells_by must one of",
"'cluster', 'partition', 'pseudotime,", "or a column in the colData table."))
if (color_cells_by == "pseudotime") {
tryCatch({
pseudotime(cds, reduction_method = reduction_method)
}, error = function(x) {
stop(paste("No pseudotime for", reduction_method,
"calculated. Please run order_cells with",
"reduction_method =", reduction_method, "before attempting to color by pseudotime."))
})
}
}
assertthat::assert_that(!is.null(color_cells_by) || !is.null(markers),
msg = paste("Either color_cells_by or markers must",
"be NULL, cannot color by both!"))
norm_method = match.arg(norm_method)
group_cells_by = match.arg(group_cells_by)
assertthat::assert_that(!is.null(color_cells_by) || !is.null(genes),
msg = paste("Either color_cells_by or genes must be",
"NULL, cannot color by both!"))
if (show_trajectory_graph && is.null(monocle3::principal_graph(cds)[[reduction_method]])) {
message("No trajectory to plot. Has learn_graph() been called yet?")
show_trajectory_graph = FALSE
}
gene_short_name <- NA
sample_name <- NA
data_dim_1 <- NA
data_dim_2 <- NA
if (rasterize) {
plotting_func <- ggrastr::geom_point_rast
}
else {
plotting_func <- ggplot2::geom_point
}
S_matrix <- reducedDims(cds)[[reduction_method]]
data_df <- data.frame(S_matrix[, c(x, y)])
colnames(data_df) <- c("data_dim_1", "data_dim_2")
data_df$sample_name <- row.names(data_df)
data_df <- as.data.frame(cbind(data_df, colData(cds)))
if (group_cells_by == "cluster") {
data_df$cell_group <- tryCatch({
clusters(cds, reduction_method = reduction_method)[data_df$sample_name]
}, error = function(e) {
NULL
})
}
else if (group_cells_by == "partition") {
data_df$cell_group <- tryCatch({
partitions(cds, reduction_method = reduction_method)[data_df$sample_name]
}, error = function(e) {
NULL
})
}
else {
stop("Error: unrecognized way of grouping cells.")
}
if (color_cells_by == "cluster") {
data_df$cell_color <- tryCatch({
clusters(cds, reduction_method = reduction_method)[data_df$sample_name]
}, error = function(e) {
NULL
})
}
else if (color_cells_by == "partition") {
data_df$cell_color <- tryCatch({
partitions(cds, reduction_method = reduction_method)[data_df$sample_name]
}, error = function(e) {
NULL
})
}
else if (color_cells_by == "pseudotime") {
data_df$cell_color <- tryCatch({
pseudotime(cds, reduction_method = reduction_method)[data_df$sample_name]
}, error = function(e) {
NULL
})
}
else {
data_df$cell_color <- colData(cds)[data_df$sample_name,
color_cells_by]
}
if (show_trajectory_graph) {
ica_space_df <- t(cds@principal_graph_aux[[reduction_method]]$dp_mst) %>%
as.data.frame() %>% dplyr::select_(prin_graph_dim_1 = x,
prin_graph_dim_2 = y) %>% dplyr::mutate(sample_name = rownames(.),
sample_state = rownames(.))
dp_mst <- cds@principal_graph[[reduction_method]]
edge_df <- dp_mst %>% igraph::as_data_frame() %>% dplyr::select_(source = "from",
target = "to") %>% dplyr::left_join(ica_space_df %>%
dplyr::select_(source = "sample_name", source_prin_graph_dim_1 = "prin_graph_dim_1",
source_prin_graph_dim_2 = "prin_graph_dim_2"),
by = "source") %>% dplyr::left_join(ica_space_df %>%
dplyr::select_(target = "sample_name", target_prin_graph_dim_1 = "prin_graph_dim_1",
target_prin_graph_dim_2 = "prin_graph_dim_2"),
by = "target")
}
markers_exprs <- NULL
expression_legend_label <- NULL
if (!is.null(genes)) {
if (!is.null(dim(genes)) && dim(genes) >= 2) {
markers = unlist(genes[, 1], use.names = FALSE)
}
else {
markers = genes
}
markers_rowData <- as.data.frame(subset(rowData(cds),
gene_short_name %in% markers | row.names(rowData(cds)) %in%
markers))
if (nrow(markers_rowData) == 0) {
stop("None of the provided genes were found in the cds")
}
if (nrow(markers_rowData) >= 1) {
cds_exprs <- SingleCellExperiment::counts(cds)[row.names(markers_rowData),
, drop = FALSE]
cds_exprs <- Matrix::t(Matrix::t(cds_exprs)/monocle3::size_factors(cds))
if (!is.null(dim(genes)) && dim(genes) >= 2) {
genes = as.data.frame(genes)
row.names(genes) = genes[, 1]
genes = genes[row.names(cds_exprs), ]
agg_mat = as.matrix(monocle3::aggregate_gene_expression(cds,
genes, norm_method = norm_method, scale_agg_values = FALSE))
if(dim(agg_mat)[2] == 1) agg_mat <- t(agg_mat)
markers_exprs = agg_mat
markers_exprs <- reshape2::melt(markers_exprs)
colnames(markers_exprs)[1:2] <- c("feature_id",
"cell_id")
if (is.factor(genes[, 2]))
markers_exprs$feature_id = factor(markers_exprs$feature_id,
levels = levels(genes[, 2]))
markers_exprs$feature_label <- markers_exprs$feature_id
norm_method = "size_only"
expression_legend_label = "Expression score"
}
else {
cds_exprs@x = round(10000 * cds_exprs@x)/10000
markers_exprs = matrix(cds_exprs, nrow = nrow(markers_rowData))
colnames(markers_exprs) = colnames(SingleCellExperiment::counts(cds))
row.names(markers_exprs) = row.names(markers_rowData)
markers_exprs <- reshape2::melt(markers_exprs)
colnames(markers_exprs)[1:2] <- c("feature_id",
"cell_id")
markers_exprs <- merge(markers_exprs, markers_rowData,
by.x = "feature_id", by.y = "row.names")
if (is.null(markers_exprs$gene_short_name)) {
markers_exprs$feature_label <- as.character(markers_exprs$feature_id)
}
else {
markers_exprs$feature_label <- as.character(markers_exprs$gene_short_name)
}
markers_exprs$feature_label <- ifelse(is.na(markers_exprs$feature_label) |
!as.character(markers_exprs$feature_label) %in%
markers, as.character(markers_exprs$feature_id),
as.character(markers_exprs$feature_label))
markers_exprs$feature_label <- factor(markers_exprs$feature_label,
levels = markers)
if (norm_method == "size_only")
expression_legend_label = "Expression"
else expression_legend_label = "log10(Expression)"
}
if (scale_to_range) {
markers_exprs = dplyr::group_by(markers_exprs,
feature_label) %>% dplyr::mutate(max_val_for_feature = max(value),
min_val_for_feature = min(value)) %>% dplyr::mutate(value = 100 *
(value - min_val_for_feature)/(max_val_for_feature -
min_val_for_feature))
expression_legend_label = "% Max"
}
}
}
if (label_cell_groups && is.null(color_cells_by) == FALSE) {
if (is.null(data_df$cell_color)) {
if (is.null(genes)) {
message(paste(color_cells_by, "not found in colData(cds), cells will",
"not be colored"))
}
text_df = NULL
label_cell_groups = FALSE
}
else {
if (is.character(data_df$cell_color) || is.factor(data_df$cell_color)) {
if (label_groups_by_cluster && is.null(data_df$cell_group) ==
FALSE) {
text_df = data_df %>% dplyr::group_by(cell_group) %>%
dplyr::mutate(cells_in_cluster = dplyr::n()) %>%
dplyr::group_by(cell_color, add = TRUE) %>%
dplyr::mutate(per = dplyr::n()/cells_in_cluster)
median_coord_df = text_df %>% dplyr::summarize(fraction_of_group = dplyr::n(),
text_x = stats::median(x = data_dim_1),
text_y = stats::median(x = data_dim_2))
text_df = suppressMessages(text_df %>% dplyr::select(per) %>%
dplyr::distinct())
text_df = suppressMessages(dplyr::inner_join(text_df,
median_coord_df))
text_df = text_df %>% dplyr::group_by(cell_group) %>%
dplyr::top_n(labels_per_group, per)
}
else {
text_df = data_df %>% dplyr::group_by(cell_color) %>%
dplyr::mutate(per = 1)
median_coord_df = text_df %>% dplyr::summarize(fraction_of_group = dplyr::n(),
text_x = stats::median(x = data_dim_1),
text_y = stats::median(x = data_dim_2))
text_df = suppressMessages(text_df %>% dplyr::select(per) %>%
dplyr::distinct())
text_df = suppressMessages(dplyr::inner_join(text_df,
median_coord_df))
text_df = text_df %>% dplyr::group_by(cell_color) %>%
dplyr::top_n(labels_per_group, per)
}
text_df$label = as.character(text_df %>% dplyr::pull(cell_color))
}
else {
message(paste("Cells aren't colored in a way that allows them to",
"be grouped."))
text_df = NULL
label_cell_groups = FALSE
}
}
}
if (!is.null(markers_exprs) && nrow(markers_exprs) > 0) {
data_df <- merge(data_df, markers_exprs, by.x = "sample_name",
by.y = "cell_id")
data_df$value <- with(data_df, ifelse(value >= min_expr,
value, NA))
na_sub <- data_df[is.na(data_df$value), ]
if (norm_method == "size_only") {
g <- ggplot(data = data_df, aes(x = data_dim_1,
y = data_dim_2)) + plotting_func(aes(data_dim_1,
data_dim_2), size = I(cell_size), stroke = I(cell_stroke),
color = "grey80", alpha = alpha, data = na_sub) +
plotting_func(aes(color = value), size = I(cell_size),
stroke = I(cell_stroke), na.rm = TRUE) + viridis::scale_color_viridis(option = "plasma",
name = expression_legend_label, na.value = "grey80",
end = 0.8, alpha = alpha) + guides(alpha = FALSE) +
facet_wrap(~feature_label)
}
else {
g <- ggplot(data = data_df, aes(x = data_dim_1,
y = data_dim_2)) + plotting_func(aes(data_dim_1,
data_dim_2), size = I(cell_size), stroke = I(cell_stroke),
color = "grey80", data = na_sub, alpha = alpha) +
plotting_func(aes(color = log10(value + min_expr)),
size = I(cell_size), stroke = I(cell_stroke),
na.rm = TRUE, alpha = alpha) + viridis::scale_color_viridis(option = "plasma",
name = expression_legend_label, na.value = "grey80",
end = 0.8, alpha = alpha) + guides(alpha = FALSE) +
facet_wrap(~feature_label)
}
}
else {
g <- ggplot(data = data_df, aes(x = data_dim_1, y = data_dim_2))
if (color_cells_by %in% c("cluster", "partition")) {
if (is.null(data_df$cell_color)) {
g <- g + geom_point(color = I("gray"), size = I(cell_size),
stroke = I(cell_stroke), na.rm = TRUE, alpha = I(alpha))
message(paste("cluster_cells() has not been called yet, can't",
"color cells by cluster"))
}
else {
g <- g + geom_point(aes(color = cell_color, ...),
size = I(cell_size), stroke = I(cell_stroke),
na.rm = TRUE, alpha = alpha)
}
g <- g + guides(color = guide_legend(title = color_cells_by,
override.aes = list(size = 4)))
}
else if (class(data_df$cell_color) == "numeric") {
g <- g + geom_point(aes(color = cell_color, ...), size = I(cell_size),
stroke = I(cell_stroke), na.rm = TRUE, alpha = alpha)
g <- g + viridis::scale_color_viridis(name = color_cells_by,
option = "C")
}
else {
g <- g + geom_point(aes(color = cell_color, ...), size = I(cell_size),
stroke = I(cell_stroke), na.rm = TRUE, alpha = alpha)
g <- g + guides(color = guide_legend(title = color_cells_by,
override.aes = list(size = 4)))
}
}
if (show_trajectory_graph) {
g <- g + geom_segment(aes_string(x = "source_prin_graph_dim_1",
y = "source_prin_graph_dim_2", xend = "target_prin_graph_dim_1",
yend = "target_prin_graph_dim_2"), size = trajectory_graph_segment_size,
color = I(trajectory_graph_color), linetype = "solid",
na.rm = TRUE, data = edge_df)
if (label_branch_points) {
mst_branch_nodes <- branch_nodes(cds)
branch_point_df <- ica_space_df %>% dplyr::slice(match(names(mst_branch_nodes),
sample_name)) %>% dplyr::mutate(branch_point_idx = seq_len(dplyr::n()))
g <- g + geom_point(aes_string(x = "prin_graph_dim_1",
y = "prin_graph_dim_2"), shape = 21, stroke = I(trajectory_graph_segment_size),
color = "white", fill = "black", size = I(graph_label_size *
1.5), na.rm = TRUE, branch_point_df) + geom_text(aes_string(x = "prin_graph_dim_1",
y = "prin_graph_dim_2", label = "branch_point_idx"),
size = I(graph_label_size), color = "white",
na.rm = TRUE, branch_point_df)
}
if (label_leaves) {
mst_leaf_nodes <- leaf_nodes(cds)
leaf_df <- ica_space_df %>% dplyr::slice(match(names(mst_leaf_nodes),
sample_name)) %>% dplyr::mutate(leaf_idx = seq_len(dplyr::n()))
g <- g + geom_point(aes_string(x = "prin_graph_dim_1",
y = "prin_graph_dim_2"), shape = 21, stroke = I(trajectory_graph_segment_size),
color = "black", fill = "lightgray", size = I(graph_label_size *
1.5), na.rm = TRUE, leaf_df) + geom_text(aes_string(x = "prin_graph_dim_1",
y = "prin_graph_dim_2", label = "leaf_idx"),
size = I(graph_label_size), color = "black",
na.rm = TRUE, leaf_df)
}
if (label_roots) {
mst_root_nodes <- monocle3:::root_nodes(cds)
root_df <- ica_space_df %>% dplyr::slice(match(names(mst_root_nodes),
sample_name)) %>% dplyr::mutate(root_idx = seq_len(dplyr::n()))
g <- g + geom_point(aes_string(x = "prin_graph_dim_1",
y = "prin_graph_dim_2"), shape = 21, stroke = I(trajectory_graph_segment_size),
color = "black", fill = "white", size = I(graph_label_size *
1.5), na.rm = TRUE, root_df) + geom_text(aes_string(x = "prin_graph_dim_1",
y = "prin_graph_dim_2", label = "root_idx"),
size = I(graph_label_size), color = "black",
na.rm = TRUE, root_df)
}
}
if (label_cell_groups) {
g <- g + ggrepel::geom_text_repel(data = text_df, mapping = aes_string(x = "text_x",
y = "text_y", label = "label"), size = I(group_label_size))
if (is.null(markers_exprs))
g <- g + theme(legend.position = "none")
}
g <- g + monocle3:::monocle_theme_opts() + xlab(paste(reduction_method,
x)) + ylab(paste(reduction_method, y)) + theme(legend.key = element_blank()) +
theme(panel.background = element_rect(fill = "white"))
g
}
#' Build a Heatmap of Monocle3 Gene Modules
#'
#' Groups the supplied genes into co-expression modules with
#' \code{monocle3::find_gene_modules}, aggregates module expression per
#' cell group, and renders an interactive iheatmapr heatmap.
#'
#' @param cds A monocle3 cell_data_set; must contain the genes in
#'   \code{pr_deg_ids} and, when more than 20 cell groups result, a
#'   computed pseudotime.
#' @param pr_deg_ids Character vector of gene ids used to subset
#'   \code{cds} before module detection.
#' @param seu_resolution Seurat clustering resolution; combined with the
#'   default assay to select the matching
#'   \code{<assay>_snn_res.<resolution>} column of \code{colData(cds)}.
#' @param collapse_rows If FALSE, each gene becomes its own "module"
#'   (rows are not collapsed by module membership).
#' @param collapse_cols If FALSE, each cell becomes its own group
#'   (columns are not collapsed by cluster).
#' @param resolution Resolution sweep forwarded to
#'   \code{monocle3::find_gene_modules}.
#'
#' @return A list with \code{module_table} (gene/module assignments),
#'   \code{module_heatmap} (an iheatmapr heatmap), and \code{agg_mat}
#'   (the aggregated expression matrix).
#' @export
#'
#' @examples
monocle_module_heatmap <- function(cds, pr_deg_ids, seu_resolution, collapse_rows = TRUE, collapse_cols = TRUE, resolution = 10^seq(-6,-1)) {
  # An integrated (batch-corrected) assay takes precedence over raw RNA.
  if (any(grepl("integrated", colnames(cds@colData)))){
    default_assay = "integrated"
  } else {
    default_assay = "RNA"
  }
  seu_resolution = paste0(default_assay, "_snn_res.", seu_resolution)
  cds <- cds[pr_deg_ids,]
  gene_module_df <- monocle3::find_gene_modules(cds, resolution=resolution) %>%
    dplyr::arrange(module)
  if(collapse_rows != TRUE){
    # One row per gene: reuse the gene id as its module label.
    gene_module_df <-
      dplyr::select(gene_module_df, id) %>%
      dplyr::mutate(module = id)
  }
  cell_group_df <- tibble::tibble(cell=row.names(colData(cds)),
                                  cell_group=colData(cds)[[seu_resolution]])
  if (collapse_cols != TRUE){
    # One column per cell: reuse the cell barcode as its group label.
    cell_group_df <- dplyr::mutate(cell_group_df, cell_group = cell)
  }
  agg_mat <- monocle3::aggregate_gene_expression(cds, gene_module_df, cell_group_df)
  if (ncol(agg_mat) > 20){
    # Many groups: order columns by pseudotime and annotate pseudotime
    # above the heatmap instead of clustering the columns.
    col_order <- sort(monocle3::pseudotime(cds))
    agg_mat <- agg_mat[,names(col_order)]
    module_heatmap <- iheatmapr::iheatmap(as.matrix(agg_mat), col_labels = TRUE, row_labels = TRUE, cluster_rows = "hclust", cluster_cols = NULL) %>%
      # add_col_annotation(data.frame("Groups" = patient_groups)) %>%
      iheatmapr::add_col_plot(y = col_order,
                              tracename = "pseudotime",
                              layout = list(title = "Pseudotime")) %>%
      identity()
  } else {
    module_heatmap <- iheatmapr::iheatmap(as.matrix(agg_mat), col_labels = TRUE, row_labels = TRUE, cluster_rows = "hclust", cluster_cols = "hclust")
  }
  return(list(module_table = gene_module_df, module_heatmap = module_heatmap, agg_mat = agg_mat))
}
| /R/monocle.R | permissive | mitsingh/seuratTools | R | false | false | 31,326 | r | #' Convert a Seurat Object to a Monocle Cell Data Set
#'
#' Convert a Seurat object into a monocle3 cell_data_set, transferring
#' counts, cell metadata, PCA/UMAP embeddings and PCA loadings, then
#' learning a principal graph seeded from the Seurat clusters.
#'
#' @param seu A Seurat object. If an "integrated" assay is present it is
#'   used as the default assay, otherwise "RNA".
#' @param resolution Seurat clustering resolution whose cluster labels
#'   (column "<assay>_snn_res.<resolution>") seed the cds clusters;
#'   forwarded to learn_graph_by_resolution().
#'
#' @return A monocle3 cell_data_set with size factors estimated, a single
#'   dummy partition, copied embeddings, and a learned graph.
#' @export
#'
#' @examples
convert_seu_to_cds <- function(seu, resolution = 1) {
### Building the necessary parts for a basic cds
# part two, counts sparse matrix
if ("integrated" %in% names(seu@assays)) {
default_assay = "integrated"
} else {
default_assay = "RNA"
}
DefaultAssay(seu) <- default_assay
expression_matrix <- Seurat::GetAssayData(seu, slot = "data", assay = default_assay)
# Raw counts always come from the RNA assay, restricted to the genes of
# the default assay; cells with zero total counts are dropped.
count_matrix <- Seurat::GetAssayData(seu, slot = "counts", assay = "RNA")
count_matrix <- count_matrix[row.names(expression_matrix),]
count_matrix <- count_matrix[,Matrix::colSums(count_matrix) != 0]
# part three, gene annotations
gene_annotation <- data.frame(gene_short_name = rownames(count_matrix),
row.names = rownames(count_matrix))
# part one, cell information (subset seu to the surviving cells too)
cell_metadata <- seu[[]][colnames(count_matrix),]
seu <- seu[,colnames(count_matrix)]
### Construct the basic cds object
cds_from_seurat <- monocle3::new_cell_data_set(expression_data = count_matrix,
cell_metadata = cell_metadata,
gene_metadata = gene_annotation)
cds_from_seurat <- cds_from_seurat[,colnames(seu)]
# estimate size factors (after a second zero-count-cell filter)
cds_from_seurat <- cds_from_seurat[, colSums(as.matrix(monocle3::exprs(cds_from_seurat))) != 0]
cds_from_seurat <- monocle3::estimate_size_factors(cds_from_seurat)
### Construct and assign the made up partition
# Every cell is placed in the single partition "1" so that downstream
# graph learning treats the dataset as one connected component.
recreate.partition <- c(rep(1, length(cds_from_seurat@colData@rownames)))
names(recreate.partition) <- cds_from_seurat@colData@rownames
recreate.partition <- as.factor(recreate.partition)
cds_from_seurat@clusters@listData[["UMAP"]][["partitions"]] <- recreate.partition
### Could be a space-holder, but essentially fills out louvain parameters
cds_from_seurat@clusters@listData[["UMAP"]][["louvain_res"]] <- "NA"
# cds_from_seurat <- monocle3::preprocess_cds(cds_from_seurat)
# cds_from_seurat <- monocle3::reduce_dimension(cds_from_seurat, "UMAP")
#
# reducedDim(cds_from_seurat, "PCA") <- Embeddings(seu, "pca")
# reducedDim(cds_from_seurat, "UMAP") <- Embeddings(seu, "umap")
# Reuse Seurat's embeddings instead of recomputing them in monocle3.
# NOTE(review): direct slot assignment bypasses monocle3's accessors and
# depends on its internal layout — confirm against the pinned monocle3
# version if this breaks after an upgrade.
cds_from_seurat@reducedDims@listData[["UMAP"]] <- Embeddings(seu, "umap")
cds_from_seurat@reducedDims@listData[["PCA"]] <- Embeddings(seu, "pca")
cds_from_seurat@preprocess_aux$gene_loadings <- Loadings(seu, "pca")
cds_from_seurat <- learn_graph_by_resolution(cds_from_seurat, seu, resolution = resolution)
return(cds_from_seurat)
}
#' Assign Clusters to CDS
#'
#' Overwrites the UMAP cluster assignments stored in a monocle3
#' cell_data_set with an externally computed cluster vector.
#'
#' @param cds A monocle3 cell_data_set.
#' @param clusters A vector of cluster labels named by cell barcode; it is
#'   subset and reordered to match \code{colnames(cds)}.
#'
#' @return The cell_data_set with updated UMAP clusters, named by the
#'   rownames of its colData.
#' @export
#'
#' @examples
assign_clusters_to_cds <- function(cds, clusters){
  # Align the incoming labels with the cds cell ordering, then rename
  # them by the colData rownames before storing in the UMAP slot.
  ordered_clusters <- clusters[colnames(cds)]
  names(ordered_clusters) <- cds@colData@rownames
  cds@clusters@listData[["UMAP"]][["clusters"]] <- ordered_clusters
  return(cds)
}
#' Learn Monocle Graph by Resolution
#'
#' Clusters the cells, replaces the cluster assignments with the Seurat
#' clusters at the requested resolution, then learns the principal graph.
#'
#' @param cds A monocle3 cell_data_set.
#' @param seu The Seurat object holding the
#'   "<assay>_snn_res.<resolution>" cluster column used to seed the cds
#'   clusters.
#' @param resolution Seurat clustering resolution to transfer (default 1).
#'
#' @return The cell_data_set with clusters assigned and a learned graph.
#' @export
#'
#' @examples
learn_graph_by_resolution <- function(cds, seu, resolution = 1){
  ### Assign the cluster info
  # Prefer the integrated (batch-corrected) assay when present.
  if (any(grepl("integrated", names(cds@colData)))){
    default_assay = "integrated"
  } else {
    default_assay = "RNA"
  }
  cds <- monocle3::cluster_cells(cds)
  clusters <- seu[[paste0(default_assay, '_snn_res.', resolution)]]
  clusters <- purrr::set_names(clusters[[1]], rownames(clusters))
  cds <- assign_clusters_to_cds(cds, clusters = clusters)
  print("Learning graph, which can take a while depends on the sample")
  # use_partition = TRUE learns a separate graph per partition (the dummy
  # single partition set upstream makes this one connected graph).
  cds <- monocle3::learn_graph(cds, use_partition = TRUE)
  return(cds)
}
#' Plot a Monocle Cell Data Set
#'
#' Renders the local \code{plot_cells()} output for a cell_data_set as an
#' interactive plotly widget, attaching per-cell "key"/"cellid" columns so
#' individual points can be identified on hover/click.
#'
#' @param cds A monocle3 cell_data_set.
#' @param color_cells_by "cluster", "partition", "pseudotime" or a column
#'   of \code{colData(cds)} used to color cells; forwarded to
#'   \code{plot_cells()}.
#' @param genes Optional genes whose expression is plotted instead of a
#'   metadata coloring.
#'
#' @return A plotly htmlwidget.
#' @export
#'
#' @examples
plot_cds <- function(cds, color_cells_by = NULL, genes = NULL){
  # Stable per-cell identifiers carried into the plot for plotly tooltips.
  key <- seq(1, length(colnames(cds)))
  cellid <- colnames(cds)
  cds[['key']] = key
  cds[['cellid']] = cellid
  cds_plot <- plot_cells(cds,
                         genes = genes,
                         label_cell_groups = FALSE,
                         label_groups_by_cluster = FALSE,
                         label_leaves = FALSE,
                         label_branch_points = FALSE,
                         color_cells_by = color_cells_by, key = key, cellid = cellid,
                         cell_size = 0.75)
  cds_plot <-
    cds_plot %>%
    plotly::ggplotly(height = 400) %>%
    plotly_settings() %>%
    plotly::toWebGL() %>%
    identity()
}
#' Plot Pseudotime for a Monocle Cell Data Set
#'
#' Plots cells with the learned trajectory graph overlaid and returns an
#' interactive plotly widget.
#'
#' @param cds A monocle3 cell_data_set with a learned principal graph.
#' @param resolution Unused in the plot itself; retained for interface
#'   compatibility with the other plot_* helpers.
#' @param color_cells_by Coloring variable forwarded to
#'   \code{monocle3::plot_cells} (e.g. "pseudotime" or a colData column).
#' @param genes Optional genes whose expression is plotted.
#'
#' @return A plotly htmlwidget.
#' @export
#'
#' @examples
plot_pseudotime <- function(cds, resolution, color_cells_by = NULL, genes = NULL){
  # Per-cell identifiers for plotly hover/selection.
  key <- seq(1, length(colnames(cds)))
  cellid <- colnames(cds)
  cds[['key']] = key
  cds[['cellid']] = cellid
  cds_plot <- monocle3::plot_cells(cds,
                                   show_trajectory_graph = TRUE,
                                   genes = genes,
                                   label_cell_groups = FALSE,
                                   label_groups_by_cluster = FALSE,
                                   label_leaves = FALSE,
                                   label_branch_points = FALSE,
                                   color_cells_by = color_cells_by,
                                   cell_size = 0.75)
  # Convert to plotly exactly once (the original piped ggplotly twice,
  # which at best re-built the already-converted widget).
  cds_plot <-
    cds_plot %>%
    plotly::ggplotly(height = 400) %>%
    plotly_settings() %>%
    plotly::toWebGL() %>%
    identity()
}
#' Plot Feature Expression on a Monocle Cell Data Set
#'
#' Plots gene expression over the embedding via the local
#' \code{plot_cells()} and returns an interactive plotly widget.
#'
#' @param cds A monocle3 cell_data_set.
#' @param resolution Unused in the plot itself; retained for interface
#'   compatibility with the other plot_* helpers.
#' @param genes Genes whose expression is plotted.
#' @param ... Currently unused; accepted for forward compatibility.
#'
#' @return A plotly htmlwidget.
#' @export
#'
#' @examples
plot_monocle_features <- function(cds, resolution, genes = NULL, ...){
  # Per-cell identifiers for plotly hover/selection.
  key <- seq(1, length(colnames(cds)))
  cellid <- colnames(cds)
  cds[['key']] = key
  cds[['cellid']] = cellid
  cds_plot <- plot_cells(cds,
                         genes = genes,
                         label_cell_groups = FALSE,
                         label_groups_by_cluster = FALSE,
                         label_leaves = FALSE,
                         label_branch_points = FALSE,
                         cell_size = 0.75)
  # Convert to plotly exactly once (the original piped ggplotly twice,
  # which at best re-built the already-converted widget).
  cds_plot <-
    cds_plot %>%
    plotly::ggplotly(height = 400) %>%
    plotly_settings() %>%
    plotly::toWebGL() %>%
    identity()
}
#' Plot cells in reduced-dimension space (patched monocle3::plot_cells)
#'
#' Local copy of monocle3's plot_cells whose point layers forward extra
#' aesthetics via `...` into aes() (e.g. key/cellid for plotly tooltips).
#'
#' @param cds A monocle3 cell_data_set to plot.
#' @param x Reduced-dimension component for the x axis (default 1).
#' @param y Reduced-dimension component for the y axis (default 2).
#' @param reduction_method Which reducedDims entry to plot: "UMAP",
#'   "tSNE", "PCA", "LSI" or "Aligned".
#' @param color_cells_by "cluster", "partition", "pseudotime" or a
#'   colData column used to color cells.
#' @param group_cells_by "cluster" or "partition"; grouping used when
#'   placing group labels.
#' @param genes Genes (or a two-column gene/group data frame) whose
#'   expression is shown instead of color_cells_by.
#' @param show_trajectory_graph Draw the learned principal graph?
#' @param trajectory_graph_color Color of trajectory segments.
#' @param trajectory_graph_segment_size Width of trajectory segments.
#' @param norm_method "log" or "size_only" expression normalization.
#' @param label_cell_groups Write group labels onto the plot?
#' @param label_groups_by_cluster Place labels per cluster rather than
#'   per color group.
#' @param group_label_size Text size of group labels.
#' @param labels_per_group Number of labels to draw per group.
#' @param label_branch_points Mark branch points of the graph?
#' @param label_roots Mark root nodes of the graph?
#' @param label_leaves Mark leaf nodes of the graph?
#' @param graph_label_size Text size of graph node markers.
#' @param cell_size Point size for cells.
#' @param cell_stroke Point stroke (defaults to cell_size/2).
#' @param alpha Point transparency.
#' @param min_expr Expression floor below which cells are drawn grey.
#' @param rasterize Rasterize the point layer with ggrastr?
#' @param scale_to_range Rescale each gene's expression to percent of
#'   its max?
#' @param ... Extra aesthetics forwarded into geom_point's aes().
#'
#' @return A ggplot2 object.
#' @export
#'
#' @examples
plot_cells <- function(cds, x = 1, y = 2, reduction_method = c("UMAP", "tSNE",
"PCA", "LSI", "Aligned"), color_cells_by = "cluster", group_cells_by = c("cluster",
"partition"), genes = NULL, show_trajectory_graph = TRUE,
trajectory_graph_color = "grey28", trajectory_graph_segment_size = 0.75,
norm_method = c("log", "size_only"), label_cell_groups = TRUE,
label_groups_by_cluster = TRUE, group_label_size = 2, labels_per_group = 1,
label_branch_points = TRUE, label_roots = TRUE, label_leaves = TRUE,
graph_label_size = 2, cell_size = 0.35, cell_stroke = I(cell_size/2),
alpha = 1, min_expr = 0.1, rasterize = FALSE, scale_to_range = FALSE, ...)
{
# --- Validate arguments and required reductions -------------------------
reduction_method <- match.arg(reduction_method)
assertthat::assert_that(methods::is(cds, "cell_data_set"))
assertthat::assert_that(!is.null(reducedDims(cds)[[reduction_method]]),
msg = paste("No dimensionality reduction for", reduction_method,
"calculated.", "Please run reduce_dimensions with",
"reduction_method =", reduction_method, "before attempting to plot."))
low_dim_coords <- reducedDims(cds)[[reduction_method]]
assertthat::assert_that(ncol(low_dim_coords) >= max(x, y),
msg = paste("x and/or y is too large. x and y must",
"be dimensions in reduced dimension", "space."))
if (!is.null(color_cells_by)) {
assertthat::assert_that(color_cells_by %in% c("cluster",
"partition", "pseudotime") | color_cells_by %in%
names(colData(cds)), msg = paste("color_cells_by must one of",
"'cluster', 'partition', 'pseudotime,", "or a column in the colData table."))
# Coloring by pseudotime requires order_cells() to have been run.
if (color_cells_by == "pseudotime") {
tryCatch({
pseudotime(cds, reduction_method = reduction_method)
}, error = function(x) {
stop(paste("No pseudotime for", reduction_method,
"calculated. Please run order_cells with",
"reduction_method =", reduction_method, "before attempting to color by pseudotime."))
})
}
}
assertthat::assert_that(!is.null(color_cells_by) || !is.null(markers),
msg = paste("Either color_cells_by or markers must",
"be NULL, cannot color by both!"))
norm_method = match.arg(norm_method)
group_cells_by = match.arg(group_cells_by)
assertthat::assert_that(!is.null(color_cells_by) || !is.null(genes),
msg = paste("Either color_cells_by or genes must be",
"NULL, cannot color by both!"))
if (show_trajectory_graph && is.null(monocle3::principal_graph(cds)[[reduction_method]])) {
message("No trajectory to plot. Has learn_graph() been called yet?")
show_trajectory_graph = FALSE
}
# Dummies that silence R CMD check notes about NSE column references.
gene_short_name <- NA
sample_name <- NA
data_dim_1 <- NA
data_dim_2 <- NA
if (rasterize) {
plotting_func <- ggrastr::geom_point_rast
}
else {
plotting_func <- ggplot2::geom_point
}
# --- Assemble the per-cell plotting data frame --------------------------
S_matrix <- reducedDims(cds)[[reduction_method]]
data_df <- data.frame(S_matrix[, c(x, y)])
colnames(data_df) <- c("data_dim_1", "data_dim_2")
data_df$sample_name <- row.names(data_df)
data_df <- as.data.frame(cbind(data_df, colData(cds)))
# cell_group drives label placement; cell_color drives the fill/color.
if (group_cells_by == "cluster") {
data_df$cell_group <- tryCatch({
clusters(cds, reduction_method = reduction_method)[data_df$sample_name]
}, error = function(e) {
NULL
})
}
else if (group_cells_by == "partition") {
data_df$cell_group <- tryCatch({
partitions(cds, reduction_method = reduction_method)[data_df$sample_name]
}, error = function(e) {
NULL
})
}
else {
stop("Error: unrecognized way of grouping cells.")
}
if (color_cells_by == "cluster") {
data_df$cell_color <- tryCatch({
clusters(cds, reduction_method = reduction_method)[data_df$sample_name]
}, error = function(e) {
NULL
})
}
else if (color_cells_by == "partition") {
data_df$cell_color <- tryCatch({
partitions(cds, reduction_method = reduction_method)[data_df$sample_name]
}, error = function(e) {
NULL
})
}
else if (color_cells_by == "pseudotime") {
data_df$cell_color <- tryCatch({
pseudotime(cds, reduction_method = reduction_method)[data_df$sample_name]
}, error = function(e) {
NULL
})
}
else {
data_df$cell_color <- colData(cds)[data_df$sample_name,
color_cells_by]
}
# --- Build node/edge tables for the principal-graph overlay -------------
if (show_trajectory_graph) {
ica_space_df <- t(cds@principal_graph_aux[[reduction_method]]$dp_mst) %>%
as.data.frame() %>% dplyr::select_(prin_graph_dim_1 = x,
prin_graph_dim_2 = y) %>% dplyr::mutate(sample_name = rownames(.),
sample_state = rownames(.))
dp_mst <- cds@principal_graph[[reduction_method]]
edge_df <- dp_mst %>% igraph::as_data_frame() %>% dplyr::select_(source = "from",
target = "to") %>% dplyr::left_join(ica_space_df %>%
dplyr::select_(source = "sample_name", source_prin_graph_dim_1 = "prin_graph_dim_1",
source_prin_graph_dim_2 = "prin_graph_dim_2"),
by = "source") %>% dplyr::left_join(ica_space_df %>%
dplyr::select_(target = "sample_name", target_prin_graph_dim_1 = "prin_graph_dim_1",
target_prin_graph_dim_2 = "prin_graph_dim_2"),
by = "target")
}
# --- Gather (and optionally aggregate) expression for requested genes ---
markers_exprs <- NULL
expression_legend_label <- NULL
if (!is.null(genes)) {
if (!is.null(dim(genes)) && dim(genes) >= 2) {
markers = unlist(genes[, 1], use.names = FALSE)
}
else {
markers = genes
}
markers_rowData <- as.data.frame(subset(rowData(cds),
gene_short_name %in% markers | row.names(rowData(cds)) %in%
markers))
if (nrow(markers_rowData) == 0) {
stop("None of the provided genes were found in the cds")
}
if (nrow(markers_rowData) >= 1) {
# Size-factor-normalize the raw counts for the selected genes.
cds_exprs <- SingleCellExperiment::counts(cds)[row.names(markers_rowData),
, drop = FALSE]
cds_exprs <- Matrix::t(Matrix::t(cds_exprs)/monocle3::size_factors(cds))
if (!is.null(dim(genes)) && dim(genes) >= 2) {
# Two-column genes input: aggregate expression per gene group.
genes = as.data.frame(genes)
row.names(genes) = genes[, 1]
genes = genes[row.names(cds_exprs), ]
agg_mat = as.matrix(monocle3::aggregate_gene_expression(cds,
genes, norm_method = norm_method, scale_agg_values = FALSE))
if(dim(agg_mat)[2] == 1) agg_mat <- t(agg_mat)
markers_exprs = agg_mat
markers_exprs <- reshape2::melt(markers_exprs)
colnames(markers_exprs)[1:2] <- c("feature_id",
"cell_id")
if (is.factor(genes[, 2]))
markers_exprs$feature_id = factor(markers_exprs$feature_id,
levels = levels(genes[, 2]))
markers_exprs$feature_label <- markers_exprs$feature_id
norm_method = "size_only"
expression_legend_label = "Expression score"
}
else {
# Plain gene vector: melt per-gene expression, labeled by
# gene_short_name when available.
cds_exprs@x = round(10000 * cds_exprs@x)/10000
markers_exprs = matrix(cds_exprs, nrow = nrow(markers_rowData))
colnames(markers_exprs) = colnames(SingleCellExperiment::counts(cds))
row.names(markers_exprs) = row.names(markers_rowData)
markers_exprs <- reshape2::melt(markers_exprs)
colnames(markers_exprs)[1:2] <- c("feature_id",
"cell_id")
markers_exprs <- merge(markers_exprs, markers_rowData,
by.x = "feature_id", by.y = "row.names")
if (is.null(markers_exprs$gene_short_name)) {
markers_exprs$feature_label <- as.character(markers_exprs$feature_id)
}
else {
markers_exprs$feature_label <- as.character(markers_exprs$gene_short_name)
}
markers_exprs$feature_label <- ifelse(is.na(markers_exprs$feature_label) |
!as.character(markers_exprs$feature_label) %in%
markers, as.character(markers_exprs$feature_id),
as.character(markers_exprs$feature_label))
markers_exprs$feature_label <- factor(markers_exprs$feature_label,
levels = markers)
if (norm_method == "size_only")
expression_legend_label = "Expression"
else expression_legend_label = "log10(Expression)"
}
if (scale_to_range) {
markers_exprs = dplyr::group_by(markers_exprs,
feature_label) %>% dplyr::mutate(max_val_for_feature = max(value),
min_val_for_feature = min(value)) %>% dplyr::mutate(value = 100 *
(value - min_val_for_feature)/(max_val_for_feature -
min_val_for_feature))
expression_legend_label = "% Max"
}
}
}
# --- Compute label coordinates (group medians) for text placement -------
if (label_cell_groups && is.null(color_cells_by) == FALSE) {
if (is.null(data_df$cell_color)) {
if (is.null(genes)) {
message(paste(color_cells_by, "not found in colData(cds), cells will",
"not be colored"))
}
text_df = NULL
label_cell_groups = FALSE
}
else {
if (is.character(data_df$cell_color) || is.factor(data_df$cell_color)) {
if (label_groups_by_cluster && is.null(data_df$cell_group) ==
FALSE) {
text_df = data_df %>% dplyr::group_by(cell_group) %>%
dplyr::mutate(cells_in_cluster = dplyr::n()) %>%
dplyr::group_by(cell_color, add = TRUE) %>%
dplyr::mutate(per = dplyr::n()/cells_in_cluster)
median_coord_df = text_df %>% dplyr::summarize(fraction_of_group = dplyr::n(),
text_x = stats::median(x = data_dim_1),
text_y = stats::median(x = data_dim_2))
text_df = suppressMessages(text_df %>% dplyr::select(per) %>%
dplyr::distinct())
text_df = suppressMessages(dplyr::inner_join(text_df,
median_coord_df))
text_df = text_df %>% dplyr::group_by(cell_group) %>%
dplyr::top_n(labels_per_group, per)
}
else {
text_df = data_df %>% dplyr::group_by(cell_color) %>%
dplyr::mutate(per = 1)
median_coord_df = text_df %>% dplyr::summarize(fraction_of_group = dplyr::n(),
text_x = stats::median(x = data_dim_1),
text_y = stats::median(x = data_dim_2))
text_df = suppressMessages(text_df %>% dplyr::select(per) %>%
dplyr::distinct())
text_df = suppressMessages(dplyr::inner_join(text_df,
median_coord_df))
text_df = text_df %>% dplyr::group_by(cell_color) %>%
dplyr::top_n(labels_per_group, per)
}
text_df$label = as.character(text_df %>% dplyr::pull(cell_color))
}
else {
message(paste("Cells aren't colored in a way that allows them to",
"be grouped."))
text_df = NULL
label_cell_groups = FALSE
}
}
}
# --- Base plot: expression facets or metadata coloring ------------------
if (!is.null(markers_exprs) && nrow(markers_exprs) > 0) {
data_df <- merge(data_df, markers_exprs, by.x = "sample_name",
by.y = "cell_id")
# Cells below min_expr are drawn grey underneath the colored layer.
data_df$value <- with(data_df, ifelse(value >= min_expr,
value, NA))
na_sub <- data_df[is.na(data_df$value), ]
if (norm_method == "size_only") {
g <- ggplot(data = data_df, aes(x = data_dim_1,
y = data_dim_2)) + plotting_func(aes(data_dim_1,
data_dim_2), size = I(cell_size), stroke = I(cell_stroke),
color = "grey80", alpha = alpha, data = na_sub) +
plotting_func(aes(color = value), size = I(cell_size),
stroke = I(cell_stroke), na.rm = TRUE) + viridis::scale_color_viridis(option = "plasma",
name = expression_legend_label, na.value = "grey80",
end = 0.8, alpha = alpha) + guides(alpha = FALSE) +
facet_wrap(~feature_label)
}
else {
g <- ggplot(data = data_df, aes(x = data_dim_1,
y = data_dim_2)) + plotting_func(aes(data_dim_1,
data_dim_2), size = I(cell_size), stroke = I(cell_stroke),
color = "grey80", data = na_sub, alpha = alpha) +
plotting_func(aes(color = log10(value + min_expr)),
size = I(cell_size), stroke = I(cell_stroke),
na.rm = TRUE, alpha = alpha) + viridis::scale_color_viridis(option = "plasma",
name = expression_legend_label, na.value = "grey80",
end = 0.8, alpha = alpha) + guides(alpha = FALSE) +
facet_wrap(~feature_label)
}
}
else {
# The `...` forwarded into aes() here is this fork's key difference
# from monocle3::plot_cells (extra aesthetics such as key/cellid).
g <- ggplot(data = data_df, aes(x = data_dim_1, y = data_dim_2))
if (color_cells_by %in% c("cluster", "partition")) {
if (is.null(data_df$cell_color)) {
g <- g + geom_point(color = I("gray"), size = I(cell_size),
stroke = I(cell_stroke), na.rm = TRUE, alpha = I(alpha))
message(paste("cluster_cells() has not been called yet, can't",
"color cells by cluster"))
}
else {
g <- g + geom_point(aes(color = cell_color, ...),
size = I(cell_size), stroke = I(cell_stroke),
na.rm = TRUE, alpha = alpha)
}
g <- g + guides(color = guide_legend(title = color_cells_by,
override.aes = list(size = 4)))
}
else if (class(data_df$cell_color) == "numeric") {
g <- g + geom_point(aes(color = cell_color, ...), size = I(cell_size),
stroke = I(cell_stroke), na.rm = TRUE, alpha = alpha)
g <- g + viridis::scale_color_viridis(name = color_cells_by,
option = "C")
}
else {
g <- g + geom_point(aes(color = cell_color, ...), size = I(cell_size),
stroke = I(cell_stroke), na.rm = TRUE, alpha = alpha)
g <- g + guides(color = guide_legend(title = color_cells_by,
override.aes = list(size = 4)))
}
}
# --- Overlay the principal graph and its annotated nodes ----------------
if (show_trajectory_graph) {
g <- g + geom_segment(aes_string(x = "source_prin_graph_dim_1",
y = "source_prin_graph_dim_2", xend = "target_prin_graph_dim_1",
yend = "target_prin_graph_dim_2"), size = trajectory_graph_segment_size,
color = I(trajectory_graph_color), linetype = "solid",
na.rm = TRUE, data = edge_df)
if (label_branch_points) {
mst_branch_nodes <- branch_nodes(cds)
branch_point_df <- ica_space_df %>% dplyr::slice(match(names(mst_branch_nodes),
sample_name)) %>% dplyr::mutate(branch_point_idx = seq_len(dplyr::n()))
g <- g + geom_point(aes_string(x = "prin_graph_dim_1",
y = "prin_graph_dim_2"), shape = 21, stroke = I(trajectory_graph_segment_size),
color = "white", fill = "black", size = I(graph_label_size *
1.5), na.rm = TRUE, branch_point_df) + geom_text(aes_string(x = "prin_graph_dim_1",
y = "prin_graph_dim_2", label = "branch_point_idx"),
size = I(graph_label_size), color = "white",
na.rm = TRUE, branch_point_df)
}
if (label_leaves) {
mst_leaf_nodes <- leaf_nodes(cds)
leaf_df <- ica_space_df %>% dplyr::slice(match(names(mst_leaf_nodes),
sample_name)) %>% dplyr::mutate(leaf_idx = seq_len(dplyr::n()))
g <- g + geom_point(aes_string(x = "prin_graph_dim_1",
y = "prin_graph_dim_2"), shape = 21, stroke = I(trajectory_graph_segment_size),
color = "black", fill = "lightgray", size = I(graph_label_size *
1.5), na.rm = TRUE, leaf_df) + geom_text(aes_string(x = "prin_graph_dim_1",
y = "prin_graph_dim_2", label = "leaf_idx"),
size = I(graph_label_size), color = "black",
na.rm = TRUE, leaf_df)
}
if (label_roots) {
mst_root_nodes <- monocle3:::root_nodes(cds)
root_df <- ica_space_df %>% dplyr::slice(match(names(mst_root_nodes),
sample_name)) %>% dplyr::mutate(root_idx = seq_len(dplyr::n()))
g <- g + geom_point(aes_string(x = "prin_graph_dim_1",
y = "prin_graph_dim_2"), shape = 21, stroke = I(trajectory_graph_segment_size),
color = "black", fill = "white", size = I(graph_label_size *
1.5), na.rm = TRUE, root_df) + geom_text(aes_string(x = "prin_graph_dim_1",
y = "prin_graph_dim_2", label = "root_idx"),
size = I(graph_label_size), color = "black",
na.rm = TRUE, root_df)
}
}
# --- Group labels and final theming -------------------------------------
if (label_cell_groups) {
g <- g + ggrepel::geom_text_repel(data = text_df, mapping = aes_string(x = "text_x",
y = "text_y", label = "label"), size = I(group_label_size))
if (is.null(markers_exprs))
g <- g + theme(legend.position = "none")
}
g <- g + monocle3:::monocle_theme_opts() + xlab(paste(reduction_method,
x)) + ylab(paste(reduction_method, y)) + theme(legend.key = element_blank()) +
theme(panel.background = element_rect(fill = "white"))
g
}
#' Build a Heatmap of Monocle3 Gene Modules
#'
#' Groups the supplied genes into co-expression modules with
#' \code{monocle3::find_gene_modules}, aggregates module expression per
#' cell group, and renders an interactive iheatmapr heatmap.
#'
#' @param cds A monocle3 cell_data_set; must contain the genes in
#'   \code{pr_deg_ids} and, when more than 20 cell groups result, a
#'   computed pseudotime.
#' @param pr_deg_ids Character vector of gene ids used to subset
#'   \code{cds} before module detection.
#' @param seu_resolution Seurat clustering resolution; combined with the
#'   default assay to select the matching
#'   \code{<assay>_snn_res.<resolution>} column of \code{colData(cds)}.
#' @param collapse_rows If FALSE, each gene becomes its own "module"
#'   (rows are not collapsed by module membership).
#' @param collapse_cols If FALSE, each cell becomes its own group
#'   (columns are not collapsed by cluster).
#' @param resolution Resolution sweep forwarded to
#'   \code{monocle3::find_gene_modules}.
#'
#' @return A list with \code{module_table} (gene/module assignments),
#'   \code{module_heatmap} (an iheatmapr heatmap), and \code{agg_mat}
#'   (the aggregated expression matrix).
#' @export
#'
#' @examples
monocle_module_heatmap <- function(cds, pr_deg_ids, seu_resolution, collapse_rows = TRUE, collapse_cols = TRUE, resolution = 10^seq(-6,-1)) {
  # An integrated (batch-corrected) assay takes precedence over raw RNA.
  if (any(grepl("integrated", colnames(cds@colData)))){
    default_assay = "integrated"
  } else {
    default_assay = "RNA"
  }
  seu_resolution = paste0(default_assay, "_snn_res.", seu_resolution)
  cds <- cds[pr_deg_ids,]
  gene_module_df <- monocle3::find_gene_modules(cds, resolution=resolution) %>%
    dplyr::arrange(module)
  if(collapse_rows != TRUE){
    # One row per gene: reuse the gene id as its module label.
    gene_module_df <-
      dplyr::select(gene_module_df, id) %>%
      dplyr::mutate(module = id)
  }
  cell_group_df <- tibble::tibble(cell=row.names(colData(cds)),
                                  cell_group=colData(cds)[[seu_resolution]])
  if (collapse_cols != TRUE){
    # One column per cell: reuse the cell barcode as its group label.
    cell_group_df <- dplyr::mutate(cell_group_df, cell_group = cell)
  }
  agg_mat <- monocle3::aggregate_gene_expression(cds, gene_module_df, cell_group_df)
  if (ncol(agg_mat) > 20){
    # Many groups: order columns by pseudotime and annotate pseudotime
    # above the heatmap instead of clustering the columns.
    col_order <- sort(monocle3::pseudotime(cds))
    agg_mat <- agg_mat[,names(col_order)]
    module_heatmap <- iheatmapr::iheatmap(as.matrix(agg_mat), col_labels = TRUE, row_labels = TRUE, cluster_rows = "hclust", cluster_cols = NULL) %>%
      # add_col_annotation(data.frame("Groups" = patient_groups)) %>%
      iheatmapr::add_col_plot(y = col_order,
                              tracename = "pseudotime",
                              layout = list(title = "Pseudotime")) %>%
      identity()
  } else {
    module_heatmap <- iheatmapr::iheatmap(as.matrix(agg_mat), col_labels = TRUE, row_labels = TRUE, cluster_rows = "hclust", cluster_cols = "hclust")
  }
  return(list(module_table = gene_module_df, module_heatmap = module_heatmap, agg_mat = agg_mat))
}
|
########################################
#
# Week 4: "Real" NN using iris data
#
# Teaching script: fit single-hidden-layer neural networks (package
# nnet) to the iris data, illustrate sensitivity to random starting
# weights, weight decay (regularization), standardization, and then
# scale up to the optdigits handwritten-digit data.
#
########################################
library(nnet)
#
# Look at the iris data
#
str(iris)
#
# Play with colors a bit and make a plot
#
col <- c("orange","blue","red")
col <- adjustcolor(col,alpha=.4) # cool function
col <- col[iris$Species]
pairs( iris[ , -5], col = col, pch =19, cex=1.2)
#
# Look at the nnet help page
#
help(nnet)
#
# Now train a NN
# nnet "only" supports one hidden layer
# Species is a factor, so our nnet should give back
# a vector of predicted Species probabilities for each obs.
#
set.seed(94) # set random number seed, for reproducibility
# The net initializes the weights randomly, then tries to optimize,
# but beware of local minima
iris.94 <- nnet(Species ~ ., data = iris, size = 3)
iris.94
#
# How well did we do?
#
# Confusion matrix: actual species vs. predicted class.
table(iris$Species, predict(iris.94, type = "class"))
summary(iris.94) # note the spread of the weights
#
# Let's try to start where iris.94 left out and "jitter" our way out of the flat spot
#
iris.x <- nnet(Species~., data=iris, size=3,
Wts=jitter(iris.94$wts))# Nope!
#
# Try different starting values
#
set.seed(80)
iris.80 <- nnet(Species ~ ., data = iris, size = 3)
table (iris$Species, predict (iris.80,type = "class")) # much better
#
# Standardize the variables and regularize (with "decay" parameter)
# If we scale, we need to save the mean and SD values for use with
# a test set.
#
# NOTE(review): center = T uses the reassignable shortcut for TRUE;
# prefer spelling out TRUE in new code.
set.seed(94)
iris.scaled <- scale (iris[,1:4], center = T, scale = TRUE)
iris.new <- data.frame(iris.scaled, Species=iris[,5])
iris.W.94 <- nnet(Species ~., decay = .01, data = iris.new, size = 3)
summary (iris.W.94) # less spread-out weights
attributes (iris.scaled) # keep track of mean and SD
# We would scale new data like this:
# newdata <- scale (newdata,
# mean = attributes (iris.scaled)$'scaled:center',
# scale = attributes (iris.scaled)$'scaled:scale')
#
# Let's try something a little bigger. Let's use the test set
# from optdigits to train, and the training set as a test set
# (for reasons of size and time).
#
# Requires optdigits.test.txt / optdigits.train.txt in the working
# directory (UCI optical digits dataset; last column V65 is the digit).
opt.test <- read.csv ("optdigits.test.txt", header=F) # remember this?
opt.test$V65 <- factor (opt.test$V65)
set.seed (1230)
opt.nnet <- nnet (V65 ~ ., data = opt.test, size = 10, maxit = 400,
decay = 0.01)
# Let's stop here. How did we do?
(train.tbl <- table (opt.test$V65,
predict (opt.nnet, type = "class"))) # training set
1 - sum (diag(train.tbl)) / sum (train.tbl) # 1%-ish error rate
# How do we do on new data?
opt.train <- read.csv ("optdigits.train.txt", header=F) #
opt.train$V65 <- factor (opt.train$V65)
(test.tbl <- table (opt.train$V65,
predict (opt.nnet, opt.train, type = "class"))) # test set
1 - sum (diag(test.tbl)) / sum (test.tbl) # 10%-ish error rate
#
# Can we do better?
#
# Continue optimizing from the previous fit's weights (warm start).
opt.nnet <- nnet (V65 ~ ., data = opt.test, size = 10, maxit = 400,
Wts = opt.nnet$wts, decay = 0.01) # keep fitting
(train.tbl <- table (opt.test$V65,
predict (opt.nnet, type = "class"))) # training set
1 - sum (diag(train.tbl)) / sum (train.tbl) # 0% error rate (?!)
(test.tbl <- table (opt.train$V65,
predict (opt.nnet, opt.train, type = "class"))) # test set
1 - sum (diag(test.tbl)) / sum (test.tbl) # 8% error rate
| /Week 4/R/Nnet2.R | no_license | mwickers1/Statistical-Machine-Learning | R | false | false | 3,311 | r | ########################################
#
# Week 4: "Real" NN using iris data
#
########################################
library(nnet)
#
# Look at the iris data
#
str(iris)
#
# Play with colors a bit and make a plot
#
col <- c("orange","blue","red")
col <- adjustcolor(col,alpha=.4) # cool function
col <- col[iris$Species]
pairs( iris[ , -5], col = col, pch =19, cex=1.2)
#
# Look at the nnet help page
#
help(nnet)
#
# Now train a NN
# nnet "only" supports one hidden layer
# Species is a factor, so our nnet should give back
# a vector of predicted Species probabilities for each obs.
#
set.seed(94) # set random number seed, for reproducibility
# The net initializes the weights randomly, then tries to optimize,
# but beware of local minima
iris.94 <- nnet(Species ~ ., data = iris, size = 3)
iris.94
#
# How well did we do?
#
table(iris$Species, predict(iris.94, type = "class"))
summary(iris.94) # note the spread of the weights
#
# Let's try to start where iris.94 left out and "jitter" our way out of the flat spot
#
iris.x <- nnet(Species~., data=iris, size=3,
Wts=jitter(iris.94$wts))# Nope!
#
# Try different starting values
#
set.seed(80)
iris.80 <- nnet(Species ~ ., data = iris, size = 3)
table (iris$Species, predict (iris.80,type = "class")) # much better
#
# Standardize the variables and regularize (with "decay" parameter)
# If we scale, we need to save the mean and SD values for use with
# a test set.
#
set.seed(94)
iris.scaled <- scale (iris[,1:4], center = T, scale = TRUE)
iris.new <- data.frame(iris.scaled, Species=iris[,5])
iris.W.94 <- nnet(Species ~., decay = .01, data = iris.new, size = 3)
summary (iris.W.94) # less spread-out weights
attributes (iris.scaled) # keep track of mean and SD
# We would scale new data like this:
# newdata <- scale (newdata,
# mean = attributes (iris.scaled)$'scaled:center',
# scale = attributes (iris.scaled)$'scaled:scale')
#
# Let's try something a little bigger. Let's use the test set
# from optdigits to train, and the training set as a test set
# (for reasons of size and time).
#
opt.test <- read.csv ("optdigits.test.txt", header=F) # remember this?
opt.test$V65 <- factor (opt.test$V65)
set.seed (1230)
opt.nnet <- nnet (V65 ~ ., data = opt.test, size = 10, maxit = 400,
decay = 0.01)
# Let's stop here. How did we do?
(train.tbl <- table (opt.test$V65,
predict (opt.nnet, type = "class"))) # training set
1 - sum (diag(train.tbl)) / sum (train.tbl) # 1%-ish error rate
# How do we do on new data?
opt.train <- read.csv ("optdigits.train.txt", header=F) #
opt.train$V65 <- factor (opt.train$V65)
(test.tbl <- table (opt.train$V65,
predict (opt.nnet, opt.train, type = "class"))) # test set
1 - sum (diag(test.tbl)) / sum (test.tbl) # 10%-ish error rate
#
# Can we do better?
#
opt.nnet <- nnet (V65 ~ ., data = opt.test, size = 10, maxit = 400,
Wts = opt.nnet$wts, decay = 0.01) # keep fitting
(train.tbl <- table (opt.test$V65,
predict (opt.nnet, type = "class"))) # training set
1 - sum (diag(train.tbl)) / sum (train.tbl) # 0% error rate (?!)
(test.tbl <- table (opt.train$V65,
predict (opt.nnet, opt.train, type = "class"))) # test set
1 - sum (diag(test.tbl)) / sum (test.tbl) # 8% error rate
|
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810536106953e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(8L, 3L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result) | /CNull/inst/testfiles/communities_individual_based_sampling_alpha/AFL_communities_individual_based_sampling_alpha/communities_individual_based_sampling_alpha_valgrind_files/1615783602-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 329 | r | testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810536106953e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(8L, 3L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result) |
#' @title Get Quota
#' @description Get Quota
#' @template dots
#' @examples
#' \dontrun{
#' get_quota()
#' }
#' @export
get_quota <- function(...) {
query <- list(Action = "GetSendQuota")
r <- sesPOST(query = query, ...)
return(r)
}
#' @title Get Statistics
#' @description Get Statistics
#' @template dots
#' @examples
#' \dontrun{
#' get_statistics()
#' }
#' @export
get_statistics <- function(...) {
query <- list(Action = "GetSendStatistics")
r <- sesPOST(query = query, ...)
return(r)
}
| /R/statistics.R | no_license | metanoid/aws.ses | R | false | false | 523 | r | #' @title Get Quota
#' @description Get Quota
#' @template dots
#' @examples
#' \dontrun{
#' get_quota()
#' }
#' @export
get_quota <- function(...) {
query <- list(Action = "GetSendQuota")
r <- sesPOST(query = query, ...)
return(r)
}
#' @title Get Statistics
#' @description Get Statistics
#' @template dots
#' @examples
#' \dontrun{
#' get_statistics()
#' }
#' @export
get_statistics <- function(...) {
query <- list(Action = "GetSendStatistics")
r <- sesPOST(query = query, ...)
return(r)
}
|
library(shiny)
shinyUI(fluidPage(
titlePanel("PAM - Prediction Analysis of Microarrays"),
fluidRow(
column(3,
wellPanel(
fileInput(inputId = "iFile", label = "", accept="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"),
selectInput("analysisType", "", c("Classification" = "Classification", "Survival" = "Survival", "Regression" = "Regression")),
conditionalPanel(condition = "input.analysisType == 'Classification'",
numericInput("classLabel", label = "Class labels row", min = 1, max = 5, value = NULL, step = 1)
),
conditionalPanel(condition = "input.analysisType == 'Survival'",
numericInput("survivalTimeLabel", label = "Survival Times row", min = 1, max = 5, value = NULL, step = 1),
numericInput("censoringStatusLabel", label = "Censoring Status row", min = 1, max = 5, value = NULL, step = 1)
),
conditionalPanel(condition = "input.analysisType == 'Regression'",
numericInput("outcomeValueLabel", label = "Outcome Values row", min = 1, max = 5, value = NULL, step = 1)
),
numericInput("sampleLabel", label = "Sample labels row", min = 1, max = 5, value = NULL, step = 1),
numericInput("batchLabel", label = "Batch labels row", min = 1, max = 5, value = NULL, step = 1),
numericInput("expressionStart", label = "Expression data row", min = 1, max = 5, value = NULL, step = 1),
textInput("dir", "Paste the filepath to save the output", value = getwd()),
textInput("fname", "Type the file name you would like to save as", value = "result"),
conditionalPanel(condition = "input.analysisType == 'Classification'",
actionButton("saveButton", "Save")
),
conditionalPanel(condition = "input.analysisType == 'Survival' || input.analysisType == 'Regression'",
actionButton("saveButton2", "Save")
)
),
wellPanel(
conditionalPanel(condition = "input.analysisType == 'Classification'",
uiOutput(outputId = "threshold"),
numericInput("s0percentile", label = "Std. Dev. Factor S0 percentile (0-100)", min = 0, max = 100, value = 50, step = 0.0001),
radioButtons("sign.contrast", "Contrast Sign", c("Both" = "both", "Positive" = "positive", "Negative" = "negative")),
radioButtons("classPrior", "Class Prior", c("Sample Prior" = "sampleprior", "Uniform Prior" = "uniformprior", "Custom Prior" = "customprior")),
conditionalPanel(condition = "input.classPrior == 'customprior'",
uiOutput(outputId = "customvalue")
)
),
conditionalPanel(condition = "input.analysisType == 'Survival' || input.analysisType == 'Regression'",
uiOutput(outputId = "threshold2")
),
conditionalPanel(condition = "input.analysisType == 'Survival' || input.analysisType == 'Regression'",
numericInput("princomp", label = "Princ Comp number for gene scores", min = 1, max = 3, value = NULL, step = 1)
),
conditionalPanel(condition = "input.analysisType == 'Survival'",
numericInput("shrinkage", label ="Shrinkage", min = 0, value = NULL, step = 0.000001)
),
numericInput("randomSeed", label = "Random Number Generator Seed", min = 0, max = 1000000, value = 420473, step = 1),
checkboxInput("cuberoot", label = "Transform by cube root?", value = FALSE),
checkboxInput("center", label = "Center columns?", value = FALSE),
checkboxInput("scale", label = "Scale columns?", value = FALSE),
numericInput("numberOfNeighbors", "K-Nearest Neighbors Imputer: Number of Neighbors", value= 10, step=1)
)
),
column(9,
conditionalPanel(condition = "input.analysisType == 'Classification'",
tabsetPanel(id = "PAM",
tabPanel("Data", h3(textOutput("originalDataText")), dataTableOutput("dat"), h3(textOutput("imputedDataText")), dataTableOutput("imputedX"), h3(textOutput("testDataText")), dataTableOutput("testdat") ),
tabPanel("Training", h3(textOutput("trainErrorPlotText")), plotOutput("plotTrainError"), h3(textOutput("confusionTrainText")), tableOutput("pamrConfusionTrain"), h3(textOutput("listgenesText")), tableOutput("listgenes"), h3(textOutput("centroidText")),plotOutput("plotcen"), h3(textOutput("fdrText")), tableOutput("fdr"), h3(textOutput("fdrPlotText")), plotOutput("fdrPlot")),
tabPanel("Cross Validation", h3(textOutput("overallText")), plotOutput("plotcv"), h3(textOutput("individualText")), plotOutput("plotcv2"), h3(textOutput("plotcvText")), plotOutput("plotcvprob"), h3(textOutput("cvConfusionMatrix")), tableOutput("pamrConfusion")),
tabPanel("Test Set Prediction", fileInput(inputId = "testFile", label = "Test Set", accept="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"), h3(textOutput("testErrorPlotText")), plotOutput("plotTestError"), h3(textOutput("predictPlotText")), plotOutput("plotpredprob"), h3(textOutput("predictTableText")), tableOutput("predict") ),
tabPanel("Settings", h3(textOutput("settingsText")), tableOutput("settings"), h3(textOutput("settingsPriorText")), tableOutput("settingsPrior"))
)
),
conditionalPanel(condition = "input.analysisType == 'Survival' || input.analysisType == 'Regression'",
tabsetPanel(id = "PAMSurv",
tabPanel("Data", h3(textOutput("originalXText")), dataTableOutput("survdata"), h3(textOutput("imputedXSurvText")), dataTableOutput("imputedXSurv"), h3(textOutput("testSurvText")), dataTableOutput("survTestData")),
tabPanel("Training", fileInput(inputId = "competingPredictorFile", label = "Competing Predictors", accept="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"), h3(textOutput("survTrainErrorText")), plotOutput("plotLrtest"), h3(textOutput("listSurvGenesText")), tableOutput("listfeatures"), h3(textOutput("responsePredictionText")), plotOutput("survPredictionPlot")),
tabPanel("Cross Validation", h3(textOutput("plotCvSurvText")), plotOutput("plotcvsurv"), h3(textOutput("plotredLrtestText")), plotOutput("plotredLrtest")),
tabPanel("Test Set Prediction", fileInput(inputId = "testFileSurv", label = "Test Set", accept="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"), fileInput(inputId = "competingPredictorFitFile", label = "Fit with Competing Predictors", accept="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"), h3(textOutput("lrtestObjTestText")), plotOutput("plotLrtestTest"), h3(textOutput("predictionInfoText")), tableOutput("predictionscore"), tableOutput("coeftable"), tableOutput("teststatTable"), h3(textOutput("responsePredictionPlotText")), plotOutput("responsePredictionPlot"), h3(textOutput("rainbowPlotText")), plotOutput("rainbowPlot"))
)
)
)
)
)) | /ui.R | no_license | MikeJSeo/PAM | R | false | false | 6,853 | r | library(shiny)
shinyUI(fluidPage(
titlePanel("PAM - Prediction Analysis of Microarrays"),
fluidRow(
column(3,
wellPanel(
fileInput(inputId = "iFile", label = "", accept="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"),
selectInput("analysisType", "", c("Classification" = "Classification", "Survival" = "Survival", "Regression" = "Regression")),
conditionalPanel(condition = "input.analysisType == 'Classification'",
numericInput("classLabel", label = "Class labels row", min = 1, max = 5, value = NULL, step = 1)
),
conditionalPanel(condition = "input.analysisType == 'Survival'",
numericInput("survivalTimeLabel", label = "Survival Times row", min = 1, max = 5, value = NULL, step = 1),
numericInput("censoringStatusLabel", label = "Censoring Status row", min = 1, max = 5, value = NULL, step = 1)
),
conditionalPanel(condition = "input.analysisType == 'Regression'",
numericInput("outcomeValueLabel", label = "Outcome Values row", min = 1, max = 5, value = NULL, step = 1)
),
numericInput("sampleLabel", label = "Sample labels row", min = 1, max = 5, value = NULL, step = 1),
numericInput("batchLabel", label = "Batch labels row", min = 1, max = 5, value = NULL, step = 1),
numericInput("expressionStart", label = "Expression data row", min = 1, max = 5, value = NULL, step = 1),
textInput("dir", "Paste the filepath to save the output", value = getwd()),
textInput("fname", "Type the file name you would like to save as", value = "result"),
conditionalPanel(condition = "input.analysisType == 'Classification'",
actionButton("saveButton", "Save")
),
conditionalPanel(condition = "input.analysisType == 'Survival' || input.analysisType == 'Regression'",
actionButton("saveButton2", "Save")
)
),
wellPanel(
conditionalPanel(condition = "input.analysisType == 'Classification'",
uiOutput(outputId = "threshold"),
numericInput("s0percentile", label = "Std. Dev. Factor S0 percentile (0-100)", min = 0, max = 100, value = 50, step = 0.0001),
radioButtons("sign.contrast", "Contrast Sign", c("Both" = "both", "Positive" = "positive", "Negative" = "negative")),
radioButtons("classPrior", "Class Prior", c("Sample Prior" = "sampleprior", "Uniform Prior" = "uniformprior", "Custom Prior" = "customprior")),
conditionalPanel(condition = "input.classPrior == 'customprior'",
uiOutput(outputId = "customvalue")
)
),
conditionalPanel(condition = "input.analysisType == 'Survival' || input.analysisType == 'Regression'",
uiOutput(outputId = "threshold2")
),
conditionalPanel(condition = "input.analysisType == 'Survival' || input.analysisType == 'Regression'",
numericInput("princomp", label = "Princ Comp number for gene scores", min = 1, max = 3, value = NULL, step = 1)
),
conditionalPanel(condition = "input.analysisType == 'Survival'",
numericInput("shrinkage", label ="Shrinkage", min = 0, value = NULL, step = 0.000001)
),
numericInput("randomSeed", label = "Random Number Generator Seed", min = 0, max = 1000000, value = 420473, step = 1),
checkboxInput("cuberoot", label = "Transform by cube root?", value = FALSE),
checkboxInput("center", label = "Center columns?", value = FALSE),
checkboxInput("scale", label = "Scale columns?", value = FALSE),
numericInput("numberOfNeighbors", "K-Nearest Neighbors Imputer: Number of Neighbors", value= 10, step=1)
)
),
column(9,
conditionalPanel(condition = "input.analysisType == 'Classification'",
tabsetPanel(id = "PAM",
tabPanel("Data", h3(textOutput("originalDataText")), dataTableOutput("dat"), h3(textOutput("imputedDataText")), dataTableOutput("imputedX"), h3(textOutput("testDataText")), dataTableOutput("testdat") ),
tabPanel("Training", h3(textOutput("trainErrorPlotText")), plotOutput("plotTrainError"), h3(textOutput("confusionTrainText")), tableOutput("pamrConfusionTrain"), h3(textOutput("listgenesText")), tableOutput("listgenes"), h3(textOutput("centroidText")),plotOutput("plotcen"), h3(textOutput("fdrText")), tableOutput("fdr"), h3(textOutput("fdrPlotText")), plotOutput("fdrPlot")),
tabPanel("Cross Validation", h3(textOutput("overallText")), plotOutput("plotcv"), h3(textOutput("individualText")), plotOutput("plotcv2"), h3(textOutput("plotcvText")), plotOutput("plotcvprob"), h3(textOutput("cvConfusionMatrix")), tableOutput("pamrConfusion")),
tabPanel("Test Set Prediction", fileInput(inputId = "testFile", label = "Test Set", accept="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"), h3(textOutput("testErrorPlotText")), plotOutput("plotTestError"), h3(textOutput("predictPlotText")), plotOutput("plotpredprob"), h3(textOutput("predictTableText")), tableOutput("predict") ),
tabPanel("Settings", h3(textOutput("settingsText")), tableOutput("settings"), h3(textOutput("settingsPriorText")), tableOutput("settingsPrior"))
)
),
conditionalPanel(condition = "input.analysisType == 'Survival' || input.analysisType == 'Regression'",
tabsetPanel(id = "PAMSurv",
tabPanel("Data", h3(textOutput("originalXText")), dataTableOutput("survdata"), h3(textOutput("imputedXSurvText")), dataTableOutput("imputedXSurv"), h3(textOutput("testSurvText")), dataTableOutput("survTestData")),
tabPanel("Training", fileInput(inputId = "competingPredictorFile", label = "Competing Predictors", accept="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"), h3(textOutput("survTrainErrorText")), plotOutput("plotLrtest"), h3(textOutput("listSurvGenesText")), tableOutput("listfeatures"), h3(textOutput("responsePredictionText")), plotOutput("survPredictionPlot")),
tabPanel("Cross Validation", h3(textOutput("plotCvSurvText")), plotOutput("plotcvsurv"), h3(textOutput("plotredLrtestText")), plotOutput("plotredLrtest")),
tabPanel("Test Set Prediction", fileInput(inputId = "testFileSurv", label = "Test Set", accept="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"), fileInput(inputId = "competingPredictorFitFile", label = "Fit with Competing Predictors", accept="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"), h3(textOutput("lrtestObjTestText")), plotOutput("plotLrtestTest"), h3(textOutput("predictionInfoText")), tableOutput("predictionscore"), tableOutput("coeftable"), tableOutput("teststatTable"), h3(textOutput("responsePredictionPlotText")), plotOutput("responsePredictionPlot"), h3(textOutput("rainbowPlotText")), plotOutput("rainbowPlot"))
)
)
)
)
)) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/predlinear.R
\name{predlinear}
\alias{predlinear}
\title{Determine the Effective Dose from a Linear Regression Fit}
\usage{
predlinear(pct, LWmod, simple = FALSE)
}
\arguments{
\item{pct}{A numeric vector of effects (in percents) for which to estimate the
effective dose(s).}
\item{LWmod}{If \code{simple=TRUE}, a numeric vector of length two giving the intercept
and slope of the linear relation between the dose (x, the concentration
of the applied chemical on the log10 scale), and the proportion of
affected individuals (y, on the probit scale, with 0s converted to
0.1\% and 1s converted to 99.9\%).
If \code{simple=FALSE}, a list with the results of fitting a Litchfield and
Wilcoxon model to dose-effect data,
the output from \code{\link{LWestimate}}.}
\item{simple}{A logical scalar indicating whether to carry out a simple estimation of
effective doses from the intercept and slope (TRUE),
or an estimation of effective doses with confidence intervals from the
Litchfield and Wilcoxon model (default, FALSE).}
}
\value{
If \code{simple=TRUE}, a numeric vector the same length as \code{pct} with
the estimated effective doses.
If \code{simple=FALSE}, an n*4 numeric matrix with the given effects
(\code{pct}), the effective doses (\code{ED}), and Litchfield and
Wilcoxon's (1949) 95\% confidence intervals for the effective doses
(\code{lower} and \code{upper}).
The number of rows of the matrix, n, is the length of \code{pct}.
}
\description{
Determine the effective dose for a specified percent effect from the
intercept and slope of a linear regression.
}
\details{
Follows methods outlined in Litchfield and Wilcoxon (1949).
Specifically, for the 95\% confidence intervals, see page 105, and
equation 13 in the Appendix (corresponding to Nomograph 4).
}
\examples{
predlinear(c(16, 50, 84, 99.9), c(1.700875, 2.199559), simple=TRUE)
dose <- c(0.0625, 0.125, 0.25, 0.5, 1)
ntested <- rep(8, 5)
nalive <- c(1, 4, 4, 7, 8)
mydat <- dataprep(dose=dose, ntot=ntested, nfx=nalive)
fLW <- LWestimate(fitLWauto(mydat), mydat)
predlinear(c(25, 50, 99.9), fLW)
}
\references{
Litchfield, JT Jr. and F Wilcoxon. 1949.
A simplified method of evaluating dose-effect experiments.
Journal of Pharmacology and Experimental Therapeutics 96(2):99-113.
\href{http://jpet.aspetjournals.org/content/96/2/99.abstract}{[link]}.
}
| /man/predlinear.Rd | no_license | JVAdams/LW1949 | R | false | true | 2,457 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/predlinear.R
\name{predlinear}
\alias{predlinear}
\title{Determine the Effective Dose from a Linear Regression Fit}
\usage{
predlinear(pct, LWmod, simple = FALSE)
}
\arguments{
\item{pct}{A numeric vector of effects (in percents) for which to estimate the
effective dose(s).}
\item{LWmod}{If \code{simple=TRUE}, a numeric vector of length two giving the intercept
and slope of the linear relation between the dose (x, the concentration
of the applied chemical on the log10 scale), and the proportion of
affected individuals (y, on the probit scale, with 0s converted to
0.1\% and 1s converted to 99.9\%).
If \code{simple=FALSE}, a list with the results of fitting a Litchfield and
Wilcoxon model to dose-effect data,
the output from \code{\link{LWestimate}}.}
\item{simple}{A logical scalar indicating whether to carry out a simple estimation of
effective doses from the intercept and slope (TRUE),
or an estimation of effective doses with confidence intervals from the
Litchfield and Wilcoxon model (default, FALSE).}
}
\value{
If \code{simple=TRUE}, a numeric vector the same length as \code{pct} with
the estimated effective doses.
If \code{simple=FALSE}, an n*4 numeric matrix with the given effects
(\code{pct}), the effective doses (\code{ED}), and Litchfield and
Wilcoxon's (1949) 95\% confidence intervals for the effective doses
(\code{lower} and \code{upper}).
The number of rows of the matrix, n, is the length of \code{pct}.
}
\description{
Determine the effective dose for a specified percent effect from the
intercept and slope of a linear regression.
}
\details{
Follows methods outlined in Litchfield and Wilcoxon (1949).
Specifically, for the 95\% confidence intervals, see page 105, and
equation 13 in the Appendix (corresponding to Nomograph 4).
}
\examples{
predlinear(c(16, 50, 84, 99.9), c(1.700875, 2.199559), simple=TRUE)
dose <- c(0.0625, 0.125, 0.25, 0.5, 1)
ntested <- rep(8, 5)
nalive <- c(1, 4, 4, 7, 8)
mydat <- dataprep(dose=dose, ntot=ntested, nfx=nalive)
fLW <- LWestimate(fitLWauto(mydat), mydat)
predlinear(c(25, 50, 99.9), fLW)
}
\references{
Litchfield, JT Jr. and F Wilcoxon. 1949.
A simplified method of evaluating dose-effect experiments.
Journal of Pharmacology and Experimental Therapeutics 96(2):99-113.
\href{http://jpet.aspetjournals.org/content/96/2/99.abstract}{[link]}.
}
|
library(shiny)
library(shinydashboard)
library(shinyWidgets)
#nonduplicated <- read.csv(here::here('Projections_2018_28_cleaned.csv'))
#source(here::here('generating_1st_cluster_graphs.R'))
#source(here::here('generating_2nd_cluster_graphs.R'))
#source(here::here("generating_3rd_cluster_graphs.R"))
#info_text <- read.csv(here::here('informational_text.csv'))
source('text_objects.R')
# source('all_graph_objects_in_one_place.R')
# source('ggobjects.R')
all_ggplots_list <- readRDS("all_ggplots_list.Rds")
cluster_names_vec <- readRDS("cluster_names_vec.Rds")
clusters <- c(
"Agriculture, Food, and Natural Resources",
"Architecture and Construction",
"Arts, Audio/Video Technology, and Communications",
"Business Management and Administration",
"Education and Training",
"Finance",
"Government and Public Administration",
"Health Science",
"Hospitality and Tourism",
"Human Services",
"Information Technology",
"Law, Public Safety, Corrections, and Security",
"Manufacturing",
"Marketing",
"Science, Technology, Engineering, and Mathematics",
"Transportation, Distribution, and Logistics",
"Energy"
)
# dbHeader <-
# dashboardHeader(
# tags$li(
# class = "dropdown",
# tags$style(".main-header {max-height: 50px}"),
# tags$style(
# ".main-header .logo {height: 50px;
# line-height: 55px !important;
# padding: 0 0px;}"
# ),
# tags$style(
# ".main-header .sidebar-toggle {height:50px;
# line-height: 55px !important;
# padding: 0 00px;}"
# )
# ),
# # tags$li(a(
# # href = 'https://ctetrailblazers.org/',
# # tags$img(
# # src = 'http://ctetrailblazers.org/wp-content/uploads/2015/01/Header1000X288_smallerimage.jpg',
# # height = '82'
# # )
# # ), class = "dropdown"),
# # tags$li(a(
# # href = 'https://ceps.coopercenter.org/',
# # tags$img(
# # src = 'CCPS-Logo_Horiz_Color.png',
# # height = '62'
# # )
# # ), class = "dropdown"),
# titleWidth = 0
# )
#dbHeader <- dashboardHeader(
#title = HTML(
#'<h1 style="text-align:left;">Virginia Labor Market: Career Cluster Analysis</h1>'
# )
#)
black_line <- hr(style = "border-top: 1px solid #000000;")
ui <- fluidPage( #titlePanel(dbHeader), commented out to remove whitespace from logo removal
fluidRow(
column(width = 12,
HTML(
'<h1 style="text-align:left;">Virginia Labor Market: Career Cluster Analysis</h1>
<br>'
)
)
),
fluidRow(
# column(4,
# wellPanel(
# h3(#"Please select a cluster from the dropdown menu below to see charts and information on the cluster"
# ),
# # selectInput(
# # inputId = "clusters",
# # label = "Select a Cluster to Display",
# # choices = clusters
# # ), #close selection object
# # h3("What trends do we currently see? What trends may we anticipate?"),
# # textOutput('cluster_trend_bullet_1'),
# # br(),
# # textOutput('cluster_trend_bullet_2'),
# # br(),
# # textOutput('cluster_trend_bullet_3'),
# # br(),
# # textOutput('cluster_trend_bullet_4'),
# # br(),
# # textOutput('cluster_trend_bullet_5')
# ) #close wellPanel
# ), #close column
# column(12,
mainPanel(
wellPanel(
selectInput(
inputId = "clusters",
label = "Select a Cluster to Display",
choices = clusters
) #close selection object
), # close wellPanel
#box(
h2('Education'),
h5("Distribution of predominant education levels, by career pathway"),
plotOutput("plot1"),
h5('Predominant education levels among occupations in each career pathway within the cluster, 2018 - 2028.
Reported percentages are based on the number of occupations at each educational level within a pathway.
Percentages are not based on the number of workers employed in each pathway.
Reported education levels reflect both the prevailing requirements of occupations and the typical level of education attained by workers employed in the occupations.
Source: Determined by Trailblazers based on national-level data from the U.S. Bureau of Labor Statistics.')
#) #close box
,
black_line,
#box(
h2("Occupational Growth"),
h5("Projected growth in number of jobs, Virginia, 2018 - 2028"),
plotOutput("plot2"),
h5("Projected change in the number of jobs in Virginia within each career pathway, 2018 - 2028.
Shows change between estimated number of jobs in each pathway in 2018 versus projected number in 2028. (Projected percent change in parentheses.)
Source: Virginia Employment Commission.")
# ) #close box
,
black_line,
# box(
h2('Wages'),
h5("Median 2018 annual wages in Virginia"),
plotOutput("plot3"),
h5("Figure displays the median 2018 Virginia annual wages of all occupations within each pathway.
Source: Virginia Employment Commission.")
# ), # close box
,
black_line,
# add trends
h2("Trends"),
textOutput('cluster_trend_bullet_1'),
br(),
textOutput('cluster_trend_bullet_2'),
br(),
textOutput('cluster_trend_bullet_3'),
br(),
textOutput('cluster_trend_bullet_4'),
br(),
textOutput('cluster_trend_bullet_5')
) #close main panel
# ) #close column
) #Close fluidRow
) #close UI
server <- function(input, output){
clusterSummary <- reactive({switch(
input$clusters,
"Agriculture, Food, and Natural Resources"= Ag_text,
"Architecture and Construction"=Arc_text,
"Arts, Audio/Video Technology, and Communications"=Arts_text,
"Business Management and Administration"=Bus_text,
"Education and Training"=Edu_text,
"Finance"=Fin_text,
"Government and Public Administration"= Gov_text,
"Health Science"=Health_text,
"Hospitality and Tourism"=Tour_text,
"Human Services"=Hum_text,
"Information Technology"=IT_text,
"Law, Public Safety, Corrections, and Security"=Law_text,
"Manufacturing"=Manuf_text,
"Marketing"=Mark_text,
"Science, Technology, Engineering, and Mathematics"=STEM_text,
"Transportation, Distribution, and Logistics"=Transp_text,
"Energy"=Energy_text
)})
graph1 <- reactive({switch(
input$clusters,
all_ggplots_list$edu[[which(cluster_names_vec == input$clusters)]]
)})
graph2 <- reactive({switch(
input$clusters,
all_ggplots_list$job_growth[[which(cluster_names_vec == input$clusters)]]
)})
graph3 <- reactive({switch(
input$clusters,
all_ggplots_list$wages[[which(cluster_names_vec == input$clusters)]]
)})
output$plot1 <- renderPlot({
#clusterx <- input$clusters
#generate_cluster_graph_1(nonduplicated, clusterx)
graph1()
})
output$plot2 = renderPlot({
#clusterx <- input$clusters
#generate_cluster_graph_2(nonduplicated,clusterx)
graph2()
})
output$plot3 = renderPlot({
#clusterx <- input$clusters
#generate_cluster_graph_3(nonduplicated,clusterx)
graph3()
})
output$cluster_trend_bullet_1 <- renderText({
text_list <- clusterSummary()
text_list[1]
})
output$cluster_trend_bullet_2 <- renderText({
text_list <- clusterSummary()
text_list[2]
})
output$cluster_trend_bullet_3 <- renderText({
text_list <- clusterSummary()
text_list[3]
})
output$cluster_trend_bullet_4 <- renderText({
text_list <- clusterSummary()
text_list[4]
})
output$cluster_trend_bullet_5 <- renderText({
text_list <- clusterSummary()
text_list[5]
})
}
shinyApp(ui=ui, server=server) | /dashboard/app.R | permissive | coopercenter/cte-trailblazers | R | false | false | 9,509 | r | library(shiny)
library(shinydashboard)
library(shinyWidgets)
#nonduplicated <- read.csv(here::here('Projections_2018_28_cleaned.csv'))
#source(here::here('generating_1st_cluster_graphs.R'))
#source(here::here('generating_2nd_cluster_graphs.R'))
#source(here::here("generating_3rd_cluster_graphs.R"))
#info_text <- read.csv(here::here('informational_text.csv'))
source('text_objects.R')
# source('all_graph_objects_in_one_place.R')
# source('ggobjects.R')
all_ggplots_list <- readRDS("all_ggplots_list.Rds")
cluster_names_vec <- readRDS("cluster_names_vec.Rds")
clusters <- c(
"Agriculture, Food, and Natural Resources",
"Architecture and Construction",
"Arts, Audio/Video Technology, and Communications",
"Business Management and Administration",
"Education and Training",
"Finance",
"Government and Public Administration",
"Health Science",
"Hospitality and Tourism",
"Human Services",
"Information Technology",
"Law, Public Safety, Corrections, and Security",
"Manufacturing",
"Marketing",
"Science, Technology, Engineering, and Mathematics",
"Transportation, Distribution, and Logistics",
"Energy"
)
# dbHeader <-
# dashboardHeader(
# tags$li(
# class = "dropdown",
# tags$style(".main-header {max-height: 50px}"),
# tags$style(
# ".main-header .logo {height: 50px;
# line-height: 55px !important;
# padding: 0 0px;}"
# ),
# tags$style(
# ".main-header .sidebar-toggle {height:50px;
# line-height: 55px !important;
# padding: 0 00px;}"
# )
# ),
# # tags$li(a(
# # href = 'https://ctetrailblazers.org/',
# # tags$img(
# # src = 'http://ctetrailblazers.org/wp-content/uploads/2015/01/Header1000X288_smallerimage.jpg',
# # height = '82'
# # )
# # ), class = "dropdown"),
# # tags$li(a(
# # href = 'https://ceps.coopercenter.org/',
# # tags$img(
# # src = 'CCPS-Logo_Horiz_Color.png',
# # height = '62'
# # )
# # ), class = "dropdown"),
# titleWidth = 0
# )
#dbHeader <- dashboardHeader(
#title = HTML(
#'<h1 style="text-align:left;">Virginia Labor Market: Career Cluster Analysis</h1>'
# )
#)
black_line <- hr(style = "border-top: 1px solid #000000;")
# Shiny UI: a full-width title row, then a main panel containing the cluster
# selector, three annotated plots (education, occupational growth, wages)
# separated by horizontal rules, and five trend-bullet text outputs that are
# filled in by server(). `clusters` (the choice vector) is defined above.
ui <- fluidPage( #titlePanel(dbHeader), commented out to remove whitespace from logo removal
  fluidRow(
    column(width = 12,
           HTML(
             '<h1 style="text-align:left;">Virginia Labor Market: Career Cluster Analysis</h1>
             <br>'
           )
    )
  ),
  fluidRow(
    # column(4,
    #        wellPanel(
    #          h3(#"Please select a cluster from the dropdown menu below to see charts and information on the cluster"
    #          ),
    #          # selectInput(
    #          #   inputId = "clusters",
    #          #   label = "Select a Cluster to Display",
    #          #   choices = clusters
    #          # ), #close selection object
    #          # h3("What trends do we currently see? What trends may we anticipate?"),
    #          # textOutput('cluster_trend_bullet_1'),
    #          # br(),
    #          # textOutput('cluster_trend_bullet_2'),
    #          # br(),
    #          # textOutput('cluster_trend_bullet_3'),
    #          # br(),
    #          # textOutput('cluster_trend_bullet_4'),
    #          # br(),
    #          # textOutput('cluster_trend_bullet_5')
    #        ) #close wellPanel
    # ), #close column
    # column(12,
    mainPanel(
      # cluster selector; its value drives every output in server()
      wellPanel(
        selectInput(
          inputId = "clusters",
          label = "Select a Cluster to Display",
          choices = clusters
        ) #close selection object
      ), # close wellPanel
      #box(
      # --- Education section ---
      h2('Education'),
      h5("Distribution of predominant education levels, by career pathway"),
      plotOutput("plot1"),
      h5('Predominant education levels among occupations in each career pathway within the cluster, 2018 - 2028.
         Reported percentages are based on the number of occupations at each educational level within a pathway.
         Percentages are not based on the number of workers employed in each pathway.
         Reported education levels reflect both the prevailing requirements of occupations and the typical level of education attained by workers employed in the occupations.
         Source: Determined by Trailblazers based on national-level data from the U.S. Bureau of Labor Statistics.')
      #) #close box
      ,
      black_line,
      #box(
      # --- Occupational growth section ---
      h2("Occupational Growth"),
      h5("Projected growth in number of jobs, Virginia, 2018 - 2028"),
      plotOutput("plot2"),
      h5("Projected change in the number of jobs in Virginia within each career pathway, 2018 - 2028.
         Shows change between estimated number of jobs in each pathway in 2018 versus projected number in 2028. (Projected percent change in parentheses.)
         Source: Virginia Employment Commission.")
      # ) #close box
      ,
      black_line,
      # box(
      # --- Wages section ---
      h2('Wages'),
      h5("Median 2018 annual wages in Virginia"),
      plotOutput("plot3"),
      h5("Figure displays the median 2018 Virginia annual wages of all occupations within each pathway.
         Source: Virginia Employment Commission.")
      # ), # close box
      ,
      black_line,
      # add trends (bullet texts rendered by server())
      h2("Trends"),
      textOutput('cluster_trend_bullet_1'),
      br(),
      textOutput('cluster_trend_bullet_2'),
      br(),
      textOutput('cluster_trend_bullet_3'),
      br(),
      textOutput('cluster_trend_bullet_4'),
      br(),
      textOutput('cluster_trend_bullet_5')
    ) #close main panel
    # ) #close column
  ) #Close fluidRow
) #close UI
# Shiny server: maps the selected cluster to its trend-text vector and to the
# pre-built ggplot objects, and wires them to the UI outputs.
# Depends on objects defined elsewhere in this file: the *_text vectors,
# `all_ggplots_list` (lists $edu, $job_growth, $wages) and `cluster_names_vec`.
server <- function(input, output){
  # Trend-bullet text vector for the currently selected cluster.
  clusterSummary <- reactive({switch(
    input$clusters,
    "Agriculture, Food, and Natural Resources"= Ag_text,
    "Architecture and Construction"=Arc_text,
    "Arts, Audio/Video Technology, and Communications"=Arts_text,
    "Business Management and Administration"=Bus_text,
    "Education and Training"=Edu_text,
    "Finance"=Fin_text,
    "Government and Public Administration"= Gov_text,
    "Health Science"=Health_text,
    "Hospitality and Tourism"=Tour_text,
    "Human Services"=Hum_text,
    "Information Technology"=IT_text,
    "Law, Public Safety, Corrections, and Security"=Law_text,
    "Manufacturing"=Manuf_text,
    "Marketing"=Mark_text,
    "Science, Technology, Engineering, and Mathematics"=STEM_text,
    "Transportation, Distribution, and Logistics"=Transp_text,
    "Energy"=Energy_text
  )})
  # Index of the selected cluster into the pre-built ggplot lists.
  # Fix: the original wrapped each lookup in a one-armed switch() whose single
  # unnamed argument always acted as the fall-through default, so switch()
  # contributed nothing; the plain lookup is equivalent and clearer.
  cluster_index <- reactive({
    which(cluster_names_vec == input$clusters)
  })
  output$plot1 <- renderPlot({
    all_ggplots_list$edu[[cluster_index()]]
  })
  output$plot2 <- renderPlot({
    all_ggplots_list$job_growth[[cluster_index()]]
  })
  output$plot3 <- renderPlot({
    all_ggplots_list$wages[[cluster_index()]]
  })
  # One renderText per trend bullet (IDs cluster_trend_bullet_1..5).
  # lapply (rather than a for loop) gives each closure its own copy of `i`.
  lapply(1:5, function(i) {
    output[[paste0("cluster_trend_bullet_", i)]] <- renderText({
      clusterSummary()[i]
    })
  })
}
# Launch the app. Fix: the original line ended with a stray trailing "|"
# (an incomplete pipe), which is a syntax error.
shinyApp(ui = ui, server = server)
##########################################################################################################################################
#### ####
### ###
## ##
# E-GEOD-59867 PROCESSING #
## ##
### ###
#### ####
##########################################################################################################################################
# QC PIPELINE VERSION: AD_classification 1.0 single dataset, single tissue
# DATE: 12/05/2018
# ARRAY EXPRESS NUMBER: E-GEOD-59867
# DISORDER: CAD
# MICROARRAY PLATFORM: Affy
# EXPRESSION CHIP: Exon 1.0ST
# NUMBER OF SAMPLES:
# TISSUE: Whole Blood
#
# NOTES -
# using samples collection: on the 1st day of MI (admission) - no controls
#
##### SET PARAMETERS #####
# Start from a clean workspace. NOTE(review): rm(list = ls()) in scripts is
# generally discouraged; kept because this pipeline script assumes it.
rm(list=ls())
# Bug fix: the original line `options=(stringAsFactors=FALSE)` created a
# global variable named `options` (shadowing base::options) instead of
# calling options(), and misspelled "stringsAsFactors". Since R 4.0 the
# default is already FALSE; the call is kept for older R installations.
options(stringsAsFactors = FALSE)
##### LOAD LIBRARIES ####
library(Biobase)
library(GEOquery)
library(ArrayExpress)
library(affy)
library(lumi)
library(WGCNA)
library(pamr)
library(sva)
library(ggplot2)
library(reshape)
library(massiR)
library(gridExtra)
library(RCurl)
library(XML)
#library(hgu133plus2.db)
#library(hgu133a.db)
#library(hgu133b.db)
library(hugene10sttranscriptcluster.db)
#library(illuminaHumanv4.db)
#library(illuminaHumanv3.db)
##### SET DIRECTORIES ####
# NOTE(review): hard-coded absolute path; edit for your own checkout.
work_dir="/media/hamel/Workspace/Dropbox/Projects/AD-classification/1.Data/6.Cardiovascular_Disease/E-GEOD-59867"
setwd(work_dir)
# Helper: create `name` under `parent` (silently if it already exists, so the
# script can be re-run) and return its path. Uses file.path() instead of the
# original repeated paste(..., sep = "/") calls.
make_subdir <- function(parent, name) {
  subdir <- file.path(parent, name)
  dir.create(subdir, showWarnings = FALSE)
  subdir
}
# create directory for raw data
raw_dir <- make_subdir(work_dir, "Raw_Data")
# create directory for all Plots
plots_dir <- make_subdir(work_dir, "Preprocessing_Plots")
# create directory for boxplots + density plots
boxplots_density_plots_dir <- make_subdir(plots_dir, "Boxplots_Density_plots")
# create directory for PCA plots
pca_dir <- make_subdir(plots_dir, "PCA_Plots")
# create directory for sample network plots
sample_network_dir <- make_subdir(plots_dir, "Sample_Network_Plots")
# create directory for non-expressed (probe detection) threshold plots
probe_detection_plots_dir <- make_subdir(plots_dir, "Probe_Detection_Plots")
# create directory for QC'd expression data
clean_data_dir <- make_subdir(work_dir, "Clean_Data")
# create directory for associated papers (path not needed later)
make_subdir(work_dir, "Papers")
##### TEMP FIX FOR getAE FUNCTION ######
# EBI changed to https - their getAE function broken. here is temp fix
#
# NOTE(review): local re-implementation of ArrayExpress::getAE() with the
# EBI URLs switched from http to https. Downloads (or, with local = TRUE,
# locates in `sourcedir`) the MAGE-TAB components (SDRF / IDF / ADF) and the
# raw/processed zip archives for an ArrayExpress accession, optionally
# unpacking the archives. Returns a list: path, rawFiles, rawArchive,
# processedFiles, processedArchive, sdrf, idf, adf.
#   accession - ArrayExpress accession id, e.g. "E-GEOD-59867"
#   path      - destination directory for downloaded files
#   type      - "full", "raw", "processed" or "mageFilesOnly"
#   extract   - unpack downloaded zip archives?
#   local     - TRUE: use files already present in `sourcedir`
# Relies on ArrayExpress internals (extract.zip, getSDRFcolumn) and on
# RCurl/XML being attached. NOTE(review): in the SDRF-download failure branch
# below, `adffile = NULL` looks like it was meant to be `adfFiles` — confirm
# before relying on the ADF slot of the result in that error path.
https_getAE <- function (accession, path = getwd(), type = "full", extract = TRUE, local = FALSE, sourcedir = path) {
  # Resolve the URLs (remote mode) or file names (local mode) of every
  # MAGE-TAB component.
  if (!local) {
    baseURL = "https://www.ebi.ac.uk/arrayexpress/xml/v2/files"
    xmlURL = getURL(paste(baseURL, accession, sep = "/"))
    xml = xmlTreeParse(xmlURL, useInternalNodes = TRUE, isURL=FALSE)
    sdrfURL = xpathSApply(xml, "/files/experiment/file[kind='sdrf' and extension='txt']/url",
                          xmlValue)
    sdrfFile = xpathSApply(xml, "/files/experiment/file[kind='sdrf' and extension='txt']/name",
                           xmlValue)
    idfURL = xpathSApply(xml, "/files/experiment/file[kind='idf' and extension='txt']/url",
                         xmlValue)
    idfFile = xpathSApply(xml, "/files/experiment/file[kind='idf' and extension='txt']/name",
                          xmlValue)
    adfURL = xpathApply(xml, "/files/experiment/file[kind='adf' and extension='txt']/url",
                        xmlValue)
    adfFiles = xpathApply(xml, "/files/experiment/file[kind='adf' and extension='txt']/name",
                          xmlValue)
    rawArchiveURL = xpathApply(xml, "/files/experiment/file[kind='raw' and extension='zip']/url",
                               xmlValue)
    procArchiveURL = xpathApply(xml, "/files/experiment/file[kind='processed' and extension='zip']/url",
                                xmlValue)
  }
  else {
    # Local mode: find the files by naming convention inside `sourcedir`.
    allfiles = list.files(sourcedir)
    sdrfFile = allfiles[grep(paste(accession, ".sdrf.txt$",
                                   sep = ""), allfiles)]
    if (length(sdrfFile) == 0)
      stop("SDRF file not found in directory ", sourcedir)
    sdrfURL = paste("file:/", sourcedir, sdrfFile, sep = "/")
    idfFile = allfiles[grep(paste(accession, ".idf.txt$",
                                  sep = ""), allfiles)]
    if (length(idfFile) == 0)
      warning("IDF file not found in directory ", sourcedir)
    idfURL = paste("file:/", sourcedir, idfFile, sep = "/")
    # ADF references are read from the SDRF; fall back to any *.adf.txt.
    ph = try(read.AnnotatedDataFrame(sdrfFile, path = sourcedir,
                                     row.names = NULL, blank.lines.skip = TRUE, fill = TRUE,
                                     varMetadata.char = "$"))
    if (inherits(ph, "try-error")) {
      warning("Unable to retrieve ADF reference from SDRF. Reading any ADF in directory.")
      adfFiles = allfiles[grep(".adf.txt$", allfiles)]
    }
    else {
      adr = unique(pData(ph)[, getSDRFcolumn("ArrayDesignREF",
                                             varLabels(ph))])
      adfFiles = paste(adr, ".adf.txt", sep = "")
    }
    if (all(file.exists(file.path(sourcedir, adfFiles)))) {
      adfURL = paste("file:/", sourcedir, adfFiles, sep = "/")
      downloadADF = FALSE
    }
    else {
      filesURL = "https://www.ebi.ac.uk/arrayexpress/files"
      adfURL = paste(filesURL, adr, adfFiles, sep = "/")
      downloadADF = TRUE
    }
    rawArchiveURL = NULL
    procArchiveURL = NULL
    rawArchive = allfiles[grep(paste(accession, ".raw.[0-9]{1,}.zip",
                                     sep = ""), allfiles)]
    if (length(rawArchive) != 0)
      rawArchiveURL = paste("file:/", sourcedir, rawArchive,
                            sep = "/")
    else warning("No raw files found in directory ", sourcedir)
    processedArchive = allfiles[grep(paste(accession, ".processed.[0-9]{1,}.zip",
                                           sep = ""), allfiles)]
    if (length(processedArchive) != 0)
      procArchiveURL = paste("file:/", sourcedir, processedArchive,
                             sep = "/")
    else warning("No processed data files found in directory ",
                 sourcedir)
  }
  # Some experiments expose two SDRFs; prefer the hybridisation one.
  if (length(sdrfURL) > 1) {
    warning("Found two SDRF files: \n", paste(sdrfURL, "\n"))
    hybSDRF = grep("hyb.sdrf", sdrfURL)
    if (length(hybSDRF) > 0) {
      message("Choosing ", sdrfURL[hybSDRF])
      sdrfURL = sdrfURL[hybSDRF]
      sdrfFile = sdrfFile[hybSDRF]
    }
    else {
      warning("Unable to choose SDRF file. Please report experiment to miamexpress@ebi.ac.uk")
    }
  }
  # Download the ADF(s). NOTE(review): when !local, `downloadADF` is never
  # assigned, but || short-circuits on the first condition so this is safe.
  if (!local || path != sourcedir || downloadADF) {
    adfFiles <- lapply(adfURL, function(url) {
      filedest = paste(path, basename(url), sep = "/")
      dnld = try(download.file(url, filedest, mode = "wb"))
      if (inherits(dnld, "try-error") || file.info(filedest)$size ==
          0) {
        warning(paste(url, " does not exist or is empty. \n"),
                sep = "")
        adffile = NULL
      }
      else {
        adffile = basename(filedest)
      }
      return(adffile)
    })
    if (!is.null(adfFiles))
      adfFiles = unlist(adfFiles)
  }
  # Download SDRF/IDF and (depending on `type`) the raw/processed archives.
  if (!local || path != sourcedir) {
    sdrfFileDest = paste(path, sdrfFile, sep = "/")
    dnld = try(download.file(sdrfURL, sdrfFileDest, mode = "wb"))
    if (inherits(dnld, "try-error") || file.info(sdrfFileDest)$size ==
        0) {
      warning(paste(sdrfFile, " does not exist or is empty. The object will not have featureData or phenoData. \n"),
              sep = "")
      sdrfFile = NULL
      adffile = NULL
    }
    idfFileDest = paste(path, idfFile, sep = "/")
    dnld = try(download.file(idfURL, idfFileDest, mode = "wb"))
    if (inherits(dnld, "try-error") || file.info(idfFileDest)$size ==
        0) {
      warning(paste(idfFile, " does not exist or is empty. \n"),
              sep = "")
      idfFile = NULL
    }
    rawArchive = NULL
    processedArchive = NULL
    if (type != "mageFilesOnly" && !is.null(rawArchiveURL) &&
        (type == "full" || type == "raw")) {
      message("Copying raw data files\n")
      rawArchive <- lapply(rawArchiveURL, function(url) {
        filedest = paste(path, basename(url), sep = "/")
        dnld = try(download.file(url, filedest, mode = "wb"))
        if (inherits(dnld, "try-error") || file.info(filedest)$size ==
            0) {
          warning(paste(url, " does not exist or is empty. \n"),
                  sep = "")
        }
        else {
          return(filedest)
        }
      })
      if (!is.null(rawArchive)) {
        rawArchive = unlist(rawArchive)
        rawArchive = basename(rawArchive)
      }
    }
    if ((type != "mageFilesOnly" && type == "full" || type ==
         "processed") && !is.null(procArchiveURL)) {
      message("Copying processed data files\n")
      processedArchive <- lapply(procArchiveURL, function(url) {
        filedest = paste(path, basename(url), sep = "/")
        dnld = try(download.file(url, filedest, mode = "wb"))
        if (inherits(dnld, "try-error") || file.info(filedest)$size ==
            0) {
          warning(paste(url, " does not exist or is empty. \n"),
                  sep = "")
        }
        else {
          return(filedest)
        }
      })
      if (!is.null(processedArchive)) {
        processedArchive = unlist(processedArchive)
        processedArchive = basename(processedArchive)
      }
    }
  }
  # Optionally unpack the archives (extract.zip is an ArrayExpress internal).
  rawFiles = NULL
  processedFiles = NULL
  if (extract) {
    message("Unpacking data files")
    if (!is.null(rawArchive))
      rawFiles <- lapply(rawArchive, function(zipfile) {
        rawfiles = extract.zip(file = paste(path, zipfile,
                                            sep = "/"))
        return(rawfiles)
      })
    if (!is.null(processedArchive))
      processedFiles <- lapply(processedArchive, function(zipfile) {
        procfiles = extract.zip(file = paste(path, zipfile,
                                             sep = "/"))
        return(procfiles)
      })
    if (!is.null(rawFiles))
      rawFiles = unlist(rawFiles)
    if (!is.null(processedFiles))
      processedFiles = unlist(processedFiles)
  }
  res = list(path = path, rawFiles = rawFiles, rawArchive = rawArchive,
             processedFiles = processedFiles, processedArchive = processedArchive,
             sdrf = sdrfFile, idf = idfFile, adf = adfFiles)
  return(res)
}
##### DOWNLOAD RAW DATA #####
setwd(raw_dir)
#raw data only
#data_raw=getAE("E-GEOD-59867", type = "raw")
#processed data only
# data_raw=https_getAE("E-GEOD-59867", type = "processed")
#
# #all data
# data_raw=https_getAE("E-GEOD-59867", type = "full")
#
##### CREATE R EXPRESSION OBJECT #####
# METHOD 1 - convert MAGE-TAB files into expresssion set - USING RAW DATA
#expression_data = ae2bioc(mageFiles = data_raw)
#expression_data
# # METHOD 2 - convert MAGE-TAB files into expresssion set - USING RAW DATA
#
# expression_data<-ReadAffy()
#
# METHOD 3 - convert MAGE-TAB files into expresssion set - USING PROCESSED DATA
#
# NOTE(review): every assignment of `data_raw` above is commented out, so the
# two lines below error unless one of the download calls is run first.
# (METHOD 4 via GEO is what the rest of this script actually uses.)
cnames=getcolproc(data_raw)
cnames
expression_data=procset(data_raw, cnames[2])
#
# expression_data
# METHOD 4 - GEO - processed data
#ubnable to process through array express. used GEO instead
expression_data2 <- getGEO("GSE59867", GSEMatrix =TRUE, getGPL=FALSE)
# NOTE(review): at this point expression_data2 is a *list* of ExpressionSets,
# so `expression_data2@annotation` in the length > 1 branch would error
# (the usual GEO2R template greps a "GPL..." string against the list names).
# Harmless in practice only while getGEO returns a single platform.
if (length(expression_data2) > 1) idx <- grep(expression_data2@annotation, attr(expression_data2, "names")) else idx <- 1
expression_data2 <- expression_data2[[idx]]
expression_data2
# quick sanity checks on phenotype and expression content
head(pData(expression_data2))
head(exprs(expression_data2))[,1:5]
##### SET DATA PARAMETERS #####
# Per-dataset switches for this QC pipeline; the commented alternatives show
# the values used for other datasets/platforms in the wider project.
##
## dataset name to save as/use
##
dataset="E-GEOD-59867"
##
## disease
##
disease="CAD"
##
## Affymetrix or Illumina
##
Microarray_platform="Affymetrix"
#Microarray_platform="Illumina"
##
## Raw or pre-processed
##
#Data_format="Raw"
Data_format="Processed"
##
## probe detection thresohld to use
##
#Probe_Detection_Threshold=0.9
#Probe_Detection_Threshold=0.8
#Probe_Detection_Threshold=0.7
Probe_Detection_Threshold=0.6
##
## expression chip to use (annotation package prefix, see library() calls)
##
#expression_chip="hgu133plus2"
#expression_chip="hgu133a"
#expression_chip="hgu133b"
expression_chip="hugene10sttranscriptcluster"
#expression_chip="illuminaHumanv4"
#expression_chip="illuminaHumanv3"
##
## sample network threshold to use - samples less than Z.K threshold will be removed - need manual check
##
sample_network_ZK_threshold=-3
##
## massi R chip to use (massiR y.probes element name for gender prediction)
##
#massi_R_chip="illumina_humanwg_6_v1"
#massi_R_chip="illumina_humanwg_6_v2"
#massi_R_chip="illumina_humanwg_6_v1"
#massi_R_chip="illumina_humanht_12"
massi_R_chip="affy_hugene_1_0_st_v1"
#massi_R_chip="affy_hg_u133_plus_2"
##
## Phenotype info
##
phenotype_data<-pData(expression_data2)
head(phenotype_data)
names(phenotype_data)
##### SUBSET TO SAMPLES OF INTEREST #####
# Keep only the admission samples ("on the 1st day of MI"), matching the
# NOTES in the file header ("no controls").
dim(phenotype_data)
table(phenotype_data$characteristics_ch1.1)
phenotype_data_subset<-phenotype_data[phenotype_data$characteristics_ch1.1=="samples collection: on the 1st day of MI (admission)",]
dim(phenotype_data_subset)
table(phenotype_data_subset$characteristics_ch1.1)
# Bug fix: the original then re-assigned `phenotype_data_subset<-phenotype_data`
# (twice), silently discarding the subsetting above and keeping every sample.
# Those lines are removed so the admission-only subset is actually used.
#
# extract pheno of interest (each a one-column data.frame keyed by sample id)
# NOTE(review): column positions 1/9/11 are GEO-series specific - confirm
# against names(phenotype_data) if the series matrix ever changes.
Ethnicity<-phenotype_data_subset[1]
colnames(Ethnicity)<-"Ethnicity"
Tissue<-phenotype_data_subset[1]
colnames(Tissue)<-"Tissue"
Age<-phenotype_data_subset[9]
colnames(Age)<-"Age"
Diagnosis<-phenotype_data_subset[11]
colnames(Diagnosis)<-"Diagnosis"
Gender<-phenotype_data_subset[1]
colnames(Gender)<-"Gender"
#standardise Ethnicity (not recorded for this series)
Ethnicity$Ethnicity<-as.character(Ethnicity$Ethnicity)
table(Ethnicity)
Ethnicity[1]<-"Unknown"
table(Ethnicity)
#standardise Age (not recorded for this series)
table(Age)
Age[1]<-"Unknown"
table(Age)
#standardise diagnosis - case control
Diagnosis$Diagnosis<-as.character(Diagnosis$Diagnosis)
table(Diagnosis)
#Diagnosis[grep("control", Diagnosis[,1]),]<-"control"
Diagnosis[grep("samples collection: on the 1st day of MI \\(admission\\)", Diagnosis[,1]),]<-"case"
table(Diagnosis)
#standardise tissue
Tissue$Tissue<-as.character(Tissue$Tissue)
table(Tissue)
Tissue[1]<-"blood"
table(Tissue)
#standardise Gender (not recorded for this series; predicted later by massiR)
Gender$Gender<-as.character(Gender$Gender)
table(Gender)
#Gender[grep("Female", Gender[,1]),]<-"female"
#Gender[grep("Male", Gender[,1]),]<-"male"
Gender[1]<-"Unknown"
table(Gender)
##### SUBSET EXPRESSION #####
# (the original wrapped exprs() in a no-op subset() call)
expression_data_subset<-exprs(expression_data2)
expression_data_subset<-expression_data_subset[,colnames(expression_data_subset) %in% rownames(phenotype_data_subset)]
# check same number of samples
ncol(expression_data_subset)==nrow(phenotype_data_subset)
ncol(expression_data_subset)
##### PLOTS OF RAW DATA #####
# Each plot is drawn twice: once to the interactive device, once into a PNG.
setwd(boxplots_density_plots_dir)
boxplot(expression_data_subset)
png(file="raw_data_boxplot.png")
boxplot(expression_data_subset)
dev.off()
plotDensity(expression_data_subset, logMode=F, addLegend=F)
png(file="raw_data_density_plot.png")
plotDensity(expression_data_subset, logMode=F, addLegend=F)
dev.off()
##### PRE-PROCESS - RAW ######
#
# #background correct
#
# expression_data_background_corrected<-mas5(expression_data_subset)
#
# #normalise
#
# expression_data_normalised<-rsn(log2(expression_data_subset))
#
# # set negative values to zero
#
# expression_data_normalised<-expression_data_normalised
# expression_data_normalised[expression_data_normalised<0]<-0
#
#convert to data.frame
# NOTE(review): the GEO matrix is already processed, so no transformation is
# applied here; the "_normalised_" name is kept for downstream compatibility.
expression_data_normalised_as_data_frame<-as.data.frame(expression_data_subset)
##### PRE-PROCESS - PROCESSED DATA #####
head(expression_data_normalised_as_data_frame)[1:5]
##### PLOTS OF PRE_PROCESSED DATA #####
# setwd(boxplots_density_plots_dir)
#
# boxplot(expression_data_normalised_as_data_frame)
# pdf(file="pre-processed_data_boxplot.pdf")
# boxplot(expression_data_normalised_as_data_frame)
# dev.off()
#
# plotDensity(expression_data_normalised_as_data_frame, logMode=F, addLegend=F)
# pdf(file="pre-processed_data_density_plot.pdf")
# plotDensity(expression_data_normalised_as_data_frame, logMode=F, addLegend=F)
# dev.off()
#
# setwd(work_dir)
##### CHECK FOR DUPLICATE SAMPLES IDs #####
# anyDuplicated() returns 0 when no sample id is duplicated
anyDuplicated(rownames(phenotype_data_subset))
##### PCA PLOT 1 #####
# Draws four PCA scatter plots of the samples (PC1 vs PC2), coloured in turn
# by Diagnosis, Gender, Ethnicity and Age.
#   data            - probes x samples expression matrix/data.frame
#   legend_position - keyword passed to legend(), e.g. 'bottomright'
#   diagnosis/gender/ethnicity/age - one-column data.frames (rownames =
#     sample ids; columns named Diagnosis/Gender/Ethnicity/Age).
# Improvement: the phenotype frames are now explicit parameters whose
# defaults are the script-level globals, so existing calls are unchanged
# while the function no longer silently depends on hidden globals.
pca_data<-function(data, legend_position,
                   diagnosis = Diagnosis, gender = Gender,
                   ethnicity = Ethnicity, age = Age){
  #run PCA (samples are columns, so their coordinates live in pca$rotation)
  pca<-prcomp(data)
  # order of samples in expression data
  sample_order<-colnames(data)
  # merge pheno info together
  pheno<-cbind(diagnosis, gender, ethnicity, age)
  # subset pca_pheno to match expression data, then match sample order
  pca_pheno<-subset(pheno, rownames(pheno) %in% colnames(data))
  ordered_pca_pheno<-pca_pheno[match(sample_order, rownames(pca_pheno)),]
  Diagnosis_pca_colour<-labels2colors(as.character(ordered_pca_pheno$Diagnosis))
  Gender_pca_colour<-labels2colors(as.character(ordered_pca_pheno$Gender))
  Ethnicity_pca_colour<-labels2colors(as.character(ordered_pca_pheno$Ethnicity))
  Age_pca_colour<-labels2colors(as.character(ordered_pca_pheno$Age))
  # pca plot - Diagnosis
  plot(pca$rotation[,1:2], main=" PCA plot coloured by Diagnosis",col="black", pch=21,bg=Diagnosis_pca_colour)
  legend(legend_position, legend=unique(ordered_pca_pheno$Diagnosis), fill=unique(Diagnosis_pca_colour), title="Diagnosis")
  # pca plot - Gender
  plot(pca$rotation[,1:2], main=" PCA plot coloured by Clinical Gender",col="black", pch=21,bg=Gender_pca_colour)
  legend(legend_position, legend=unique(ordered_pca_pheno$Gender), fill=unique(Gender_pca_colour), title="Gender")
  # pca plot - Ethnicity
  plot(pca$rotation[,1:2], main=" PCA plot coloured by Ethnicity",col="black", pch=21,bg=Ethnicity_pca_colour)
  legend(legend_position, legend=unique(ordered_pca_pheno$Ethnicity), fill=unique(Ethnicity_pca_colour), title="Ethnicity")
  # pca plot - Age
  plot(pca$rotation[,1:2], main=" PCA plot coloured by Age",col="black", pch=21,bg=Age_pca_colour)
  legend(legend_position, legend=unique(ordered_pca_pheno$Age), fill=unique(Age_pca_colour), title="Age")
}
#apply function - PCA of all samples before any QC, to screen and to PDF
pca_data(expression_data_normalised_as_data_frame, 'bottomright')
#plot to pdf
setwd(pca_dir)
pdf("1.PCA_plot_before_QC.pdf")
pca_data(expression_data_normalised_as_data_frame, 'bottomright')
dev.off()
##### GENDER CHECK #####
# Predict each sample's sex from Y-chromosome probe expression with massiR,
# then compare against the clinical annotation.
table(Gender)
# get Y choromosome genes
data(y.probes)
names(y.probes)
# which chip has most probes in common with this dataset (massiR chip choice)
for (x in names(y.probes)) {
  y_chromo_probes <- data.frame(y.probes[x])
  count_yes<-rownames(y_chromo_probes)%in%rownames(expression_data_normalised_as_data_frame)
  print(paste(x, length(count_yes[count_yes=="TRUE"])))
}
massi_R_chip
y_chromo_probes <- data.frame(y.probes[massi_R_chip])
# extract Y chromosome genes from dataset
eset.select.out <- massi_select(expression_data_normalised_as_data_frame, y_chromo_probes)
#
massi_y_plot(eset.select.out)
# get probes with data (threshold = 4 keeps the top quantile of Y probes)
massi.select.out <-massi_select(expression_data_normalised_as_data_frame, y_chromo_probes, threshold=4)
# check
head(massi.select.out)[,1:5]
# run gender predict
eset.results <- massi_cluster(massi.select.out)
# check results for bad probes
massi_cluster_plot(massi.select.out, eset.results)
#extract gender prediction (columns 1 = sample id, 5 = predicted sex)
predicted_gender<-(eset.results$massi.results)[c(1,5)]
rownames(predicted_gender)<-predicted_gender$ID
predicted_gender$ID<-NULL
colnames(predicted_gender)<-"Predicted_Gender"
#compare to clinical Gender
# merge
gender_comparison<-merge(Gender, predicted_gender, by="row.names")
rownames(gender_comparison)<-gender_comparison$Row.names
gender_comparison$Row.names<-NULL
colnames(gender_comparison)<-c("Clinical_Gender", "Predicted_Gender")
head(gender_comparison)
# gender miss-matches
# NOTE(review): clinical Gender was set to "Unknown" for every sample above,
# so this table will flag all samples; only the prediction is used downstream.
Gender_Missmatch<-gender_comparison[gender_comparison$Clinical_Gender!=gender_comparison$Predicted_Gender,]
Gender_Missmatch
# check sex bias - should have at least 15% male samples and minimum 6 samples
dip.result <- massi_dip(eset.select.out)
# # dip test <0.08 - sex bias - change gender to unknown
# gender_comparison<-Gender
# gender_comparison$Predicted_Gender<-"Unknown"
# colnames(gender_comparison)[1]<-"Clinical_Gender"
# #separae male/female IDs - use predicted
female_samples<-subset(gender_comparison, Predicted_Gender=="female")
male_samples<-subset(gender_comparison, Predicted_Gender=="male")
#separae male/female IDs - use Clinical (use if dip.test<=0.08)
# female_samples<-subset(gender_comparison, Clinical_Gender=="female")
# male_samples<-subset(gender_comparison, Clinical_Gender=="male")
head(female_samples)
head(male_samples)
table(female_samples)
table(male_samples)
##### PROBE ID DETECTION #####
# separate case control - Factor.Value..disease. column
case_ID<-rownames(subset(Diagnosis, Diagnosis=="case"))
case_ID
case_exprs<-expression_data_normalised_as_data_frame[,colnames(expression_data_normalised_as_data_frame)%in%case_ID]
head(case_exprs)
dim(case_exprs)
# separate by gender (predicted sex from the massiR step)
case_exprs_F<-case_exprs[colnames(case_exprs)%in%rownames(female_samples)]
case_exprs_M<-case_exprs[colnames(case_exprs)%in%rownames(male_samples)]
# Detect "expressed" probes per group using a per-sample percentile cut-off
# (Probe_Detection_Threshold, here 0.6 rather than the 0.9 the original
# comment claimed).
extract_good_probe_list<-function(dataset, probe_percentile_threshold) {
  # dataset - expression dataset as dataframe (probes x samples)
  # probe_percentile_threshold - percentile at which to use as cut-off for detected probes
  # number of samples in which probe must be expressed in - fixed at 0.8 - i.e 80% of samples
  # Returns the rownames (probe ids) of probes detected in >= 80% of samples.
  # calculate quantile threshold for each sample
  sample_quantiles<-apply(dataset, 2, quantile, probs=c(probe_percentile_threshold))
  # count length of quantile - will be number of samples
  number_of_samples<-length(sample_quantiles)
  # Mark a probe NA in a sample when its expression is AT OR ABOVE that
  # sample's percentile cut-off - NA therefore flags a "detected" probe.
  # (The original comment said "below", contradicting the >= test.)
  for (x in 1:number_of_samples) {
    is.na(dataset[x]) <- dataset[x] >= sample_quantiles[x]
  }
  # convert to dataframe
  dataset_dataframe<-as.data.frame(dataset)
  # count number of NAs (= detections) per probe
  dataset_count<-as.data.frame(rowSums(is.na(dataset_dataframe)))
  colnames(dataset_count)<-"count"
  # keep probes detected in at least 80% of the samples
  good_probes<-rownames(subset(dataset_count, dataset_count$count >= (number_of_samples*0.8)))
  #print threshold used
  print(as.data.frame(sample_quantiles))
  boxplot(as.data.frame(sample_quantiles))
  # return good probes
  return(good_probes)
}
# apply function to samples (per sex, so sex-specific genes are not lost)
case_exprs_F_expressed_probes_list<-extract_good_probe_list(case_exprs_F, Probe_Detection_Threshold)
length(case_exprs_F_expressed_probes_list)
case_exprs_M_expressed_probes_list<-extract_good_probe_list(case_exprs_M, Probe_Detection_Threshold)
length(case_exprs_M_expressed_probes_list)
# merge the good-probe lists from both sexes, sort and keep unique values
good_probe_list<-unique(sort(c(case_exprs_F_expressed_probes_list,
                               case_exprs_M_expressed_probes_list)))
length(good_probe_list)
# extract good probes from dataset
data_exprs_good_probes<-expression_data_normalised_as_data_frame[rownames(expression_data_normalised_as_data_frame)%in%good_probe_list,]
data_case_exprs_good_probes<-case_exprs[rownames(case_exprs)%in%good_probe_list,]
head(data_exprs_good_probes)[1:5]
dim(expression_data_normalised_as_data_frame)
dim(data_exprs_good_probes)
dim(data_case_exprs_good_probes)
##### PROBE DETECTION THRESHOLD PLOTS #####
# using dataframe before probe removal
# Look up the chip's probe -> gene-symbol map. Fixed: the original built the
# object name with eval(parse(text = ...)); get() performs the same by-name
# lookup without parsing code from strings.
symbol_map <- get(paste0(expression_chip, "SYMBOL"))
# probes that actually map to a gene symbol
Gene_symbols_probes <- mappedkeys(symbol_map)
# Convert to a data.frame of (probe_id, symbol)
Gene_symbols <- as.data.frame(symbol_map[Gene_symbols_probes])
head(Gene_symbols)
dim(Gene_symbols)
#Expressed in females only
XIST_probe_ID<-subset(Gene_symbols, symbol=="XIST")
XIST_probe_ID
#Expressed in Males only
PRKY_probe_ID<-subset(Gene_symbols, symbol=="PRKY")
PRKY_probe_ID
RPS4Y1_probe_ID<-subset(Gene_symbols, symbol=="RPS4Y1")
RPS4Y1_probe_ID
KDM5D_probe_ID<-subset(Gene_symbols, symbol=="KDM5D")
KDM5D_probe_ID
USP9Y_probe_ID<-subset(Gene_symbols, symbol=="USP9Y")
USP9Y_probe_ID
UTY_probe_ID<-subset(Gene_symbols, symbol=="UTY")
UTY_probe_ID
# Housekeeping genes expressed in all cells + males + females
MKRN1_probe_ID<-subset(Gene_symbols, symbol=="MKRN1")
MKRN1_probe_ID
ADIPOR1_probe_ID<-subset(Gene_symbols, symbol=="ADIPOR1")
ADIPOR1_probe_ID
BNIP3L_probe_ID<-subset(Gene_symbols, symbol=="BNIP3L")
BNIP3L_probe_ID
#RNF10_probe_ID<-subset(Gene_symbols, symbol=="RNF10")
#RNF10_probe_ID
# merge all marker genes into one probe table
gene_list<-rbind(XIST_probe_ID,
                 PRKY_probe_ID,
                 RPS4Y1_probe_ID,
                 KDM5D_probe_ID,
                 USP9Y_probe_ID,
                 UTY_probe_ID,
                 MKRN1_probe_ID,
                 ADIPOR1_probe_ID,
                 BNIP3L_probe_ID)
# RNF10_probe_ID)
gene_list
# lookup table: is each marker gene housekeeping ("All") or sex-specific?
gene_table<-read.table(text =
                         "Gene Expressed_in
                         ADIPOR1 All
                         BNIP3L All
                         KDM5D Males
                         MKRN1 All
                         PRKY Males
                         RPS4Y1 Males
                         USP9Y Males
                         UTY Males
                         XIST Females" , header=TRUE)
gene_table
#create function to plot
# Boxplot + jitter of the sex-specific and housekeeping marker genes per
# sample group, with a horizontal line at the mean per-sample detection
# threshold, plus the gene_table legend underneath.
#   Expression_table - probes x samples expression data.frame
#   Gender           - one-column data.frame of sample sex labels
#   genes_to_extract - data.frame with probe_id/symbol columns (gene_list)
#   threshold        - detection percentile (Probe_Detection_Threshold)
#   boxplot_title    - plot title
# NOTE(review): the renaming loop indexes the *global* `gene_list`, and the
# legend uses the *global* `gene_table`, rather than `genes_to_extract` -
# works only because the caller passes gene_list; confirm before reuse.
plot_gender_specific_genes<-function(Expression_table, Gender, genes_to_extract, threshold, boxplot_title){
  #extract genes of interest (transposed: samples x marker probes)
  Expression_table_gene_check<-as.data.frame(t(Expression_table[rownames(Expression_table)%in% genes_to_extract$probe_id,]))
  # change colnames TO GENE SYMBOL using genes to extract file
  for (x in 1:dim(Expression_table_gene_check)[2]){
    colnames(Expression_table_gene_check)[x]<-gene_list[genes_to_extract$probe_id==colnames(Expression_table_gene_check)[x],2]
  }
  # add in gender information
  Expression_table_gene_check_gender<-merge(Gender, Expression_table_gene_check, by="row.names")
  rownames(Expression_table_gene_check_gender)<-Expression_table_gene_check_gender$Row.names
  Expression_table_gene_check_gender$Row.names<-NULL
  #melt dataframe for plot
  Expression_table_gene_check_gender_melt<-melt(Expression_table_gene_check_gender, by=Gender)
  # change variable column from factor to character
  Expression_table_gene_check_gender_melt$variable<-as.character(Expression_table_gene_check_gender_melt$variable)
  # order dataframe by variable
  Expression_table_gene_check_gender_melt<-Expression_table_gene_check_gender_melt[order(Expression_table_gene_check_gender_melt$variable),]
  # calculate user defined percentile threshold per sample
  sample_quantiles<-apply(Expression_table, 2, quantile, probs=threshold)
  # mean of user defined threshold across samples
  mean_threshold=mean(sample_quantiles)
  #plot
  plot1<-qplot(variable, value, colour=get(colnames(Gender)), data = Expression_table_gene_check_gender_melt, geom = c("boxplot", "jitter")) +
    geom_hline(yintercept = mean_threshold) +
    ggtitle(boxplot_title) +
    theme(text = element_text(size=20), axis.text.x = element_text(angle=45, hjust=1)) +
    labs(x="Gene",y="Expression", colour = colnames(Gender))
  # 2nd legend (gene_table rendered as a grid table below the plot)
  plot2<-tableGrob(gene_table, rows=NULL)
  # plot
  grid.arrange(plot1, plot2,
               nrow=2,
               heights=c(3,1))
}
# plot marker-gene detection check (screen), then again into a PDF
plot_gender_specific_genes(case_exprs, gender_comparison[2], gene_list, Probe_Detection_Threshold, paste(dataset, "case_samples", sep="_"))
setwd(probe_detection_plots_dir)
# pdf("Probe_detection_threshold_based_on_CLINICAL_gender_specific_and_house_keeping_genes.pdf", height=15, width = 12)
# plot_gender_specific_genes(case_exprs, gender_comparison[1], gene_list, Probe_Detection_Threshold, paste(dataset, "case_samples", sep="_"))
# dev.off()
# if predicted gender available (gender_comparison column 2)
pdf("Probe_detection_threshold_based_on_PREDICTED_gender_specific_and_house_keeping_genes.pdf", height=15, width = 12)
plot_gender_specific_genes(case_exprs, gender_comparison[2], gene_list, Probe_Detection_Threshold, paste(dataset, "case_samples", sep="_"))
dev.off()
##### PCA PLOT 2 #####
# PCA after probe-detection filtering, to screen and to PDF
#plot to pdf
setwd(pca_dir)
pca_data(data_exprs_good_probes, 'bottomright')
pdf("2.PCA_plot_after_probe_detection.pdf")
pca_data(data_exprs_good_probes, 'bottomright')
dev.off()
##### SAMPLE NETWORK PLOT #####
# split data by disease + gender (predicted sex)
data_case_exprs_good_probes_M<-data_case_exprs_good_probes[,colnames(data_case_exprs_good_probes)%in%rownames(male_samples)]
data_case_exprs_good_probes_F<-data_case_exprs_good_probes[,colnames(data_case_exprs_good_probes)%in%rownames(female_samples)]
# sample plot function - taken from steve expression pipeline
# Draws a 2x2 WGCNA-style sample-network QC panel for a probes x samples
# matrix: (1) dendrogram of 1 - ISA distances, (2) standardised connectivity
# Z.K per sample (cut-off lines at -2 and -3), (3) Z.K vs Z.C scatter with
# regression line, (4) colour legend. Side effects only (plots to the active
# device).
# NOTE(review): the gp_col == "chip" branch references `eset`, which is not
# defined in this script - dead code here since gp_col is hard-coded to
# "group". `samle_names` is a typo kept from the source pipeline, and colLab
# is assigned with <<- (leaks into the calling environment by design).
sampleNetwork_plot <- function(datExprs) {
  diagnosis<-rep("sample", length(colnames(datExprs)))
  gp_col <- "group"
  cat(" setting up data for qc plots","\r","\n")
  ## expression matrix and IAC (inter-array correlation)
  cat(" expression matrix and IAC","\r","\n")
  IAC <- cor(datExprs)
  IAC_d <- 1-IAC
  samle_names <- colnames(datExprs)
  IAC=cor(datExprs, method="p",use="p")
  diag(IAC)=0
  A.IAC=((1+IAC)/2)^2 ## ADJACENCY MATRIX
  cat(" fundamentalNetworkConcepts","\r","\n")
  FNC=fundamentalNetworkConcepts(A.IAC) ## WGCNA
  K2=FNC$ScaledConnectivity
  Z.K=(K2-mean(K2))/sd(K2)
  Z.C=(FNC$ClusterCoef-mean(FNC$ClusterCoef))/sd(FNC$ClusterCoef)
  rho <- signif(cor.test(Z.K,Z.C,method="s")$estimate,2)
  rho_pvalue <- signif(cor.test(Z.K,Z.C,method="s")$p.value,2)
  # set colours
  cat(" colorvec [",paste(gp_col),"]","\r","\n")
  if(gp_col=="chip") { colorvec <- labels2colors(as.character(pData(eset)$Sentrix.Barcode)) }
  if(gp_col=="group") { colorvec <- labels2colors(diagnosis[1]) }
  mean_IAC <- mean(IAC[upper.tri(IAC)])
  ## samplenetwork: dendrogram leaf-colouring callback (note the <<-)
  local(
    {colLab <<- function(n,treeorder) {
      if(is.leaf(n)) {
        a <- attributes(n)
        i <<- i+1
        attr(n, "nodePar") <- c(a$nodePar, list(lab.col = colorvec[treeorder][i], lab.font = i%%3))
      }
      n
    }
    i <- 0
    })
  cat(" begin SampleNetwork plots","\r","\n")
  group_colours<-unique(cbind(colorvec, diagnosis))
  ## Cluster for pics
  cluster1 <- hclust(as.dist(1-A.IAC),method="average")
  cluster1order <- cluster1$order
  cluster2 <- as.dendrogram(cluster1,hang=0.1)
  cluster3 <- dendrapply(cluster2,colLab,cluster1order)
  ## PLOTS
  ## cluster IAC
  par(mfrow=c(2,2))
  par(mar=c(5,6,4,2))
  plot(cluster3,nodePar=list(lab.cex=1,pch=NA),
       main=paste("Mean ISA = ",signif(mean(A.IAC[upper.tri(A.IAC)]),3),sep=""),
       xlab="",ylab="1 - ISA",sub="",cex.main=1.8,cex.lab=1.4)
  mtext(paste("distance: 1 - ISA ",sep=""),cex=0.8,line=0.2)
  ## Connectivity
  par(mar=c(5,5,4,2))
  plot(Z.K,main="Connectivity", ylab="Z.K",xaxt="n",xlab="Sample",type="n",cex.main=1.8,cex.lab=1.4)
  text(Z.K,labels=samle_names,cex=0.8,col=colorvec)
  abline(h=-2)
  abline(h=-3)
  par(mar=c(5,5,4,2))
  plot(Z.K,Z.C,main="Connectivity vs ClusterCoef",xlab="Z.K",ylab="Z.C",col=colorvec ,cex.main=1.8,cex.lab=1.4)
  abline(lm(Z.C~Z.K),col="black",lwd=2)
  mtext(paste("rho = ",signif(cor.test(Z.K,Z.C,method="s")$estimate,2)," p = ",signif(cor.test(Z.K,Z.C,method="s")$p.value,2),sep=""),cex=0.8,line=0.2)
  abline(v=-2,lty=2,col="grey")
  abline(h=-2,lty=2,col="grey")
  ##blank plot for legend
  par(mar=c(5,5,4,2))
  plot(1, type="n", axes=F, xlab="", ylab="")
  legend(0.6, 1.4, unique(diagnosis[1]), fill=unique(colorvec))
} #taken from steves expression pipeline
# Identify outlier samples: columns of `datExprs` whose standardised network
# connectivity (Z.K, WGCNA sample-network convention) falls below `threshold`.
#   datExprs  - probes x samples expression matrix/data.frame with colnames
#   threshold - Z.K cut-off (e.g. -3); samples strictly below are outliers
# Returns a character vector of outlier sample names (empty if none).
# Changes vs original: unused local `n_outliers` removed; the
# x[x == TRUE] / names() pair replaced with the equivalent names(which(...)).
names_of_outliers<-function(datExprs, threshold){
  # inter-array correlation and signed adjacency
  IAC = cor(datExprs, method = "p", use = "p")
  diag(IAC) = 0
  A.IAC = ((1 + IAC)/2)^2 ## ADJACENCY MATRIX
  # fundamentalNetworkConcepts
  FNC = fundamentalNetworkConcepts(A.IAC) ## WGCNA
  K2 = FNC$ScaledConnectivity
  Z.K = round((K2 - mean(K2))/sd(K2), 3)
  # sample names whose Z.K is below the cut-off
  names(which(Z.K < threshold))
}
# create function to run network analysis on each expression dataset, plot and remove bad samples
# Iteratively removes samples whose Z.K connectivity is below `threshold`,
# re-computing the network after each removal until no sample falls below the
# cut-off. Plots the sample network before, after the first removal, and at
# the end; prints the total number of samples removed; returns the cleaned
# probes x samples data.
run_sample_network_plot<-function(dataset, threshold){
  #sample network plot (before any removal)
  sampleNetwork_plot(dataset)
  #identify sample below Z.K threshold
  dataset_removal_1<-names_of_outliers(dataset, threshold)
  #remove samples with ZK below threshold
  dataset_QC<-dataset[,!(colnames(dataset)%in%dataset_removal_1)]
  #sample network plot
  sampleNetwork_plot(dataset_QC)
  #record samples removed so far
  count<-dataset_removal_1
  # reiterate above till no samples fall below threshold
  while (length(dataset_removal_1)>0) {
    # remove bad samples - 1st iteration removes none (already removed above)
    dataset_QC<-dataset_QC[,!(colnames(dataset_QC)%in%dataset_removal_1)]
    #identify sample below Z.K threshold on the reduced data
    dataset_removal_1<-names_of_outliers(dataset_QC, threshold)
    #record samples removed
    count<-c(count, dataset_removal_1)
  }
  #final network plot
  sampleNetwork_plot(dataset_QC)
  # print to screen number of samples removed
  cat("\n")
  print(c("Total number of samples removed...", length(count)))
  # return clean expression set
  return(dataset_QC)
}
# run the iterative sample-network QC separately for each gender group
# (NOTE(review): comment previously said "entorhinal Cortex"; this dataset is blood)
data_case_exprs_good_probes_M_QC<-run_sample_network_plot(data_case_exprs_good_probes_M, sample_network_ZK_threshold)
data_case_exprs_good_probes_F_QC<-run_sample_network_plot(data_case_exprs_good_probes_F, sample_network_ZK_threshold)
##### PLOT SAMPLE NETWORK ANALYSIS TO PDF #####
# rerun the same QC with a PDF device open so the plots are captured to file
setwd(sample_network_dir)
pdf("case_males_sample_network_analysis.pdf")
data_case_exprs_good_probes_M_QC<-run_sample_network_plot(data_case_exprs_good_probes_M, sample_network_ZK_threshold)
dev.off()
# NOTE(review): "feamles" in this file name is a typo, but renaming the output
# would break anything that expects the current name - left unchanged
pdf("case_feamles_sample_network_analysis.pdf")
data_case_exprs_good_probes_F_QC<-run_sample_network_plot(data_case_exprs_good_probes_F, sample_network_ZK_threshold)
dev.off()
##### CREATE QC'd DATASET #####
# extract sample ID's from QC'd sample network file
# check rownames (probes) agree in both gender dataframes - should be TRUE
all(rownames(data_case_exprs_good_probes_M_QC)==rownames(data_case_exprs_good_probes_F_QC))
# transpose (samples become rows) and stack the two gender groups
expression_data_QCd<-rbind(t(data_case_exprs_good_probes_M_QC),
t(data_case_exprs_good_probes_F_QC))
dim(expression_data_QCd)
dim(t(expression_data_normalised_as_data_frame))
##### PCA PLOT 3 #####
setwd(pca_dir)
pca_data(as.data.frame(t(expression_data_QCd)), 'bottomright')
pdf("3.PCA_plot_after_sample_removal.pdf")
pca_data(as.data.frame(t(expression_data_QCd)), 'bottomright')
dev.off()
##### CONVERT PROBE ID TO ENTREZ ID #####
# Get the probe identifiers that are mapped to an ENTREZ Gene ID using the
# annotation package named by `expression_chip`
# NOTE(review): eval(parse(...)) builds the annotation object name dynamically;
# get(paste0(expression_chip, "ENTREZID")) would be the safer idiom - confirm
mapped_probes <- mappedkeys(eval(parse(text = paste(expression_chip, "ENTREZID", sep=""))))
# Convert the probe -> Entrez map to a data frame
probe_entrez_mapping <- as.data.frame(eval(parse(text = paste(expression_chip, "ENTREZID", sep="")))[mapped_probes])
# reorder columns to (entrezgene, probe_id)
probe_entrez_mapping<-probe_entrez_mapping[c(2,1)]
colnames(probe_entrez_mapping)[1]<-"entrezgene"
head(probe_entrez_mapping)
dim(probe_entrez_mapping)
#check any duplicated probe IDs
anyDuplicated(probe_entrez_mapping$probe_id)
#check any duplicated entrezgene IDs
anyDuplicated(probe_entrez_mapping$entrezgene)
# create convert_probe_id_to_entrez_id function
# Re-key an expression table's probe-ID columns to Entrez gene IDs.
#
# expression_dataset: samples x probes table (probe IDs as column names)
# probe_mapping_file: data frame with columns `probe_id` and `entrezgene`
#
# Returns the table restricted to mapped probes, with the surviving columns
# renamed to their Entrez IDs (duplicate Entrez IDs are possible when several
# probes map to the same gene).
convert_probe_id_to_entrez_id <- function(expression_dataset, probe_mapping_file){
# make sure we are working with a plain data frame
expr_df<-as.data.frame(expression_dataset)
# drop probe columns that have no Entrez mapping
expr_df<-expr_df[colnames(expr_df)%in%probe_mapping_file$probe_id]
# swap each surviving probe ID for its mapped Entrez gene ID
entrez_idx<-match(colnames(expr_df), probe_mapping_file$probe_id)
colnames(expr_df)<-probe_mapping_file$entrezgene[entrez_idx]
return(expr_df)
}
# map the QC'd expression table's probe columns to Entrez IDs
expression_data_QCd_entrez_id<-convert_probe_id_to_entrez_id(expression_data_QCd, probe_entrez_mapping)
# sanity checks: dimensions before/after and how many Entrez IDs are duplicated
dim(expression_data_QCd)
dim(expression_data_QCd_entrez_id)
length(which(duplicated(colnames(expression_data_QCd_entrez_id))))
head(expression_data_QCd_entrez_id[100:110])
##### COLLAPSE MULTIPLE ENTREZ ID BY SELECTING ONE WITH HIGHEST AVERAGE EXPRESSION ACROSS SAMPLES ######
## create function
# Collapse duplicated Entrez-ID columns, keeping for each gene the single
# probe with the highest mean expression across all samples.
#
# x: samples x probes data frame whose column names are Entrez IDs;
#    when transposed, duplicate names are uniquified ("100", "100.1", ...),
#    which is why the integer ID is recovered with trunc(as.numeric(...)).
#
# Returns a samples x genes data frame with exactly one column per Entrez ID.
select_duplicate_probe_by_top_expr <- function(x) {
# flip to probes-as-rows; duplicate names become "100", "100.1", ...
by_probe<-as.data.frame(t(x))
# mean expression of every probe across the samples
probe_means<-rowMeans(by_probe)
# recover the integer Entrez ID from the (possibly suffixed) row name
entrez_ids<-trunc(as.numeric(as.character(rownames(by_probe))))
# sort by gene, then by decreasing mean expression within each gene
ordering<-order(entrez_ids, -probe_means)
by_probe<-by_probe[ordering, , drop = FALSE]
entrez_ids<-entrez_ids[ordering]
# the first row of each gene group is its best-expressed probe
best<-!duplicated(entrez_ids)
by_probe<-by_probe[best, , drop = FALSE]
rownames(by_probe)<-entrez_ids[best]
# flip back to the samples-as-rows orientation
return(as.data.frame(t(by_probe)))
}
## collapse duplicated Entrez IDs - afterwards no duplicated column names remain
expression_data_QCd_entrez_id_unique<-select_duplicate_probe_by_top_expr(expression_data_QCd_entrez_id)
dim(expression_data_QCd_entrez_id_unique)
# should now be 0
length(which(duplicated(colnames(expression_data_QCd_entrez_id_unique))))
head(expression_data_QCd_entrez_id_unique[1:5])
##### ATTACH DIAGNOSIS AND data REGION #####
# create function to merge multiple dataframes on their row names
# Inner-join two data frames on their row names, restoring the row names on
# the result (merge() moves them into a "Row.names" column, which is removed).
# Intended for use with Reduce() to chain several annotation tables together.
MyMerge <- function(x, y){
joined <- merge(x, y, by= "row.names", all.x= F, all.y= F)
# put the join key back where it belongs and drop the helper column
rownames(joined) <- joined$Row.names
joined$Row.names <- NULL
return(joined)
}
# create phenotype info to attach - diagnosis + gender + Age + Ethnicity + Tissue
phenotype_to_attach<-Reduce(MyMerge, list(Diagnosis, gender_comparison, Age, Ethnicity, Tissue))
dim(phenotype_to_attach)
head(phenotype_to_attach)
# attach pheno to exprs table (merge on sample IDs held in the row names)
expression_data_QCd_entrez_id_unique_pheno<-merge(phenotype_to_attach, expression_data_QCd_entrez_id_unique, by="row.names")
rownames(expression_data_QCd_entrez_id_unique_pheno)<-expression_data_QCd_entrez_id_unique_pheno$Row.names
expression_data_QCd_entrez_id_unique_pheno$Row.names<-NULL
head(expression_data_QCd_entrez_id_unique_pheno)[1:10]
# rows should be same in exprs table - should be TRUE
dim(expression_data_QCd_entrez_id_unique)[1]==dim(expression_data_QCd_entrez_id_unique_pheno)[1]
##### CONVERT ALL GENES TO entrez ID #####
# repeat the Entrez conversion on the FULL (pre probe-filter) expression table
full_expression_data_QCd_entrez_id<-convert_probe_id_to_entrez_id(t(expression_data_normalised_as_data_frame), probe_entrez_mapping)
length(which(duplicated(colnames(full_expression_data_QCd_entrez_id))))
# remove duplicate entrez
full_expression_data_QCd_entrez_id_unique<-select_duplicate_probe_by_top_expr(full_expression_data_QCd_entrez_id)
dim(full_expression_data_QCd_entrez_id_unique)
length(which(duplicated(colnames(full_expression_data_QCd_entrez_id_unique))))
# attach diagnosis
# attach pheno to exprs table
full_expression_data_QCd_entrez_id_unique_pheno<-merge(phenotype_to_attach, full_expression_data_QCd_entrez_id_unique, by="row.names")
rownames(full_expression_data_QCd_entrez_id_unique_pheno)<-full_expression_data_QCd_entrez_id_unique_pheno$Row.names
full_expression_data_QCd_entrez_id_unique_pheno$Row.names<-NULL
head(full_expression_data_QCd_entrez_id_unique_pheno)[1:10]
#subset to samples that survived the final QC
full_expression_data_QCd_entrez_id_unique_pheno<-full_expression_data_QCd_entrez_id_unique_pheno[rownames(full_expression_data_QCd_entrez_id_unique_pheno) %in% rownames(expression_data_QCd_entrez_id_unique),]
# rows should be same in exprs table - should be TRUE
dim(expression_data_QCd_entrez_id_unique)[1]==dim(full_expression_data_QCd_entrez_id_unique_pheno)[1]
#
dim(expression_data_QCd_entrez_id_unique)[1]
dim(full_expression_data_QCd_entrez_id_unique_pheno)[1]
# save the full (all detected probes) QC'd dataset
setwd(clean_data_dir)
saveRDS(full_expression_data_QCd_entrez_id_unique_pheno, file=paste(dataset, "QCd_FULL.RDS", sep="_"))
##### SUMMARY #####
# NOTE(review): `control_ID` is never defined in this script (this dataset has
# no controls, see header notes) - the two "Control Number" lines below will
# error unless control_ID is defined; confirm intended
print(c("dataset:", dataset), quote=F)
print(c("Disease:", disease), quote=F)
print(c("Microarray Platform:", Microarray_platform), quote=F)
print(c("Expression Chip:", expression_chip), quote=F)
print(c("Data Format:", Data_format), quote=F)
print(c("Tissue:", as.character(Tissue[1,1])), quote=F)
print(c("Case Number:", length(case_ID)), quote=F)
print(c("Control Number:", length(control_ID)), quote=F)
print(c("Probe Detection Threshold:", Probe_Detection_Threshold), quote=F)
print(c("Gender-Missmatch:", dim(Gender_Missmatch)[1]), quote=F)
print(c("Samples Removed:", dim(expression_data_normalised_as_data_frame)[2]-dim(expression_data_QCd_entrez_id_unique_pheno)[1]), quote=F)
print(c("Final Case numbers:", length(expression_data_QCd_entrez_id_unique_pheno[expression_data_QCd_entrez_id_unique_pheno$Diagnosis=="case",1])), quote=F)
print(c("Final Control Numbers:", length(expression_data_QCd_entrez_id_unique_pheno[expression_data_QCd_entrez_id_unique_pheno$Diagnosis=="control",1])), quote=F)
print(c("Initial Probe Numbers:", dim(expression_data_normalised_as_data_frame)[1]), quote=F)
# the "-6" drops the six phenotype columns prepended by the merge
print(c("Final Probe Numbers:", dim(expression_data_QCd_entrez_id_unique_pheno)[2]-6), quote=F)
print(c("Final Probe Numbers in full dataset:", dim(full_expression_data_QCd_entrez_id_unique_pheno)[2]-6), quote=F)
#write out summary report (first cat() truncates the file, the rest append)
setwd(work_dir)
cat("Processing Date:", strftime(Sys.Date(),"%Y-%m-%d"), "\n", file="Summary_Report.txt")
cat("Dataset:", dataset, "\n", file="Summary_Report.txt", append=T)
cat("Disease:", disease, "\n", file="Summary_Report.txt", append=T)
cat("Microarray Platform:", Microarray_platform, "\n", file="Summary_Report.txt", append=T)
cat("Expression Chip:", expression_chip, "\n", file="Summary_Report.txt", append=T)
cat("Data Format:", Data_format, "\n", file="Summary_Report.txt", append=T)
cat("Tissue:", as.character(Tissue[1,1]), "\n", file="Summary_Report.txt", append=T)
cat("Case Numbers before QC:", length(case_ID), "\n", file="Summary_Report.txt", append=T)
cat("Control Numbers before QC:", length(control_ID), "\n", file="Summary_Report.txt", append=T)
cat("Probe Detection Threshold:", Probe_Detection_Threshold, "\n", file="Summary_Report.txt", append=T)
cat("Gender-Missmatch:", dim(Gender_Missmatch)[1], "\n", file="Summary_Report.txt", append=T)
cat("Samples Removed:", dim(expression_data_normalised_as_data_frame)[2]-dim(expression_data_QCd_entrez_id_unique_pheno)[1], "\n", file="Summary_Report.txt", append=T)
cat("Case numbers after QC:", length(expression_data_QCd_entrez_id_unique_pheno[expression_data_QCd_entrez_id_unique_pheno$Diagnosis=="case",1]), "\n", file="Summary_Report.txt", append=T)
cat("Control Numbers after QC:", length(expression_data_QCd_entrez_id_unique_pheno[expression_data_QCd_entrez_id_unique_pheno$Diagnosis=="control",1]), "\n", file="Summary_Report.txt", append=T)
cat("Probe Numbers before QC:", dim(expression_data_normalised_as_data_frame)[1], "\n", file="Summary_Report.txt", append=T)
cat("Probe Numbers after QC:", dim(expression_data_QCd_entrez_id_unique_pheno)[2]-6, "\n", file="Summary_Report.txt", append=T)
cat("Final Probe Numbers in full dataset:", dim(full_expression_data_QCd_entrez_id_unique_pheno)[2]-6, "\n", file="Summary_Report.txt", append=T)
##### SAVE EXPRESSION DATAFRAME #####
setwd(clean_data_dir)
saveRDS(expression_data_QCd_entrez_id_unique_pheno, file=paste(dataset, "QCd.RDS", sep="_"))
##### SAVE IMAGE #####
setwd(work_dir)
save.image(file=paste(dataset, "processing_data.Rdata", sep="_"))
| /Data/CD/E-GEOD-59867_processing.R | no_license | hamelpatel/AD_microarray_gene_expression_classifier | R | false | false | 44,831 | r |
##########################################################################################################################################
#### ####
### ###
## ##
# E-GEOD-59867 PROCESSING #
## ##
### ###
#### ####
##########################################################################################################################################
# QC PIPELINE VERSION: AD_classification 1.0 single dataset, single tissue
# DATE: 12/05/2018
# ARRAY EXPRESS NUMBER: E-GEOD-59867
# DISORDER: CAD
# MICROARRAY PLATFORM: Affy
# EXPRESSION CHIP: Exon 1.0ST
# NUMBER OF SAMPLES:
# TISSUE: Whole Blood
#
# NOTES -
# using samples collection: on the 1st day of MI (admission) - no controls
#
##### SET PARAMETERS #####
# start from a clean workspace (this script is run standalone)
rm(list=ls())
# BUG FIX: previously `options=(stringAsFactors=FALSE)`, which only created a
# stray variable called `options`; the intent was to set the global option
# (correct spelling: stringsAsFactors). Note this option is the default and
# ignored from R 4.0 onwards.
options(stringsAsFactors = FALSE)
##### LOAD LIBRARIES ####
library(Biobase)
library(GEOquery)
library(ArrayExpress)
library(affy)
library(lumi)
library(WGCNA)
library(pamr)
library(sva)
library(ggplot2)
library(reshape)
library(massiR)
library(gridExtra)
library(RCurl)
library(XML)
#library(hgu133plus2.db)
#library(hgu133a.db)
#library(hgu133b.db)
library(hugene10sttranscriptcluster.db)
#library(illuminaHumanv4.db)
#library(illuminaHumanv3.db)
##### SET DIRECTORIES ####
work_dir="/media/hamel/Workspace/Dropbox/Projects/AD-classification/1.Data/6.Cardiovascular_Disease/E-GEOD-59867"
setwd(work_dir)
# create directory for raw data
dir.create(paste(work_dir,"Raw_Data", sep="/"))
raw_dir=paste(work_dir,"Raw_Data", sep="/")
# create directory for all Plots
dir.create(paste(work_dir,"Preprocessing_Plots", sep="/"))
plots_dir=paste(work_dir,"Preprocessing_Plots", sep="/")
# create directory for boxplots + density plots
dir.create(paste(plots_dir,"Boxplots_Density_plots", sep="/"))
boxplots_density_plots_dir=paste(plots_dir,"Boxplots_Density_plots", sep="/")
# create directory for PCA plots
dir.create(paste(plots_dir,"PCA_Plots", sep="/"))
pca_dir=paste(plots_dir,"PCA_Plots", sep="/")
# create directory for sample network plots
dir.create(paste(plots_dir,"Sample_Network_Plots", sep="/"))
sample_network_dir=paste(plots_dir,"Sample_Network_Plots", sep="/")
# create directory for non-expressed (probe detection) threshold plots
dir.create(paste(plots_dir,"Probe_Detection_Plots", sep="/"))
probe_detection_plots_dir=paste(plots_dir,"Probe_Detection_Plots", sep="/")
# create directory for QC'd expression data
dir.create(paste(work_dir,"Clean_Data", sep="/"))
clean_data_dir=paste(work_dir,"Clean_Data", sep="/")
# create directory for associated papers
dir.create(paste(work_dir,"Papers", sep="/"))
# Workaround copy of ArrayExpress::getAE() with the download URLs switched to
# https (EBI moved to https, which broke the packaged function at the time).
#
# accession: ArrayExpress accession, e.g. "E-GEOD-59867"
# path:      directory to download/extract into (default: working directory)
# type:      "full", "raw", "processed" or "mageFilesOnly"
# extract:   unzip downloaded archives when TRUE
# local:     when TRUE, read already-downloaded files from `sourcedir`
#            instead of querying the EBI REST API
# sourcedir: directory holding local files (only used when local = TRUE)
#
# Returns a list with the paths of the raw/processed archives and files plus
# the SDRF/IDF/ADF file names (same shape as getAE()).
# Requires RCurl (getURL), XML (xmlTreeParse, xpath*) and the ArrayExpress
# package helpers (read.AnnotatedDataFrame, getSDRFcolumn, extract.zip).
https_getAE <- function (accession, path = getwd(), type = "full", extract = TRUE, local = FALSE, sourcedir = path) {
# remote mode: ask the ArrayExpress REST API which files exist
if (!local) {
baseURL = "https://www.ebi.ac.uk/arrayexpress/xml/v2/files"
xmlURL = getURL(paste(baseURL, accession, sep = "/"))
xml = xmlTreeParse(xmlURL, useInternalNodes = TRUE, isURL=FALSE)
sdrfURL = xpathSApply(xml, "/files/experiment/file[kind='sdrf' and extension='txt']/url",
xmlValue)
sdrfFile = xpathSApply(xml, "/files/experiment/file[kind='sdrf' and extension='txt']/name",
xmlValue)
idfURL = xpathSApply(xml, "/files/experiment/file[kind='idf' and extension='txt']/url",
xmlValue)
idfFile = xpathSApply(xml, "/files/experiment/file[kind='idf' and extension='txt']/name",
xmlValue)
adfURL = xpathApply(xml, "/files/experiment/file[kind='adf' and extension='txt']/url",
xmlValue)
adfFiles = xpathApply(xml, "/files/experiment/file[kind='adf' and extension='txt']/name",
xmlValue)
rawArchiveURL = xpathApply(xml, "/files/experiment/file[kind='raw' and extension='zip']/url",
xmlValue)
procArchiveURL = xpathApply(xml, "/files/experiment/file[kind='processed' and extension='zip']/url",
xmlValue)
}
# local mode: locate the MAGE-TAB files already present in sourcedir
else {
allfiles = list.files(sourcedir)
sdrfFile = allfiles[grep(paste(accession, ".sdrf.txt$",
sep = ""), allfiles)]
if (length(sdrfFile) == 0)
stop("SDRF file not found in directory ", sourcedir)
sdrfURL = paste("file:/", sourcedir, sdrfFile, sep = "/")
idfFile = allfiles[grep(paste(accession, ".idf.txt$",
sep = ""), allfiles)]
if (length(idfFile) == 0)
warning("IDF file not found in directory ", sourcedir)
idfURL = paste("file:/", sourcedir, idfFile, sep = "/")
# the SDRF names the array design(s); fall back to any local ADF on failure
ph = try(read.AnnotatedDataFrame(sdrfFile, path = sourcedir,
row.names = NULL, blank.lines.skip = TRUE, fill = TRUE,
varMetadata.char = "$"))
if (inherits(ph, "try-error")) {
warning("Unable to retrieve ADF reference from SDRF. Reading any ADF in directory.")
adfFiles = allfiles[grep(".adf.txt$", allfiles)]
}
else {
adr = unique(pData(ph)[, getSDRFcolumn("ArrayDesignREF",
varLabels(ph))])
adfFiles = paste(adr, ".adf.txt", sep = "")
}
# if the ADFs are not on disk, mark them for download from EBI
if (all(file.exists(file.path(sourcedir, adfFiles)))) {
adfURL = paste("file:/", sourcedir, adfFiles, sep = "/")
downloadADF = FALSE
}
else {
filesURL = "https://www.ebi.ac.uk/arrayexpress/files"
adfURL = paste(filesURL, adr, adfFiles, sep = "/")
downloadADF = TRUE
}
rawArchiveURL = NULL
procArchiveURL = NULL
rawArchive = allfiles[grep(paste(accession, ".raw.[0-9]{1,}.zip",
sep = ""), allfiles)]
if (length(rawArchive) != 0)
rawArchiveURL = paste("file:/", sourcedir, rawArchive,
sep = "/")
else warning("No raw files found in directory ", sourcedir)
processedArchive = allfiles[grep(paste(accession, ".processed.[0-9]{1,}.zip",
sep = ""), allfiles)]
if (length(processedArchive) != 0)
procArchiveURL = paste("file:/", sourcedir, processedArchive,
sep = "/")
else warning("No processed data files found in directory ",
sourcedir)
}
# some experiments expose several SDRFs; prefer the hybridisation one
if (length(sdrfURL) > 1) {
warning("Found two SDRF files: \n", paste(sdrfURL, "\n"))
hybSDRF = grep("hyb.sdrf", sdrfURL)
if (length(hybSDRF) > 0) {
message("Choosing ", sdrfURL[hybSDRF])
sdrfURL = sdrfURL[hybSDRF]
sdrfFile = sdrfFile[hybSDRF]
}
else {
warning("Unable to choose SDRF file. Please report experiment to miamexpress@ebi.ac.uk")
}
}
# download the ADF(s) unless they are already in place locally;
# `downloadADF` is only defined in local mode but || short-circuits first
if (!local || path != sourcedir || downloadADF) {
adfFiles <- lapply(adfURL, function(url) {
filedest = paste(path, basename(url), sep = "/")
dnld = try(download.file(url, filedest, mode = "wb"))
# NOTE(review): `sep = ""` here was presumably meant for paste(), not
# warning(); it is harmless but misplaced - same pattern repeats below
if (inherits(dnld, "try-error") || file.info(filedest)$size ==
0) {
warning(paste(url, " does not exist or is empty. \n"),
sep = "")
adffile = NULL
}
else {
adffile = basename(filedest)
}
return(adffile)
})
if (!is.null(adfFiles))
adfFiles = unlist(adfFiles)
}
# download SDRF/IDF and (depending on `type`) the raw/processed archives
if (!local || path != sourcedir) {
sdrfFileDest = paste(path, sdrfFile, sep = "/")
dnld = try(download.file(sdrfURL, sdrfFileDest, mode = "wb"))
if (inherits(dnld, "try-error") || file.info(sdrfFileDest)$size ==
0) {
warning(paste(sdrfFile, " does not exist or is empty. The object will not have featureData or phenoData. \n"),
sep = "")
sdrfFile = NULL
# NOTE(review): `adffile` looks like a typo for `adfFiles` - the
# assignment below has no effect outside this branch; confirm
adffile = NULL
}
idfFileDest = paste(path, idfFile, sep = "/")
dnld = try(download.file(idfURL, idfFileDest, mode = "wb"))
if (inherits(dnld, "try-error") || file.info(idfFileDest)$size ==
0) {
warning(paste(idfFile, " does not exist or is empty. \n"),
sep = "")
idfFile = NULL
}
rawArchive = NULL
processedArchive = NULL
if (type != "mageFilesOnly" && !is.null(rawArchiveURL) &&
(type == "full" || type == "raw")) {
message("Copying raw data files\n")
rawArchive <- lapply(rawArchiveURL, function(url) {
filedest = paste(path, basename(url), sep = "/")
dnld = try(download.file(url, filedest, mode = "wb"))
if (inherits(dnld, "try-error") || file.info(filedest)$size ==
0) {
warning(paste(url, " does not exist or is empty. \n"),
sep = "")
}
else {
return(filedest)
}
})
if (!is.null(rawArchive)) {
rawArchive = unlist(rawArchive)
rawArchive = basename(rawArchive)
}
}
if ((type != "mageFilesOnly" && type == "full" || type ==
"processed") && !is.null(procArchiveURL)) {
message("Copying processed data files\n")
processedArchive <- lapply(procArchiveURL, function(url) {
filedest = paste(path, basename(url), sep = "/")
dnld = try(download.file(url, filedest, mode = "wb"))
if (inherits(dnld, "try-error") || file.info(filedest)$size ==
0) {
warning(paste(url, " does not exist or is empty. \n"),
sep = "")
}
else {
return(filedest)
}
})
if (!is.null(processedArchive)) {
processedArchive = unlist(processedArchive)
processedArchive = basename(processedArchive)
}
}
}
# unpack any downloaded/located archives in place
rawFiles = NULL
processedFiles = NULL
if (extract) {
message("Unpacking data files")
if (!is.null(rawArchive))
rawFiles <- lapply(rawArchive, function(zipfile) {
rawfiles = extract.zip(file = paste(path, zipfile,
sep = "/"))
return(rawfiles)
})
if (!is.null(processedArchive))
processedFiles <- lapply(processedArchive, function(zipfile) {
procfiles = extract.zip(file = paste(path, zipfile,
sep = "/"))
return(procfiles)
})
if (!is.null(rawFiles))
rawFiles = unlist(rawFiles)
if (!is.null(processedFiles))
processedFiles = unlist(processedFiles)
}
# same return shape as ArrayExpress::getAE()
res = list(path = path, rawFiles = rawFiles, rawArchive = rawArchive,
processedFiles = processedFiles, processedArchive = processedArchive,
sdrf = sdrfFile, idf = idfFile, adf = adfFiles)
return(res)
}
##### DOWNLOAD RAW DATA #####
setwd(raw_dir)
#raw data only
#data_raw=getAE("E-GEOD-59867", type = "raw")
#processed data only
# data_raw=https_getAE("E-GEOD-59867", type = "processed")
#
# #all data
# data_raw=https_getAE("E-GEOD-59867", type = "full")
#
##### CREATE R EXPRESSION OBJECT #####
# METHOD 1 - convert MAGE-TAB files into expression set - USING RAW DATA
#expression_data = ae2bioc(mageFiles = data_raw)
#expression_data
# # METHOD 2 - convert MAGE-TAB files into expression set - USING RAW DATA
#
# expression_data<-ReadAffy()
#
# METHOD 3 - convert MAGE-TAB files into expression set - USING PROCESSED DATA
#
# NOTE(review): every line that defines `data_raw` above is commented out, so
# the two statements below will fail unless one of them is re-enabled first
cnames=getcolproc(data_raw)
cnames
expression_data=procset(data_raw, cnames[2])
#
# expression_data
# METHOD 4 - GEO - processed data (the method actually used downstream)
# unable to process through ArrayExpress - used GEO instead
expression_data2 <- getGEO("GSE59867", GSEMatrix =TRUE, getGPL=FALSE)
if (length(expression_data2) > 1) idx <- grep(expression_data2@annotation, attr(expression_data2, "names")) else idx <- 1
expression_data2 <- expression_data2[[idx]]
expression_data2
head(pData(expression_data2))
head(exprs(expression_data2))[,1:5]
##### SET DATA PARAMETERS #####
##
## dataset name to save as/use
##
dataset="E-GEOD-59867"
##
## disease
##
disease="CAD"
##
## Affymetrix or Illumina
##
Microarray_platform="Affymetrix"
#Microarray_platform="Illumina"
##
## Raw or pre-processed
##
#Data_format="Raw"
Data_format="Processed"
##
## probe detection threshold to use
##
#Probe_Detection_Threshold=0.9
#Probe_Detection_Threshold=0.8
#Probe_Detection_Threshold=0.7
Probe_Detection_Threshold=0.6
##
## expression chip (annotation package prefix) to use
##
#expression_chip="hgu133plus2"
#expression_chip="hgu133a"
#expression_chip="hgu133b"
expression_chip="hugene10sttranscriptcluster"
#expression_chip="illuminaHumanv4"
#expression_chip="illuminaHumanv3"
##
## sample network threshold to use - samples less than Z.K threshold will be removed - need manual check
##
sample_network_ZK_threshold=-3
##
## massiR chip to use for gender prediction
##
#massi_R_chip="illumina_humanwg_6_v1"
#massi_R_chip="illumina_humanwg_6_v2"
#massi_R_chip="illumina_humanwg_6_v1"
#massi_R_chip="illumina_humanht_12"
massi_R_chip="affy_hugene_1_0_st_v1"
#massi_R_chip="affy_hg_u133_plus_2"
##
## Phenotype info
##
phenotype_data<-pData(expression_data2)
head(phenotype_data)
names(phenotype_data)
##### SUBSET TO SAMPLES OF INTEREST #####
# case + control
dim(phenotype_data)
table(phenotype_data$characteristics_ch1.1)
phenotype_data_subset<-phenotype_data[phenotype_data$characteristics_ch1.1=="samples collection: on the 1st day of MI (admission)",]
dim(phenotype_data_subset)
table(phenotype_data_subset$characteristics_ch1.1)
# NOTE(review): the two assignments below throw away the subset just created
# and keep ALL samples (the duplicate line looks accidental) - confirm this
# matches the header note "using samples collection: on the 1st day of MI"
phenotype_data_subset<-phenotype_data
phenotype_data_subset<-phenotype_data
# extract pheno of interest (each as a one-column data frame keyed by sample)
Ethnicity<-phenotype_data_subset[1]
colnames(Ethnicity)<-"Ethnicity"
Tissue<-phenotype_data_subset[1]
colnames(Tissue)<-"Tissue"
Age<-phenotype_data_subset[9]
colnames(Age)<-"Age"
Diagnosis<-phenotype_data_subset[11]
colnames(Diagnosis)<-"Diagnosis"
Gender<-phenotype_data_subset[1]
colnames(Gender)<-"Gender"
#standardise Ethnicity (not recorded for this dataset)
Ethnicity$Ethnicity<-as.character(Ethnicity$Ethnicity)
table(Ethnicity)
# df[1]<-value assigns the whole first column, i.e. every sample
Ethnicity[1]<-"Unknown"
table(Ethnicity)
#standardise Age (not recorded for this dataset)
table(Age)
Age[1]<-"Unknown"
table(Age)
#standardise diagnosis- case control
Diagnosis$Diagnosis<-as.character(Diagnosis$Diagnosis)
table(Diagnosis)
#Diagnosis[grep("control", Diagnosis[,1]),]<-"control"
Diagnosis[grep("samples collection: on the 1st day of MI \\(admission\\)", Diagnosis[,1]),]<-"case"
table(Diagnosis)
#standardise tissue
Tissue$Tissue<-as.character(Tissue$Tissue)
table(Tissue)
Tissue[1]<-"blood"
table(Tissue)
#standardise Gender (not recorded; predicted later with massiR)
Gender$Gender<-as.character(Gender$Gender)
table(Gender)
#Gender[grep("Female", Gender[,1]),]<-"female"
#Gender[grep("Male", Gender[,1]),]<-"male"
Gender[1]<-"Unknown"
table(Gender)
##### SUBSET EXPRESSION #####
expression_data_subset<-subset(exprs(expression_data2))
# keep only the samples present in the phenotype table
expression_data_subset<-expression_data_subset[,colnames(expression_data_subset) %in% rownames(phenotype_data_subset)]
# check same number of samples
ncol(expression_data_subset)==nrow(phenotype_data_subset)
ncol(expression_data_subset)
##### PLOTS OF RAW DATA #####
setwd(boxplots_density_plots_dir)
boxplot(expression_data_subset)
png(file="raw_data_boxplot.png")
boxplot(expression_data_subset)
dev.off()
plotDensity(expression_data_subset, logMode=F, addLegend=F)
png(file="raw_data_density_plot.png")
plotDensity(expression_data_subset, logMode=F, addLegend=F)
dev.off()
##### PRE-PROCESS - RAW ######
#
# #background correct
#
# expression_data_background_corrected<-mas5(expression_data_subset)
#
# #normalise
#
# expression_data_normalised<-rsn(log2(expression_data_subset))
#
# # set negative values to zero
#
# expression_data_normalised<-expression_data_normalised
# expression_data_normalised[expression_data_normalised<0]<-0
#
#convert to data.frame (data already normalised by the submitters)
expression_data_normalised_as_data_frame<-as.data.frame(expression_data_subset)
##### PRE-PROCESS - PROCESSED DATA #####
head(expression_data_normalised_as_data_frame)[1:5]
##### PLOTS OF PRE_PROCESSED DATA #####
# setwd(boxplots_density_plots_dir)
#
# boxplot(expression_data_normalised_as_data_frame)
# pdf(file="pre-processed_data_boxplot.pdf")
# boxplot(expression_data_normalised_as_data_frame)
# dev.off()
#
# plotDensity(expression_data_normalised_as_data_frame, logMode=F, addLegend=F)
# pdf(file="pre-processed_data_density_plot.pdf")
# plotDensity(expression_data_normalised_as_data_frame, logMode=F, addLegend=F)
# dev.off()
#
# setwd(work_dir)
##### CHECK FOR DUPLICATE SAMPLES IDs #####
# 0 means no duplicated sample IDs
anyDuplicated(rownames(phenotype_data_subset))
##### PCA PLOT 1 #####
# Draw four PCA scatter plots of the samples, coloured in turn by Diagnosis,
# Gender, Ethnicity and Age.
#
# data:            probes x samples data frame (samples are columns)
# legend_position: keyword passed to legend(), e.g. 'bottomright'
#
# NOTE(review): reads the GLOBAL annotation data frames Diagnosis, Gender,
# Ethnicity and Age, and uses labels2colors() from WGCNA. Per-sample
# coordinates are taken from pca$rotation because prcomp() treats the probe
# rows as observations - confirm this is the intended orientation.
pca_data<-function(data, legend_position){
#run PCA on the probes x samples table
pca<-prcomp(data)
# order of samples in expression data
sample_order<-colnames(data)
# merge pheno info together (all four global annotation data frames)
pheno<-cbind(Diagnosis, Gender, Ethnicity, Age)
# subset pca_pheno to the samples present in the expression data
pca_pheno<-subset(pheno, rownames(pheno) %in% colnames(data))
# put the annotation rows in the same order as the expression columns
ordered_pca_pheno<-pca_pheno[match(sample_order, rownames(pca_pheno)),]
# one colour vector per annotation
Diagnosis_pca_colour<-labels2colors(as.character(ordered_pca_pheno$Diagnosis))
Gender_pca_colour<-labels2colors(as.character(ordered_pca_pheno$Gender))
Ethnicity_pca_colour<-labels2colors(as.character(ordered_pca_pheno$Ethnicity))
Age_pca_colour<-labels2colors(as.character(ordered_pca_pheno$Age))
# pca plot - Diagnosis
plot(pca$rotation[,1:2], main=" PCA plot coloured by Diagnosis",col="black", pch=21,bg=Diagnosis_pca_colour)
legend(legend_position, legend=unique(ordered_pca_pheno$Diagnosis), fill=unique(Diagnosis_pca_colour), title="Diagnosis")
# pca plot - Gender
plot(pca$rotation[,1:2], main=" PCA plot coloured by Clinical Gender",col="black", pch=21,bg=Gender_pca_colour)
legend(legend_position, legend=unique(ordered_pca_pheno$Gender), fill=unique(Gender_pca_colour), title="Gender")
# pca plot - Ethnicity
plot(pca$rotation[,1:2], main=" PCA plot coloured by Ethnicity",col="black", pch=21,bg=Ethnicity_pca_colour)
legend(legend_position, legend=unique(ordered_pca_pheno$Ethnicity), fill=unique(Ethnicity_pca_colour), title="Ethnicity")
# pca plot - Age
plot(pca$rotation[,1:2], main=" PCA plot coloured by Age",col="black", pch=21,bg=Age_pca_colour)
legend(legend_position, legend=unique(ordered_pca_pheno$Age), fill=unique(Age_pca_colour), title="Age")
}
#apply the PCA plotting function (on screen, then captured to PDF)
pca_data(expression_data_normalised_as_data_frame, 'bottomright')
#plot to pdf
setwd(pca_dir)
pdf("1.PCA_plot_before_QC.pdf")
pca_data(expression_data_normalised_as_data_frame, 'bottomright')
dev.off()
##### GENDER CHECK #####
table(Gender)
# get Y chromosome probe lists shipped with massiR
data(y.probes)
names(y.probes)
# count how many of each chip's Y probes are present in this dataset
for (x in names(y.probes)) {
y_chromo_probes <- data.frame(y.probes[x])
count_yes<-rownames(y_chromo_probes)%in%rownames(expression_data_normalised_as_data_frame)
# count_yes is logical; the =="TRUE" comparison works through coercion
print(paste(x, length(count_yes[count_yes=="TRUE"])))
}
massi_R_chip
y_chromo_probes <- data.frame(y.probes[massi_R_chip])
# extract Y chromosome genes from dataset
eset.select.out <- massi_select(expression_data_normalised_as_data_frame, y_chromo_probes)
#
massi_y_plot(eset.select.out)
# get probes with data (probe variation threshold = 4)
massi.select.out <-massi_select(expression_data_normalised_as_data_frame, y_chromo_probes, threshold=4)
# check
head(massi.select.out)[,1:5]
# run gender prediction (clustering on Y-probe expression)
eset.results <- massi_cluster(massi.select.out)
# check results for bad probes
massi_cluster_plot(massi.select.out, eset.results)
#extract gender prediction (columns 1 = ID, 5 = sex call)
predicted_gender<-(eset.results$massi.results)[c(1,5)]
rownames(predicted_gender)<-predicted_gender$ID
predicted_gender$ID<-NULL
colnames(predicted_gender)<-"Predicted_Gender"
#compare to clinical Gender
# merge
gender_comparison<-merge(Gender, predicted_gender, by="row.names")
rownames(gender_comparison)<-gender_comparison$Row.names
gender_comparison$Row.names<-NULL
colnames(gender_comparison)<-c("Clinical_Gender", "Predicted_Gender")
head(gender_comparison)
# gender miss-matches (clinical gender is "Unknown" here, so this lists all)
Gender_Missmatch<-gender_comparison[gender_comparison$Clinical_Gender!=gender_comparison$Predicted_Gender,]
Gender_Missmatch
# check sex bias - should have at least 15% male samples and minimum 6 samples
dip.result <- massi_dip(eset.select.out)
# # dip test <0.08 - sex bias - change gender to unknown
# gender_comparison<-Gender
# gender_comparison$Predicted_Gender<-"Unknown"
# colnames(gender_comparison)[1]<-"Clinical_Gender"
# #separate male/female IDs - use predicted
female_samples<-subset(gender_comparison, Predicted_Gender=="female")
male_samples<-subset(gender_comparison, Predicted_Gender=="male")
#separate male/female IDs - use Clinical (use if dip.test<=0.08)
# female_samples<-subset(gender_comparison, Clinical_Gender=="female")
# male_samples<-subset(gender_comparison, Clinical_Gender=="male")
head(female_samples)
head(male_samples)
table(female_samples)
table(male_samples)
##### PROBE ID DETECTION #####
# separate case control - Factor.Value..disease. column
case_ID<-rownames(subset(Diagnosis, Diagnosis=="case"))
case_ID
case_exprs<-expression_data_normalised_as_data_frame[,colnames(expression_data_normalised_as_data_frame)%in%case_ID]
head(case_exprs)
dim(case_exprs)
# separate by (predicted) gender
case_exprs_F<-case_exprs[colnames(case_exprs)%in%rownames(female_samples)]
case_exprs_M<-case_exprs[colnames(case_exprs)%in%rownames(male_samples)]
# calculate the detection percentile for each sample in each group
# Identify "detected" probes for one sample group.
#
# dataset:                    probes x samples data frame of expression values
# probe_percentile_threshold: per-sample percentile used as the detection cut-off
#
# A probe counts as detected in a sample when its value is at or above that
# sample's percentile cut-off; a probe is kept when it is detected in >= 80%
# of the samples (the 80% fraction is fixed by the pipeline).
#
# Side effects: prints the per-sample cut-offs and draws a boxplot of them.
# Returns a character vector of detected probe (row) names.
extract_good_probe_list<-function(dataset, probe_percentile_threshold) {
# guard against an empty sample group (the previous 1:n loop misbehaved here)
if (ncol(dataset) == 0) {
return(character(0))
}
# per-sample expression value sitting at the requested percentile
sample_quantiles<-apply(dataset, 2, quantile, probs=c(probe_percentile_threshold))
number_of_samples<-length(sample_quantiles)
# mark values at or above the sample's cut-off as NA, so an NA means
# "probe detected in this sample"
for (j in seq_len(number_of_samples)) {
is.na(dataset[j]) <- dataset[j] >= sample_quantiles[j]
}
# per-probe count of samples in which the probe was detected
detection_count<-rowSums(is.na(as.data.frame(dataset)))
# keep probes detected in at least 80% of the samples
good_probes<-rownames(dataset)[detection_count >= (number_of_samples*0.8)]
# report the per-sample cut-offs that were applied
print(as.data.frame(sample_quantiles))
boxplot(as.data.frame(sample_quantiles))
return(good_probes)
}
# apply function to samples
case_exprs_F_expressed_probes_list<-extract_good_probe_list(case_exprs_F, Probe_Detection_Threshold)
length(case_exprs_F_expressed_probes_list)
case_exprs_M_expressed_probes_list<-extract_good_probe_list(case_exprs_M, Probe_Detection_Threshold)
length(case_exprs_M_expressed_probes_list)
# merge list of good probes from both case + control, sort and keep unique values
good_probe_list<-unique(sort(c(case_exprs_F_expressed_probes_list,
case_exprs_M_expressed_probes_list)))
length(good_probe_list)
# extract good probes from dataset
data_exprs_good_probes<-expression_data_normalised_as_data_frame[rownames(expression_data_normalised_as_data_frame)%in%good_probe_list,]
data_case_exprs_good_probes<-case_exprs[rownames(case_exprs)%in%good_probe_list,]
head(data_exprs_good_probes)[1:5]
dim(expression_data_normalised_as_data_frame)
dim(data_exprs_good_probes)
dim(data_case_exprs_good_probes)
##### PROBE DETECTION THRESHOLD PLOTS #####
# using dataframe before probe removal
# get gene symbol list for chip
Gene_symbols_probes <- mappedkeys(eval(parse(text = paste(expression_chip, "SYMBOL", sep=""))))
# Convert to a list
Gene_symbols <- as.data.frame(eval(parse(text = paste(expression_chip, "SYMBOL", sep="")))[Gene_symbols_probes])
head(Gene_symbols)
dim(Gene_symbols)
#Expressed in females only
XIST_probe_ID<-subset(Gene_symbols, symbol=="XIST")
XIST_probe_ID
#Expressed in Males only
PRKY_probe_ID<-subset(Gene_symbols, symbol=="PRKY")
PRKY_probe_ID
RPS4Y1_probe_ID<-subset(Gene_symbols, symbol=="RPS4Y1")
RPS4Y1_probe_ID
KDM5D_probe_ID<-subset(Gene_symbols, symbol=="KDM5D")
KDM5D_probe_ID
USP9Y_probe_ID<-subset(Gene_symbols, symbol=="USP9Y")
USP9Y_probe_ID
UTY_probe_ID<-subset(Gene_symbols, symbol=="UTY")
UTY_probe_ID
# HK genes expressed in all cells + males + females
MKRN1_probe_ID<-subset(Gene_symbols, symbol=="MKRN1")
MKRN1_probe_ID
ADIPOR1_probe_ID<-subset(Gene_symbols, symbol=="ADIPOR1")
ADIPOR1_probe_ID
BNIP3L_probe_ID<-subset(Gene_symbols, symbol=="BNIP3L")
BNIP3L_probe_ID
#RNF10_probe_ID<-subset(Gene_symbols, symbol=="RNF10")
#RNF10_probe_ID
# merge all genes
gene_list<-rbind(XIST_probe_ID,
PRKY_probe_ID,
RPS4Y1_probe_ID,
KDM5D_probe_ID,
USP9Y_probe_ID,
UTY_probe_ID,
MKRN1_probe_ID,
ADIPOR1_probe_ID,
BNIP3L_probe_ID)
# RNF10_probe_ID)
gene_list
# create table of genes and state if KH or gender specific
gene_table<-read.table(text =
"Gene Expressed_in
ADIPOR1 All
BNIP3L All
KDM5D Males
MKRN1 All
PRKY Males
RPS4Y1 Males
USP9Y Males
UTY Males
XIST Females" , header=T)
gene_table
#create function to plot
# Boxplot expression of gender-specific and housekeeping genes, split by
# gender, with the mean per-sample detection threshold overlaid - used to
# sanity-check the probe-detection cut-off.
#
# Args:
#   Expression_table - expression data, probes in rows, samples in columns
#   Gender           - one-column data.frame of gender per sample
#                      (rownames = sample IDs)
#   genes_to_extract - data.frame with columns probe_id and symbol giving
#                      the probes to plot (e.g. the global `gene_list`)
#   threshold        - detection percentile (0-1) for the horizontal line
#   boxplot_title    - plot title
# NOTE(review): also reads the global `gene_table` for the legend panel.
plot_gender_specific_genes <- function(Expression_table, Gender, genes_to_extract, threshold, boxplot_title) {
  # extract the genes of interest; transpose so samples become rows
  Expression_table_gene_check <- as.data.frame(t(Expression_table[rownames(Expression_table) %in% genes_to_extract$probe_id, ]))
  # rename probe-ID columns to gene symbols via the lookup table
  # (bug fix: previously indexed the global `gene_list` instead of the
  # `genes_to_extract` argument)
  for (x in seq_len(dim(Expression_table_gene_check)[2])) {
    colnames(Expression_table_gene_check)[x] <- genes_to_extract[genes_to_extract$probe_id == colnames(Expression_table_gene_check)[x], 2]
  }
  # add in gender information, matching on sample IDs in the row names
  Expression_table_gene_check_gender <- merge(Gender, Expression_table_gene_check, by = "row.names")
  rownames(Expression_table_gene_check_gender) <- Expression_table_gene_check_gender$Row.names
  Expression_table_gene_check_gender$Row.names <- NULL
  # melt to long format for ggplot (bug fix: melt() has no `by` argument -
  # the gender column is the id variable)
  Expression_table_gene_check_gender_melt <- melt(Expression_table_gene_check_gender, id.vars = colnames(Gender))
  # gene column: factor -> character, then order rows by gene name
  Expression_table_gene_check_gender_melt$variable <- as.character(Expression_table_gene_check_gender_melt$variable)
  Expression_table_gene_check_gender_melt <- Expression_table_gene_check_gender_melt[order(Expression_table_gene_check_gender_melt$variable), ]
  # per-sample expression value at the user-defined percentile, averaged
  # across samples to give a single reference line
  sample_quantiles <- apply(Expression_table, 2, quantile, probs = threshold)
  mean_threshold <- mean(sample_quantiles)
  # boxplot + jitter of each gene, coloured by gender
  plot1 <- qplot(variable, value, colour = get(colnames(Gender)), data = Expression_table_gene_check_gender_melt, geom = c("boxplot", "jitter")) +
    geom_hline(yintercept = mean_threshold) +
    ggtitle(boxplot_title) +
    theme(text = element_text(size = 20), axis.text.x = element_text(angle = 45, hjust = 1)) +
    labs(x = "Gene", y = "Expression", colour = colnames(Gender))
  # table legend describing where each gene is expected to be expressed
  plot2 <- tableGrob(gene_table, rows = NULL)
  # draw the boxplot above the table legend
  grid.arrange(plot1, plot2,
               nrow = 2,
               heights = c(3, 1))
}
# plot
plot_gender_specific_genes(case_exprs, gender_comparison[2], gene_list, Probe_Detection_Threshold, paste(dataset, "case_samples", sep="_"))
setwd(probe_detection_plots_dir)
# pdf("Probe_detection_threshold_based_on_CLINICAL_gender_specific_and_house_keeping_genes.pdf", height=15, width = 12)
# plot_gender_specific_genes(case_exprs, gender_comparison[1], gene_list, Probe_Detection_Threshold, paste(dataset, "case_samples", sep="_"))
# dev.off()
# if predicted gender available
pdf("Probe_detection_threshold_based_on_PREDICTED_gender_specific_and_house_keeping_genes.pdf", height=15, width = 12)
plot_gender_specific_genes(case_exprs, gender_comparison[2], gene_list, Probe_Detection_Threshold, paste(dataset, "case_samples", sep="_"))
dev.off()
##### PCA PLOT 2 #####
#plot to pdf
setwd(pca_dir)
pca_data(data_exprs_good_probes, 'bottomright')
pdf("2.PCA_plot_after_probe_detection.pdf")
pca_data(data_exprs_good_probes, 'bottomright')
dev.off()
##### SAMPLE NETWORK PLOT #####
# split data by disease + gender
data_case_exprs_good_probes_M<-data_case_exprs_good_probes[,colnames(data_case_exprs_good_probes)%in%rownames(male_samples)]
data_case_exprs_good_probes_F<-data_case_exprs_good_probes[,colnames(data_case_exprs_good_probes)%in%rownames(female_samples)]
# sample plot function - taken from steve expression pipeline
# Draw the WGCNA-style sample-network QC panel for an expression matrix:
# (1) average-linkage dendrogram on the 1 - ISA distance, (2) scaled
# connectivity (Z.K) per sample with exclusion guides, (3) Z.K vs
# clustering-coefficient Z.C with their Spearman correlation, (4) a
# colour legend.
#
# Args:
#   datExprs - expression data, samples in columns (probes in rows)
# Relies on WGCNA (fundamentalNetworkConcepts, labels2colors).
sampleNetwork_plot <- function(datExprs) {
  diagnosis <- rep("sample", length(colnames(datExprs)))
  gp_col <- "group"
  cat(" setting up data for qc plots", "\r", "\n")
  ## inter-array correlation (IAC) and adjacency matrix
  ## (removed a redundant initial cor() call whose result was
  ## immediately overwritten, plus unused IAC_d / mean_IAC locals)
  cat(" expression matrix and IAC", "\r", "\n")
  sample_names <- colnames(datExprs)
  IAC <- cor(datExprs, method = "p", use = "p")
  diag(IAC) <- 0
  A.IAC <- ((1 + IAC) / 2)^2  ## ADJACENCY MATRIX
  cat(" fundamentalNetworkConcepts", "\r", "\n")
  FNC <- fundamentalNetworkConcepts(A.IAC)  ## WGCNA
  K2 <- FNC$ScaledConnectivity
  Z.K <- (K2 - mean(K2)) / sd(K2)  # standardised connectivity
  Z.C <- (FNC$ClusterCoef - mean(FNC$ClusterCoef)) / sd(FNC$ClusterCoef)
  rho <- signif(cor.test(Z.K, Z.C, method = "s")$estimate, 2)
  rho_pvalue <- signif(cor.test(Z.K, Z.C, method = "s")$p.value, 2)
  # set colours; gp_col is fixed to "group" above, so the "chip" branch
  # (which needs a global `eset`) is currently never taken
  cat(" colorvec [", paste(gp_col), "]", "\r", "\n")
  if (gp_col == "chip") { colorvec <- labels2colors(as.character(pData(eset)$Sentrix.Barcode)) }
  if (gp_col == "group") { colorvec <- labels2colors(diagnosis[1]) }
  ## dendrogram leaf-colouring helper; assigned to the global env via <<-
  ## so dendrapply can find it, with `i` acting as the leaf counter
  local(
    {colLab <<- function(n, treeorder) {
      if (is.leaf(n)) {
        a <- attributes(n)
        i <<- i + 1
        attr(n, "nodePar") <- c(a$nodePar, list(lab.col = colorvec[treeorder][i], lab.font = i %% 3))
      }
      n
    }
    i <- 0
    })
  cat(" begin SampleNetwork plots", "\r", "\n")
  ## cluster samples on 1 - adjacency
  cluster1 <- hclust(as.dist(1 - A.IAC), method = "average")
  cluster1order <- cluster1$order
  cluster2 <- as.dendrogram(cluster1, hang = 0.1)
  cluster3 <- dendrapply(cluster2, colLab, cluster1order)
  ## PLOTS
  ## (1) cluster dendrogram on 1 - ISA
  par(mfrow = c(2, 2))
  par(mar = c(5, 6, 4, 2))
  plot(cluster3, nodePar = list(lab.cex = 1, pch = NA),
       main = paste("Mean ISA = ", signif(mean(A.IAC[upper.tri(A.IAC)]), 3), sep = ""),
       xlab = "", ylab = "1 - ISA", sub = "", cex.main = 1.8, cex.lab = 1.4)
  mtext(paste("distance: 1 - ISA ", sep = ""), cex = 0.8, line = 0.2)
  ## (2) connectivity, with Z.K = -2 / -3 exclusion guides
  par(mar = c(5, 5, 4, 2))
  plot(Z.K, main = "Connectivity", ylab = "Z.K", xaxt = "n", xlab = "Sample", type = "n", cex.main = 1.8, cex.lab = 1.4)
  text(Z.K, labels = sample_names, cex = 0.8, col = colorvec)
  abline(h = -2)
  abline(h = -3)
  ## (3) connectivity vs clustering coefficient
  par(mar = c(5, 5, 4, 2))
  plot(Z.K, Z.C, main = "Connectivity vs ClusterCoef", xlab = "Z.K", ylab = "Z.C", col = colorvec, cex.main = 1.8, cex.lab = 1.4)
  abline(lm(Z.C ~ Z.K), col = "black", lwd = 2)
  # reuse the rho / p-value computed above instead of re-running cor.test
  mtext(paste("rho = ", rho, " p = ", rho_pvalue, sep = ""), cex = 0.8, line = 0.2)
  abline(v = -2, lty = 2, col = "grey")
  abline(h = -2, lty = 2, col = "grey")
  ## (4) blank panel holding the legend
  par(mar = c(5, 5, 4, 2))
  plot(1, type = "n", axes = FALSE, xlab = "", ylab = "")
  legend(0.6, 1.4, unique(diagnosis[1]), fill = unique(colorvec))
}  # adapted from Steve's expression pipeline
# create function to identify sample outliers by scaled connectivity
# Return the column (sample) names whose scaled-connectivity Z score falls
# below `threshold`. Builds the same signed-adjacency matrix as
# sampleNetwork_plot and uses WGCNA's fundamentalNetworkConcepts.
names_of_outliers <- function(datExprs, threshold) {
  # pairwise Pearson correlations between samples, self-correlation zeroed
  pearson <- cor(datExprs, method = "p", use = "p")
  diag(pearson) <- 0
  adjacency <- ((1 + pearson) / 2)^2  ## ADJACENCY MATRIX
  # fundamentalNetworkConcepts (WGCNA)
  concepts <- fundamentalNetworkConcepts(adjacency)
  connectivity <- concepts$ScaledConnectivity
  z_scores <- round((connectivity - mean(connectivity)) / sd(connectivity), 3)
  # names of the samples falling below the Z.K threshold
  names(z_scores[z_scores < threshold])
}
# create function to run network analysis on each expression dataset, plot and remove bad samples
# Run the iterative sample-network QC: plot the raw data, drop samples
# whose scaled connectivity falls below `threshold`, and repeat until no
# outliers remain. Produces three sampleNetwork_plot panels (raw, after
# the first pass, final) and reports the total number of samples removed.
#
# Args:
#   dataset   - expression data, samples in columns
#   threshold - Z.K cut-off passed to names_of_outliers
# Returns: the expression data with all outlying samples removed.
run_sample_network_plot <- function(dataset, threshold) {
  # network plot of the raw data
  sampleNetwork_plot(dataset)
  # first pass: find and drop samples below the Z.K threshold
  outliers <- names_of_outliers(dataset, threshold)
  cleaned <- dataset[, !(colnames(dataset) %in% outliers)]
  sampleNetwork_plot(cleaned)
  # running record of every sample removed
  removed <- outliers
  # keep removing until a pass finds no further outliers
  while (length(outliers) > 0) {
    cleaned <- cleaned[, !(colnames(cleaned) %in% outliers)]
    outliers <- names_of_outliers(cleaned, threshold)
    removed <- c(removed, outliers)
  }
  # final network plot after all removals
  sampleNetwork_plot(cleaned)
  cat("\n")
  print(c("Total number of samples removed...", length(removed)))
  return(cleaned)
}
# run sample network on entorhinal Cortex - on dataframe without gender
data_case_exprs_good_probes_M_QC<-run_sample_network_plot(data_case_exprs_good_probes_M, sample_network_ZK_threshold)
data_case_exprs_good_probes_F_QC<-run_sample_network_plot(data_case_exprs_good_probes_F, sample_network_ZK_threshold)
##### PLOT SAMPLE NETWORK ANALYSIS TO PDF #####
setwd(sample_network_dir)
pdf("case_males_sample_network_analysis.pdf")
data_case_exprs_good_probes_M_QC<-run_sample_network_plot(data_case_exprs_good_probes_M, sample_network_ZK_threshold)
dev.off()
pdf("case_feamles_sample_network_analysis.pdf")
data_case_exprs_good_probes_F_QC<-run_sample_network_plot(data_case_exprs_good_probes_F, sample_network_ZK_threshold)
dev.off()
##### CREATE QC'd DATASET #####
# extract sample ID's from QC'd sample network file
# check colnames same in all dataframes - should be TRUE
all(rownames(data_case_exprs_good_probes_M_QC)==rownames(data_case_exprs_good_probes_F_QC))
# transform and cbind all dataframes
expression_data_QCd<-rbind(t(data_case_exprs_good_probes_M_QC),
t(data_case_exprs_good_probes_F_QC))
dim(expression_data_QCd)
dim(t(expression_data_normalised_as_data_frame))
##### PCA PLOT 3 #####
setwd(pca_dir)
pca_data(as.data.frame(t(expression_data_QCd)), 'bottomright')
pdf("3.PCA_plot_after_sample_removal.pdf")
pca_data(as.data.frame(t(expression_data_QCd)), 'bottomright')
dev.off()
##### CONVERT PROBE ID TO ENTREZ ID #####
# Get the probe identifiers that are mapped to an ENTREZ Gene ID using hgu133a.db
mapped_probes <- mappedkeys(eval(parse(text = paste(expression_chip, "ENTREZID", sep=""))))
# Convert to a list
probe_entrez_mapping <- as.data.frame(eval(parse(text = paste(expression_chip, "ENTREZID", sep="")))[mapped_probes])
# arrange order of column by entrezgene probe_id
probe_entrez_mapping<-probe_entrez_mapping[c(2,1)]
colnames(probe_entrez_mapping)[1]<-"entrezgene"
head(probe_entrez_mapping)
dim(probe_entrez_mapping)
#check any duplicated probe IDs
anyDuplicated(probe_entrez_mapping$probe_id)
# check for any duplicated entrezgene IDs
anyDuplicated(probe_entrez_mapping$entrezgene)
# create convert_probe_id_to_entrez_id function
# Rename expression columns from probe IDs to Entrez gene IDs.
#
# Args:
#   expression_dataset - expression data with probe IDs as column names
#   probe_mapping_file - data.frame with columns entrezgene and probe_id
# Columns with no mapping entry are dropped; column order is preserved.
# Returns: the filtered data.frame with Entrez IDs as column names.
convert_probe_id_to_entrez_id <- function(expression_dataset, probe_mapping_file) {
  expr_df <- as.data.frame(expression_dataset)
  # restrict to probes present in the mapping table
  mapped <- expr_df[colnames(expr_df) %in% probe_mapping_file$probe_id]
  # substitute each remaining probe ID with its Entrez ID
  colnames(mapped) <- probe_mapping_file$entrezgene[match(colnames(mapped), probe_mapping_file$probe_id)]
  mapped
}
# using probe_entrez_mapping file
expression_data_QCd_entrez_id<-convert_probe_id_to_entrez_id(expression_data_QCd, probe_entrez_mapping)
dim(expression_data_QCd)
dim(expression_data_QCd_entrez_id)
length(which(duplicated(colnames(expression_data_QCd_entrez_id))))
head(expression_data_QCd_entrez_id[100:110])
##### COLLAPSE MULTIPLE ENTREZ IDs BY SELECTING THE ONE WITH HIGHEST AVERAGE EXPRESSION ACROSS SAMPLES #####
## create function
# Collapse duplicated Entrez-ID columns: among columns sharing the same
# (truncated) Entrez ID, keep only the one with the highest mean
# expression across samples.
#
# Args:
#   x - expression data, samples in rows and Entrez-ID columns; duplicated
#       IDs are expected to appear uniquified as "123", "123.1", ... so
#       truncating the numeric value recovers the shared ID 123
# Returns: a data.frame with one column per unique Entrez ID.
select_duplicate_probe_by_top_expr <- function(x) {
  # transpose so probes are rows, then score each probe by mean expression
  by_probe <- as.data.frame(t(x))
  by_probe$probe_mean_expression <- rowMeans(by_probe)
  # recover the shared integer Entrez ID from the uniquified row name
  by_probe$trunc_entrez_id <- trunc(as.numeric(as.character(rownames(by_probe))))
  # within each ID, highest-mean probe first; then drop the rest
  ordered_probes <- by_probe[order(by_probe$trunc_entrez_id, -by_probe$probe_mean_expression), ]
  winners <- ordered_probes[!duplicated(ordered_probes$trunc_entrez_id), ]
  # restore clean Entrez IDs as row names and drop the helper columns
  rownames(winners) <- winners$trunc_entrez_id
  winners$trunc_entrez_id <- NULL
  winners$probe_mean_expression <- NULL
  # transpose back to samples-in-rows orientation
  as.data.frame(t(winners))
}
## apply function to dataframes - check number of probes - check duplicates
expression_data_QCd_entrez_id_unique<-select_duplicate_probe_by_top_expr(expression_data_QCd_entrez_id)
dim(expression_data_QCd_entrez_id_unique)
length(which(duplicated(colnames(expression_data_QCd_entrez_id_unique))))
head(expression_data_QCd_entrez_id_unique[1:5])
##### ATTACH DIAGNOSIS AND data REGION #####
# create function to merge multiple dataframes
# Merge two data frames on their row names, keeping only rows present in
# both, and restore the merge key as proper row names.
MyMerge <- function(x, y) {
  merged <- merge(x, y, by = "row.names", all.x = FALSE, all.y = FALSE)
  rownames(merged) <- merged$Row.names
  merged$Row.names <- NULL  # drop the key column merge() adds
  merged
}
# create phenotype infor to attach - diagnosis + gender + Age + Ethnicity + Tissue
phenotype_to_attach<-Reduce(MyMerge, list(Diagnosis, gender_comparison, Age, Ethnicity, Tissue))
dim(phenotype_to_attach)
head(phenotype_to_attach)
# attach pheno to exprs table
expression_data_QCd_entrez_id_unique_pheno<-merge(phenotype_to_attach, expression_data_QCd_entrez_id_unique, by="row.names")
rownames(expression_data_QCd_entrez_id_unique_pheno)<-expression_data_QCd_entrez_id_unique_pheno$Row.names
expression_data_QCd_entrez_id_unique_pheno$Row.names<-NULL
head(expression_data_QCd_entrez_id_unique_pheno)[1:10]
# rows should be same in exprs table - should be TRUE
dim(expression_data_QCd_entrez_id_unique)[1]==dim(expression_data_QCd_entrez_id_unique_pheno)[1]
##### CONVERT ALL GENES TO entrez ID #####
# convert to entrez
full_expression_data_QCd_entrez_id<-convert_probe_id_to_entrez_id(t(expression_data_normalised_as_data_frame), probe_entrez_mapping)
length(which(duplicated(colnames(full_expression_data_QCd_entrez_id))))
# remove duplicate entrez
full_expression_data_QCd_entrez_id_unique<-select_duplicate_probe_by_top_expr(full_expression_data_QCd_entrez_id)
dim(full_expression_data_QCd_entrez_id_unique)
length(which(duplicated(colnames(full_expression_data_QCd_entrez_id_unique))))
# attach diagnosis
# attach pheno to exprs table
full_expression_data_QCd_entrez_id_unique_pheno<-merge(phenotype_to_attach, full_expression_data_QCd_entrez_id_unique, by="row.names")
rownames(full_expression_data_QCd_entrez_id_unique_pheno)<-full_expression_data_QCd_entrez_id_unique_pheno$Row.names
full_expression_data_QCd_entrez_id_unique_pheno$Row.names<-NULL
head(full_expression_data_QCd_entrez_id_unique_pheno)[1:10]
#susbet to samples in final QC
full_expression_data_QCd_entrez_id_unique_pheno<-full_expression_data_QCd_entrez_id_unique_pheno[rownames(full_expression_data_QCd_entrez_id_unique_pheno) %in% rownames(expression_data_QCd_entrez_id_unique),]
# rows should be same in exprs table - should be TRUE
dim(expression_data_QCd_entrez_id_unique)[1]==dim(full_expression_data_QCd_entrez_id_unique_pheno)[1]
#
dim(expression_data_QCd_entrez_id_unique)[1]
dim(full_expression_data_QCd_entrez_id_unique_pheno)[1]
# save
setwd(clean_data_dir)
saveRDS(full_expression_data_QCd_entrez_id_unique_pheno, file=paste(dataset, "QCd_FULL.RDS", sep="_"))
##### SUMMARY #####
print(c("dataset:", dataset), quote=F)
print(c("Disease:", disease), quote=F)
print(c("Microarray Platform:", Microarray_platform), quote=F)
print(c("Expression Chip:", expression_chip), quote=F)
print(c("Data Format:", Data_format), quote=F)
print(c("Tissue:", as.character(Tissue[1,1])), quote=F)
print(c("Case Number:", length(case_ID)), quote=F)
print(c("Control Number:", length(control_ID)), quote=F)
print(c("Probe Detection Threshold:", Probe_Detection_Threshold), quote=F)
print(c("Gender-Missmatch:", dim(Gender_Missmatch)[1]), quote=F)
print(c("Samples Removed:", dim(expression_data_normalised_as_data_frame)[2]-dim(expression_data_QCd_entrez_id_unique_pheno)[1]), quote=F)
print(c("Final Case numbers:", length(expression_data_QCd_entrez_id_unique_pheno[expression_data_QCd_entrez_id_unique_pheno$Diagnosis=="case",1])), quote=F)
print(c("Final Control Numbers:", length(expression_data_QCd_entrez_id_unique_pheno[expression_data_QCd_entrez_id_unique_pheno$Diagnosis=="control",1])), quote=F)
print(c("Initial Probe Numbers:", dim(expression_data_normalised_as_data_frame)[1]), quote=F)
print(c("Final Probe Numbers:", dim(expression_data_QCd_entrez_id_unique_pheno)[2]-6), quote=F)
print(c("Final Probe Numbers in full dataset:", dim(full_expression_data_QCd_entrez_id_unique_pheno)[2]-6), quote=F)
#write out summary report
setwd(work_dir)
cat("Processing Date:", strftime(Sys.Date(),"%Y-%m-%d"), "\n", file="Summary_Report.txt")
cat("Dataset:", dataset, "\n", file="Summary_Report.txt", append=T)
cat("Disease:", disease, "\n", file="Summary_Report.txt", append=T)
cat("Microarray Platform:", Microarray_platform, "\n", file="Summary_Report.txt", append=T)
cat("Expression Chip:", expression_chip, "\n", file="Summary_Report.txt", append=T)
cat("Data Format:", Data_format, "\n", file="Summary_Report.txt", append=T)
cat("Tissue:", as.character(Tissue[1,1]), "\n", file="Summary_Report.txt", append=T)
cat("Case Numbers before QC:", length(case_ID), "\n", file="Summary_Report.txt", append=T)
cat("Control Numbers before QC:", length(control_ID), "\n", file="Summary_Report.txt", append=T)
cat("Probe Detection Threshold:", Probe_Detection_Threshold, "\n", file="Summary_Report.txt", append=T)
cat("Gender-Missmatch:", dim(Gender_Missmatch)[1], "\n", file="Summary_Report.txt", append=T)
cat("Samples Removed:", dim(expression_data_normalised_as_data_frame)[2]-dim(expression_data_QCd_entrez_id_unique_pheno)[1], "\n", file="Summary_Report.txt", append=T)
cat("Case numbers after QC:", length(expression_data_QCd_entrez_id_unique_pheno[expression_data_QCd_entrez_id_unique_pheno$Diagnosis=="case",1]), "\n", file="Summary_Report.txt", append=T)
cat("Control Numbers after QC:", length(expression_data_QCd_entrez_id_unique_pheno[expression_data_QCd_entrez_id_unique_pheno$Diagnosis=="control",1]), "\n", file="Summary_Report.txt", append=T)
cat("Probe Numbers before QC:", dim(expression_data_normalised_as_data_frame)[1], "\n", file="Summary_Report.txt", append=T)
cat("Probe Numbers after QC:", dim(expression_data_QCd_entrez_id_unique_pheno)[2]-6, "\n", file="Summary_Report.txt", append=T)
cat("Final Probe Numbers in full dataset:", dim(full_expression_data_QCd_entrez_id_unique_pheno)[2]-6, "\n", file="Summary_Report.txt", append=T)
##### SAVE EXPRESSION DATAFRAME #####
setwd(clean_data_dir)
saveRDS(expression_data_QCd_entrez_id_unique_pheno, file=paste(dataset, "QCd.RDS", sep="_"))
##### SAVE IMAGE #####
setwd(work_dir)
save.image(file=paste(dataset, "processing_data.Rdata", sep="_"))
|
\name{getReadsInRange}
\alias{getReadsInRange}
\title{
Retrieves Reads in Specified Range
}
\description{
'getReadsInRange' retrieves reads in range based on arguments refSeq,
refStart, refEnd and idx.
}
\usage{
getReadsInRange(cmpH5, refSeq, refStart = 1, refEnd = NA, idx = 1:nrow(cmpH5))
}
\arguments{
\item{cmpH5}{
An object of class \code{PacBioCmpH5}.
}
\item{refSeq}{
Reference sequence from which to retrieve reads.
}
\item{refStart}{
Reference start point from which to retrieve reads.
}
\item{refEnd}{
Reference end point from which to retrieve reads.
}
\item{idx}{
The indices of the alignments to return.
}
}
\value{
'getReadsInRange' returns a vector of integers.
}
\examples{
require(pbh5)
cmpH5 <- PacBioCmpH5(system.file("h5_files", "aligned_reads.cmp.h5",
package = "pbh5"))
readsInRange <- getReadsInRange(cmpH5, 1, 100, 200)
class(readsInRange)
head(readsInRange)
}
\keyword{datasets}
| /man/getReadsInRange.Rd | no_license | pb-jlee/R-pbh5 | R | false | false | 956 | rd | \name{getReadsInRange}
\alias{getReadsInRange}
\title{
Retrieves Reads in Specified Range
}
\description{
'getReadsInRange' retrieves reads in range based on arguments refSeq,
refStart, refEnd and idx.
}
\usage{
getReadsInRange(cmpH5, refSeq, refStart = 1, refEnd = NA, idx = 1:nrow(cmpH5))
}
\arguments{
\item{cmpH5}{
An object of class \code{PacBioCmpH5}.
}
\item{refSeq}{
Reference sequence from which to retrieve reads.
}
\item{refStart}{
Reference start point from which to retrieve reads.
}
\item{refEnd}{
Reference end point from which to retrieve reads.
}
\item{idx}{
The indices of the alignments to return.
}
}
\value{
'getReadsInRange' returns a vector of integers.
}
\examples{
require(pbh5)
cmpH5 <- PacBioCmpH5(system.file("h5_files", "aligned_reads.cmp.h5",
package = "pbh5"))
readsInRange <- getReadsInRange(cmpH5, 1, 100, 200)
class(readsInRange)
head(readsInRange)
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main.R
\name{ROIcoeffs}
\alias{ROIcoeffs}
\title{Compute the Return on Investment (ROI) surface coefficients from population probabilities}
\usage{
ROIcoeffs(probabilities, As = 5:20, Ls = (diff(range(As)) + 1):1)
}
\arguments{
\item{probabilities, }{a \code{\link[base]{data.frame}} (or \code{\link[data.table]{data.table}}) with the probabilities resulting from \link{nPxA}. Rows must correspond to ages, starting with age 1}
\item{As, }{the starting age(s) to consider}
\item{Ls, }{the maximum number of tests for each age; should either be an integer per age or a single integer for all ages.
The default behavior computes the number of tests (for each age) that makes the maximum of `As` the maximum testing age
Note: results will also be provided for shorter testing intervals, as the intermediate coefficients are calculated as part
of computing the value at the maximum \code{L}}
}
\value{
a \code{\link[base]{data.frame}} (\code{\link[data.table]{data.table}}, if available) with columns:
\describe{
\item{A}{integer; the age when routine test-then-vaccinate strategy starts (from \code{As})}
\item{L}{integer; the maximum number of tests for routine test-then-vaccinate strategy (from \code{Ls})}
\item{vacfrac}{numeric; the fraction of individuals participating in this strategy that get vaccinated}
\item{pri.offset}{numeric; the (additive) reduction in \code{vacfrac} if using the ordinal test}
\item{Sfrac}{numeric; the proportion experiencing second infection costs}
\item{Fresp}{numeric; the F/S cost fraction term, when comparing vaccination with and without testing}
\item{Sgain}{numeric; the S term, when comparing vaccination with and without testing}
}
}
\description{
Compute the Return on Investment (ROI) surface coefficients from population probabilities
}
\details{
computes the coefficients for the economic calculations
}
\examples{
require(denvax);
data(morrison2010) # has counts by age
fit <- with(morrison2010, serofit(sero=Seropositive, N=Number, age.min=Age))
m2010pop <- synthetic.pop(fit, runs = 10, popsize = 10) # small sample size for example run time
m2010lh <- nPxA(m2010pop)
rc <- ROIcoeffs(m2010lh, As=5:10, Ls=5)
pp <- par()
par(mfrow=c(1, 2))
rcs <- subset(rc, A==10 & L < 11)
with(rcs, plot(
L, aveTests, type="l",
xlab="Max # of Tests Allowed",
ylab="Ave # of Tests Administered",
main="Starting @ Age 10",
ylim=c(1, 3)
))
rcs <- subset(rc, A==5 & L < 11)
with(rcs, plot(
L, aveTests, type="l",
xlab="Max # of Tests Allowed",
ylab="",
main="Starting @ Age 5",
ylim=c(1, 3)
))
par(pp)
}
| /man/ROIcoeffs.Rd | no_license | cran/denvax | R | false | true | 2,648 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main.R
\name{ROIcoeffs}
\alias{ROIcoeffs}
\title{Compute the Return on Investment (ROI) surface coefficients from population probabilities}
\usage{
ROIcoeffs(probabilities, As = 5:20, Ls = (diff(range(As)) + 1):1)
}
\arguments{
\item{probabilities, }{a \code{\link[base]{data.frame}} (or \code{\link[data.table]{data.table}}) with the probabilities resulting from \link{nPxA}. Rows must correspond to ages, starting with age 1}
\item{As, }{the starting age(s) to consider}
\item{Ls, }{the maximum number of tests for each age; should either be an integer per age or a single integer for all ages.
The default behavior computes the number of tests (for each age) that makes the maximum of `As` the maximum testing age
Note: results will also be provided for shorter testing intervals, as the intermediate coefficients are calculated as part
of computing the value at the maximum \code{L}}
}
\value{
a \code{\link[base]{data.frame}} (\code{\link[data.table]{data.table}}, if available) with columns:
\describe{
\item{A}{integer; the age when routine test-then-vaccinate strategy starts (from \code{As})}
\item{L}{integer; the maximum number of tests for routine test-then-vaccinate strategy (from \code{Ls})}
\item{vacfrac}{numeric; the fraction of individuals participating in this strategy that get vaccinated}
\item{pri.offset}{numeric; the (additive) reduction in \code{vacfrac} if using the ordinal test}
\item{Sfrac}{numeric; the proportion experiencing second infection costs}
\item{Fresp}{numeric; the F/S cost fraction term, when comparing vaccination with and without testing}
\item{Sgain}{numeric; the S term, when comparing vaccination with and without testing}
}
}
\description{
Compute the Return on Investment (ROI) surface coefficients from population probabilities
}
\details{
computes the coefficients for the economic calculations
}
\examples{
require(denvax);
data(morrison2010) # has counts by age
fit <- with(morrison2010, serofit(sero=Seropositive, N=Number, age.min=Age))
m2010pop <- synthetic.pop(fit, runs = 10, popsize = 10) # small sample size for example run time
m2010lh <- nPxA(m2010pop)
rc <- ROIcoeffs(m2010lh, As=5:10, Ls=5)
pp <- par()
par(mfrow=c(1, 2))
rcs <- subset(rc, A==10 & L < 11)
with(rcs, plot(
L, aveTests, type="l",
xlab="Max # of Tests Allowed",
ylab="Ave # of Tests Administered",
main="Starting @ Age 10",
ylim=c(1, 3)
))
rcs <- subset(rc, A==5 & L < 11)
with(rcs, plot(
L, aveTests, type="l",
xlab="Max # of Tests Allowed",
ylab="",
main="Starting @ Age 5",
ylim=c(1, 3)
))
par(pp)
}
|
%
% Copyright 2007-2018 The OpenMx Project
%
% Licensed under the Apache License, Version 2.0 (the "License");
% you may not use this file except in compliance with the License.
% You may obtain a copy of the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS,
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
% See the License for the specific language governing permissions and
% limitations under the License.
\name{vech}
\alias{vech}
\title{Half-vectorization}
\description{
This function returns the half-vectorization of an input matrix as a column vector.
}
\usage{
vech(x)
}
\arguments{
\item{x}{an input matrix.}
}
\details{
The half-vectorization of an input matrix consists of the elements in the lower triangle of the matrix, including the elements along the diagonal of the matrix, as a column vector. The column vector is created by traversing the matrix in column-major order.
}
\seealso{
\code{\link{vech2full}}, \code{\link{vechs}}, \code{\link{rvectorize}}, \code{\link{cvectorize}}
}
\examples{
vech(matrix(1:9, 3, 3))
vech(matrix(1:12, 3, 4))
}
| /man/vech.Rd | no_license | mileysmiley/OpenMx | R | false | false | 1,285 | rd | %
% Copyright 2007-2018 The OpenMx Project
%
% Licensed under the Apache License, Version 2.0 (the "License");
% you may not use this file except in compliance with the License.
% You may obtain a copy of the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS,
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
% See the License for the specific language governing permissions and
% limitations under the License.
\name{vech}
\alias{vech}
\title{Half-vectorization}
\description{
This function returns the half-vectorization of an input matrix as a column vector.
}
\usage{
vech(x)
}
\arguments{
\item{x}{an input matrix.}
}
\details{
The half-vectorization of an input matrix consists of the elements in the lower triangle of the matrix, including the elements along the diagonal of the matrix, as a column vector. The column vector is created by traversing the matrix in column-major order.
}
\seealso{
\code{\link{vech2full}}, \code{\link{vechs}}, \code{\link{rvectorize}}, \code{\link{cvectorize}}
}
\examples{
vech(matrix(1:9, 3, 3))
vech(matrix(1:12, 3, 4))
}
|
# plot4.R: 2x2 panel of household power consumption for 1-2 Feb 2007.
# Panels: global active power, voltage, the three energy sub-metering
# series, and global reactive power, each with Thu/Fri/Sat day labels.
a <- read.table("household_power_consumption.txt", header = TRUE, sep = ";")
# sub-metering series restricted to the two target days
sub_1 <- a$Sub_metering_1[a$Date == "1/2/2007" | a$Date == "2/2/2007"]
sub_2 <- a$Sub_metering_2[a$Date == "1/2/2007" | a$Date == "2/2/2007"]
sub_3 <- a$Sub_metering_3[a$Date == "1/2/2007" | a$Date == "2/2/2007"]
# columns were read as factors; convert via character to numeric
su1 <- as.numeric(as.character(sub_1))
su2 <- as.numeric(as.character(sub_2))
su3 <- as.numeric(as.character(sub_3))
# common y-limit covering all three sub-metering series
# (bug fix: this line previously read `ymax<-max(su1` - incomplete syntax)
ymax <- max(su1, su2, su3)
vol <- a$Voltage[a$Date == "1/2/2007" | a$Date == "2/2/2007"]
voltage <- as.numeric(as.character(vol))
glob <- a$Global_active_power[a$Date == "1/2/2007" | a$Date == "2/2/2007"]
global <- as.numeric(as.character(glob))
png(file = "plot4.png")
par(mfrow = c(2, 2))
# minute offsets of Thu/Fri/Sat midnights for the custom x axes
v1 <- c(0, 1440, 2880)
v2 <- c("Thu", "Fri", "Sat")
# panel 1: global active power (bug fix: was mislabelled "Global Reactive
# Power"; reactive power is panel 4). xaxt="n" suppresses the default
# numeric axis so the day labels below are not drawn over it.
plot(global, type = "l", xlab = "", ylab = "Global Active Power", xaxt = "n")
axis(side = 1, at = v1, labels = v2, tck = -.02, font = 1, cex.axis = 1)
# panel 2: voltage
plot(voltage, type = "l", xlab = "datetime", xaxt = "n")
axis(side = 1, at = v1, labels = v2, tck = -.02, font = 1, cex.axis = 1)
# panel 3: the three sub-metering series overlaid on a shared scale
plot(su1, type = "l", col = "black", ylim = c(0, ymax), xaxt = "n", xlab = "", ylab = "Energy sub metering")
par(new = TRUE)
plot(su2, type = "l", col = "red", ylim = c(0, ymax), xaxt = "n", xlab = "", ylab = "Energy sub metering")
par(new = TRUE)
plot(su3, type = "l", col = "blue", ylim = c(0, ymax), xaxt = "n", xlab = "", ylab = "Energy sub metering")
axis(side = 1, at = v1, labels = v2, tck = -.02, font = 1, cex.axis = 1)
legend("topright", lty = c(1, 1, 1), lwd = c(1, 1, 1), col = c("black", "red", "blue"),
       legend = c("sub_metering_1", "sub_metering_2", "sub_metering_3"), cex = 0.9, bty = "n")
# panel 4: global reactive power
reac <- a$Global_reactive_power[a$Date == "1/2/2007" | a$Date == "2/2/2007"]
reactive <- as.numeric(as.character(reac))
plot(reactive, type = "l", xlab = "", ylab = "Global_reactive_power", xaxt = "n")
axis(side = 1, at = v1, labels = v2, tck = -.02, font = 1, cex.axis = 1)
dev.off()
# plot4.R (duplicate copy): builds the four-panel PNG from data frame `a`
# loaded on the preceding line. "?" entries become NA via the
# as.numeric(as.character(...)) conversions.
sub_1<-a$Sub_metering_1[a$Date=="1/2/2007"|a$Date=="2/2/2007"]
sub_2<-a$Sub_metering_2[a$Date=="1/2/2007"|a$Date=="2/2/2007"]
sub_3<-a$Sub_metering_3[a$Date=="1/2/2007"|a$Date=="2/2/2007"]
su1<-as.numeric(as.character(sub_1))
su2<-as.numeric(as.character(sub_2))
su3<-as.numeric(as.character(sub_3))
# FIX: this line was truncated ("ymax<-max(su1"), a syntax error. Use the
# common maximum over all three sub-metering series (NA-safe) so the three
# overlaid plots below share one y-scale.
ymax<-max(su1, su2, su3, na.rm = TRUE)
vol<-a$Voltage[a$Date=="1/2/2007"|a$Date=="2/2/2007"]
voltage<-as.numeric(as.character(vol))
glob<-a$Global_active_power[a$Date=="1/2/2007"|a$Date=="2/2/2007"]
global<-as.numeric(as.character(glob))
png(file="plot4.png")
par(mfrow=c(2,2))
# Top-left: global ACTIVE power.
# FIX: ylab previously said "Global Reactive Power" although the series
# plotted is Global_active_power (reactive power is the bottom-right panel).
plot(global,type = "l",xlab = "", ylab = "Global Active Power")
v1 <- c(0,1440,2880)
v2<-c("Thu","Fri","Sat")
axis(side = 1,at = v1,labels = v2,tck=-.02,font = 1,cex.axis = 1)
# Top-right: voltage.
plot(voltage,type = "l",xlab = "datetime")
v1 <- c(0,1440,2880)
v2<-c("Thu","Fri","Sat")
axis(side = 1,at = v1,labels = v2,tck=-.02,font = 1,cex.axis = 1)
# Bottom-left: the three sub-metering series overlaid on a shared y-scale.
plot(su1,type = "l", col="black",ylim=c(0,ymax),xaxt="n",xlab = "",ylab = "Energy sub metering")
par(new=TRUE)
plot(su2,type = "l", col="red",ylim=c(0,ymax),xaxt="n",xlab = "",ylab = "Energy sub metering")
par(new=TRUE)
plot(su3,type = "l", col="blue",ylim=c(0,ymax),xaxt="n",xlab = "",ylab = "Energy sub metering")
v1 <- c(0,1440,2880)
v2<-c("Thu","Fri","Sat")
axis(side = 1,at = v1,labels = v2,tck=-.02,font = 1,cex.axis = 1)
legend("topright",lty=c(1,1,1),lwd=c(1,1,1),col=c("black","red","blue"),
       legend=c("sub_metering_1","sub_metering_2","sub_metering_3"),cex=0.9,bty="n")
# Bottom-right: global reactive power.
reac<-a$Global_reactive_power[a$Date=="1/2/2007"|a$Date=="2/2/2007"]
reactive<-as.numeric(as.character(reac))
plot(reactive,type = "l",xlab = "",ylab = "Global_reactive_power",xaxt="n")
v1 <- c(0,1440,2880)
v2<-c("Thu","Fri","Sat")
axis(side = 1,at = v1,labels = v2,tck=-.02,font = 1,cex.axis = 1)
dev.off() |
# Code for exercise 1
# This should produce three plots
# Histogram for earconch measurements.
# histo_earconch.png
# Side-by-side box plots of the male and female earconch measurements.
# box_plot_earconch_gender.png
# Side-by-side histograms of the male and female earconch measurements.
# histo_earconch_gender.png
earconch_plots <- function() {
  # Produce three plots of the possum ear-conch measurements. Relies on the
  # `possum` data frame and ggplot2 being available in the calling
  # environment.
  # FIX: the saved file names now match the names required by the exercise
  # spec in the header comments (previously conchhist.png / conchbox.png /
  # conchtwohist.png were written instead).
  # Histogram of all earconch measurements.
  conchhist <- ggplot(possum, aes(x = earconch)) + geom_histogram(color = "white")
  ggsave("histo_earconch.png", conchhist)
  # Side-by-side box plots of male vs female earconch measurements.
  conchbox <- ggplot(possum, aes(x = sex, y = earconch)) + geom_boxplot() +
    labs(title = "Box Plot of Ear Conchs of Males and Females")
  ggsave("box_plot_earconch_gender.png", conchbox)
  # Side-by-side (faceted) histograms of male vs female earconch measurements.
  conchtwohist <- ggplot(possum, aes(x = earconch)) + geom_histogram(color = "white") +
    facet_grid(. ~ sex) +
    labs(title = "Histogram of Ear Conchs of Males (m) and Females (f)")
  ggsave("histo_earconch_gender.png", conchtwohist)
}
earconch_plots() | /ExploratoryDataAnalysis/HistogramsInGGPlot.R | no_license | ConorOSullivan/Coursework | R | false | false | 966 | r | # Code for exercise 1
# This should produce three plots
# Histogram for earconch measurements.
# histo_earconch.png
# Side-by-side box plots of the male and female earconch measurements.
# box_plot_earconch_gender.png
# Side-by-side histograms of the male and female earconch measurements.
# histo_earconch_gender.png
earconch_plots <- function() {
  # Three plots of the possum ear-conch measurements; uses the `possum`
  # data frame and ggplot2 from the calling environment.

  # Histogram of all earconch measurements (saved as conchhist.png).
  hist_all <- ggplot(possum, aes(x = earconch)) +
    geom_histogram(color = "white")
  ggsave("conchhist.png", hist_all)

  # Box plots of earconch by sex (saved as conchbox.png).
  box_by_sex <- ggplot(possum, aes(x = sex, y = earconch)) +
    geom_boxplot() +
    labs(title = "Box Plot of Ear Conchs of Males and Females")
  ggsave("conchbox.png", box_by_sex)

  # Faceted histograms of earconch by sex (saved as conchtwohist.png).
  hist_by_sex <- ggplot(possum, aes(x = earconch)) +
    geom_histogram(color = "white") +
    facet_grid(. ~ sex) +
    labs(title = "Histogram of Ear Conchs of Males (m) and Females (f)")
  ggsave("conchtwohist.png", hist_by_sex)
}
earconch_plots() |
library(dplyr)
library(ggplot2)
library(visdat)
library(reshape2)
library(ggcorrplot)
# --- Load raw inputs ---------------------------------------------------------
# Credit-bureau records and applicant demographics, keyed by Application.ID.
credit <- read.csv("Credit_Bureau.csv", header = T)
demo <- read.csv("demogs.csv", header = T)
# --- Remove duplicates -------------------------------------------------------
# Keep only Application.IDs that occur exactly once in each table, i.e. drop
# ALL rows of any duplicated ID (not just the extra copies).
demo <- demo %>% group_by(Application.ID) %>% filter(n() == 1)
credit <- credit %>% group_by(Application.ID) %>% filter(n() == 1)
# Merge demographics with credit-bureau data on ID + performance tag.
merge_data <- merge(demo, credit, by=c("Application.ID", "Performance.Tag"))
# --- Missing data ------------------------------------------------------------
# Drop rows without an outcome label, then drop any remaining incomplete rows.
merge_data <- merge_data %>% filter(!is.na(Performance.Tag))
merge_data <- na.omit(merge_data)
# Class counts and proportions of the outcome (class-imbalance check).
table(merge_data$Performance.Tag)
merge_data %>% group_by(Performance.Tag) %>% summarise(n()/nrow(merge_data))
# --- Age ---------------------------------------------------------------------
# Visual outlier check, then floor ages below 18 up to 18 (treats negative /
# implausibly small ages as data-entry errors).
merge_data$Age %>% boxplot()
merge_data[(which(merge_data$Age < 18)), ]$Age <- 18
# Bin ages into 5-year buckets via age_bin() defined below.
# Map a numeric age in [18, 66) to a 5-year age-bucket label ("18-20",
# "21-25", ..., "61-65").
#
# Rewritten with cut() so it is vectorized and returns NA (instead of an
# invisible NULL) for out-of-range ages, which keeps downstream
# sapply()/as.factor() calls safe. The odd default (age = 3) is kept for
# interface compatibility.
#
# @param age numeric scalar or vector of ages.
# @return character vector of bucket labels; NA where age is out of range.
age_bin <- function(age=3){
  breaks <- c(18, 21, 26, 31, 36, 41, 46, 51, 56, 61, 66)
  labels <- c("18-20", "21-25", "26-30", "31-35", "36-40",
              "41-45", "46-50", "51-55", "56-60", "61-65")
  # right = FALSE makes the intervals [lower, upper), matching the original
  # (age > 17 && age < 21) -> "18-20" style boundaries.
  as.character(cut(age, breaks = breaks, labels = labels, right = FALSE))
}
# Apply the 5-year binning to every row and store as a factor.
merge_data$Age_Bin <- merge_data$Age %>% sapply(age_bin) %>% as.factor()
summary(merge_data$Age_Bin)
attributes(merge_data$Age_Bin)
#=========================#
#     Marital Status      #
#=========================#
# Count NAs, drop rows with an empty-string status, convert to factor.
merge_data$Marital.Status..at.the.time.of.application. %>% is.na() %>% sum()
merge_data <- merge_data %>% filter(Marital.Status..at.the.time.of.application. != "")
merge_data$Marital.Status..at.the.time.of.application. <- as.factor(merge_data$Marital.Status..at.the.time.of.application.)
attributes(merge_data$Marital.Status..at.the.time.of.application.)
#=========================#
#       Profession        #
#=========================#
# Count NAs and category frequencies; drop empty-string professions.
merge_data$Profession %>% is.na() %>% sum()
merge_data %>% count(Profession)
merge_data <- merge_data %>% filter(Profession != "")
merge_data$Profession <- as.factor(merge_data$Profession)
merge_data %>% count(Profession)
#=========================#
#         Income          #
#=========================#
# Negative incomes are treated as invalid and removed.
merge_data$Income %>% is.na() %>% sum()
merge_data <- merge_data %>% filter(Income >= 0)
merge_data$Income %>% boxplot()
#=========================#
#   Outstanding Balance   #
#=========================#
merge_data$Outstanding.Balance %>% is.na() %>% sum()
merge_data <- merge_data %>% filter(!is.na(Outstanding.Balance))
summary(merge_data$Outstanding.Balance)
merge_data$Outstanding.Balance %>% boxplot()
#================================#
#          Avg CC Util           #
#================================#
# Remove rows with missing average credit-card utilization.
merge_data <- merge_data %>% filter(!is.na(Avgas.CC.Utilization.in.last.12.months))
summary(merge_data$No.of.times.30.DPD.or.worse.in.last.12.months)
# Pairwise Pearson correlations over all numeric columns.
merge_data_correlation <- cor(merge_data %>% select_if(is.numeric), method = "pearson", use = "complete.obs")
merge_data_correlation
ggcorrplot(merge_data_correlation, lab = T, hc.order = T, type = "lower")
#================================#
#             Others             #
#================================#
merge_data$Education <- merge_data$Education %>% as.factor()
merge_data$Gender <- merge_data$Gender %>% as.factor()
#================================#
#   EDA Distrib./Visualization   #
#================================#
## Income
merge_data$Income %>% boxplot()
# Profession vs Income
ggplot(merge_data, aes(Profession, Income, fill=Profession)) + geom_boxplot()
# NOTE(review): the labs() call below is a standalone statement - it is NOT
# added to the plot above (missing trailing '+'), so it has no effect.
labs(x="Age Buckets", y="Frequency in 1000s", fill="Performance Tag", title="Age Bucket wise Performance Tag Frequency")
# Income vs Credit Usage
ggplot(merge_data, aes(Income, Avgas.CC.Utilization.in.last.12.months)) + geom_smooth()
# Income vs Average Debt
ggplot(merge_data %>% group_by(Income) %>% summarize(meanDebt = mean(Outstanding.Balance)), aes(Income, meanDebt)) + geom_smooth()
# Income Distribution
ggplot(merge_data, aes(Income)) + geom_histogram(binwidth = 10)
# Income vs Performance
ggplot(merge_data, aes(x = as.factor(Performance.Tag), y = Income, fill = as.factor(Performance.Tag))) + geom_boxplot()
### Age
# Age distribution over the buckets
ggplot(merge_data, aes(Age_Bin, fill = Age_Bin)) + geom_bar()
# Age vs Income
ggplot(merge_data, aes(x = Age_Bin, y = Income, fill = Age_Bin)) + geom_boxplot()
ggplot(merge_data, aes(x = Age, y = Income)) + geom_smooth()
# Age vs Performance (counts rescaled to thousands on the y axis)
ggplot(merge_data, aes(x = Age_Bin, fill = as.factor(Performance.Tag))) +
  geom_bar() +
  scale_y_continuous(
    name = "Number of Applications in thousands",
    labels = function(x) x / 1000
  )
## Average Use of CC
# Avgas.CC.Utilization.in.last.12.months vs Performance
ggplot(merge_data, aes(x = as.factor(Performance.Tag), y = Avgas.CC.Utilization.in.last.12.months, fill = as.factor(Performance.Tag))) + geom_boxplot()
# Effect of past late payments (30/90 days past due) vs Performance
ggplot(merge_data, aes(Performance.Tag, No.of.times.30.DPD.or.worse.in.last.12.months, fill = as.factor(Performance.Tag))) + geom_boxplot()
ggplot(merge_data, aes(Performance.Tag, No.of.times.90.DPD.or.worse.in.last.12.months, fill = as.factor(Performance.Tag))) + geom_boxplot()
# Effect of credit inquiries
ggplot(merge_data, aes(as.factor(Performance.Tag), No.of.Inquiries.in.last.12.months..excluding.home...auto.loans., fill = as.factor(Performance.Tag))) + geom_boxplot()
# Performance vs Outstanding Balance
merge_data$Outstanding.Balance %>% boxplot()
ggplot(merge_data, aes(x = as.factor(Performance.Tag), y = Outstanding.Balance/1000, fill = as.factor(Performance.Tag))) + geom_boxplot()
# Marital Status (log-scaled counts)
ggplot(merge_data, aes(x = Marital.Status..at.the.time.of.application., fill = as.factor(Performance.Tag))) +
  geom_bar(stat="count") + scale_y_log10()
# Number of Dependents by marital status
ggplot(merge_data, aes(x = Marital.Status..at.the.time.of.application., y = No.of.dependents)) +
  geom_boxplot()
ggplot(merge_data, aes(x = Age_Bin, y = log(Outstanding.Balance))) + geom_boxplot()
ggplot(merge_data, aes(x = Marital.Status..at.the.time.of.application., y = Income, fill = Marital.Status..at.the.time.of.application.)) + geom_boxplot()
# Correlation matrix over selected named columns
ggcorrplot(cor(merge_data[,c("Performance.Tag", "Income","Age", "Avgas.CC.Utilization.in.last.12.months","Outstanding.Balance", "No.of.times.30.DPD.or.worse.in.last.12.months", "Presence.of.open.home.loan", "Presence.of.open.auto.loan", "Total.No.of.Trades")]), lab = T)
# Correlation matrix of financial columns (selected by position)
ggcorrplot(cor(merge_data[, c(2, 7 ,seq(13,29,1))]), lab = T)
#=====================================#
#  Split Data Set into Train and Test #
#=====================================#
# 75/25 split with a fixed seed for reproducibility.
set.seed(129)
sample_size <- floor(0.75 * nrow(merge_data))
train_ind <- sample(seq_len(nrow(merge_data)), size = sample_size)
data_train <- merge_data[train_ind,]
data_test <- merge_data[-train_ind,]
#==================================#
#       Logistic Regression        #
#==================================#
# FIX: the original formula referenced Income_Bin, Avg_CC_Utilization_12_months
# and Outstanding_Balance - none of these objects/columns exist in the data
# (glm would fail with "object not found"). Use the actual column names.
formula <- (Performance.Tag ~ Income + Avgas.CC.Utilization.in.last.12.months + Outstanding.Balance + No.of.times.90.DPD.or.worse.in.last.12.months + No.of.Inquiries.in.last.12.months..excluding.home...auto.loans. + Marital.Status..at.the.time.of.application. + Age_Bin)
lrm_model <- glm(formula, data = data_train, family = "binomial")
summary(lrm_model)
# Classify using the outcome prevalence as the probability cutoff (suits the
# strong class imbalance better than a fixed 0.5), then report accuracy.
data_train$predictions <- predict(lrm_model, type = "response")
data_train$predictions <- ifelse(data_train$predictions > mean(data_train$Performance.Tag), 1, 0)
mean(data_train$Performance.Tag == data_train$predictions)
# Hold-out evaluation on the test split.
data_test$predictions <- predict(lrm_model, newdata = data_test, type = "response")
data_test$predictions <- ifelse(data_test$predictions > mean(data_test$Performance.Tag), 1, 0)
mean(data_test$Performance.Tag == data_test$predictions)
#==================================#
#    Stepwise Regression model     #
#==================================#
# Forward stepwise selection from an intercept-only model up to the full model.
# NOTE(review): Performance.Tag ~ . also offers Application.ID (a row ID) as a
# candidate predictor - confirm whether it should be excluded from the scope.
null_model <- glm(Performance.Tag ~ 1, data = data_train, family = "binomial")
full_model <- glm(Performance.Tag ~ ., data = data_train, family = "binomial")
step_model <- step(null_model, scope = list(lower = null_model, upper = full_model), direction = "forward")
summary(step_model)
step_prob <- predict(step_model, type="response", probability =TRUE)
# FIX: step_prob holds fitted probabilities; comparing them directly to the
# 0/1 outcome made the "accuracy" meaningless. Threshold into class labels
# first (prevalence cutoff, consistent with the logistic-regression section).
step_pred <- ifelse(step_prob > mean(data_train$Performance.Tag), 1, 0)
mean(data_train$Performance.Tag == step_pred)
# ROC/AUC computed on the unthresholded probabilities.
library(pROC)
ROC <- roc(data_train$Performance.Tag, step_prob)
plot(ROC, col = "red")
auc(ROC)
#==================================#
#       Classification Tree        #
#==================================#
# NOTE(review): despite its name, new_formula is a *fitted glm model*, not a
# formula object; it is not used further below.
new_formula <- glm(formula = Performance.Tag ~ No.of.times.30.DPD.or.worse.in.last.12.months +
                     No.of.PL.trades.opened.in.last.12.months + Avgas.CC.Utilization.in.last.12.months +
                     No.of.Inquiries.in.last.12.months..excluding.home...auto.loans. +
                     Total.No.of.Trades + No.of.Inquiries.in.last.6.months..excluding.home...auto.loans. +
                     No.of.months.in.current.company + Outstanding.Balance + No.of.times.90.DPD.or.worse.in.last.12.months +
                     No.of.months.in.current.residence, family = "binomial", data = data_train)
library(rpart)
# FIX: the tree was fitted on data_test; models must be trained on the
# training split (data_train) so the test split stays a true hold-out.
tree_model <- rpart(Performance.Tag ~ Age_Bin + No.of.times.30.DPD.or.worse.in.last.12.months + Income + Outstanding.Balance + Avgas.CC.Utilization.in.last.12.months , data = data_train, method = "class", control= rpart.control(cp =0, maxdepth=10))
summary(tree_model)
library(rpart.plot)
rpart.plot(tree_model)
## Correlation among selected feature columns
# NOTE(review): merge_data_mini is created but never used afterwards.
merge_data_mini <- merge_data[,c(2,3,7,9,5,18,19,27,28)]
correl <- cor(merge_data[,c(2,3,7,18,19,27,28)], method = "pearson", use = "complete.obs")
ggcorrplot(correl, hc.order = T, type = "lower")
| /Pre_A01.R | no_license | YemoAde/ModelComparison-LoanDataset | R | false | false | 10,330 | r | library(dplyr)
library(ggplot2)
library(visdat)
library(reshape2)
library(ggcorrplot)
# --- Load raw inputs ---------------------------------------------------------
# Credit-bureau records and applicant demographics, keyed by Application.ID.
credit <- read.csv("Credit_Bureau.csv", header = T)
demo <- read.csv("demogs.csv", header = T)
# --- Remove duplicates -------------------------------------------------------
# Keep only Application.IDs that occur exactly once in each table, i.e. drop
# ALL rows of any duplicated ID (not just the extra copies).
demo <- demo %>% group_by(Application.ID) %>% filter(n() == 1)
credit <- credit %>% group_by(Application.ID) %>% filter(n() == 1)
# Merge demographics with credit-bureau data on ID + performance tag.
merge_data <- merge(demo, credit, by=c("Application.ID", "Performance.Tag"))
# --- Missing data ------------------------------------------------------------
# Drop rows without an outcome label, then drop any remaining incomplete rows.
merge_data <- merge_data %>% filter(!is.na(Performance.Tag))
merge_data <- na.omit(merge_data)
# Class counts and proportions of the outcome (class-imbalance check).
table(merge_data$Performance.Tag)
merge_data %>% group_by(Performance.Tag) %>% summarise(n()/nrow(merge_data))
# --- Age ---------------------------------------------------------------------
# Visual outlier check, then floor ages below 18 up to 18 (treats negative /
# implausibly small ages as data-entry errors).
merge_data$Age %>% boxplot()
merge_data[(which(merge_data$Age < 18)), ]$Age <- 18
# Bin ages into 5-year buckets via age_bin() defined below.
# Map a numeric age in [18, 66) to a 5-year age-bucket label ("18-20",
# "21-25", ..., "61-65").
#
# Rewritten with cut() so it is vectorized and returns NA (instead of an
# invisible NULL) for out-of-range ages, which keeps downstream
# sapply()/as.factor() calls safe. The odd default (age = 3) is kept for
# interface compatibility.
#
# @param age numeric scalar or vector of ages.
# @return character vector of bucket labels; NA where age is out of range.
age_bin <- function(age=3){
  breaks <- c(18, 21, 26, 31, 36, 41, 46, 51, 56, 61, 66)
  labels <- c("18-20", "21-25", "26-30", "31-35", "36-40",
              "41-45", "46-50", "51-55", "56-60", "61-65")
  # right = FALSE makes the intervals [lower, upper), matching the original
  # (age > 17 && age < 21) -> "18-20" style boundaries.
  as.character(cut(age, breaks = breaks, labels = labels, right = FALSE))
}
# Apply the 5-year binning to every row and store as a factor.
merge_data$Age_Bin <- merge_data$Age %>% sapply(age_bin) %>% as.factor()
summary(merge_data$Age_Bin)
attributes(merge_data$Age_Bin)
#=========================#
#     Marital Status      #
#=========================#
# Count NAs, drop rows with an empty-string status, convert to factor.
merge_data$Marital.Status..at.the.time.of.application. %>% is.na() %>% sum()
merge_data <- merge_data %>% filter(Marital.Status..at.the.time.of.application. != "")
merge_data$Marital.Status..at.the.time.of.application. <- as.factor(merge_data$Marital.Status..at.the.time.of.application.)
attributes(merge_data$Marital.Status..at.the.time.of.application.)
#=========================#
#       Profession        #
#=========================#
# Count NAs and category frequencies; drop empty-string professions.
merge_data$Profession %>% is.na() %>% sum()
merge_data %>% count(Profession)
merge_data <- merge_data %>% filter(Profession != "")
merge_data$Profession <- as.factor(merge_data$Profession)
merge_data %>% count(Profession)
#=========================#
#         Income          #
#=========================#
# Negative incomes are treated as invalid and removed.
merge_data$Income %>% is.na() %>% sum()
merge_data <- merge_data %>% filter(Income >= 0)
merge_data$Income %>% boxplot()
#=========================#
#   Outstanding Balance   #
#=========================#
merge_data$Outstanding.Balance %>% is.na() %>% sum()
merge_data <- merge_data %>% filter(!is.na(Outstanding.Balance))
summary(merge_data$Outstanding.Balance)
merge_data$Outstanding.Balance %>% boxplot()
#================================#
#          Avg CC Util           #
#================================#
# Remove rows with missing average credit-card utilization.
merge_data <- merge_data %>% filter(!is.na(Avgas.CC.Utilization.in.last.12.months))
summary(merge_data$No.of.times.30.DPD.or.worse.in.last.12.months)
# Pairwise Pearson correlations over all numeric columns.
merge_data_correlation <- cor(merge_data %>% select_if(is.numeric), method = "pearson", use = "complete.obs")
merge_data_correlation
ggcorrplot(merge_data_correlation, lab = T, hc.order = T, type = "lower")
#================================#
#             Others             #
#================================#
merge_data$Education <- merge_data$Education %>% as.factor()
merge_data$Gender <- merge_data$Gender %>% as.factor()
#================================#
#   EDA Distrib./Visualization   #
#================================#
## Income
merge_data$Income %>% boxplot()
# Profession vs Income
ggplot(merge_data, aes(Profession, Income, fill=Profession)) + geom_boxplot()
# NOTE(review): the labs() call below is a standalone statement - it is NOT
# added to the plot above (missing trailing '+'), so it has no effect.
labs(x="Age Buckets", y="Frequency in 1000s", fill="Performance Tag", title="Age Bucket wise Performance Tag Frequency")
# Income vs Credit Usage
ggplot(merge_data, aes(Income, Avgas.CC.Utilization.in.last.12.months)) + geom_smooth()
# Income vs Average Debt
ggplot(merge_data %>% group_by(Income) %>% summarize(meanDebt = mean(Outstanding.Balance)), aes(Income, meanDebt)) + geom_smooth()
# Income Distribution
ggplot(merge_data, aes(Income)) + geom_histogram(binwidth = 10)
# Income vs Performance
ggplot(merge_data, aes(x = as.factor(Performance.Tag), y = Income, fill = as.factor(Performance.Tag))) + geom_boxplot()
### Age
# Age distribution over the buckets
ggplot(merge_data, aes(Age_Bin, fill = Age_Bin)) + geom_bar()
# Age vs Income
ggplot(merge_data, aes(x = Age_Bin, y = Income, fill = Age_Bin)) + geom_boxplot()
ggplot(merge_data, aes(x = Age, y = Income)) + geom_smooth()
# Age vs Performance (counts rescaled to thousands on the y axis)
ggplot(merge_data, aes(x = Age_Bin, fill = as.factor(Performance.Tag))) +
  geom_bar() +
  scale_y_continuous(
    name = "Number of Applications in thousands",
    labels = function(x) x / 1000
  )
## Average Use of CC
# Avgas.CC.Utilization.in.last.12.months vs Performance
ggplot(merge_data, aes(x = as.factor(Performance.Tag), y = Avgas.CC.Utilization.in.last.12.months, fill = as.factor(Performance.Tag))) + geom_boxplot()
# Effect of past late payments (30/90 days past due) vs Performance
ggplot(merge_data, aes(Performance.Tag, No.of.times.30.DPD.or.worse.in.last.12.months, fill = as.factor(Performance.Tag))) + geom_boxplot()
ggplot(merge_data, aes(Performance.Tag, No.of.times.90.DPD.or.worse.in.last.12.months, fill = as.factor(Performance.Tag))) + geom_boxplot()
# Effect of credit inquiries
ggplot(merge_data, aes(as.factor(Performance.Tag), No.of.Inquiries.in.last.12.months..excluding.home...auto.loans., fill = as.factor(Performance.Tag))) + geom_boxplot()
# Performance vs Outstanding Balance
merge_data$Outstanding.Balance %>% boxplot()
ggplot(merge_data, aes(x = as.factor(Performance.Tag), y = Outstanding.Balance/1000, fill = as.factor(Performance.Tag))) + geom_boxplot()
# Marital Status (log-scaled counts)
ggplot(merge_data, aes(x = Marital.Status..at.the.time.of.application., fill = as.factor(Performance.Tag))) +
  geom_bar(stat="count") + scale_y_log10()
# Number of Dependents by marital status
ggplot(merge_data, aes(x = Marital.Status..at.the.time.of.application., y = No.of.dependents)) +
  geom_boxplot()
ggplot(merge_data, aes(x = Age_Bin, y = log(Outstanding.Balance))) + geom_boxplot()
ggplot(merge_data, aes(x = Marital.Status..at.the.time.of.application., y = Income, fill = Marital.Status..at.the.time.of.application.)) + geom_boxplot()
# Correlation matrix over selected named columns
ggcorrplot(cor(merge_data[,c("Performance.Tag", "Income","Age", "Avgas.CC.Utilization.in.last.12.months","Outstanding.Balance", "No.of.times.30.DPD.or.worse.in.last.12.months", "Presence.of.open.home.loan", "Presence.of.open.auto.loan", "Total.No.of.Trades")]), lab = T)
# Correlation matrix of financial columns (selected by position)
ggcorrplot(cor(merge_data[, c(2, 7 ,seq(13,29,1))]), lab = T)
#=====================================#
#  Split Data Set into Train and Test #
#=====================================#
# 75/25 split with a fixed seed for reproducibility.
set.seed(129)
sample_size <- floor(0.75 * nrow(merge_data))
train_ind <- sample(seq_len(nrow(merge_data)), size = sample_size)
data_train <- merge_data[train_ind,]
data_test <- merge_data[-train_ind,]
#==================================#
#       Logistic Regression        #
#==================================#
# FIX: the original formula referenced Income_Bin, Avg_CC_Utilization_12_months
# and Outstanding_Balance - none of these objects/columns exist in the data
# (glm would fail with "object not found"). Use the actual column names.
formula <- (Performance.Tag ~ Income + Avgas.CC.Utilization.in.last.12.months + Outstanding.Balance + No.of.times.90.DPD.or.worse.in.last.12.months + No.of.Inquiries.in.last.12.months..excluding.home...auto.loans. + Marital.Status..at.the.time.of.application. + Age_Bin)
lrm_model <- glm(formula, data = data_train, family = "binomial")
summary(lrm_model)
# Classify using the outcome prevalence as the probability cutoff (suits the
# strong class imbalance better than a fixed 0.5), then report accuracy.
data_train$predictions <- predict(lrm_model, type = "response")
data_train$predictions <- ifelse(data_train$predictions > mean(data_train$Performance.Tag), 1, 0)
mean(data_train$Performance.Tag == data_train$predictions)
# Hold-out evaluation on the test split.
data_test$predictions <- predict(lrm_model, newdata = data_test, type = "response")
data_test$predictions <- ifelse(data_test$predictions > mean(data_test$Performance.Tag), 1, 0)
mean(data_test$Performance.Tag == data_test$predictions)
#==================================#
#    Stepwise Regression model     #
#==================================#
# Forward stepwise selection from an intercept-only model up to the full model.
# NOTE(review): Performance.Tag ~ . also offers Application.ID (a row ID) as a
# candidate predictor - confirm whether it should be excluded from the scope.
null_model <- glm(Performance.Tag ~ 1, data = data_train, family = "binomial")
full_model <- glm(Performance.Tag ~ ., data = data_train, family = "binomial")
step_model <- step(null_model, scope = list(lower = null_model, upper = full_model), direction = "forward")
summary(step_model)
step_prob <- predict(step_model, type="response", probability =TRUE)
# FIX: step_prob holds fitted probabilities; comparing them directly to the
# 0/1 outcome made the "accuracy" meaningless. Threshold into class labels
# first (prevalence cutoff, consistent with the logistic-regression section).
step_pred <- ifelse(step_prob > mean(data_train$Performance.Tag), 1, 0)
mean(data_train$Performance.Tag == step_pred)
# ROC/AUC computed on the unthresholded probabilities.
library(pROC)
ROC <- roc(data_train$Performance.Tag, step_prob)
plot(ROC, col = "red")
auc(ROC)
#==================================#
#       Classification Tree        #
#==================================#
# NOTE(review): despite its name, new_formula is a *fitted glm model*, not a
# formula object; it is not used further below.
new_formula <- glm(formula = Performance.Tag ~ No.of.times.30.DPD.or.worse.in.last.12.months +
                     No.of.PL.trades.opened.in.last.12.months + Avgas.CC.Utilization.in.last.12.months +
                     No.of.Inquiries.in.last.12.months..excluding.home...auto.loans. +
                     Total.No.of.Trades + No.of.Inquiries.in.last.6.months..excluding.home...auto.loans. +
                     No.of.months.in.current.company + Outstanding.Balance + No.of.times.90.DPD.or.worse.in.last.12.months +
                     No.of.months.in.current.residence, family = "binomial", data = data_train)
library(rpart)
# FIX: the tree was fitted on data_test; models must be trained on the
# training split (data_train) so the test split stays a true hold-out.
tree_model <- rpart(Performance.Tag ~ Age_Bin + No.of.times.30.DPD.or.worse.in.last.12.months + Income + Outstanding.Balance + Avgas.CC.Utilization.in.last.12.months , data = data_train, method = "class", control= rpart.control(cp =0, maxdepth=10))
summary(tree_model)
library(rpart.plot)
rpart.plot(tree_model)
## Correlation among selected feature columns
# NOTE(review): merge_data_mini is created but never used afterwards.
merge_data_mini <- merge_data[,c(2,3,7,9,5,18,19,27,28)]
correl <- cor(merge_data[,c(2,3,7,18,19,27,28)], method = "pearson", use = "complete.obs")
ggcorrplot(correl, hc.order = T, type = "lower")
|
# NOTE(review): rm(list=ls()) wipes the caller's global environment when this
# script is sourced; generally discouraged in scripts - consider removing.
rm(list=ls())
################################################################################
#################### load packages, functions and result SE object #############
################################################################################
# Load packages. maplet is installed from GitHub:
# devtools::install_github(repo="krumsieklab/maplet@v1.0.1", subdir="maplet")
library(shiny)
library(shinyWidgets)
library(maplet)
library(shinydashboard)
library(shinyjs)
library(tidyverse)
library(DT)
library(plotly)
library(openxlsx)
library(readxl)
library(RColorBrewer)
# Helper functions used throughout the app (get_obj_name,
# get_pathway_annotations, ...).
source("help_functions.R")
# Load the maplet result object; SE.Rdata is expected to contain an object
# named `D`.
load("SE.Rdata")
# Names of all result objects stored in D (drives the UI drop-down menus).
obj_name <- get_obj_name(D)
# Pathway annotation column, taken from the first plots/stats entry in D
# (i.e. the corresponding stat_bar plot's group_col argument).
pwvar <- mtm_res_get_entries(D, c("plots", "stats"))[[1]]$args$group_col
# Significance threshold could be extracted from the same stat_bar plot;
# currently unused, kept for reference.
#alpha <- mtm_res_get_entries(D, c("plots", "stats"))[[1]]$args$feat_filter[[3]]
# Pathway annotations for the selected annotation column.
rd <- get_pathway_annotations(D, pwvar)
################################################################################
########################## Define UI for Shiny application #####################
################################################################################
ui <- fluidPage(
# set appearance customization -------------------------------------------------
theme = "bootstrap.css",
includeCSS("www/style.css"),
setBackgroundColor("#FFFFFF"),# set canvas background color
div(style = "padding: 1px 0px; width: '100%'",
titlePanel(
title = "",
windowTitle = "Maplet"
)
),
# remove shiny "red" warning messages on GUI
tags$style(type="text/css",
".shiny-output-error { visibility: hidden; }",
".shiny-output-error:before { visibility: hidden; }"
),
# adjust tab height
tags$head(
tags$style(HTML(' .navbar {
height: 80px;
min-height:25px !important;
}
.navbar-nav > li > a, .navbar-brand {
padding-top:1px !important;
padding-bottom:1px !important;
height: 80px;
}'))),
navbarPage(
# embed Maplet logo and title
title = div(img(src='logo.png',
style="float:left; margin-top: 5px; padding-right:20px;padding-bottom:5px",
height = 60),
"Krumsiek Lab",
tags$script(HTML("var header = $('.navbar > .container-fluid');header.append('<div style=\"float:right\"><a href=\"https://weill.cornell.edu\"><img src=\"wcm2.png\" alt=\"logo\" style=\"float:right;height:50px;margin-top: 10px; padding-right:1px; \"> </a></div>');console.log(header)")),
windowTitle = "Maplet"),
# sticky tabs while scrolling main panel
position = c("fixed-top"),
# Define layout of Module-Real-Time Pipeline(coded as mod6) ----------------------------------------------------
tabPanel(HTML(paste("Real-Time", "Pipeline", sep = "<br/>")),
# Sidebar layout with input and output definitions ----
dashboardPage(
dashboardHeader(disable = TRUE),
dashboardSidebar(disable = TRUE),
dashboardBody(
sidebarLayout(
# Sidebar panel for inputs ----
sidebarPanel(
id = "mod6_panel1",
style = "margin-left: -25px; margin-top: 45px; margin-bottom: 5px; width: 80%; ",
tags$p(
HTML("<b>Real-Time Pipeline</b> starts with original data, creates a pipeline and download it to local."
)),
tags$p(
HTML("Pipeline is constrained to run in order of <b>Data Loading->Preprocessing->Differential Analysis</b> and no section should be skipped."
)),
tags$p(
HTML("The result SE object is dependent on the <b>instant parameters</b>."
)),
# Input: Select a file ----
fileInput("file1", "Uploading File",
multiple = FALSE,
accept = c(".xlsx"),
width = "300px"),
# Input: Checkbox if file has header ----
checkboxInput("header", "Header", TRUE),
tags$hr(),
box(solidHeader = T, collapsible = T, collapsed = TRUE,
title="Sheets for Dimensions", width = "220px",
checkboxInput("mod6_assay_in_row", "Samples in rows?", FALSE),
tags$p(HTML("Assay sheet:")),
uiOutput("mod6_assay_sheet"),
tags$p(HTML("rowData sheet:")),
uiOutput("mod6_rowdata_sheet"),
tags$p(HTML("colData sheet:")),
uiOutput("mod6_coldata_sheet"),
tags$p(HTML("Click to show the original data, but this investigation is not necessary for pipeline.")),
actionButton("mod6_go", "Investigate")
),
tags$hr(),
box(solidHeader = T, collapsible = T, collapsed = TRUE,
title="Data Loading", width = "220px",
tags$p(HTML("ID column in assay:")),
uiOutput("mod6_assay_id_column"),
tags$p(HTML("ID column in rowData:")),
uiOutput("mod6_rowdata_id_column"),
tags$p(HTML("ID column in colData:")),
uiOutput("mod6_coldata_id_column"),
tags$p(HTML("Run to see log text of data loading.")),
actionButton("mod6_go_load", "Run", width = "110px")
),
tags$hr(),
box(solidHeader = T, collapsible = T, collapsed = TRUE,
title="Preprocessing", width = "220px",
tags$p(HTML("Max % missingness per feature:")),
numericInput("mod6_filter_feat_max", label = NULL,
value = 1,
min = 0,
max = 1,
step = 0.1,
width = "220px"),
tags$p(HTML("Max % missingness per feature (normalization):")),
numericInput("mod6_feat_max_norm", label = NULL,
value = 1,
min = 0,
max = 1,
step = 0.1,
width = "220px"),
tags$p(HTML("Max % missingness per sample:")),
numericInput("mod6_filter_sample_max", label = NULL,
value = 1,
min = 0,
max = 1,
step = 0.1,
width = "220px"),
tags$p(HTML("Sample coloring column:")),
uiOutput("mod6_pre_sample_color_column"),
tags$p(HTML("Batch column:")),
uiOutput("mod6_pre_batch_column"),
tags$p(HTML("PCA/UMAP coloring column:")),
uiOutput("mod6_pre_pca_color_column"),
tags$p(HTML("Heatmap annotation column:")),
uiOutput("mod6_pre_heatmap_anno_column"),
tags$p(HTML("Heatmap annotation row:")),
uiOutput("mod6_pre_heatmap_anno_row"),
tags$p(HTML("Run to see log text of data loading and preprocessing. This step may cost a few seconds to run.")),
actionButton("mod6_go_preprocess", "Run", width = "110px")
),
tags$hr(),
box(solidHeader = T, collapsible = T, collapsed = TRUE,
title="Differential Analysis", width = "220px",
tags$p(HTML("Outcome variable:")),
uiOutput("mod6_outcome"),
checkboxInput("mod6_outcome_binary", "Binary outcome?", FALSE),
tags$p(HTML("Type of analysis:")),
selectInput("mod6_analysis_type", label = NULL,
width = "220px",
choices = c("lm","pearson","spearman","kendall"),
selected = "lm"),
tags$p(HTML("Multiple testing correction:")),
selectInput("mod6_mult_test_method", label = NULL,
width = "220px",
choices = c("BH","bonferroni","BY"),
selected = "BH"),
tags$p(HTML("Significance threshold:")),
numericInput("mod6_sig_threshold", label = NULL,
value = 0.05,
min = 0,
max = 1,
step = 0.01,
width = "220px"),
tags$p(HTML("Pathway aggregation in barplot:")),
uiOutput("mod6_group_col_barplot"),
tags$p(HTML("Barplot coloring column:")),
uiOutput("mod6_color_col_barplot"),
tags$p(HTML("Run to see log text of data loading, preprocessing and differential analysis. This step may cost a few seconds to run.")),
actionButton("mod6_go_differ", "Run", width = "110px")
)
),
# Main panel for displaying outputs ----
mainPanel(
id = "mod6_panel2",
style = "overflow-y: auto; max-height: 100vh; position: absolute; left: 28%",
br(),
br(),
br(),
# Output: Data file ----
tags$p(HTML("Downloading SE.Rdata may cost more than one minute. Please wait for the prompt.")),
downloadButton("download_se", "Download result SE .Rdata"),
br(),
br(),
uiOutput("mod6_main_panel")
)
)
)
)
),
# Define layout of Module-Annotations Explorer(coded as mod5) ----------------------------------------------------
tabPanel(HTML(paste("Annotations", "Explorer", sep = "<br/>")),
sidebarLayout(
sidebarPanel(id = "mod5_panel1",
# sidebar autoscroll with main panel
style = "margin-left: -25px; margin-top: 45px; margin-bottom: 5px; position:fixed; width: 20%; height: 100%;",
tags$p(
HTML("<b>Annotations Explorer</b> creates tables, distribution plots, or other graphics to explore the SE object."
)),
radioButtons("mod5_dimension", "Select one dimension:",
choices = list("Column Data" = "col",
"Row Data" = "row")
),
br(),
uiOutput("mod5_dimension_ui"),
br(),
tags$p(
HTML("<b>Hint:<br></b>Outputs are delayed untill you click 'UPDATE' button after selection."
)),
br(),
# delay the output
actionButton("mod5_go", "Update")
),
mainPanel(id = "mod5_panel2",
br(),
br(),
br(),
style = "overflow-y: auto; position: absolute; left: 25%",
uiOutput("mod5_output_ui")
)
)
),
# Define layout of Module-2D Projection(coded as mod3) ----------------------------------------------------
tabPanel(HTML(paste("2D", "Projection", sep = "<br/>")),
sidebarLayout(
sidebarPanel(id = "mod3_panel1",
# sidebar autoscroll with main panel
style = "margin-left: -25px; margin-top: 45px; margin-bottom: 5px; position:fixed; width: 20%; height: 100%;",
tags$p(
HTML("<b>2D Projection</b> generates an interactive 2D projection of PCA/UMAP."
)),
tags$p(
HTML("It displays a drop-down menu of all colData columns for coloring."
)),
# select one plot type
radioButtons("mod3_select_plot", "Select one plot type:",
choices = list("PCA" = "pca",
"UMAP" = "umap")
),
# function argument
uiOutput("mod3_pca_data"),
# select coloring colData and factor it
uiOutput("mod3_plot_argument"),
br(),
tags$p(
HTML("<b>Hint:<br></b>Outputs are delayed untill you click 'UPDATE' button after selection."
)),
br(),
# delay the output
actionButton("mod3_go", "Update")
),
mainPanel(id = "mod3_panel2",
br(),
br(),
br(),
style = "overflow-y: auto; position: absolute; left: 25%",
# plotly
downloadButton("mod3_download_plotly", "download plotly"),
plotlyOutput('mod3_plot', height = 700)
)
)
),
# Define layout of Module-All Results Explorer(coded as mod1) ----------------------------------------------------
tabPanel(HTML(paste("All Results", "Explorer", sep = "<br/>")),
sidebarLayout(
sidebarPanel(id = "mod1_panel1",
# sidebar auto-scrolling with main panel
style = "margin-left: -25px; margin-top: 45px; margin-bottom: 5px; position:fixed; width: 20%; height: 100%;",
tags$p(
HTML("<b>All Results Explorer</b> extracts all the result objects one at a time."
)),
tags$p(
HTML("Users can assess results in a drop-down menu that offers a list of a stat_name and a plot type (e.g. missingness, pval)."
)),
br(),
# select plot type or stats table
radioButtons("mod1_radio", "Select output type:",
choices = list("Plot" = "plots",
"Table" = "stats"),
selected = "stats"
),
br(),
# define one UI object to select stat_name
uiOutput("mod1_select_statname_ui"),
br(),
# define one UI object to select output type
uiOutput("mod1_select_object_ui"),
br(),
tags$p(
HTML("<b>Hint:<br></b>Outputs are delayed untill you click 'UPDATE' button after selection. Some plots such as box plot or multiple plots may cost dozens of seconds to show up."
)),
# delay the output
actionButton("mod1_go", "Update")
),
mainPanel(id = "mod1_panel2",
# scrollable panel
style = "overflow-y: auto; position: absolute; left: 25%",
br(),
br(),
br(),
# dynamic number of plots
uiOutput('mod1_output')
)
)
),
# Define layout of Module-Feature Results Explorer(coded as mod4) --------------------------------------------------
tabPanel(HTML(paste("Feature Results", "Explorer", sep = "<br/>")),
sidebarLayout(
sidebarPanel(
id = "mod4_panel1",
# sidebar autoscroll with main panel
style = "margin-left: -25px; margin-top: 45px; margin-bottom: 5px; position:fixed; width: 20%; height: 100%;",
tags$p(
HTML(
"<b>Feature Results Explorer</b> requires collection on all statistical results in a table given one metabolite name."
)
),
tags$p(
HTML(
"When clicking on one row, it should display interactive plots following the same orders in Module 2."
)
),
# select one metabolite
selectInput(
"mod4_metabolite",
"Select one metabolite:",
width = "220px",
choices = arrange(mtm_res_get_entries(D, c("stats", "univ"))[[1]]$output$table, var)$var,
selected = ""
),
br(),
checkboxInput("mod4.categorical",
"Treat as categorical",
value = FALSE
),
br(),
tags$p(
HTML("<b>Hint:<br></b>Outputs are delayed untill you click 'UPDATE' button after selection."
)),
# delay the output
actionButton("mod4_go", "Update")
),
mainPanel(
id = "mod4_panel2",
br(),
br(),
br(),
style = "overflow-y: auto; position: absolute; left: 25%",
# stats table
dataTableOutput('mod4_table'),
br(),
br(),
# volcano plotly
uiOutput("mod4.p1"),
br(),
br(),
# box/scatter plotly
uiOutput("mod4.p.ui"),
uiOutput("mod4.p2")
)
)
),
# Define layout of Module-Pathway Results Explorer(coded as mod2) ----------------------------------------------------
tabPanel(HTML(paste("Pathway Results", "Explorer", sep = "<br/>")),
sidebarLayout(
sidebarPanel(
id = "mod2_panel1",
# sidebar auto-scrolling with main panel
style = "margin-left: -25px; margin-top: 45px; margin-bottom: 5px; position:fixed; width: 20%; height: 100%;",
tags$p(
HTML(
"<b>Pathway Results Explorer:</b> Displays a series of interactive plots at different granularities given a SE and a statname."
)
),
tags$p(
HTML(
"StatsBar plot -> Equalizer/Volcano plot -> Box/Scatter plot."
)
),
br(),
selectInput(
"mod2.stat",
"Select one stat name:",
choices = distinct(obj_name[obj_name$V1 == "plots" &
obj_name$V2 == "stats",],
stat_name)$stat_name
),
br(),
radioButtons(
"mod2.plot1",
"Select plot1 type:",
choices = list("Bar" = "bar",
"Not Bar plot" = "null"),
selected = "bar"
),
br(),
radioButtons(
"mod2.plot2",
"Select plot2 type:",
choices = list("Equalizer" = "equalizer",
"Volcano" = "volcano"),
selected = "volcano"
),
tags$hr(),
radioButtons(
"mod2.plot3",
"Select plot3 type:",
choices = list("Box" = "box",
"Scatter" = "scatter"),
selected = "scatter"
),
checkboxInput("mod2.categorical",
"Treat as categorical",
value = FALSE
),
br(),
tags$p(
HTML("<b>Hint:<br></b>Outputs are delayed untill you click 'UPDATE' button after selection."
)),
actionButton("mod2_go", "Update")
),
mainPanel(
id = "mod2_panel2",
style = "overflow-y: auto; position: absolute; left: 25%",
br(),
br(),
br(),
# Bar plot or not
uiOutput("mod2.p1"),
br(),
# equalizer or volcano
uiOutput("mod2.p2"),
br(),
# box or scatter
uiOutput("mod2.p3"),
br()
)
))
)
)
################################################################################
################ Define server logic required to draw outputs ##################
################################################################################
server <- function(input, output) {
# Define rendering logic of control widgets in Module-Real-Time Pipeline (coded as mod6)----------------------
# control widget of selecting file
  # Sheet pickers for the uploaded Excel workbook: one selectInput per SE
  # component (assay / rowData / colData), each listing all sheet names of the
  # uploaded file. Rendered only after a file is chosen (req(input$file1)).
  output$mod6_assay_sheet <- renderUI({
    req(input$file1)
    selectInput("assay_sheet", label = NULL,
                width = "220px",
                choices = getSheetNames(as.character(input$file1$datapath))
    )
  })
  # control widget of dimensions
  # Reactive data frame read from the sheet currently selected for the assay.
  df_assay <- reactive({
    read_excel(as.character(input$file1$datapath),
               col_names = input$header,
               sheet=input$assay_sheet)
  })
  output$mod6_rowdata_sheet <- renderUI({
    req(input$file1)
    selectInput("rowdata_sheet", label = NULL,
                width = "220px",
                choices = getSheetNames(as.character(input$file1$datapath))
    )
  })
  # Reactive data frame read from the sheet currently selected for rowData.
  df_rowdata <- reactive({
    read_excel(as.character(input$file1$datapath),
               col_names = input$header,
               sheet=input$rowdata_sheet)
  })
  output$mod6_coldata_sheet <- renderUI({
    req(input$file1)
    selectInput("coldata_sheet", label = NULL,
                width = "220px",
                choices = getSheetNames(as.character(input$file1$datapath))
    )
  })
  # Reactive data frame read from the sheet currently selected for colData.
  df_coldata <- reactive({
    read_excel(as.character(input$file1$datapath),
               col_names = input$header,
               sheet=input$coldata_sheet)
  })
  # control widget of data loading
  # ID-column pickers: each lists the column names of the corresponding
  # reactive sheet so the user can choose the sample/feature identifier column.
  output$mod6_assay_id_column <- renderUI({
    selectInput("assay_id_column", label = NULL,
                width = "220px",
                choices = colnames(df_assay())
    )
  })
  output$mod6_rowdata_id_column <- renderUI({
    selectInput("rowdata_id_column", label = NULL,
                width = "220px",
                choices = colnames(df_rowdata())
    )
  })
  output$mod6_coldata_id_column <- renderUI({
    selectInput("coldata_id_column", label = NULL,
                width = "220px",
                choices = colnames(df_coldata())
    )
  })
  # control widget of preprocessing
  # Column pickers used by the preprocessing step: coloring/batch columns come
  # from colData, heatmap row annotations from rowData. The multiple=TRUE
  # pickers allow zero selections (NULL), which downstream code must tolerate.
  output$mod6_pre_sample_color_column <- renderUI({
    selectInput("pre_sample_color_column", label = NULL,
                width = "220px",
                choices = colnames(df_coldata())
    )
  })
  output$mod6_pre_batch_column <- renderUI({
    selectInput("pre_batch_column", label = NULL,
                width = "220px",
                selected=NULL,
                choices = colnames(df_coldata())
    )
  })
  output$mod6_pre_pca_color_column <- renderUI({
    selectInput("pre_pca_color_column", label = NULL,
                width = "220px",
                multiple=TRUE,
                selected=NULL,
                choices = colnames(df_coldata())
    )
  })
  output$mod6_pre_heatmap_anno_column <- renderUI({
    selectInput("pre_heatmap_anno_column", label = NULL,
                width = "220px",
                multiple=TRUE,
                selected=NULL,
                choices = colnames(df_coldata())
    )
  })
  output$mod6_pre_heatmap_anno_row <- renderUI({
    selectInput("pre_heatmap_anno_row", label = NULL,
                width = "220px",
                multiple=TRUE,
                selected=NULL,
                choices = colnames(df_rowdata())
    )
  })
  # control widget of differential analysis
  # Outcome column comes from colData; the barplot aggregation/coloring
  # columns come from rowData (feature annotations).
  output$mod6_outcome <- renderUI({
    selectInput("outcome", label = NULL,
                width = "220px",
                choices = colnames(df_coldata())
    )
  })
  output$mod6_group_col_barplot <- renderUI({
    selectInput("group_col_barplot", label = NULL,
                width = "220px",
                selected=NULL,
                choices = colnames(df_rowdata())
    )
  })
  output$mod6_color_col_barplot <- renderUI({
    selectInput("color_col_barplot", label = NULL,
                width = "220px",
                selected=NULL,
                choices = colnames(df_rowdata())
    )
  })
  # Define rendering logic of outputs in Module-Real-Time Pipeline(coded as mod6) ------------------------------
  # record the file path of selected file
  # Snapshot of the uploaded file's temp path, taken only when the user
  # clicks the mod6 "Investigate" button (eventReactive delays evaluation).
  mod6_filepath <-
    eventReactive(input$mod6_go, ## delayed output
                  {c(input$file1$datapath)
                  })
  # print table when clicking the mod6 "Investigate" button: swaps the main
  # panel content to the three raw-data tables defined below.
  observeEvent(input$mod6_go,{
    output$mod6_main_panel <- renderUI({
      list(dataTableOutput("mod6_assay"),
           br(),br(),
           dataTableOutput("mod6_rowdata"),
           br(),br(),
           dataTableOutput("mod6_coldata"))
    })
  })
  # render logic of the table
  # Three preview tables of the raw uploaded workbook (assay / rowData /
  # colData), each re-reading its sheet from the snapshotted file path.
  output$mod6_assay <- renderDataTable({
    table <- read_excel(as.character(mod6_filepath()),
                        col_names = input$header,
                        sheet=input$assay_sheet)
    datatable(table,
              caption="Original Assay Data",
              options = list(
                # limit number of rows
                pageLength = 10,
                lengthMenu = c(10, 20, 50),
                autoWidth = TRUE
              ))
  })
  output$mod6_rowdata <- renderDataTable({
    table <- read_excel(as.character(mod6_filepath()),
                        col_names = input$header,
                        sheet=input$rowdata_sheet)
    datatable(table,
              caption="Original rowData",
              options = list(
                # limit number of rows
                pageLength = 10,
                lengthMenu = c(10, 20, 50),
                autoWidth = TRUE
              ))
  })
  output$mod6_coldata <- renderDataTable({
    table <- read_excel(as.character(mod6_filepath()),
                        col_names = input$header,
                        sheet=input$coldata_sheet)
    datatable(table,
              caption="Original colData",
              options = list(
                # limit number of rows
                pageLength = 10,
                lengthMenu = c(10, 20, 50),
                autoWidth = TRUE
              ))
  })
  # Each pipeline-stage "Run" button swaps the main panel to the matching
  # verbatim log output (rendered further below). The wrapped pre-style keeps
  # long log lines readable.
  observeEvent(input$mod6_go_load,{
    # define main panel for loading section
    output$mod6_main_panel <- renderUI({
      tagAppendAttributes(verbatimTextOutput("log_load"),
                          style="white-space:pre-wrap;")
    })
  })
  observeEvent(input$mod6_go_preprocess,{
    # define main panel for preprocessing section
    output$mod6_main_panel <- renderUI({
      tagAppendAttributes(verbatimTextOutput("log_preprocess"),
                          style="white-space:pre-wrap;")
    })
  })
  observeEvent(input$mod6_go_differ,{
    # define main panel of differential analysis
    output$mod6_main_panel <- renderUI({
      tagAppendAttributes(verbatimTextOutput("log_differ"),
                          style="white-space:pre-wrap;")
    })
  })
  # get loading SE
  # Builds the SummarizedExperiment from the uploaded workbook: assay sheet
  # first, then feature and sample annotations joined by the user-chosen ID
  # columns. Cached by reactive(); invalidated when any referenced input changes.
  D_load <- reactive({
    ## loading D
    file_data <- as.character(input$file1$datapath)
    D <-
      mt_load_xls(file=file_data,
                  sheet=input$assay_sheet,
                  samples_in_row=input$mod6_assay_in_row,
                  id_col=input$assay_id_column) %>%
      mt_anno_xls(file=file_data,
                  sheet=input$rowdata_sheet,
                  anno_type="features",
                  anno_id_col=input$rowdata_id_column,
                  data_id_col = "name") %>%
      mt_anno_xls(file=file_data,
                  sheet=input$coldata_sheet,
                  anno_type="samples",
                  anno_id_col =input$coldata_id_column,
                  data_id_col ="sample") %>%
      mt_reporting_data() %>%
      {.}
    ## return D
    D
  })
  # get preprocessing SE
  # Runs the preprocessing pipeline on the loaded SE: missingness filtering,
  # optional batch correction, quotient normalization, log transform, KNN
  # imputation, outlier detection, plus QC plots (boxplots, PCA/UMAP, heatmap).
  D_preprocess <- reactive({
    ## preprocessing D
    D <- D_load() %>%
      mt_reporting_heading(heading = "Preprocessing", lvl=1) %>%
      mt_reporting_heading(heading = "Filtering", lvl = 2) %>%
      mt_plots_missingness(feat_max=input$mod6_filter_feat_max,samp_max = input$mod6_filter_sample_max) %>%
      mt_pre_filter_missingness(feat_max = input$mod6_filter_feat_max, samp_max = input$mod6_filter_sample_max) %>%
      mt_plots_missingness(feat_max=input$mod6_filter_feat_max, samp_max = input$mod6_filter_sample_max) %>%
      mt_anno_missingness(anno_type = "samples", out_col = "missing") %>%
      mt_anno_missingness(anno_type = "features", out_col = "missing") %>%
      mt_reporting_heading(heading = "Normalization", lvl = 2) %>%
      mt_plots_sample_boxplot(color=!!sym(input$pre_sample_color_column), title = "Original", plot_logged = T) %>%
      {.}
    # batch correction only when a batch column was selected
    if(!is.null(input$pre_batch_column)){
      D %<>%
        mt_pre_batch_median(batch_col = input$pre_batch_column)
    }
    D <- D %>%
      mt_plots_sample_boxplot(color=!!sym(input$pre_sample_color_column), title = "After batch correction", plot_logged = T) %>%
      mt_pre_norm_quot(feat_max = input$mod6_feat_max_norm) %>%
      mt_plots_dilution_factor(in_col=input$pre_sample_color_column) %>%
      mt_plots_sample_boxplot(color=!!sym(input$pre_sample_color_column), title = "After normalization", plot_logged = T) %>%
      mt_pre_trans_log() %>%
      mt_pre_impute_knn() %>%
      mt_plots_sample_boxplot(color=!!sym(input$pre_sample_color_column), title = "After imputation", plot_logged = T) %>%
      mt_pre_outlier_detection_univariate() %>%
      mt_reporting_data() %>%
      mt_reporting_heading(heading = "Global Statistics", lvl = 1) %>%
      {.}
    ## add PCA/UMAP plots
    # NOTE(review): `<<-` inside lapply mutates the enclosing D for each
    # selected coloring column; a Reduce/for loop would be clearer — behavior
    # kept as-is here.
    lapply(input$pre_pca_color_column, function(x){
      D <<- D %>%
        mt_plots_pca(scale_data = T, title = sprintf("scaled PCA - %s",x), color=!!sym(x), size=2.5, ggadd=scale_size_identity()) %>%
        mt_plots_umap(scale_data = T, title = sprintf("scaled UMAP - %s",x), color=!!sym(x), size=2.5, ggadd=scale_size_identity()) %>%
        {.}
    }) %>% invisible
    ## add heatmap
    D %<>%
      mt_plots_heatmap(scale_data = T, annotation_col = input$pre_heatmap_anno_column, annotation_row = input$pre_heatmap_anno_row,
                       clustering_method = "ward.D2", fontsize = 5, cutree_rows = 3, cutree_cols = 3, color=gplots::bluered(101)) %>%
      {.}
    ## return D
    D
  })
  ## get differential analysis SE
  # Final pipeline stage: applies the user-configured differential analysis
  # (outcome, test type, multiple-testing correction, significance threshold,
  # barplot grouping/coloring) on top of the preprocessed SE.
  D_differ <- reactive({
    # Differential analysis D
    D <- D_preprocess() %>%
      mt_reporting_heading(heading = "Statistical Analysis", lvl = 1) %>%
      diff_analysis_func(var=input$outcome,
                         binary=input$mod6_outcome_binary,
                         analysis_type=input$mod6_analysis_type,
                         mult_test_method=input$mod6_mult_test_method,
                         alpha=input$mod6_sig_threshold,
                         group_col_barplot=input$group_col_barplot,
                         color_col_barplot=input$color_col_barplot) %>%
      {.}
    ## return D
    D
  })
  # render logic of the log text of data loading
  # Each log output prints the cumulative pipeline log up to its stage; later
  # stages concatenate the logs of all preceding stages.
  output$log_load <- renderPrint({
    get_log_text(D_load())
  })
  # render logic of the log text of preprocessing
  output$log_preprocess <- renderPrint({
    # loading log
    text_load <- get_log_text(D_load())
    # preprocessing log
    text_preprocess <- get_log_text(D_preprocess())
    # paste log text
    str <- paste(text_load, text_preprocess, sep = "\n")
    cat(str)
  })
  # render logic of the log text of differential analysis
  output$log_differ <- renderPrint({
    # loading log
    text_load <- get_log_text(D_load())
    # preprocessing log
    text_preprocess <- get_log_text(D_preprocess())
    # differential analysis log
    text_differ <- get_log_text(D_differ())
    # paste log text
    str <- paste(text_load, text_preprocess, text_differ, sep = "\n")
    cat(str)
  })
# download SE button
# https://mastering-shiny.org/action-transfer.html
output$download_se <- downloadHandler(
filename = function() {
paste0("SE_", Sys.Date(), ".Rdata")
},
content = function(fname) {
## loading D
file_data <- as.character(input$file1$datapath)
D <-
mt_load_xls(file=file_data,
sheet=input$assay_sheet,
samples_in_row=input$mod6_assay_in_row,
id_col=input$assay_id_column) %>%
mt_anno_xls(file=file_data,
sheet=input$rowdata_sheet,
anno_type="features",
anno_id_col=input$rowdata_id_column,
data_id_col = "name") %>%
mt_anno_xls(file=file_data,
sheet=input$coldata_sheet,
anno_type="samples",
anno_id_col =input$coldata_id_column,
data_id_col ="sample") %>%
mt_reporting_data() %>%
{.}
## preprocessing D
D <- D %>%
mt_reporting_heading(heading = "Preprocessing", lvl=1) %>%
mt_reporting_heading(heading = "Filtering", lvl = 2) %>%
mt_plots_missingness(feat_max=input$mod6_filter_feat_max,samp_max = input$mod6_filter_sample_max) %>%
mt_pre_filter_missingness(feat_max = input$mod6_filter_feat_max, samp_max = input$mod6_filter_sample_max) %>%
mt_plots_missingness(feat_max=input$mod6_filter_feat_max, samp_max = input$mod6_filter_sample_max) %>%
mt_anno_missingness(anno_type = "samples", out_col = "missing") %>%
mt_anno_missingness(anno_type = "features", out_col = "missing") %>%
mt_reporting_heading(heading = "Normalization", lvl = 2) %>%
mt_plots_sample_boxplot(color=!!sym(input$pre_sample_color_column), title = "Original", plot_logged = T) %>%
{.}
if(!is.null(input$pre_batch_column)){
D %<>%
mt_pre_batch_median(batch_col = input$pre_batch_column)
}
D <- D %>%
mt_plots_sample_boxplot(color=!!sym(input$pre_sample_color_column), title = "After batch correction", plot_logged = T) %>%
mt_pre_norm_quot(feat_max = input$mod6_feat_max_norm) %>%
mt_plots_dilution_factor(in_col=input$pre_sample_color_column) %>%
mt_plots_sample_boxplot(color=!!sym(input$pre_sample_color_column), title = "After normalization", plot_logged = T) %>%
mt_pre_trans_log() %>%
mt_pre_impute_knn() %>%
mt_plots_sample_boxplot(color=!!sym(input$pre_sample_color_column), title = "After imputation", plot_logged = T) %>%
mt_pre_outlier_detection_univariate() %>%
mt_reporting_data() %>%
mt_reporting_heading(heading = "Global Statistics", lvl = 1) %>%
{.}
## add PCA/UMAP plots
lapply(input$pre_pca_color_column, function(x){
D <<- D %>%
mt_plots_pca(scale_data = T, title = sprintf("scaled PCA - %s",x), color=!!sym(x), size=2.5, ggadd=scale_size_identity()) %>%
mt_plots_umap(scale_data = T, title = sprintf("scaled UMAP - %s",x), color=!!sym(x), size=2.5, ggadd=scale_size_identity()) %>%
{.}
}) %>% invisible
## add heatmap
D %<>%
mt_plots_heatmap(scale_data = T, annotation_col = input$pre_heatmap_anno_column, annotation_row = input$pre_heatmap_anno_row,
clustering_method = "ward.D2", fontsize = 5, cutree_rows = 3, cutree_cols = 3, color=gplots::bluered(101)) %>%
{.}
# Differential analysis D
D <- D %>%
mt_reporting_heading(heading = "Statistical Analysis", lvl = 1) %>%
diff_analysis_func(var=input$outcome,
binary=input$mod6_outcome_binary,
analysis_type=input$mod6_analysis_type,
mult_test_method=input$mod6_mult_test_method,
alpha=input$mod6_sig_threshold,
group_col_barplot=input$group_col_barplot,
color_col_barplot=input$color_col_barplot) %>%
{.}
# write Rdata to local
save(D, file=fname)
}
)
  # Define rendering logic of control widgets in Module-Annotations Explorer(coded as mod5) ----------------------
  # Sidebar widgets switch with the chosen dimension: colData gets two
  # variable pickers (each with a continuous/categorical checkbox) plus a
  # hover-text picker; rowData gets a single plot selector.
  output$mod5_dimension_ui <- renderUI({
    switch(input$mod5_dimension,
           "col"=list(selectInput("mod5_var1_select",
                                  "Select the primary variable:",
                                  choices = names(colData(D)),
                                  selected = "Age",
                                  width = "220px"),
                      checkboxInput("mod5_var1_type",
                                    "Continuous",
                                    value = TRUE),
                      tags$hr(),
                      selectInput("mod5_var2_select",
                                  "Select the secondary variable:",
                                  choices = names(colData(D)),
                                  selected = "sample",
                                  width = "220px"),
                      checkboxInput("mod5_var2_type",
                                    "Continuous",
                                    value = TRUE),
                      tags$hr(),
                      selectInput("mod5_select_hover",
                                  "Select hovering text:",
                                  choices = names(colData(D)),
                                  selected = names(colData(D))[1],
                                  width = "220px",
                                  multiple=TRUE)
           ),
           "row"=selectInput("mod5_rowdata_plot",
                             "Select one plot for row data:",
                             choices = c("SUPER_PATHWAY"),
                             width = "220px")
    )
  })
  # Main-panel layout for mod5: a single plotly for colData; for rowData, two
  # side-by-side plotlies (super-pathway pie + drill-down sub-pathway pie),
  # each with its own download button.
  output$mod5_output_ui <- renderUI({
    switch(input$mod5_dimension,
           "col"=list(downloadButton("mod5_download_plotly", "download plotly"),
                      plotlyOutput('mod5_plot', height = 600)),
           "row"=list(fluidRow(
             splitLayout(style = "border: 1px", cellWidths = c(1000, 1000),
                         downloadButton("mod5_download_plotly", "download plotly"),
                         downloadButton("mod5_download_plotly2", "download plotly")
             )
           ),
           fluidRow(
             splitLayout(style = "height:600px; border: 1px", cellWidths = c(1000, 1000),
                         plotlyOutput('mod5_plot', height = 600),
                         plotlyOutput('mod5_plot2', height = 600)
             )
           ))
    )
  })
  # Define rendering logic of outputs in Module-Annotations Explorer(coded as mod5) ------------------------------
  # Snapshot of the mod5 inputs, taken only on "Update". NOTE(review): c()
  # coerces the logical checkbox values to character ("TRUE"/"FALSE"), and
  # drops NULL elements — so the indices below assume all five inputs exist;
  # verify mod5_rowdata_plot has been rendered before relying on [5].
  mod5_input <- eventReactive(input$mod5_go,{
    c(input$mod5_var1_select,
      input$mod5_var1_type,
      input$mod5_var2_select,
      input$mod5_var2_type,
      input$mod5_rowdata_plot)
  })
  # Dispatches on dimension and the two continuous/categorical flags:
  # scatter (both continuous), boxplot (mixed), barplot (both categorical);
  # rowData renders a super-pathway pie whose clicks feed mod5_plot2.
  # The comparisons `mod5_input()[2]==TRUE` compare the string "TRUE" against
  # logical TRUE; R coerces TRUE to "TRUE", so this works as intended.
  output$mod5_plot <- renderPlotly({
    session_store$mod5_plotly <- switch(input$mod5_dimension,
                                        "col"=
                                          if(mod5_input()[2]==TRUE & mod5_input()[4]==TRUE){
                                            mod5_scatter(D, x=mod5_input()[3],
                                                         y=mod5_input()[1],
                                                         hover = input$mod5_select_hover)
                                          } else if(mod5_input()[2]==TRUE & mod5_input()[4]==FALSE) {
                                            mod5_boxplot(D, x=mod5_input()[3],
                                                         x_cate = FALSE,
                                                         y=mod5_input()[1],
                                                         y_cate = TRUE,
                                                         fill=mod5_input()[3],
                                                         hover=input$mod5_select_hover)
                                          } else if(mod5_input()[2]==FALSE & mod5_input()[4]==TRUE) {
                                            mod5_boxplot(D, x=mod5_input()[1],
                                                         x_cate = FALSE,
                                                         y=mod5_input()[3],
                                                         y_cate = TRUE,
                                                         fill=mod5_input()[1],
                                                         hover=input$mod5_select_hover)
                                          } else {
                                            mod5_barplot(D, x=mod5_input()[3],
                                                         fill=mod5_input()[1],
                                                         hover = input$mod5_select_hover)
                                          },
                                        "row"=
                                          rowData(D) %>%
                                          data.frame %>%
                                          dplyr::rename(var=mod5_input()[5]) %>%
                                          dplyr::group_by(var) %>%
                                          dplyr::summarise(count=n()) %>%
                                          plot_ly(labels = ~var,
                                                  values = ~count,
                                                  type = 'pie',
                                                  textposition = 'inside',
                                                  source="mod5-click",
                                                  title="<b>Distribution of Super Pathway</b>") %>%
                                          layout(autosize = F, width = 1000, height = 500,
                                                 uniformtext=list(minsize=12, mode='hide'),
                                                 legend = list(x = 1,
                                                               y = .5,
                                                               tracegroupgap = 5)
                                          )
    )
    session_store$mod5_plotly
  }
  )
  # download button
  # Saves the currently rendered mod5 plotly (cached in session_store) as a
  # standalone HTML widget.
  output$mod5_download_plotly <- downloadHandler(
    filename = function() {
      paste("data-", Sys.Date(), ".html", sep = "")
    },
    content = function(file) {
      saveWidget(as_widget(session_store$mod5_plotly), file, selfcontained = TRUE)
    }
  )
## to see the stored data of clicking
# output$info <- renderPrint({
# d5 <- event_data("plotly_click", source = "mod5-click")
# if(!is.null(d5)){
# d5
# }
# })
  # Drill-down pie: reacts to clicks on the super-pathway pie (source
  # "mod5-click") and shows the sub-pathway distribution for the clicked slice.
  # NOTE(review): the clicked label is recovered by indexing
  # rev(pie_dat$SUPER_PATHWAY) with pointNumber — this assumes plotly orders
  # pie slices as the reverse of the raw (duplicated) column; confirm against
  # the actual trace ordering, as customdata on the pie trace would be sturdier.
  output$mod5_plot2 <- renderPlotly({
    d5 <- event_data("plotly_click", source = "mod5-click")
    pie_dat <- as.data.frame(rowData(D))
    if (!is.null(d5)){
      lvls <- rev(pie_dat$SUPER_PATHWAY)
      label <- lvls[round(as.numeric(d5$pointNumber))+1]
      session_store$mod5_plot2 <-
        pie_dat[pie_dat$SUPER_PATHWAY == label, ] %>%
        dplyr::rename(var="SUB_PATHWAY") %>%
        dplyr::group_by(var) %>%
        dplyr::summarise(count=n()) %>%
        plot_ly(labels = ~var,
                values = ~count,
                type = 'pie',
                textposition = 'inside',
                title=paste0("<b>Distribution of Sub Pathway in Specified Super Pathway - </b>", label)
        ) %>%
        layout(autosize = F, width = 1000, height = 500,
               uniformtext=list(minsize=12, mode='hide'),
               legend = list(x = 1,
                             y = .5,
                             tracegroupgap = 5)
        )
      session_store$mod5_plot2
    }
  })
# download button
output$mod5_download_plotly2 <- downloadHandler(
filename = function() {
paste("data-", Sys.Date(), ".html", sep = "")
},
content = function(file) {
saveWidget(as_widget(session_store$mod5_plotly2), file, selfcontained = TRUE)
}
)
  # Define rendering logic of control widgets in Module-Annotations Explorer(coded as mod3) ------------------------
  # PCA-only widget: scores vs loadings selector; hidden (NULL) for UMAP.
  output$mod3_pca_data <- renderUI({
    if(input$mod3_select_plot=="pca"){
      selectInput("mod3_pca_data_type", "Select data type for PCA:",
                  width = "220px",
                  choices = c("scores", "loadings"),
                  selected = "scores"
      )
    } else {
      NULL
    }
  })
# create intermediate var to indicate coloring widgets
inter_var <- reactive({
if (input$mod3_select_plot=="pca" & input$mod3_pca_data_type=="scores") {
"pca-scores"
} else if(input$mod3_select_plot=="pca" & input$mod3_pca_data_type=="loadings"){
"pca-loadings"
} else {
"umap"
}
})
  # create reactive plotting argument for PCA/UMAP
  # Renders the coloring/hover widget set appropriate for the current mode:
  # PCA scores and UMAP draw choices from colData (samples), PCA loadings
  # from rowData (features); UMAP additionally exposes n_neighbors.
  output$mod3_plot_argument <- renderUI({
    switch(
      inter_var(),
      "pca-scores"=list(
        checkboxInput("mod3_scale_data", "Scaled data",
                      value = TRUE
        ),
        selectInput("mod3_select_colData",
                    "Select one coloring variable:",
                    choices = names(colData(D)),
                    selected = "BOX.NUMBER",
                    width = "220px"
        ),
        checkboxInput("mod3_checkbox_factor",
                      "Categorical Coloring",
                      value = FALSE
        ),
        selectInput("mod3_select_hover",
                    "Select hovering text:",
                    # selectInput coerces its output to character
                    # https://github.com/rstudio/shiny/issues/2367
                    # choices = setNames(seq_along(colData(D)), names(colData(D))),
                    choices = names(colData(D)),
                    selected = "sample",
                    width = "220px",
                    multiple=TRUE
        )
      ),
      "pca-loadings"=list(
        checkboxInput("mod3_scale_data", "Scaled data",
                      value = TRUE
        ),
        selectInput("mod3_select_colData",
                    "Select one coloring variable:",
                    choices = names(rowData(D)),
                    selected = "SUPER_PATHWAY",
                    width = "220px"
        ),
        checkboxInput("mod3_checkbox_factor",
                      "Categorical Coloring",
                      value = FALSE
        ),
        selectInput("mod3_select_hover",
                    "Select hovering text:",
                    # choices = setNames(seq_along(rowData(D)), names(rowData(D))),
                    choices = names(rowData(D)),
                    selected = "name",
                    width = "220px",
                    multiple=TRUE
        )
      ),
      "umap"=list(numericInput("mod3_umap_n_neighbors",
                               "Number of neighbors for UMAP:",
                               value = 15,
                               width = "220px"
      ),
      checkboxInput("mod3_scale_data", "Scaled data",
                    value = TRUE
      ),
      selectInput("mod3_select_colData",
                  "Select one coloring variable:",
                  choices = names(colData(D)),
                  selected = "BOX.NUMBER",
                  width = "220px"
      ),
      checkboxInput("mod3_checkbox_factor",
                    "Categorical Coloring",
                    value = FALSE
      ),
      selectInput("mod3_select_hover",
                  "Select hovering text:",
                  # choices = setNames(seq_along(colData(D)), names(colData(D))),
                  choices = names(colData(D)),
                  selected = "sample",
                  width = "220px",
                  multiple=TRUE
      )
      )
    )
  })
  # create reactive inputs list
  # NOTE(review): c() coerces the logical/numeric entries to character; the
  # consumers below compare against "pca" and re-coerce with as.numeric().
  mod3_input_object <- eventReactive(input$mod3_go,
                                     {c(input$mod3_select_plot,
                                        input$mod3_select_colData,
                                        input$mod3_scale_data,
                                        input$mod3_checkbox_factor,
                                        input$mod3_pca_data_type,
                                        input$mod3_umap_n_neighbors)}
  )
  # Define rendering logic of outputs in Module-Annotations Explorer(coded as mod3) --------------------------------
  # render pca/umap of mod3
  # Dispatches to the project's PCA or UMAP plot helper and caches the widget
  # in session_store for the download handler below.
  # NOTE(review): scale_data/categorizing arrive as strings "TRUE"/"FALSE"
  # (see mod3_input_object) — confirm mod3_plots_pca/umap coerce them.
  output$mod3_plot <- renderPlotly({
    session_store$mod3_plotly <- if (mod3_input_object()[1]=="pca"){
      mod3_plots_pca(D = D,
                     scale_data = mod3_input_object()[3],
                     color = mod3_input_object()[2],
                     categorizing=mod3_input_object()[4],
                     data_type = mod3_input_object()[5],
                     hover = input$mod3_select_hover
      )
    } else {
      mod3_plots_umap(D = D,
                      scale_data = mod3_input_object()[3],
                      color = mod3_input_object()[2],
                      categorizing=mod3_input_object()[4],
                      n_neighbors = as.numeric(mod3_input_object()[6]),
                      hover = input$mod3_select_hover
      )
    }
    session_store$mod3_plotly
  })
  # download button
  output$mod3_download_plotly <- downloadHandler(
    filename = function() {
      paste("data-", Sys.Date(), ".html", sep = "")
    },
    content = function(file) {
      saveWidget(as_widget(session_store$mod3_plotly), file, selfcontained = TRUE)
    }
  )
  # Define rendering logic of control widgets in Module-All Results Explorer(coded as mod1) ------------------------
  # create stat_name list dependent on radio button
  output$mod1_select_statname_ui <- renderUI({
    selectInput("mod1_select_statname", "Select one stat name:",
                width = "220px",
                choices = dplyr::distinct(obj_name[obj_name$V1==input$mod1_radio, ], stat_name)$stat_name
    )
  })
  # create object list dependent on radio button and stat_name
  output$mod1_select_object_ui <- renderUI({
    if (input$mod1_radio=="stats"){
      NULL
    } else {
      selectInput("mod1_select_object", "Select one object:",
                  width = "220px",
                  choices = dplyr::distinct(obj_name[obj_name$stat_name==input$mod1_select_statname&obj_name$V1==input$mod1_radio, ], V2)$V2
      )
    }
  })
  # create indicator of box plot output
  # NOTE(review): input$mod1_select_object is NULL while the "Table" radio is
  # selected (widget not rendered); this reactive would then error if
  # evaluated — presumably it is only consumed in the "plots" path; verify.
  box_switch <- reactive({
    if (input$mod1_select_object=="box"){
      "box_plot"
    } else {
      "non_box_plot"
    }
  })
  ## get the order of selected stat_name
  # Position of the selected stat_name among all box-plot result entries;
  # defaults to 1 when the stat_name has no box plot.
  ord <- reactive({
    # assign a data frame of all the object names of box plots
    # filter() cannot run in Shiny, use subset() instead
    box_obj_name <- subset(obj_name, V1=="plots"&V2=="box")
    box_output_order <- box_obj_name %>%
      dplyr::mutate(order=seq(from=1, to=n()))
    if(input$mod1_select_statname %in% box_output_order$stat_name){
      box_output_order[box_output_order$stat_name==input$mod1_select_statname, ]$order
    } else {
      1
    }
  })
  # create reactive inputs list
  mod1_input_object <- eventReactive(input$mod1_go, ## delayed output
                                     {c(input$mod1_radio,
                                        input$mod1_select_statname,
                                        input$mod1_select_object)}
  )
  # Define rendering logic of outputs in Module-All Results Explorer(coded as mod1) --------------------------------
  # Insert the right number of plot output objects into UI
  # Builds one plotOutput placeholder per (plot entry x sub-object); the
  # matching renderPlot bindings are registered in the loop further below.
  output$mod1_output_plot <- renderUI({
    ## limit plots to specified stat_name
    obj_name <- subset(obj_name, V1==mod1_input_object()[1])
    obj_name <- subset(obj_name, V2==mod1_input_object()[3])
    output_order <- obj_name %>%
      dplyr::mutate(order=seq(from=1, to=n()))
    output_order <- subset(output_order, stat_name==mod1_input_object()[2])
    plots <- list()
    for(plot_i in seq_along(output_order$order)){
      plots[[plot_i]] <- mtm_res_get_entries(D, c(mod1_input_object()[1], mod1_input_object()[3]))[[output_order$order[plot_i]]]
    }
    # there are multiple plots
    len_i <- length(plots)
    # some plots have multiple objects
    # NOTE(review): assumes every entry has the same number of output objects
    # as the first one — confirm for all stat_names.
    len_j <- length(plots[[1]]$output)
    # name every plot object in UI
    mod1_plot_output_list <- lapply(1:(len_i*len_j), function(i) {
      plotname <- paste("Plot", i, sep="")
      # locate the row in the `plots`
      row_n <- ceiling(i/len_j)
      ## set dynamic height of box scatter plots based on output2
      height <- if(plots[[1]]$fun[2]=="box"&plots[[1]]$fun[3]=="scatter"&!is.null(plots[[row_n]]$output2)){
        as.numeric(plots[[row_n]]$output2)*150
      } else {
        560
      }
      plotOutput(plotname, height = height, width = 850)
    })
    # Convert the list to a tagList - this is necessary for the list of items
    # to display properly.
    do.call(tagList, mod1_plot_output_list)
  })
  # Call renderPlot for each one. Plots are only actually generated when they
  # are visible on the web page.
  # get the max number of objects
  # Upper bound on how many plot slots ("Plot1".."PlotN") any stat_name can
  # need; a renderPlot is registered for each slot up front.
  num_df <- subset(obj_name, V1=="plots")
  num_df <- num_df %>%
    dplyr::group_by(V2, stat_name) %>%
    dplyr::summarise(cnt_sum=sum(cnt))
  max_plot <- max(num_df$cnt_sum)
  for (i in 1:max_plot) {
    # Need local so that each item gets its own number. Without it, the value
    # of i in the renderPlot() will be the same across all instances, because
    # of when the expression is evaluated.
    local({
      my_i <- i
      plotname <- paste("Plot", my_i, sep="")
      output[[plotname]] <- renderPlot({
        ## limit plots to specified stat_name
        obj_name <- subset(obj_name, V1==mod1_input_object()[1])
        obj_name <- subset(obj_name, V2==mod1_input_object()[3])
        output_order <- obj_name %>%
          dplyr::mutate(order=seq(from=1, to=n()))
        output_order <- subset(output_order, stat_name==mod1_input_object()[2])
        plots <- list()
        for(plot_i in seq_along(output_order$order)){
          plots[[plot_i]] <- mtm_res_get_entries(D, c(mod1_input_object()[1], mod1_input_object()[3]))[[output_order$order[plot_i]]]$output
        }
        # there are multiple plots
        len_i <- length(plots)
        # some plots have multiple objects
        len_j <- length(plots[[1]])
        # locate the row in the `plots`
        row_n <- ceiling(my_i/len_j)
        # locate the column in the `plots`
        col_n <- ifelse((my_i %% len_j)==0, len_j, (my_i %% len_j))
        # render the plot object in each loop
        plots[[row_n]][col_n]
      })
    })
  }
  # render stats table of Mod1
  # Joins feature annotations with the selected stat table and formats the
  # numeric columns in scientific notation.
  # NOTE(review): dplyr::select(c(2, 20:26)) hard-codes column positions of
  # the joined table — fragile if rowData gains/loses columns; verify.
  output$mod1_output_table <- renderDataTable({
    table <- data.frame(var=row.names(rowData(D)), rowData(D)) %>%
      left_join(mtm_get_stat_by_name(D, mod1_input_object()[2]),
                by=c("var"="var")
      ) %>%
      dplyr::select(c(2, 20:26))
    ## put interested columns ahead
    table <- if ('term' %in% names(table)) {
      table %>%
        dplyr::select(name, statistic, p.value, p.adj, term, dplyr::everything()) %>%
        ## scientific notation
        dplyr::mutate(statistic=formatC(statistic, format = "E", digits = 2),
                      p.value=formatC(p.value, format = "E", digits = 2),
                      p.adj=formatC(p.adj, format = "E", digits = 2),
                      estimate=formatC(estimate, format = "E", digits = 2),
                      std.error=formatC(std.error, format = "E", digits = 2)
        )
    } else {
      table %>%
        dplyr::select(name, statistic, p.value, p.adj, dplyr::everything())
    }
    datatable(table,
              options = list(
                # limit number of rows
                pageLength = 10,
                lengthMenu = c(10, 20, 50),
                ## set column width
                autoWidth = TRUE,
                columnDefs = list(list(width = '100px', targets = c(2:4))),
                scrollX = TRUE
              ))
  })
  # render plots or table
  # Main-panel switch: dynamic plot list vs. stats table.
  output$mod1_output <- renderUI({
    switch(
      mod1_input_object()[1],
      "plots" = uiOutput("mod1_output_plot"),
      "stats" = dataTableOutput("mod1_output_table")
    )
  })
# Define rendering logic of outputs in Module-Feature Results Explorer(coded as mod4) --------------------------------
# Module 4: general reactive stats table
mod4_metabolite_table <-
eventReactive(input$mod4_go,
{
table <- data.frame()
# assign an object of all stats tables
table_stats <- mtm_res_get_entries(D, c("stats", "univ"))
# assign an object of all stats plots
plot_stats <- mtm_res_get_entries(D, c("plots", "stats"))
for (i in 2:length(table_stats)) {
tab <- table_stats[[i]]$output$table %>%
dplyr::mutate(`stat name` = plot_stats[[i - 1]]$args$stat_list)
table <- rbind(table, tab)
}
table <- table %>%
dplyr::select(var, statistic, p.value, p.adj, `stat name`, estimate, std.error) %>%
dplyr::mutate(
statistic = formatC(statistic, format = "E", digits = 2),
p.value = formatC(p.value, format = "E", digits = 2),
p.adj = formatC(p.adj, format = "E", digits = 2),
estimate = formatC(estimate, format = "E", digits = 2),
std.error = formatC(std.error, format = "E", digits = 2)
) %>%
dplyr::filter(var == input$mod4_metabolite) %>%
dplyr::rename("name" = var)
})
  # Module 4: output the stats table
  # Single-row selection feeds the volcano plot below.
  output$mod4_table <- renderDataTable({
    datatable(mod4_metabolite_table(),
              selection = "single",
              options = list(
                dom = 't',
                # limit number of rows
                pageLength = 10,
                lengthMenu = c(10, 20, 50)
              )
    )
  })
  # Remember the last selected table row in session_store so downstream
  # outputs keep working after the selection event is cleared.
  observe({
    if (!is.null(input$mod4_table_rows_selected)) {
      session_store$mod4.tb.row <- input$mod4_table_rows_selected
    }
  })
  # mod4: extract the stat_name
  # Returns the `stat name` cell of the selected row (a one-cell data frame).
  stat_name_selected <- reactive({
    mod4_metabolite_table() %>%
      dplyr::slice(round(as.numeric(session_store$mod4.tb.row))) %>%
      dplyr::select(`stat name`)
  })
# Module 4: volcano plot
output$mod4.p1 <- renderUI({
if (!is.null(session_store$mod4.tb.row)) {
list(
downloadButton("mod4_download_plotly_volcano", "download volcano plot"),
plotlyOutput('mod4_volcano', height = 800)
)
}
})
# Module 4: volcano plot by using stat_name
output$mod4_volcano <- renderPlotly({
# Get volcano data set
data_vol <- get_data_by_name(D, "stat_name", "volcano", stat_name_selected())
isSelected <- input$mod4_metabolite
# Set the legend color column
data_vol[, "isSelected"] <- ifelse(data_vol$var==isSelected, TRUE, FALSE)
highlight_point <- data_vol[data_vol$isSelected==TRUE, ]
plot <- data_vol %>%
ggplot(aes(x = statistic, y = p.value, color = isSelected, label = name)) +
geom_point() +
geom_point(data=highlight_point, size = 3) +
scale_y_continuous(trans = reverselog_trans(10),
breaks = scales::trans_breaks("log10", function(x) 10^x),
labels = scales::trans_format("log10", scales::math_format(10^.x))) +
labs(y = "p-value (10^(-y))") +
ggtitle(paste0(stat_name_selected(), "-", isSelected)) +
scale_color_manual(values=c("#999999", "red"))
session_store$mod4.vol <- ggplotly(plot, source = "mod4_sub_vol") %>%
layout(legend = list(orientation = 'h',
xanchor = "center",
x = 0.5,
y = -0.2,
title = list(text='<b> isSelected </b>')))
session_store$mod4.vol
})
# Module 4: volcano plot - html file
output$mod4_download_plotly_volcano <- downloadHandler(
filename = function() {
paste("data-", Sys.Date(), ".html", sep = "")
},
content = function(file) {
saveWidget(as_widget(session_store$mod4.vol), file, selfcontained = TRUE)
}
)
# Module 4: box/scatter plot
output$mod4.p2 <- renderUI({
d <- event_data("plotly_click", source = "mod4_sub_vol")
if (!is.null(d)) {
download.name <- ifelse(
input$mod4.box.or.scatter == "box",
"download box plot",
"download scatter plot"
)
list(
downloadButton("mod4_download_box_scatter", download.name),
plotOutput("mod4.box.scatter", height = 600)
)
}
})
# Module 4: box/scatter - ui
output$mod4.p.ui <- renderUI({
d <- event_data("plotly_click", source = "mod4_sub_vol")
if (!is.null(d)) {
radioButtons(
"mod4.box.or.scatter",
"Select plot type:",
choices = list("Box" = "box",
"Scatter" = "scatter"),
selected = "scatter"
)
}
})
# Module 4: box/scatter plot
output$mod4.box.scatter <- renderPlot({
# Get the data set
data <- D %>%
maplet:::mti_format_se_samplewise() %>%
tidyr::gather(var, value, dplyr::one_of(rownames(D)))
d <- event_data("plotly_click", source = "mod4_sub_vol")
if (!is.null(d)) {
data_vol <- get_data_by_name(D, "stat_name", "volcano", stat_name_selected())
# set the column curveNumber by color legend
isSelected <- input$mod4_metabolite
data_vol[, "curveNumber"] <- ifelse(data_vol$var==isSelected, 1, 0)
data_vol_true <- data_vol[data_vol$curveNumber==1, ]
data_vol_false <- data_vol[data_vol$curveNumber==0, ]
# By using click info (curveNumber & ponitNumber) to get the metabolite name
metabolite <- ifelse(d$curveNumber == 1,
data_vol_true[d$pointNumber + 1, ]$var[1],
data_vol_false[d$pointNumber + 1, ]$var[1])
term <- data_vol$term[1]
# Filter the data by metabolite name
data <- data[data$var == metabolite, ]
# Treat as categorical or not?
if (input$mod4.categorical) {
data[, term] <- factor(data[, term])
} else {
data[, term] <- as.numeric(data[, term])
}
# Draw the plot
if (input$mod4.box.or.scatter == "scatter") {
plot <- data %>%
ggplot(aes(x = !!sym(term), y = value)) +
geom_point(size = 3) +
geom_smooth(method = "lm", se = T, color = "black") +
ggtitle(metabolite)
} else {
plot <- data %>%
ggplot(aes(x = !!sym(term), y = value)) +
geom_boxplot() +
geom_jitter(size = 3, width = 0.2) +
ggtitle(metabolite)
}
}
session_store$mod4.box.scatter <- if (is.null(plot)) NULL else plot
session_store$mod4.box.scatter
})
# Module 4: scatter/box plot - png file
output$mod4_download_box_scatter <- downloadHandler(
filename = function() {
paste("data-", Sys.Date(), ".png", sep = "")
},
content = function(file) {
device <- function(..., width, height) grDevices::png(..., width = width, height = height, res = 300, units = "in")
ggsave(file, plot = session_store$mod4.box.scatter, device = device)
}
)
  # Define rendering logic of outputs in Module-Pathway Results Explorer(coded as mod2) --------------------------------
  # Module 2: create reactive inputs list
  # Snapshot of the four sidebar inputs, refreshed only when "Update" is clicked.
  mod2_input_object <- eventReactive(input$mod2_go,
                                     {c(input$mod2.stat,
                                        input$mod2.plot1,
                                        input$mod2.plot2,
                                        input$mod2.plot3)}
  )
  # Module 2: store reactive output plots
  # NOTE: although created here, session_store is also used by the mod4
  # handlers above; that is safe because the server body runs top-to-bottom
  # at session start, before any reactive/observer fires.
  session_store <- reactiveValues()
# Module 2: plot 1
output$mod2.p1 <- renderUI({
inputs <- mod2_input_object()
switch(
inputs[2],
"bar" = list(downloadButton("download_plotly_bar",
"download bar plot"),
plotlyOutput("mod2.bar", height = 600)),
"null" = NULL
)
})
# Module 2: plot 1 - bar plot
output$mod2.bar <- renderPlotly({
inputs <- mod2_input_object()
plots <- mtm_res_get_entries(D, c("plots", "stats"))
for (i in seq_along(plots)) {
if (plots[[i]]$args$stat_list == inputs[1]) {
plot <- plots[[i]]$output[[1]]
}
}
session_store$mod2.bar <- ggplotly(plot, source = "sub_bar") %>%
layout(legend = list(orientation = 'h', xanchor = "center", x = 0.5, y = -0.3))
# render plotly graph
session_store$mod2.bar
})
# Module 2: plot 1 - bar plot - html file
output$download_plotly_bar <- downloadHandler(
filename = function() {
paste("data-", Sys.Date(), ".html", sep = "")
},
content = function(file) {
# export plotly html widget as a temp file to download.
saveWidget(as_widget(session_store$mod2.bar), file, selfcontained = TRUE)
}
)
# Module 2: plot 2
output$mod2.p2 <- renderUI({
inputs <- mod2_input_object()
d <- event_data("plotly_click", source = "sub_bar")
vol_list <- list(
downloadButton("download_plotly_volcano",
"download volcano plot"),
plotlyOutput("mod2.vol", height = 600)
)
# equalizer/bar -> bar/null -> plot
plot2 <- switch(inputs[3],
"equalizer" = switch(
inputs[2],
"bar" = if (!is.null(d)) {
list(
downloadButton("download_plotly_eq",
"download equalizer plot"),
plotlyOutput("mod2.equal", height = 600)
)
},
"null" = list(
downloadButton("download_plotly_eq",
"download equalizer plot"),
uiOutput("mod2.equal.ui"),
plotlyOutput("mod2.equal", height = 600)
)
),
"volcano" = switch(inputs[2],
"bar" = if (!is.null(d)) {
vol_list
},
"null" = vol_list))
})
# Module 2: plot 2 - volcano plot
output$mod2.vol <- renderPlotly({
inputs <- mod2_input_object()
d <- event_data("plotly_click", source = "sub_bar")
# get the threshold for significance
alpha <- get_threshold_for_p_adj(D, inputs[1])
legend_name <- paste0("p.adj < ", alpha)
if (!is.null(d)) {
# D:SE object, inputs: sidebar value, legend_name: legend name
# d: click info for bar plot, pwvar: SUB_PATWAY/PATTHWAY, alpha: significant value (ex. p.adj < 0.1)
plot <- mod2_plot_vol(D, inputs, legend_name, d, pwvar, alpha)
} else {
plot <- mod2_plot_vol(D, inputs, legend_name, NULL, pwvar, alpha)
}
session_store$mod2.vol <- ggplotly(plot, source = "sub_vol") %>%
layout(legend = list(orientation = 'h',
xanchor = "center",
x = 0.5,
y = -0.2,
title = list(text=paste0('<b> ', legend_name, ' </b>'))))
session_store$mod2.vol
})
# Module 2: plot 2 - volcano plot - html file
output$download_plotly_volcano <- downloadHandler(
filename = function() {
paste("data-", Sys.Date(), ".html", sep = "")
},
content = function(file) {
saveWidget(as_widget(session_store$mod2.vol), file, selfcontained = TRUE)
}
)
# Module 2: plot 2 - equalizer plot - not bar
output$mod2.equal.ui <- renderUI({
inputs <- mod2_input_object()
data_bar <- get_data_by_name(D, "stat_list", "stats", inputs[1])
subpathways <- data_bar$name
selectInput(
"mod2.equal.path",
"Select one pathway name:",
choices = c(unique(unlist(subpathways))),
selected = ""
)
})
  # Module 2: plot 2 - equalizer plot
  # Pathway source depends on plot1: with no bar plot the pathway comes from
  # the drop-down (mod2.equal.path); otherwise from the bar-plot click.
  output$mod2.equal <- renderPlotly({
    inputs <- mod2_input_object()
    # get click info for bar plot
    d <- event_data("plotly_click", source = "sub_bar")
    # get the threshold for significance
    alpha <- get_threshold_for_p_adj(D, inputs[1])
    if (inputs[2] == "null") {
      # D: SE object, inputs: sidebar values, rd: pathway annotations,
      # alpha: significance threshold (ex. p.adj < 0.1),
      # pwvar: pathway annotation column (e.g. SUB_PATHWAY/PATHWAY),
      # path_name: pathway name for equalizer plot, d: click info for bar plot
      plot <- mod2_plot_eq(D, inputs, rd, alpha, pwvar, input$mod2.equal.path, NULL)
    } else {
      plot <- mod2_plot_eq(D, inputs, rd, alpha, pwvar, NULL, d)
    }
    # fall back to an empty plotly when the helper produced nothing
    session_store$mod2.eq <- if (is.null(plot)) plotly_empty() else ggplotly(plot, source = "sub_eq")
    session_store$mod2.eq
  })
# Module 2: plot 2 - equalizer plot - html file
output$download_plotly_eq <- downloadHandler(
filename = function() {
paste("data-", Sys.Date(), ".html", sep = "")
},
content = function(file) {
saveWidget(as_widget(session_store$mod2.eq), file, selfcontained = TRUE)
}
)
# Module 2: plot 3 - box/scatter plot
output$mod2.p3 <- renderUI({
inputs <- mod2_input_object()
d.eq <- event_data("plotly_click", source = "sub_eq")
d.vol <- event_data("plotly_click", source = "sub_vol")
download.name <- ifelse(inputs[4]=="box", "download box plot", "download scatter plot")
plot.list <- list(
downloadButton("download_plotly_box_scatter", download.name),
plotOutput("mod2.box.scatter", height = 600)
)
if (!is.null(d.eq) | !is.null(d.vol)) {
plot.list
}
})
  # Module 2: plot 3 - box/scatter plot
  # Delegates all click-routing logic (which plot was clicked, which feature
  # it maps to) to the mod2_plot_box_scatter() helper.
  output$mod2.box.scatter <- renderPlot({
    inputs <- mod2_input_object()
    # Get the data set in sample-wise long format (one row per sample/feature)
    data <- D %>%
      maplet:::mti_format_se_samplewise() %>%
      tidyr::gather(var, value, dplyr::one_of(rownames(D)))
    # get the click info for bar/equalizer/volcano if available
    d.bar <- event_data("plotly_click", source = "sub_bar")
    d.eq <- event_data("plotly_click", source = "sub_eq")
    d.vol <- event_data("plotly_click", source = "sub_vol")
    # get the threshold for significance
    alpha <- get_threshold_for_p_adj(D, inputs[1])
    plot <- mod2_plot_box_scatter(D, # SE object
                                  inputs, # sidebar inputs
                                  d.bar, # click info for bar plot
                                  d.eq, # click info for equalizer plot
                                  d.vol, # click info for volcano plot
                                  rd, # pathway annotations
                                  pwvar, # pathway annotation column
                                  input$mod2.equal.path, # pathway name if plot2 is "equalizer"
                                  alpha, # significant value (ex. p.adj < 0.1)
                                  input$mod2.categorical, # if treated categorical
                                  data) # data for box/scatter plot
    # store for the download handler; NULL when no click produced a plot
    session_store$mod2.box.scatter <- if (is.null(plot)) NULL else plot
    session_store$mod2.box.scatter
  })
  # Module 2: plot 3 - scatter/box plot - PNG file (the previous comment said
  # "html file", but this handler writes a 300-dpi PNG via ggsave)
  output$download_plotly_box_scatter <- downloadHandler(
    filename = function() {
      paste("data-", Sys.Date(), ".png", sep = "")
    },
    content = function(file) {
      # wrap grDevices::png so ggsave renders at 300 dpi with inch units
      device <- function(..., width, height) grDevices::png(..., width = width, height = height, res = 300, units = "in")
      ggsave(file, plot = session_store$mod2.box.scatter, device = device)
    }
  )
}
# Run the application
# (ui and server are defined above in this file)
shinyApp(ui = ui, server = server)
| /Capstone/Code/app.R | no_license | ZhuXiangNEU/Maplet-Capstone | R | false | false | 75,443 | r | rm(list=ls())
################################################################################
####################load packages, functions and result SE object ##############
################################################################################
# load packages
# devtools::install_github(repo="krumsieklab/maplet@v1.0.1", subdir="maplet")
library(shiny)
library(shinyWidgets)
library(maplet)
library(shinydashboard)
library(shinyjs)
library(tidyverse)
library(DT)
library(plotly)
library(openxlsx)
library(readxl)
library(RColorBrewer)
# refer help functions
source("help_functions.R")
# load SE with fixed name
load("SE.Rdata")
# extract object names from result SE 'D'
obj_name <- get_obj_name(D)
# define pathway annotation column (extracted from corresponding stat_bar
pwvar <- mtm_res_get_entries(D, c("plots", "stats"))[[1]]$args$group_col
# define threshold for significance (extracted from corresponding stat_bar plot)
#alpha <- mtm_res_get_entries(D, c("plots", "stats"))[[1]]$args$feat_filter[[3]]
# get pathway annotations
rd <- get_pathway_annotations(D, pwvar)
################################################################################
########################## Define UI for Shiny application #####################
################################################################################
# Top-level UI: a navbarPage with six tabs --
# Real-Time Pipeline (mod6), Annotations Explorer (mod5), 2D Projection (mod3),
# All Results Explorer (mod1), Feature Results Explorer (mod4) and
# Pathway Results Explorer (mod2). Each tab is a sidebarLayout whose outputs
# are rendered by the matching server handlers.
# NOTE(review): several user-facing labels contain the typo "untill" -- left
# unchanged here because string literals are runtime behavior.
ui <- fluidPage(
  # set appearance customization -------------------------------------------------
  theme = "bootstrap.css",
  includeCSS("www/style.css"),
  setBackgroundColor("#FFFFFF"),# set canvas background color
  div(style = "padding: 1px 0px; width: '100%'",
      titlePanel(
        title = "",
        windowTitle = "Maplet"
      )
  ),
  # remove shiny "red" warning messages on GUI
  tags$style(type="text/css",
             ".shiny-output-error { visibility: hidden; }",
             ".shiny-output-error:before { visibility: hidden; }"
  ),
  # adjust tab height
  tags$head(
    tags$style(HTML(' .navbar {
                        height: 80px;
                        min-height:25px !important;
                        }
                        .navbar-nav > li > a, .navbar-brand {
                        padding-top:1px !important;
                        padding-bottom:1px !important;
                        height: 80px;
                        }'))),
  navbarPage(
    # embed Maplet logo and title
    title = div(img(src='logo.png',
                    style="float:left; margin-top: 5px; padding-right:20px;padding-bottom:5px",
                    height = 60),
                "Krumsiek Lab",
                tags$script(HTML("var header = $('.navbar > .container-fluid');header.append('<div style=\"float:right\"><a href=\"https://weill.cornell.edu\"><img src=\"wcm2.png\" alt=\"logo\" style=\"float:right;height:50px;margin-top: 10px; padding-right:1px; \"> </a></div>');console.log(header)")),
                windowTitle = "Maplet"),
    # sticky tabs while scrolling main panel
    position = c("fixed-top"),
    # Define layout of Module-Real-Time Pipeline(coded as mod6) ----------------------------------------------------
    tabPanel(HTML(paste("Real-Time", "Pipeline", sep = "<br/>")),
             # Sidebar layout with input and output definitions ----
             dashboardPage(
               dashboardHeader(disable = TRUE),
               dashboardSidebar(disable = TRUE),
               dashboardBody(
                 sidebarLayout(
                   # Sidebar panel for inputs ----
                   sidebarPanel(
                     id = "mod6_panel1",
                     style = "margin-left: -25px; margin-top: 45px; margin-bottom: 5px; width: 80%; ",
                     tags$p(
                       HTML("<b>Real-Time Pipeline</b> starts with original data, creates a pipeline and download it to local."
                       )),
                     tags$p(
                       HTML("Pipeline is constrained to run in order of <b>Data Loading->Preprocessing->Differential Analysis</b> and no section should be skipped."
                       )),
                     tags$p(
                       HTML("The result SE object is dependent on the <b>instant parameters</b>."
                       )),
                     # Input: Select a file ----
                     fileInput("file1", "Uploading File",
                               multiple = FALSE,
                               accept = c(".xlsx"),
                               width = "300px"),
                     # Input: Checkbox if file has header ----
                     checkboxInput("header", "Header", TRUE),
                     tags$hr(),
                     # collapsible box: choose which workbook sheets hold each dimension
                     box(solidHeader = T, collapsible = T, collapsed = TRUE,
                         title="Sheets for Dimensions", width = "220px",
                         checkboxInput("mod6_assay_in_row", "Samples in rows?", FALSE),
                         tags$p(HTML("Assay sheet:")),
                         uiOutput("mod6_assay_sheet"),
                         tags$p(HTML("rowData sheet:")),
                         uiOutput("mod6_rowdata_sheet"),
                         tags$p(HTML("colData sheet:")),
                         uiOutput("mod6_coldata_sheet"),
                         tags$p(HTML("Click to show the original data, but this investigation is not necessary for pipeline.")),
                         actionButton("mod6_go", "Investigate")
                     ),
                     tags$hr(),
                     # collapsible box: data-loading parameters
                     box(solidHeader = T, collapsible = T, collapsed = TRUE,
                         title="Data Loading", width = "220px",
                         tags$p(HTML("ID column in assay:")),
                         uiOutput("mod6_assay_id_column"),
                         tags$p(HTML("ID column in rowData:")),
                         uiOutput("mod6_rowdata_id_column"),
                         tags$p(HTML("ID column in colData:")),
                         uiOutput("mod6_coldata_id_column"),
                         tags$p(HTML("Run to see log text of data loading.")),
                         actionButton("mod6_go_load", "Run", width = "110px")
                     ),
                     tags$hr(),
                     # collapsible box: preprocessing parameters
                     box(solidHeader = T, collapsible = T, collapsed = TRUE,
                         title="Preprocessing", width = "220px",
                         tags$p(HTML("Max % missingness per feature:")),
                         numericInput("mod6_filter_feat_max", label = NULL,
                                      value = 1,
                                      min = 0,
                                      max = 1,
                                      step = 0.1,
                                      width = "220px"),
                         tags$p(HTML("Max % missingness per feature (normalization):")),
                         numericInput("mod6_feat_max_norm", label = NULL,
                                      value = 1,
                                      min = 0,
                                      max = 1,
                                      step = 0.1,
                                      width = "220px"),
                         tags$p(HTML("Max % missingness per sample:")),
                         numericInput("mod6_filter_sample_max", label = NULL,
                                      value = 1,
                                      min = 0,
                                      max = 1,
                                      step = 0.1,
                                      width = "220px"),
                         tags$p(HTML("Sample coloring column:")),
                         uiOutput("mod6_pre_sample_color_column"),
                         tags$p(HTML("Batch column:")),
                         uiOutput("mod6_pre_batch_column"),
                         tags$p(HTML("PCA/UMAP coloring column:")),
                         uiOutput("mod6_pre_pca_color_column"),
                         tags$p(HTML("Heatmap annotation column:")),
                         uiOutput("mod6_pre_heatmap_anno_column"),
                         tags$p(HTML("Heatmap annotation row:")),
                         uiOutput("mod6_pre_heatmap_anno_row"),
                         tags$p(HTML("Run to see log text of data loading and preprocessing. This step may cost a few seconds to run.")),
                         actionButton("mod6_go_preprocess", "Run", width = "110px")
                     ),
                     tags$hr(),
                     # collapsible box: differential-analysis parameters
                     box(solidHeader = T, collapsible = T, collapsed = TRUE,
                         title="Differential Analysis", width = "220px",
                         tags$p(HTML("Outcome variable:")),
                         uiOutput("mod6_outcome"),
                         checkboxInput("mod6_outcome_binary", "Binary outcome?", FALSE),
                         tags$p(HTML("Type of analysis:")),
                         selectInput("mod6_analysis_type", label = NULL,
                                     width = "220px",
                                     choices = c("lm","pearson","spearman","kendall"),
                                     selected = "lm"),
                         tags$p(HTML("Multiple testing correction:")),
                         selectInput("mod6_mult_test_method", label = NULL,
                                     width = "220px",
                                     choices = c("BH","bonferroni","BY"),
                                     selected = "BH"),
                         tags$p(HTML("Significance threshold:")),
                         numericInput("mod6_sig_threshold", label = NULL,
                                      value = 0.05,
                                      min = 0,
                                      max = 1,
                                      step = 0.01,
                                      width = "220px"),
                         tags$p(HTML("Pathway aggregation in barplot:")),
                         uiOutput("mod6_group_col_barplot"),
                         tags$p(HTML("Barplot coloring column:")),
                         uiOutput("mod6_color_col_barplot"),
                         tags$p(HTML("Run to see log text of data loading, preprocessing and differential analysis. This step may cost a few seconds to run.")),
                         actionButton("mod6_go_differ", "Run", width = "110px")
                     )
                   ),
                   # Main panel for displaying outputs ----
                   mainPanel(
                     id = "mod6_panel2",
                     style = "overflow-y: auto; max-height: 100vh; position: absolute; left: 28%",
                     br(),
                     br(),
                     br(),
                     # Output: Data file ----
                     tags$p(HTML("Downloading SE.Rdata may cost more than one minute. Please wait for the prompt.")),
                     downloadButton("download_se", "Download result SE .Rdata"),
                     br(),
                     br(),
                     uiOutput("mod6_main_panel")
                   )
                 )
               )
             )
    ),
    # Define layout of Module-Annotations Explorer(coded as mod5) ----------------------------------------------------
    tabPanel(HTML(paste("Annotations", "Explorer", sep = "<br/>")),
             sidebarLayout(
               sidebarPanel(id = "mod5_panel1",
                            # sidebar autoscroll with main panel
                            style = "margin-left: -25px; margin-top: 45px; margin-bottom: 5px; position:fixed; width: 20%; height: 100%;",
                            tags$p(
                              HTML("<b>Annotations Explorer</b> creates tables, distribution plots, or other graphics to explore the SE object."
                              )),
                            radioButtons("mod5_dimension", "Select one dimension:",
                                         choices = list("Column Data" = "col",
                                                        "Row Data" = "row")
                            ),
                            br(),
                            uiOutput("mod5_dimension_ui"),
                            br(),
                            tags$p(
                              HTML("<b>Hint:<br></b>Outputs are delayed untill you click 'UPDATE' button after selection."
                              )),
                            br(),
                            # delay the output
                            actionButton("mod5_go", "Update")
               ),
               mainPanel(id = "mod5_panel2",
                         br(),
                         br(),
                         br(),
                         style = "overflow-y: auto; position: absolute; left: 25%",
                         uiOutput("mod5_output_ui")
               )
             )
    ),
    # Define layout of Module-2D Projection(coded as mod3) ----------------------------------------------------
    tabPanel(HTML(paste("2D", "Projection", sep = "<br/>")),
             sidebarLayout(
               sidebarPanel(id = "mod3_panel1",
                            # sidebar autoscroll with main panel
                            style = "margin-left: -25px; margin-top: 45px; margin-bottom: 5px; position:fixed; width: 20%; height: 100%;",
                            tags$p(
                              HTML("<b>2D Projection</b> generates an interactive 2D projection of PCA/UMAP."
                              )),
                            tags$p(
                              HTML("It displays a drop-down menu of all colData columns for coloring."
                              )),
                            # select one plot type
                            radioButtons("mod3_select_plot", "Select one plot type:",
                                         choices = list("PCA" = "pca",
                                                        "UMAP" = "umap")
                            ),
                            # function argument
                            uiOutput("mod3_pca_data"),
                            # select coloring colData and factor it
                            uiOutput("mod3_plot_argument"),
                            br(),
                            tags$p(
                              HTML("<b>Hint:<br></b>Outputs are delayed untill you click 'UPDATE' button after selection."
                              )),
                            br(),
                            # delay the output
                            actionButton("mod3_go", "Update")
               ),
               mainPanel(id = "mod3_panel2",
                         br(),
                         br(),
                         br(),
                         style = "overflow-y: auto; position: absolute; left: 25%",
                         # plotly
                         downloadButton("mod3_download_plotly", "download plotly"),
                         plotlyOutput('mod3_plot', height = 700)
               )
             )
    ),
    # Define layout of Module-All Results Explorer(coded as mod1) ----------------------------------------------------
    tabPanel(HTML(paste("All Results", "Explorer", sep = "<br/>")),
             sidebarLayout(
               sidebarPanel(id = "mod1_panel1",
                            # sidebar auto-scrolling with main panel
                            style = "margin-left: -25px; margin-top: 45px; margin-bottom: 5px; position:fixed; width: 20%; height: 100%;",
                            tags$p(
                              HTML("<b>All Results Explorer</b> extracts all the result objects one at a time."
                              )),
                            tags$p(
                              HTML("Users can assess results in a drop-down menu that offers a list of a stat_name and a plot type (e.g. missingness, pval)."
                              )),
                            br(),
                            # select plot type or stats table
                            radioButtons("mod1_radio", "Select output type:",
                                         choices = list("Plot" = "plots",
                                                        "Table" = "stats"),
                                         selected = "stats"
                            ),
                            br(),
                            # define one UI object to select stat_name
                            uiOutput("mod1_select_statname_ui"),
                            br(),
                            # define one UI object to select output type
                            uiOutput("mod1_select_object_ui"),
                            br(),
                            tags$p(
                              HTML("<b>Hint:<br></b>Outputs are delayed untill you click 'UPDATE' button after selection. Some plots such as box plot or multiple plots may cost dozens of seconds to show up."
                              )),
                            # delay the output
                            actionButton("mod1_go", "Update")
               ),
               mainPanel(id = "mod1_panel2",
                         # scrollable panel
                         style = "overflow-y: auto; position: absolute; left: 25%",
                         br(),
                         br(),
                         br(),
                         # dynamic number of plots
                         uiOutput('mod1_output')
               )
             )
    ),
    # Define layout of Module-Feature Results Explorer(coded as mod4) --------------------------------------------------
    tabPanel(HTML(paste("Feature Results", "Explorer", sep = "<br/>")),
             sidebarLayout(
               sidebarPanel(
                 id = "mod4_panel1",
                 # sidebar autoscroll with main panel
                 style = "margin-left: -25px; margin-top: 45px; margin-bottom: 5px; position:fixed; width: 20%; height: 100%;",
                 tags$p(
                   HTML(
                     "<b>Feature Results Explorer</b> requires collection on all statistical results in a table given one metabolite name."
                   )
                 ),
                 tags$p(
                   HTML(
                     "When clicking on one row, it should display interactive plots following the same orders in Module 2."
                   )
                 ),
                 # select one metabolite
                 selectInput(
                   "mod4_metabolite",
                   "Select one metabolite:",
                   width = "220px",
                   choices = arrange(mtm_res_get_entries(D, c("stats", "univ"))[[1]]$output$table, var)$var,
                   selected = ""
                 ),
                 br(),
                 checkboxInput("mod4.categorical",
                               "Treat as categorical",
                               value = FALSE
                 ),
                 br(),
                 tags$p(
                   HTML("<b>Hint:<br></b>Outputs are delayed untill you click 'UPDATE' button after selection."
                   )),
                 # delay the output
                 actionButton("mod4_go", "Update")
               ),
               mainPanel(
                 id = "mod4_panel2",
                 br(),
                 br(),
                 br(),
                 style = "overflow-y: auto; position: absolute; left: 25%",
                 # stats table
                 dataTableOutput('mod4_table'),
                 br(),
                 br(),
                 # volcano plotly
                 uiOutput("mod4.p1"),
                 br(),
                 br(),
                 # box/scatter plotly
                 uiOutput("mod4.p.ui"),
                 uiOutput("mod4.p2")
               )
             )
    ),
    # Define layout of Module-Pathway Results Explorer(coded as mod2) ----------------------------------------------------
    tabPanel(HTML(paste("Pathway Results", "Explorer", sep = "<br/>")),
             sidebarLayout(
               sidebarPanel(
                 id = "mod2_panel1",
                 # sidebar auto-scrolling with main panel
                 style = "margin-left: -25px; margin-top: 45px; margin-bottom: 5px; position:fixed; width: 20%; height: 100%;",
                 tags$p(
                   HTML(
                     "<b>Pathway Results Explorer:</b> Displays a series of interactive plots at different granularities given a SE and a statname."
                   )
                 ),
                 tags$p(
                   HTML(
                     "StatsBar plot -> Equalizer/Volcano plot -> Box/Scatter plot."
                   )
                 ),
                 br(),
                 selectInput(
                   "mod2.stat",
                   "Select one stat name:",
                   choices = distinct(obj_name[obj_name$V1 == "plots" &
                                                 obj_name$V2 == "stats",],
                                      stat_name)$stat_name
                 ),
                 br(),
                 radioButtons(
                   "mod2.plot1",
                   "Select plot1 type:",
                   choices = list("Bar" = "bar",
                                  "Not Bar plot" = "null"),
                   selected = "bar"
                 ),
                 br(),
                 radioButtons(
                   "mod2.plot2",
                   "Select plot2 type:",
                   choices = list("Equalizer" = "equalizer",
                                  "Volcano" = "volcano"),
                   selected = "volcano"
                 ),
                 tags$hr(),
                 radioButtons(
                   "mod2.plot3",
                   "Select plot3 type:",
                   choices = list("Box" = "box",
                                  "Scatter" = "scatter"),
                   selected = "scatter"
                 ),
                 checkboxInput("mod2.categorical",
                               "Treat as categorical",
                               value = FALSE
                 ),
                 br(),
                 tags$p(
                   HTML("<b>Hint:<br></b>Outputs are delayed untill you click 'UPDATE' button after selection."
                   )),
                 actionButton("mod2_go", "Update")
               ),
               mainPanel(
                 id = "mod2_panel2",
                 style = "overflow-y: auto; position: absolute; left: 25%",
                 br(),
                 br(),
                 br(),
                 # Bar plot or not
                 uiOutput("mod2.p1"),
                 br(),
                 # equalizer or volcano
                 uiOutput("mod2.p2"),
                 br(),
                 # box or scatter
                 uiOutput("mod2.p3"),
                 br()
               )
             ))
  )
)
################################################################################
################ Define server logic required to draw outputs ##################
################################################################################
server <- function(input, output) {
  # Define rendering logic of control widgets in Module-Real-Time Pipeline (coded as mod6)----------------------
  # These widgets are rendered server-side because their choices depend on the
  # uploaded workbook (sheet names / column names are only known after upload).
  # control widget of selecting file
  output$mod6_assay_sheet <- renderUI({
    req(input$file1)
    selectInput("assay_sheet", label = NULL,
                width = "220px",
                choices = getSheetNames(as.character(input$file1$datapath))
    )
  })
  # control widget of dimensions
  # reactive readers for the three selected sheets
  df_assay <- reactive({
    read_excel(as.character(input$file1$datapath),
               col_names = input$header,
               sheet=input$assay_sheet)
  })
  output$mod6_rowdata_sheet <- renderUI({
    req(input$file1)
    selectInput("rowdata_sheet", label = NULL,
                width = "220px",
                choices = getSheetNames(as.character(input$file1$datapath))
    )
  })
  df_rowdata <- reactive({
    read_excel(as.character(input$file1$datapath),
               col_names = input$header,
               sheet=input$rowdata_sheet)
  })
  output$mod6_coldata_sheet <- renderUI({
    req(input$file1)
    selectInput("coldata_sheet", label = NULL,
                width = "220px",
                choices = getSheetNames(as.character(input$file1$datapath))
    )
  })
  df_coldata <- reactive({
    read_excel(as.character(input$file1$datapath),
               col_names = input$header,
               sheet=input$coldata_sheet)
  })
  # control widget of data loading
  # ID-column selectors, populated from the corresponding sheet's columns
  output$mod6_assay_id_column <- renderUI({
    selectInput("assay_id_column", label = NULL,
                width = "220px",
                choices = colnames(df_assay())
    )
  })
  output$mod6_rowdata_id_column <- renderUI({
    selectInput("rowdata_id_column", label = NULL,
                width = "220px",
                choices = colnames(df_rowdata())
    )
  })
  output$mod6_coldata_id_column <- renderUI({
    selectInput("coldata_id_column", label = NULL,
                width = "220px",
                choices = colnames(df_coldata())
    )
  })
  # control widget of preprocessing
  output$mod6_pre_sample_color_column <- renderUI({
    selectInput("pre_sample_color_column", label = NULL,
                width = "220px",
                choices = colnames(df_coldata())
    )
  })
  output$mod6_pre_batch_column <- renderUI({
    selectInput("pre_batch_column", label = NULL,
                width = "220px",
                selected=NULL,
                choices = colnames(df_coldata())
    )
  })
  # PCA/UMAP and heatmap annotation selectors allow multiple columns
  output$mod6_pre_pca_color_column <- renderUI({
    selectInput("pre_pca_color_column", label = NULL,
                width = "220px",
                multiple=TRUE,
                selected=NULL,
                choices = colnames(df_coldata())
    )
  })
  output$mod6_pre_heatmap_anno_column <- renderUI({
    selectInput("pre_heatmap_anno_column", label = NULL,
                width = "220px",
                multiple=TRUE,
                selected=NULL,
                choices = colnames(df_coldata())
    )
  })
  output$mod6_pre_heatmap_anno_row <- renderUI({
    selectInput("pre_heatmap_anno_row", label = NULL,
                width = "220px",
                multiple=TRUE,
                selected=NULL,
                choices = colnames(df_rowdata())
    )
  })
  # control widget of differential analysis
  output$mod6_outcome <- renderUI({
    selectInput("outcome", label = NULL,
                width = "220px",
                choices = colnames(df_coldata())
    )
  })
  output$mod6_group_col_barplot <- renderUI({
    selectInput("group_col_barplot", label = NULL,
                width = "220px",
                selected=NULL,
                choices = colnames(df_rowdata())
    )
  })
  output$mod6_color_col_barplot <- renderUI({
    selectInput("color_col_barplot", label = NULL,
                width = "220px",
                selected=NULL,
                choices = colnames(df_rowdata())
    )
  })
  # Define rendering logic of outputs in Module-Real-Time Pipeline(coded as mod6) ------------------------------
  # record the file path of selected file
  # (captured on "Investigate" so the tables below re-render only on demand)
  mod6_filepath <-
    eventReactive(input$mod6_go, ## delayed output
                  {c(input$file1$datapath)
                  })
  # print table when clicking "investigate" button
  observeEvent(input$mod6_go,{
    output$mod6_main_panel <- renderUI({
      list(dataTableOutput("mod6_assay"),
           br(),br(),
           dataTableOutput("mod6_rowdata"),
           br(),br(),
           dataTableOutput("mod6_coldata"))
    })
  })
  # render logic of the table
  # the three previews below re-read the workbook sheet selected in the sidebar
  output$mod6_assay <- renderDataTable({
    table <- read_excel(as.character(mod6_filepath()),
                        col_names = input$header,
                        sheet=input$assay_sheet)
    datatable(table,
              caption="Original Assay Data",
              options = list(
                # limit number of rows
                pageLength = 10,
                lengthMenu = c(10, 20, 50),
                autoWidth = TRUE
              ))
  })
  output$mod6_rowdata <- renderDataTable({
    table <- read_excel(as.character(mod6_filepath()),
                        col_names = input$header,
                        sheet=input$rowdata_sheet)
    datatable(table,
              caption="Original rowData",
              options = list(
                # limit number of rows
                pageLength = 10,
                lengthMenu = c(10, 20, 50),
                autoWidth = TRUE
              ))
  })
  output$mod6_coldata <- renderDataTable({
    table <- read_excel(as.character(mod6_filepath()),
                        col_names = input$header,
                        sheet=input$coldata_sheet)
    datatable(table,
              caption="Original colData",
              options = list(
                # limit number of rows
                pageLength = 10,
                lengthMenu = c(10, 20, 50),
                autoWidth = TRUE
              ))
  })
  # each "Run" button swaps the main panel for the corresponding log output
  observeEvent(input$mod6_go_load,{
    # define main panel for loading section
    output$mod6_main_panel <- renderUI({
      tagAppendAttributes(verbatimTextOutput("log_load"),
                          style="white-space:pre-wrap;")
    })
  })
  observeEvent(input$mod6_go_preprocess,{
    # define main panel for preprocessing section
    output$mod6_main_panel <- renderUI({
      tagAppendAttributes(verbatimTextOutput("log_preprocess"),
                          style="white-space:pre-wrap;")
    })
  })
  observeEvent(input$mod6_go_differ,{
    # define main panel of differential analysis
    output$mod6_main_panel <- renderUI({
      tagAppendAttributes(verbatimTextOutput("log_differ"),
                          style="white-space:pre-wrap;")
    })
  })
  # get loading SE
  # Builds the SummarizedExperiment from the uploaded workbook: assay first,
  # then feature and sample annotations, keyed by the chosen ID columns.
  D_load <- reactive({
    ## loading D
    file_data <- as.character(input$file1$datapath)
    D <-
      mt_load_xls(file=file_data,
                  sheet=input$assay_sheet,
                  samples_in_row=input$mod6_assay_in_row,
                  id_col=input$assay_id_column) %>%
      mt_anno_xls(file=file_data,
                  sheet=input$rowdata_sheet,
                  anno_type="features",
                  anno_id_col=input$rowdata_id_column,
                  data_id_col = "name") %>%
      mt_anno_xls(file=file_data,
                  sheet=input$coldata_sheet,
                  anno_type="samples",
                  anno_id_col =input$coldata_id_column,
                  data_id_col ="sample") %>%
      mt_reporting_data() %>%
      {.}
    ## return D
    D
  })
# get proprocessing SE
D_preprocess <- reactive({
## preprocessing D
D <- D_load() %>%
mt_reporting_heading(heading = "Preprocessing", lvl=1) %>%
mt_reporting_heading(heading = "Filtering", lvl = 2) %>%
mt_plots_missingness(feat_max=input$mod6_filter_feat_max,samp_max = input$mod6_filter_sample_max) %>%
mt_pre_filter_missingness(feat_max = input$mod6_filter_feat_max, samp_max = input$mod6_filter_sample_max) %>%
mt_plots_missingness(feat_max=input$mod6_filter_feat_max, samp_max = input$mod6_filter_sample_max) %>%
mt_anno_missingness(anno_type = "samples", out_col = "missing") %>%
mt_anno_missingness(anno_type = "features", out_col = "missing") %>%
mt_reporting_heading(heading = "Normalization", lvl = 2) %>%
mt_plots_sample_boxplot(color=!!sym(input$pre_sample_color_column), title = "Original", plot_logged = T) %>%
{.}
if(!is.null(input$pre_batch_column)){
D %<>%
mt_pre_batch_median(batch_col = input$pre_batch_column)
}
D <- D %>%
mt_plots_sample_boxplot(color=!!sym(input$pre_sample_color_column), title = "After batch correction", plot_logged = T) %>%
mt_pre_norm_quot(feat_max = input$mod6_feat_max_norm) %>%
mt_plots_dilution_factor(in_col=input$pre_sample_color_column) %>%
mt_plots_sample_boxplot(color=!!sym(input$pre_sample_color_column), title = "After normalization", plot_logged = T) %>%
mt_pre_trans_log() %>%
mt_pre_impute_knn() %>%
mt_plots_sample_boxplot(color=!!sym(input$pre_sample_color_column), title = "After imputation", plot_logged = T) %>%
mt_pre_outlier_detection_univariate() %>%
mt_reporting_data() %>%
mt_reporting_heading(heading = "Global Statistics", lvl = 1) %>%
{.}
## add PCA/UMAP plots
lapply(input$pre_pca_color_column, function(x){
D <<- D %>%
mt_plots_pca(scale_data = T, title = sprintf("scaled PCA - %s",x), color=!!sym(x), size=2.5, ggadd=scale_size_identity()) %>%
mt_plots_umap(scale_data = T, title = sprintf("scaled UMAP - %s",x), color=!!sym(x), size=2.5, ggadd=scale_size_identity()) %>%
{.}
}) %>% invisible
## add heatmap
D %<>%
mt_plots_heatmap(scale_data = T, annotation_col = input$pre_heatmap_anno_column, annotation_row = input$pre_heatmap_anno_row,
clustering_method = "ward.D2", fontsize = 5, cutree_rows = 3, cutree_cols = 3, color=gplots::bluered(101)) %>%
{.}
## return D
D
})
## get differential analysis SE
D_differ <- reactive({
# Differential analysis D
D <- D_preprocess() %>%
mt_reporting_heading(heading = "Statistical Analysis", lvl = 1) %>%
diff_analysis_func(var=input$outcome,
binary=input$mod6_outcome_binary,
analysis_type=input$mod6_analysis_type,
mult_test_method=input$mod6_mult_test_method,
alpha=input$mod6_sig_threshold,
group_col_barplot=input$group_col_barplot,
color_col_barplot=input$color_col_barplot) %>%
{.}
## return D
D
})
# render logic of the log text of data loading
output$log_load <- renderPrint({
get_log_text(D_load())
})
# render logic of the log text of preprocessing
output$log_preprocess <- renderPrint({
# loading log
text_load <- get_log_text(D_load())
# preprocessing log
text_preprocess <- get_log_text(D_preprocess())
# paste log text
str <- paste(text_load, text_preprocess, sep = "\n")
cat(str)
})
# render logic of the log text of differential analysis
output$log_differ <- renderPrint({
# loading log
text_load <- get_log_text(D_load())
# preprocessing log
text_preprocess <- get_log_text(D_preprocess())
# differential analysis log
text_differ <- get_log_text(D_differ())
# paste log text
str <- paste(text_load, text_preprocess, text_differ, sep = "\n")
cat(str)
})
# download SE button
# https://mastering-shiny.org/action-transfer.html
output$download_se <- downloadHandler(
filename = function() {
paste0("SE_", Sys.Date(), ".Rdata")
},
content = function(fname) {
## loading D
file_data <- as.character(input$file1$datapath)
D <-
mt_load_xls(file=file_data,
sheet=input$assay_sheet,
samples_in_row=input$mod6_assay_in_row,
id_col=input$assay_id_column) %>%
mt_anno_xls(file=file_data,
sheet=input$rowdata_sheet,
anno_type="features",
anno_id_col=input$rowdata_id_column,
data_id_col = "name") %>%
mt_anno_xls(file=file_data,
sheet=input$coldata_sheet,
anno_type="samples",
anno_id_col =input$coldata_id_column,
data_id_col ="sample") %>%
mt_reporting_data() %>%
{.}
## preprocessing D
D <- D %>%
mt_reporting_heading(heading = "Preprocessing", lvl=1) %>%
mt_reporting_heading(heading = "Filtering", lvl = 2) %>%
mt_plots_missingness(feat_max=input$mod6_filter_feat_max,samp_max = input$mod6_filter_sample_max) %>%
mt_pre_filter_missingness(feat_max = input$mod6_filter_feat_max, samp_max = input$mod6_filter_sample_max) %>%
mt_plots_missingness(feat_max=input$mod6_filter_feat_max, samp_max = input$mod6_filter_sample_max) %>%
mt_anno_missingness(anno_type = "samples", out_col = "missing") %>%
mt_anno_missingness(anno_type = "features", out_col = "missing") %>%
mt_reporting_heading(heading = "Normalization", lvl = 2) %>%
mt_plots_sample_boxplot(color=!!sym(input$pre_sample_color_column), title = "Original", plot_logged = T) %>%
{.}
if(!is.null(input$pre_batch_column)){
D %<>%
mt_pre_batch_median(batch_col = input$pre_batch_column)
}
D <- D %>%
mt_plots_sample_boxplot(color=!!sym(input$pre_sample_color_column), title = "After batch correction", plot_logged = T) %>%
mt_pre_norm_quot(feat_max = input$mod6_feat_max_norm) %>%
mt_plots_dilution_factor(in_col=input$pre_sample_color_column) %>%
mt_plots_sample_boxplot(color=!!sym(input$pre_sample_color_column), title = "After normalization", plot_logged = T) %>%
mt_pre_trans_log() %>%
mt_pre_impute_knn() %>%
mt_plots_sample_boxplot(color=!!sym(input$pre_sample_color_column), title = "After imputation", plot_logged = T) %>%
mt_pre_outlier_detection_univariate() %>%
mt_reporting_data() %>%
mt_reporting_heading(heading = "Global Statistics", lvl = 1) %>%
{.}
## add PCA/UMAP plots
lapply(input$pre_pca_color_column, function(x){
D <<- D %>%
mt_plots_pca(scale_data = T, title = sprintf("scaled PCA - %s",x), color=!!sym(x), size=2.5, ggadd=scale_size_identity()) %>%
mt_plots_umap(scale_data = T, title = sprintf("scaled UMAP - %s",x), color=!!sym(x), size=2.5, ggadd=scale_size_identity()) %>%
{.}
}) %>% invisible
## add heatmap
D %<>%
mt_plots_heatmap(scale_data = T, annotation_col = input$pre_heatmap_anno_column, annotation_row = input$pre_heatmap_anno_row,
clustering_method = "ward.D2", fontsize = 5, cutree_rows = 3, cutree_cols = 3, color=gplots::bluered(101)) %>%
{.}
# Differential analysis D
D <- D %>%
mt_reporting_heading(heading = "Statistical Analysis", lvl = 1) %>%
diff_analysis_func(var=input$outcome,
binary=input$mod6_outcome_binary,
analysis_type=input$mod6_analysis_type,
mult_test_method=input$mod6_mult_test_method,
alpha=input$mod6_sig_threshold,
group_col_barplot=input$group_col_barplot,
color_col_barplot=input$color_col_barplot) %>%
{.}
# write Rdata to local
save(D, file=fname)
}
)
  # Define rendering logic of control widgets in Module-Annotations Explorer(coded as mod5) ----------------------
  # Sidebar widgets depend on whether the user explores sample annotations
  # ("col") or feature annotations ("row").
  output$mod5_dimension_ui <- renderUI({
    switch(input$mod5_dimension,
           # sample annotations: two variables (each flagged continuous or
           # categorical) plus hover-text columns
           "col"=list(selectInput("mod5_var1_select",
                                  "Select the primary variable:",
                                  choices = names(colData(D)),
                                  selected = "Age",
                                  width = "220px"),
                      checkboxInput("mod5_var1_type",
                                    "Continuous",
                                    value = TRUE),
                      tags$hr(),
                      selectInput("mod5_var2_select",
                                  "Select the secondary variable:",
                                  choices = names(colData(D)),
                                  selected = "sample",
                                  width = "220px"),
                      checkboxInput("mod5_var2_type",
                                    "Continuous",
                                    value = TRUE),
                      tags$hr(),
                      selectInput("mod5_select_hover",
                                  "Select hovering text:",
                                  choices = names(colData(D)),
                                  selected = names(colData(D))[1],
                                  width = "220px",
                                  multiple=TRUE)
           ),
           # feature annotations: a single pathway pie-chart selector
           "row"=selectInput("mod5_rowdata_plot",
                             "Select one plot for row data:",
                             choices = c("SUPER_PATHWAY"),
                             width = "220px")
    )
  })
  # Output area: one plotly for "col"; two side-by-side plotlys for "row"
  # (super-pathway pie + drill-down sub-pathway pie).
  output$mod5_output_ui <- renderUI({
    switch(input$mod5_dimension,
           "col"=list(downloadButton("mod5_download_plotly", "download plotly"),
                      plotlyOutput('mod5_plot', height = 600)),
           "row"=list(fluidRow(
             splitLayout(style = "border: 1px", cellWidths = c(1000, 1000),
                         downloadButton("mod5_download_plotly", "download plotly"),
                         downloadButton("mod5_download_plotly2", "download plotly")
             )
           ),
           fluidRow(
             splitLayout(style = "height:600px; border: 1px", cellWidths = c(1000, 1000),
                         plotlyOutput('mod5_plot', height = 600),
                         plotlyOutput('mod5_plot2', height = 600)
             )
           ))
    )
  })
  # Define rendering logic of outputs in Module-Annotations Explorer(coded as mod5) ------------------------------
  # Snapshot of the mod5 inputs, refreshed only when the "go" button fires.
  # NOTE: c() coerces the logical flags to character ("TRUE"/"FALSE").
  mod5_input <- eventReactive(input$mod5_go,{
    c(input$mod5_var1_select,
      input$mod5_var1_type,
      input$mod5_var2_select,
      input$mod5_var2_type,
      input$mod5_rowdata_plot)
  })
  # Main mod5 plot. Index meanings for mod5_input(): [1]=primary var,
  # [2]=primary-is-continuous, [3]=secondary var, [4]=secondary-is-continuous,
  # [5]=rowdata plot choice. The ==TRUE comparisons work because both sides
  # are coerced to character ("TRUE"=="TRUE").
  output$mod5_plot <- renderPlotly({
    session_store$mod5_plotly <- switch(input$mod5_dimension,
                                        "col"=
                                          # choose scatter/box/bar depending on which of the
                                          # two variables are continuous
                                          if(mod5_input()[2]==TRUE & mod5_input()[4]==TRUE){
                                            mod5_scatter(D, x=mod5_input()[3],
                                                         y=mod5_input()[1],
                                                         hover = input$mod5_select_hover)
                                          } else if(mod5_input()[2]==TRUE & mod5_input()[4]==FALSE) {
                                            mod5_boxplot(D, x=mod5_input()[3],
                                                         x_cate = FALSE,
                                                         y=mod5_input()[1],
                                                         y_cate = TRUE,
                                                         fill=mod5_input()[3],
                                                         hover=input$mod5_select_hover)
                                          } else if(mod5_input()[2]==FALSE & mod5_input()[4]==TRUE) {
                                            mod5_boxplot(D, x=mod5_input()[1],
                                                         x_cate = FALSE,
                                                         y=mod5_input()[3],
                                                         y_cate = TRUE,
                                                         fill=mod5_input()[1],
                                                         hover=input$mod5_select_hover)
                                          } else {
                                            mod5_barplot(D, x=mod5_input()[3],
                                                         fill=mod5_input()[1],
                                                         hover = input$mod5_select_hover)
                                          },
                                        "row"=
                                          # pie chart of feature counts per pathway; clicks are
                                          # captured via source "mod5-click" for the drill-down
                                          rowData(D) %>%
                                          data.frame %>%
                                          dplyr::rename(var=mod5_input()[5]) %>%
                                          dplyr::group_by(var) %>%
                                          dplyr::summarise(count=n()) %>%
                                          plot_ly(labels = ~var,
                                                  values = ~count,
                                                  type = 'pie',
                                                  textposition = 'inside',
                                                  source="mod5-click",
                                                  title="<b>Distribution of Super Pathway</b>") %>%
                                          layout(autosize = F, width = 1000, height = 500,
                                                 uniformtext=list(minsize=12, mode='hide'),
                                                 legend = list(x = 1,
                                                               y = .5,
                                                               tracegroupgap = 5)
                                          )
    )
    session_store$mod5_plotly
  }
  )
# download button
output$mod5_download_plotly <- downloadHandler(
filename = function() {
paste("data-", Sys.Date(), ".html", sep = "")
},
content = function(file) {
saveWidget(as_widget(session_store$mod5_plotly), file, selfcontained = TRUE)
}
)
  ## to see the stored data of clicking
  # output$info <- renderPrint({
  #   d5 <- event_data("plotly_click", source = "mod5-click")
  #   if(!is.null(d5)){
  #     d5
  #   }
  # })
  # Drill-down pie: when a slice of the super-pathway pie is clicked, show the
  # SUB_PATHWAY distribution within that super pathway. The rendered widget is
  # stored under session_store$mod5_plot2.
  output$mod5_plot2 <- renderPlotly({
    d5 <- event_data("plotly_click", source = "mod5-click")
    pie_dat <- as.data.frame(rowData(D))
    if (!is.null(d5)){
      # plotly orders pie slices opposite to the data frame order, hence rev();
      # pointNumber is 0-based, hence the +1
      # NOTE(review): assumes slice order matches rev() of row order — confirm
      # against the plotly pie sorting behavior
      lvls <- rev(pie_dat$SUPER_PATHWAY)
      label <- lvls[round(as.numeric(d5$pointNumber))+1]
      session_store$mod5_plot2 <-
        pie_dat[pie_dat$SUPER_PATHWAY == label, ] %>%
        dplyr::rename(var="SUB_PATHWAY") %>%
        dplyr::group_by(var) %>%
        dplyr::summarise(count=n()) %>%
        plot_ly(labels = ~var,
                values = ~count,
                type = 'pie',
                textposition = 'inside',
                title=paste0("<b>Distribution of Sub Pathway in Specified Super Pathway - </b>", label)
        ) %>%
        layout(autosize = F, width = 1000, height = 500,
               uniformtext=list(minsize=12, mode='hide'),
               legend = list(x = 1,
                             y = .5,
                             tracegroupgap = 5)
        )
      session_store$mod5_plot2
    }
  })
# download button
output$mod5_download_plotly2 <- downloadHandler(
filename = function() {
paste("data-", Sys.Date(), ".html", sep = "")
},
content = function(file) {
saveWidget(as_widget(session_store$mod5_plotly2), file, selfcontained = TRUE)
}
)
  # Define rendering logic of control widgets in Module-Annotations Explorer(coded as mod3) ------------------------
  # Extra data-type selector is only relevant for PCA (scores vs loadings).
  output$mod3_pca_data <- renderUI({
    if(input$mod3_select_plot=="pca"){
      selectInput("mod3_pca_data_type", "Select data type for PCA:",
                  width = "220px",
                  choices = c("scores", "loadings"),
                  selected = "scores"
      )
    } else {
      NULL
    }
  })
  # create intermediate var to indicate coloring widgets
  # Collapses (plot type, pca data type) into one of three widget layouts.
  inter_var <- reactive({
    if (input$mod3_select_plot=="pca" & input$mod3_pca_data_type=="scores") {
      "pca-scores"
    } else if(input$mod3_select_plot=="pca" & input$mod3_pca_data_type=="loadings"){
      "pca-loadings"
    } else {
      "umap"
    }
  })
  # create reactive plotting argument for PCA/UMAP
  # Scores/UMAP color and hover by sample annotations (colData); loadings
  # color and hover by feature annotations (rowData).
  output$mod3_plot_argument <- renderUI({
    switch(
      inter_var(),
      "pca-scores"=list(
        checkboxInput("mod3_scale_data", "Scaled data",
                      value = TRUE
        ),
        selectInput("mod3_select_colData",
                    "Select one coloring variable:",
                    choices = names(colData(D)),
                    selected = "BOX.NUMBER",
                    width = "220px"
        ),
        checkboxInput("mod3_checkbox_factor",
                      "Categorical Coloring",
                      value = FALSE
        ),
        selectInput("mod3_select_hover",
                    "Select hovering text:",
                    # selectInput coerces its output to character
                    # https://github.com/rstudio/shiny/issues/2367
                    # choices = setNames(seq_along(colData(D)), names(colData(D))),
                    choices = names(colData(D)),
                    selected = "sample",
                    width = "220px",
                    multiple=TRUE
        )
      ),
      "pca-loadings"=list(
        checkboxInput("mod3_scale_data", "Scaled data",
                      value = TRUE
        ),
        selectInput("mod3_select_colData",
                    "Select one coloring variable:",
                    choices = names(rowData(D)),
                    selected = "SUPER_PATHWAY",
                    width = "220px"
        ),
        checkboxInput("mod3_checkbox_factor",
                      "Categorical Coloring",
                      value = FALSE
        ),
        selectInput("mod3_select_hover",
                    "Select hovering text:",
                    # choices = setNames(seq_along(rowData(D)), names(rowData(D))),
                    choices = names(rowData(D)),
                    selected = "name",
                    width = "220px",
                    multiple=TRUE
        )
      ),
      # UMAP additionally exposes the n_neighbors hyperparameter
      "umap"=list(numericInput("mod3_umap_n_neighbors",
                               "Number of neighbors for UMAP:",
                               value = 15,
                               width = "220px"
      ),
      checkboxInput("mod3_scale_data", "Scaled data",
                    value = TRUE
      ),
      selectInput("mod3_select_colData",
                  "Select one coloring variable:",
                  choices = names(colData(D)),
                  selected = "BOX.NUMBER",
                  width = "220px"
      ),
      checkboxInput("mod3_checkbox_factor",
                    "Categorical Coloring",
                    value = FALSE
      ),
      selectInput("mod3_select_hover",
                  "Select hovering text:",
                  # choices = setNames(seq_along(colData(D)), names(colData(D))),
                  choices = names(colData(D)),
                  selected = "sample",
                  width = "220px",
                  multiple=TRUE
      )
      )
    )
  })
  # create reactive inputs list
  # Snapshot of mod3 inputs taken on "go"; c() coerces everything to character.
  # Indices: [1]=plot type, [2]=color var, [3]=scale flag, [4]=categorical
  # flag, [5]=pca data type, [6]=umap n_neighbors.
  mod3_input_object <- eventReactive(input$mod3_go,
                                     {c(input$mod3_select_plot,
                                        input$mod3_select_colData,
                                        input$mod3_scale_data,
                                        input$mod3_checkbox_factor,
                                        input$mod3_pca_data_type,
                                        input$mod3_umap_n_neighbors)}
  )
  # Define rendering logic of outputs in Module-Annotations Explorer(coded as mod3) --------------------------------
  # render pca/umap of mod3
  # Dispatches to the app-level mod3_plots_pca / mod3_plots_umap helpers and
  # keeps the widget in session_store for the download handler below.
  output$mod3_plot <- renderPlotly({
    session_store$mod3_plotly <- if (mod3_input_object()[1]=="pca"){
      mod3_plots_pca(D = D,
                     scale_data = mod3_input_object()[3],
                     color = mod3_input_object()[2],
                     categorizing=mod3_input_object()[4],
                     data_type = mod3_input_object()[5],
                     hover = input$mod3_select_hover
      )
    } else {
      mod3_plots_umap(D = D,
                      scale_data = mod3_input_object()[3],
                      color = mod3_input_object()[2],
                      categorizing=mod3_input_object()[4],
                      # stored as character in the input snapshot, so convert back
                      n_neighbors = as.numeric(mod3_input_object()[6]),
                      hover = input$mod3_select_hover
      )
    }
    session_store$mod3_plotly
  })
  # download button
  # Saves the current PCA/UMAP plotly widget as a self-contained HTML file.
  output$mod3_download_plotly <- downloadHandler(
    filename = function() {
      paste("data-", Sys.Date(), ".html", sep = "")
    },
    content = function(file) {
      saveWidget(as_widget(session_store$mod3_plotly), file, selfcontained = TRUE)
    }
  )
  # Define rendering logic of control widgets in Module-All Results Explorer(coded as mod1) ------------------------
  # create stat_name list dependent on radio button
  # obj_name is an app-level index of all results in D (columns V1=category,
  # V2=object type, stat_name, cnt) — presumably built in global.R; verify.
  output$mod1_select_statname_ui <- renderUI({
    selectInput("mod1_select_statname", "Select one stat name:",
                width = "220px",
                choices = dplyr::distinct(obj_name[obj_name$V1==input$mod1_radio, ], stat_name)$stat_name
    )
  })
  # create object list dependent on radio button and stat_name
  # The "stats" category has no object subtype, so no selector is shown.
  output$mod1_select_object_ui <- renderUI({
    if (input$mod1_radio=="stats"){
      NULL
    } else {
      selectInput("mod1_select_object", "Select one object:",
                  width = "220px",
                  choices = dplyr::distinct(obj_name[obj_name$stat_name==input$mod1_select_statname&obj_name$V1==input$mod1_radio, ], V2)$V2
      )
    }
  })
  # create indicator of box plot output
  box_switch <- reactive({
    if (input$mod1_select_object=="box"){
      "box_plot"
    } else {
      "non_box_plot"
    }
  })
  ## get the order of selected stat_name
  # Position of the selected stat_name among the box-plot entries (1-based);
  # falls back to 1 when the stat_name has no box plot.
  ord <- reactive({
    # assign a data frame of all the object names of box plots
    # filter() cannot run in Shiny, use subset() instead
    box_obj_name <- subset(obj_name, V1=="plots"&V2=="box")
    box_output_order <- box_obj_name %>%
      dplyr::mutate(order=seq(from=1, to=n()))
    if(input$mod1_select_statname %in% box_output_order$stat_name){
      box_output_order[box_output_order$stat_name==input$mod1_select_statname, ]$order
    } else {
      1
    }
  })
  # create reactive inputs list
  # Snapshot on "go": [1]=category (plots/stats), [2]=stat name, [3]=object.
  mod1_input_object <- eventReactive(input$mod1_go, ## delayed output
                                     {c(input$mod1_radio,
                                        input$mod1_select_statname,
                                        input$mod1_select_object)}
  )
  # Define rendering logic of outputs in Module-All Results Explorer(coded as mod1) --------------------------------
  # Insert the right number of plot output objects into UI
  # Dynamically creates one plotOutput per plot object ("Plot1", "Plot2", ...);
  # the matching renderPlot calls are registered in the loop further below.
  output$mod1_output_plot <- renderUI({
    ## limit plots to specified stat_name
    obj_name <- subset(obj_name, V1==mod1_input_object()[1])
    obj_name <- subset(obj_name, V2==mod1_input_object()[3])
    output_order <- obj_name %>%
      dplyr::mutate(order=seq(from=1, to=n()))
    output_order <- subset(output_order, stat_name==mod1_input_object()[2])
    plots <- list()
    for(plot_i in seq_along(output_order$order)){
      plots[[plot_i]] <- mtm_res_get_entries(D, c(mod1_input_object()[1], mod1_input_object()[3]))[[output_order$order[plot_i]]]
    }
    # there are multiple plots
    len_i <- length(plots)
    # some plots have multiple objects
    len_j <- length(plots[[1]]$output)
    # name every plot object in UI
    mod1_plot_output_list <- lapply(1:(len_i*len_j), function(i) {
      plotname <- paste("Plot", i, sep="")
      # locate the row in the `plots`
      row_n <- ceiling(i/len_j)
      ## set dynamic height of box scatter plots based on output2
      # output2 presumably holds a facet/row count for box-scatter plots —
      # verify against the maplet plot entry structure
      height <- if(plots[[1]]$fun[2]=="box"&plots[[1]]$fun[3]=="scatter"&!is.null(plots[[row_n]]$output2)){
        as.numeric(plots[[row_n]]$output2)*150
      } else {
        560
      }
      plotOutput(plotname, height = height, width = 850)
    })
    # Convert the list to a tagList - this is necessary for the list of items
    # to display properly.
    do.call(tagList, mod1_plot_output_list)
  })
  # Call renderPlot for each one. Plots are only actually generated when they
  # are visible on the web page.
  # get the max number of objects
  # Register renderPlot for every possible slot up-front (max over all
  # stat_name/object combinations) so the dynamic UI above always finds a
  # matching output, regardless of the current selection.
  num_df <- subset(obj_name, V1=="plots")
  num_df <- num_df %>%
    dplyr::group_by(V2, stat_name) %>%
    dplyr::summarise(cnt_sum=sum(cnt))
  max_plot <- max(num_df$cnt_sum)
  for (i in 1:max_plot) {
    # Need local so that each item gets its own number. Without it, the value
    # of i in the renderPlot() will be the same across all instances, because
    # of when the expression is evaluated.
    local({
      my_i <- i
      plotname <- paste("Plot", my_i, sep="")
      output[[plotname]] <- renderPlot({
        ## limit plots to specified stat_name
        obj_name <- subset(obj_name, V1==mod1_input_object()[1])
        obj_name <- subset(obj_name, V2==mod1_input_object()[3])
        output_order <- obj_name %>%
          dplyr::mutate(order=seq(from=1, to=n()))
        output_order <- subset(output_order, stat_name==mod1_input_object()[2])
        plots <- list()
        for(plot_i in seq_along(output_order$order)){
          plots[[plot_i]] <- mtm_res_get_entries(D, c(mod1_input_object()[1], mod1_input_object()[3]))[[output_order$order[plot_i]]]$output
        }
        # there are multiple plots
        len_i <- length(plots)
        # some plots have multiple objects
        len_j <- length(plots[[1]])
        # locate the row in the `plots`
        row_n <- ceiling(my_i/len_j)
        # locate the column in the `plots`
        col_n <- ifelse((my_i %% len_j)==0, len_j, (my_i %% len_j))
        # render the plot object in each loop
        plots[[row_n]][col_n]
      })
    })
  }
  # render stats table of Mod1
  # Joins feature annotations with the selected stat table and shows the key
  # statistics columns in scientific notation.
  output$mod1_output_table <- renderDataTable({
    table <- data.frame(var=row.names(rowData(D)), rowData(D)) %>%
      left_join(mtm_get_stat_by_name(D, mod1_input_object()[2]),
                by=c("var"="var")
      ) %>%
      # NOTE(review): hard-coded column positions — fragile if the annotation
      # sheet layout changes; confirm columns 2 and 20:26 are still correct
      dplyr::select(c(2, 20:26))
    ## put interested columns ahead
    # "term" is present for model-based stats (e.g. lm) but not for all tests
    table <- if ('term' %in% names(table)) {
      table %>%
        dplyr::select(name, statistic, p.value, p.adj, term, dplyr::everything()) %>%
        ## scientific notation
        dplyr::mutate(statistic=formatC(statistic, format = "E", digits = 2),
                      p.value=formatC(p.value, format = "E", digits = 2),
                      p.adj=formatC(p.adj, format = "E", digits = 2),
                      estimate=formatC(estimate, format = "E", digits = 2),
                      std.error=formatC(std.error, format = "E", digits = 2)
        )
    } else {
      table %>%
        dplyr::select(name, statistic, p.value, p.adj, dplyr::everything())
    }
    datatable(table,
              options = list(
                # limit number of rows
                pageLength = 10,
                lengthMenu = c(10, 20, 50),
                ## set column width
                autoWidth = TRUE,
                columnDefs = list(list(width = '100px', targets = c(2:4))),
                scrollX = TRUE
              ))
  })
  # render plots or table
  # Main panel switches between the dynamic plot list and the stats table.
  output$mod1_output <- renderUI({
    switch(
      mod1_input_object()[1],
      "plots" = uiOutput("mod1_output_plot"),
      "stats" = dataTableOutput("mod1_output_table")
    )
  })
  # Define rendering logic of outputs in Module-Feature Results Explorer(coded as mod4) --------------------------------
  # Module 4: general reactive stats table
  # Collects every univariate stat result for the selected metabolite into one
  # table, tagging each row with its stat name.
  mod4_metabolite_table <-
    eventReactive(input$mod4_go,
                  {
                    table <- data.frame()
                    # assign an object of all stats tables
                    table_stats <- mtm_res_get_entries(D, c("stats", "univ"))
                    # assign an object of all stats plots
                    plot_stats <- mtm_res_get_entries(D, c("plots", "stats"))
                    # NOTE(review): loop starts at 2 and indexes plot_stats at
                    # i-1, i.e. the first stats entry is assumed to have no
                    # matching stats plot — confirm against the pipeline order
                    for (i in 2:length(table_stats)) {
                      tab <- table_stats[[i]]$output$table %>%
                        dplyr::mutate(`stat name` = plot_stats[[i - 1]]$args$stat_list)
                      table <- rbind(table, tab)
                    }
                    table <- table %>%
                      dplyr::select(var, statistic, p.value, p.adj, `stat name`, estimate, std.error) %>%
                      dplyr::mutate(
                        statistic = formatC(statistic, format = "E", digits = 2),
                        p.value = formatC(p.value, format = "E", digits = 2),
                        p.adj = formatC(p.adj, format = "E", digits = 2),
                        estimate = formatC(estimate, format = "E", digits = 2),
                        std.error = formatC(std.error, format = "E", digits = 2)
                      ) %>%
                      dplyr::filter(var == input$mod4_metabolite) %>%
                      dplyr::rename("name" = var)
                  })
  # Module 4: output the stats table
  # Single-row selection drives which stat's volcano plot is shown below.
  output$mod4_table <- renderDataTable({
    datatable(mod4_metabolite_table(),
              selection = "single",
              options = list(
                dom = 't',
                # limit number of rows
                pageLength = 10,
                lengthMenu = c(10, 20, 50)
              )
    )
  })
  # Remember the last selected row so downstream plots survive deselection.
  observe({
    if (!is.null(input$mod4_table_rows_selected)) {
      session_store$mod4.tb.row <- input$mod4_table_rows_selected
    }
  })
  # mod4: extract the stat_name
  # Stat name of the currently selected table row.
  stat_name_selected <- reactive({
    mod4_metabolite_table() %>%
      dplyr::slice(round(as.numeric(session_store$mod4.tb.row))) %>%
      dplyr::select(`stat name`)
  })
  # Module 4: volcano plot
  # The volcano panel only appears once a stats-table row has been selected.
  output$mod4.p1 <- renderUI({
    if (!is.null(session_store$mod4.tb.row)) {
      list(
        downloadButton("mod4_download_plotly_volcano", "download volcano plot"),
        plotlyOutput('mod4_volcano', height = 800)
      )
    }
  })
  # Module 4: volcano plot by using stat_name
  # Highlights the selected metabolite in red; clicks on this plot (source
  # "mod4_sub_vol") drive the box/scatter panel below.
  output$mod4_volcano <- renderPlotly({
    # Get volcano data set
    data_vol <- get_data_by_name(D, "stat_name", "volcano", stat_name_selected())
    isSelected <- input$mod4_metabolite
    # Set the legend color column
    data_vol[, "isSelected"] <- ifelse(data_vol$var==isSelected, TRUE, FALSE)
    highlight_point <- data_vol[data_vol$isSelected==TRUE, ]
    plot <- data_vol %>%
      ggplot(aes(x = statistic, y = p.value, color = isSelected, label = name)) +
      geom_point() +
      # draw the selected metabolite larger on top
      geom_point(data=highlight_point, size = 3) +
      # reversed log10 axis so small p-values appear at the top
      scale_y_continuous(trans = reverselog_trans(10),
                         breaks = scales::trans_breaks("log10", function(x) 10^x),
                         labels = scales::trans_format("log10", scales::math_format(10^.x))) +
      labs(y = "p-value (10^(-y))") +
      ggtitle(paste0(stat_name_selected(), "-", isSelected)) +
      scale_color_manual(values=c("#999999", "red"))
    session_store$mod4.vol <- ggplotly(plot, source = "mod4_sub_vol") %>%
      layout(legend = list(orientation = 'h',
                           xanchor = "center",
                           x = 0.5,
                           y = -0.2,
                           title = list(text='<b> isSelected </b>')))
    session_store$mod4.vol
  })
  # Module 4: volcano plot - html file
  # Saves the volcano widget as a self-contained HTML file.
  output$mod4_download_plotly_volcano <- downloadHandler(
    filename = function() {
      paste("data-", Sys.Date(), ".html", sep = "")
    },
    content = function(file) {
      saveWidget(as_widget(session_store$mod4.vol), file, selfcontained = TRUE)
    }
  )
  # Module 4: box/scatter plot
  # Panel appears only after a volcano point has been clicked; the download
  # button label follows the chosen plot type.
  output$mod4.p2 <- renderUI({
    d <- event_data("plotly_click", source = "mod4_sub_vol")
    if (!is.null(d)) {
      download.name <- ifelse(
        input$mod4.box.or.scatter == "box",
        "download box plot",
        "download scatter plot"
      )
      list(
        downloadButton("mod4_download_box_scatter", download.name),
        plotOutput("mod4.box.scatter", height = 600)
      )
    }
  })
  # Module 4: box/scatter - ui
  # Plot-type radio buttons, likewise gated on a volcano click.
  output$mod4.p.ui <- renderUI({
    d <- event_data("plotly_click", source = "mod4_sub_vol")
    if (!is.null(d)) {
      radioButtons(
        "mod4.box.or.scatter",
        "Select plot type:",
        choices = list("Box" = "box",
                       "Scatter" = "scatter"),
        selected = "scatter"
      )
    }
  })
# Module 4: box/scatter plot
output$mod4.box.scatter <- renderPlot({
# Get the data set
data <- D %>%
maplet:::mti_format_se_samplewise() %>%
tidyr::gather(var, value, dplyr::one_of(rownames(D)))
d <- event_data("plotly_click", source = "mod4_sub_vol")
if (!is.null(d)) {
data_vol <- get_data_by_name(D, "stat_name", "volcano", stat_name_selected())
# set the column curveNumber by color legend
isSelected <- input$mod4_metabolite
data_vol[, "curveNumber"] <- ifelse(data_vol$var==isSelected, 1, 0)
data_vol_true <- data_vol[data_vol$curveNumber==1, ]
data_vol_false <- data_vol[data_vol$curveNumber==0, ]
# By using click info (curveNumber & ponitNumber) to get the metabolite name
metabolite <- ifelse(d$curveNumber == 1,
data_vol_true[d$pointNumber + 1, ]$var[1],
data_vol_false[d$pointNumber + 1, ]$var[1])
term <- data_vol$term[1]
# Filter the data by metabolite name
data <- data[data$var == metabolite, ]
# Treat as categorical or not?
if (input$mod4.categorical) {
data[, term] <- factor(data[, term])
} else {
data[, term] <- as.numeric(data[, term])
}
# Draw the plot
if (input$mod4.box.or.scatter == "scatter") {
plot <- data %>%
ggplot(aes(x = !!sym(term), y = value)) +
geom_point(size = 3) +
geom_smooth(method = "lm", se = T, color = "black") +
ggtitle(metabolite)
} else {
plot <- data %>%
ggplot(aes(x = !!sym(term), y = value)) +
geom_boxplot() +
geom_jitter(size = 3, width = 0.2) +
ggtitle(metabolite)
}
}
session_store$mod4.box.scatter <- if (is.null(plot)) NULL else plot
session_store$mod4.box.scatter
})
# Module 4: scatter/box plot - png file
output$mod4_download_box_scatter <- downloadHandler(
filename = function() {
paste("data-", Sys.Date(), ".png", sep = "")
},
content = function(file) {
device <- function(..., width, height) grDevices::png(..., width = width, height = height, res = 300, units = "in")
ggsave(file, plot = session_store$mod4.box.scatter, device = device)
}
)
  # Define rendering logic of outputs in Module-Pathway Results Explorer(coded as mod2) --------------------------------
  # Module 2: create reactive inputs list
  # Snapshot on "go": [1]=stat name, [2]=plot1 choice, [3]=plot2 choice,
  # [4]=plot3 choice.
  mod2_input_object <- eventReactive(input$mod2_go,
                                     {c(input$mod2.stat,
                                        input$mod2.plot1,
                                        input$mod2.plot2,
                                        input$mod2.plot3)}
  )
  # Module 2: store reactive output plots
  # NOTE: session_store is referenced by earlier modules too; defining it here
  # works because reactive expressions are evaluated lazily after server setup.
  session_store <- reactiveValues()
  # Module 2: plot 1
  output$mod2.p1 <- renderUI({
    inputs <- mod2_input_object()
    switch(
      inputs[2],
      "bar" = list(downloadButton("download_plotly_bar",
                                  "download bar plot"),
                   plotlyOutput("mod2.bar", height = 600)),
      "null" = NULL
    )
  })
  # Module 2: plot 1 - bar plot
  # Finds the stats plot whose stat_list matches the selected stat name;
  # clicks on this bar chart (source "sub_bar") drive plots 2 and 3.
  output$mod2.bar <- renderPlotly({
    inputs <- mod2_input_object()
    plots <- mtm_res_get_entries(D, c("plots", "stats"))
    # NOTE(review): if no entry matches, `plot` stays unbound and ggplotly
    # below errors — presumably the UI guarantees a match; verify
    for (i in seq_along(plots)) {
      if (plots[[i]]$args$stat_list == inputs[1]) {
        plot <- plots[[i]]$output[[1]]
      }
    }
    session_store$mod2.bar <- ggplotly(plot, source = "sub_bar") %>%
      layout(legend = list(orientation = 'h', xanchor = "center", x = 0.5, y = -0.3))
    # render plotly graph
    session_store$mod2.bar
  })
  # Module 2: plot 1 - bar plot - html file
  output$download_plotly_bar <- downloadHandler(
    filename = function() {
      paste("data-", Sys.Date(), ".html", sep = "")
    },
    content = function(file) {
      # export plotly html widget as a temp file to download.
      saveWidget(as_widget(session_store$mod2.bar), file, selfcontained = TRUE)
    }
  )
  # Module 2: plot 2
  # Chooses the second panel (equalizer or volcano). When plot1 is a bar
  # chart, the panel only appears after a bar has been clicked; when plot1 is
  # "null", the equalizer variant adds a manual pathway selector instead.
  output$mod2.p2 <- renderUI({
    inputs <- mod2_input_object()
    d <- event_data("plotly_click", source = "sub_bar")
    vol_list <- list(
      downloadButton("download_plotly_volcano",
                     "download volcano plot"),
      plotlyOutput("mod2.vol", height = 600)
    )
    # equalizer/bar -> bar/null -> plot
    plot2 <- switch(inputs[3],
                    "equalizer" = switch(
                      inputs[2],
                      "bar" = if (!is.null(d)) {
                        list(
                          downloadButton("download_plotly_eq",
                                         "download equalizer plot"),
                          plotlyOutput("mod2.equal", height = 600)
                        )
                      },
                      "null" = list(
                        downloadButton("download_plotly_eq",
                                       "download equalizer plot"),
                        uiOutput("mod2.equal.ui"),
                        plotlyOutput("mod2.equal", height = 600)
                      )
                    ),
                    "volcano" = switch(inputs[2],
                                       "bar" = if (!is.null(d)) {
                                         vol_list
                                       },
                                       "null" = vol_list))
  })
# Module 2: plot 2 - volcano plot
output$mod2.vol <- renderPlotly({
inputs <- mod2_input_object()
d <- event_data("plotly_click", source = "sub_bar")
# get the threshold for significance
alpha <- get_threshold_for_p_adj(D, inputs[1])
legend_name <- paste0("p.adj < ", alpha)
if (!is.null(d)) {
# D:SE object, inputs: sidebar value, legend_name: legend name
# d: click info for bar plot, pwvar: SUB_PATWAY/PATTHWAY, alpha: significant value (ex. p.adj < 0.1)
plot <- mod2_plot_vol(D, inputs, legend_name, d, pwvar, alpha)
} else {
plot <- mod2_plot_vol(D, inputs, legend_name, NULL, pwvar, alpha)
}
session_store$mod2.vol <- ggplotly(plot, source = "sub_vol") %>%
layout(legend = list(orientation = 'h',
xanchor = "center",
x = 0.5,
y = -0.2,
title = list(text=paste0('<b> ', legend_name, ' </b>'))))
session_store$mod2.vol
})
  # Module 2: plot 2 - volcano plot - html file
  output$download_plotly_volcano <- downloadHandler(
    filename = function() {
      paste("data-", Sys.Date(), ".html", sep = "")
    },
    content = function(file) {
      saveWidget(as_widget(session_store$mod2.vol), file, selfcontained = TRUE)
    }
  )
  # Module 2: plot 2 - equalizer plot - not bar
  # Manual pathway selector, shown only when plot1 is "null" (no bar chart to
  # click on).
  output$mod2.equal.ui <- renderUI({
    inputs <- mod2_input_object()
    data_bar <- get_data_by_name(D, "stat_list", "stats", inputs[1])
    subpathways <- data_bar$name
    selectInput(
      "mod2.equal.path",
      "Select one pathway name:",
      choices = c(unique(unlist(subpathways))),
      selected = ""
    )
  })
  # Module 2: plot 2 - equalizer plot
  # Pathway is chosen either via the dropdown above (plot1 == "null") or via
  # the bar-chart click (otherwise).
  output$mod2.equal <- renderPlotly({
    inputs <- mod2_input_object()
    # get click info for bar plot
    d <- event_data("plotly_click", source = "sub_bar")
    # get the threshold for significance
    alpha <- get_threshold_for_p_adj(D, inputs[1])
    if (inputs[2] == "null") {
      # D:SE object, inputs: sidebar value, rd: pathway annotations
      # alpha: significant value (ex. p.adj < 0.1), pwvar: SUB_PATWAY/PATTHWAY,
      # path_name: pathway name for equalizer plot, d: click info for bar plot
      plot <- mod2_plot_eq(D, inputs, rd, alpha, pwvar, input$mod2.equal.path, NULL)
    } else {
      plot <- mod2_plot_eq(D, inputs, rd, alpha, pwvar, NULL, d)
    }
    # empty placeholder instead of an error when no pathway is resolved yet
    session_store$mod2.eq <- if (is.null(plot)) plotly_empty() else ggplotly(plot, source = "sub_eq")
    session_store$mod2.eq
  })
  # Module 2: plot 2 - equalizer plot - html file
  output$download_plotly_eq <- downloadHandler(
    filename = function() {
      paste("data-", Sys.Date(), ".html", sep = "")
    },
    content = function(file) {
      saveWidget(as_widget(session_store$mod2.eq), file, selfcontained = TRUE)
    }
  )
# Module 2: plot 3 - box/scatter plot
output$mod2.p3 <- renderUI({
inputs <- mod2_input_object()
d.eq <- event_data("plotly_click", source = "sub_eq")
d.vol <- event_data("plotly_click", source = "sub_vol")
download.name <- ifelse(inputs[4]=="box", "download box plot", "download scatter plot")
plot.list <- list(
downloadButton("download_plotly_box_scatter", download.name),
plotOutput("mod2.box.scatter", height = 600)
)
if (!is.null(d.eq) | !is.null(d.vol)) {
plot.list
}
})
# Module 2: plot 3 - box/scatter plot.
# Re-renders whenever a point is clicked in the bar, equalizer, or volcano
# plot; the actual drawing is delegated to mod2_plot_box_scatter().
output$mod2.box.scatter <- renderPlot({
  inputs <- mod2_input_object()
  # Get the data set: reshape the SE object to long format, one row per
  # (sample, variable) pair.
  data <- D %>%
    maplet:::mti_format_se_samplewise() %>%
    tidyr::gather(var, value, dplyr::one_of(rownames(D)))
  # get the click info for bar/equalizer/volcano if available
  # (event_data() returns NULL when that plot has not been clicked yet)
  d.bar <- event_data("plotly_click", source = "sub_bar")
  d.eq <- event_data("plotly_click", source = "sub_eq")
  d.vol <- event_data("plotly_click", source = "sub_vol")
  # get the threshold for significance
  alpha <- get_threshold_for_p_adj(D, inputs[1])
  plot <- mod2_plot_box_scatter(D, # SE object
                                inputs, # sidebar inputs
                                d.bar, # click info for bar plot
                                d.eq, # click info for equalizer plot
                                d.vol, # click info for volcano plot
                                rd, # pathway annotations
                                pwvar, # pathway annotation column
                                input$mod2.equal.path, # pathway name if plot2 is "equalizer"
                                alpha, # significant value (ex. p.adj < 0.1)
                                input$mod2.categorical, # if treated categorical
                                data) # data for box/scatter plot
  # Cache the plot so the download handler can save it; NULL when there is
  # nothing to draw.
  session_store$mod2.box.scatter <- if (is.null(plot)) NULL else plot
  session_store$mod2.box.scatter
})
# Module 2: plot 3 - scatter/box plot - download as a png file
output$download_plotly_box_scatter <- downloadHandler(
  filename = function() {
    # Date-stamped name, e.g. "data-2024-01-31.png"
    paste0("data-", Sys.Date(), ".png")
  },
  content = function(file) {
    # High-resolution (300 dpi) png device handed to ggsave
    device <- function(..., width, height) {
      grDevices::png(..., width = width, height = height, res = 300, units = "in")
    }
    ggsave(file, plot = session_store$mod2.box.scatter, device = device)
  }
)
}
# Run the application
shinyApp(ui = ui, server = server)
|
library(raster)
# x <- hps <- nepal::pyuthan_health_facilities
# y <- hps[hps@data$HF_TYPE %in% c('Sub Health Post', 'Health Post') |
# (hps@data$VDC_NAME1 == 'Bijubar' &
# hps@data$HF_TYPE == 'Hospital') |
# hps@data$VDC_NAME1 =='Bhingri PHC' ,]
# y$hospital <- (y@data$VDC_NAME1 == 'Bijubar' &
# y@data$HF_TYPE == 'Hospital') |
# y@data$VDC_NAME1 =='Bhingri PHC'
# y$health_post <- y$HF_TYPE == 'Health Post'
# y$sub_health_post <- y$HF_TYPE == 'Sub Health Post'
# y$hub <- y$hospital | y$VDC_NAME1 == 'Khawang'
# y$type <- ifelse(y$hub, 'Hub',
# ifelse(y$health_post, 'Health post',
# ifelse(y$sub_health_post, 'Sub health post', NA)))
# hf <- y
library(nepallite)
library(timevis)
# Read in google sheet with keys
library(gsheet)
use_old <- FALSE
if(!'goog.RData' %in% dir() | !use_old){
goog <- gsheet::gsheet2tbl(url = 'https://docs.google.com/spreadsheets/d/1-3kA62OoSdy1feDpRfcE18FvbP_gj5L41A3rCVDGxCw/edit?usp=sharing')
save(goog,
file = 'goog.RData')
} else {
load('goog.RData')
} | /inst/shiny/global.R | permissive | joebrew/nepallite | R | false | false | 1,102 | r | library(raster)
# x <- hps <- nepal::pyuthan_health_facilities
# y <- hps[hps@data$HF_TYPE %in% c('Sub Health Post', 'Health Post') |
# (hps@data$VDC_NAME1 == 'Bijubar' &
# hps@data$HF_TYPE == 'Hospital') |
# hps@data$VDC_NAME1 =='Bhingri PHC' ,]
# y$hospital <- (y@data$VDC_NAME1 == 'Bijubar' &
# y@data$HF_TYPE == 'Hospital') |
# y@data$VDC_NAME1 =='Bhingri PHC'
# y$health_post <- y$HF_TYPE == 'Health Post'
# y$sub_health_post <- y$HF_TYPE == 'Sub Health Post'
# y$hub <- y$hospital | y$VDC_NAME1 == 'Khawang'
# y$type <- ifelse(y$hub, 'Hub',
# ifelse(y$health_post, 'Health post',
# ifelse(y$sub_health_post, 'Sub health post', NA)))
# hf <- y
library(nepallite)
library(timevis)
# Read in google sheet with keys
library(gsheet)
use_old <- FALSE
if(!'goog.RData' %in% dir() | !use_old){
goog <- gsheet::gsheet2tbl(url = 'https://docs.google.com/spreadsheets/d/1-3kA62OoSdy1feDpRfcE18FvbP_gj5L41A3rCVDGxCw/edit?usp=sharing')
save(goog,
file = 'goog.RData')
} else {
load('goog.RData')
} |
# Author: Namra Ansari
# Overlaying Plots
library(datasets)
head(co2)
hist(co2)
hist(co2,freq = FALSE,col = "thistle1")
curve(dnorm(x, mean = mean(co2), sd = sd(co2)),col="purple",lwd=2,add = TRUE)
lines(density(co2),col="blue",lwd=2)
lines(density(co2,adjust = 2),col="pink",lwd=2)
rug(co2,col = "grey",lwd = 2)
| /overlaying_plot.r | no_license | NamraAnsari/Learning_R | R | false | false | 318 | r | # Author: Namra Ansari
# Overlaying Plots
library(datasets)
head(co2)
hist(co2)
hist(co2,freq = FALSE,col = "thistle1")
curve(dnorm(x, mean = mean(co2), sd = sd(co2)),col="purple",lwd=2,add = TRUE)
lines(density(co2),col="blue",lwd=2)
lines(density(co2,adjust = 2),col="pink",lwd=2)
rug(co2,col = "grey",lwd = 2)
|
cat('\n\n');timestamp();cat('\n')
library(btergm)
library(parallel)
library(texreg)
data_dir <- '/home/sdowning/data/firm_nets_rnr2'
firm_i <- 'verint'
d <- 3
ncpus <- 4
parallel <- "multicore"
data_file <- file.path(data_dir,sprintf('%s_d%s.rds',firm_i,d))
nets <- readRDS(data_file)
nPeriods <- 11 ## 5
if (!("fits" %in% ls())) fits <- list()
if (!(firm_i %in% names(fits)) ) fits[[firm_i]] <- list()
if (nPeriods < length(nets)) nets <- nets[(length(nets)-nPeriods+1):length(nets)]
cat("\n------------ estimating TERGM for:",firm_i,'--------------\n')
cat(sprintf("Using %s cores\n", detectCores()))
## Edge covariate matrices: pull one named network attribute from every
## period's network and coerce it to a plain matrix (the form edgecov()
## expects). One helper replaces nine near-identical lapply() lines.
net_attr_matrices <- function(nets, attr_name) {
  lapply(nets, function(net) as.matrix(net %n% attr_name))
}
mmc       <- net_attr_matrices(nets, 'mmc')
cpc       <- net_attr_matrices(nets, 'coop')
cpp       <- net_attr_matrices(nets, 'coop_past')
cpa       <- Map('+', cpc, cpp)  # current + past cooperation, per period
cossim    <- net_attr_matrices(nets, 'cat_cos_sim')
centjoin  <- net_attr_matrices(nets, 'joint_cent_pow_n0_4')
centratio <- net_attr_matrices(nets, 'cent_ratio_pow_n0_4')
shcomp    <- net_attr_matrices(nets, 'shared_competitor')
shinv     <- net_attr_matrices(nets, 'shared_investor_nd')
####################### DEFINE MODELS ###################################
m4 <- nets ~ edges + gwesp(0, fixed = T) + gwdegree(0, fixed=T) +
nodematch("ipo_status", diff = F) +
nodematch("state_code", diff = F) +
nodecov("age") + absdiff("age") +
##nodecov("employee_na_age") +
##nodecov("sales_na_0_mn") +
edgecov(cossim) +
edgecov(centjoin) +
##edgecov(shcomp) +
edgecov(shinv) +
edgecov(mmc) +
##edgecov(cpa) +
##edgecov(cpc) +
##edgecov(cpp) +
memory(type = "stability", lag = 1) +
timecov(transform = function(t) t) +
nodecov("genidx_multilevel") +
nodecov("cent_pow_n0_4") + absdiff("cent_pow_n0_4") +
cycle(3) + cycle(4) + cycle(5)
################################ end models#######################
##
# DEFINE MODEL and MODEL NAME TO COMPUTE
##
m_x <- 'm4'
##
# SET RESAMPLES
##
R <- 2000
## RUN TERGM
fits[[firm_i]][[m_x]] <- btergm(get(m_x), R=R, parallel = parallel, ncpus = ncpus)
## SAVE SERIALIZED
fits.file <- sprintf('/home/sdowning/compnet/results/amj_rnr2/fit_%s_pd%s_R%s_%s.rds', firm_i, nPeriods, R, m_x)
saveRDS(fits, file=fits.file)
## SAVE FORMATTED REGRESSION TABLE
html.file <- sprintf('/home/sdowning/compnet/results/amj_rnr2/%s_tergm_results_pd%s_R%s_%s.html', firm_i, nPeriods, R, m_x)
htmlreg(fits[[firm_i]], digits = 2, file=html.file)
#### SAVE GOODNESS OF FIT
##gf <- gof(fits[[firm_i]][[m_x]], nsim=1000,
## statistics=c(dsp, esp, deg, geodesic, rocpr, walktrap.modularity))
##gof.file <- sprintf('/home/sdowning/compnet/results/amj_rnr2/gof_%s_pd%s_R%s_%s.rds', firm_i, nPeriods, R, m_x)
##saveRDS(gf, file=gof.file)
cat('finished successfully.')
| /R/amj_rnr2/awareness_AMJ_RNR_TERGM_m4-verint.R | no_license | sdownin/compnet-venus | R | false | false | 2,976 | r | cat('\n\n');timestamp();cat('\n')
library(btergm)
library(parallel)
library(texreg)
data_dir <- '/home/sdowning/data/firm_nets_rnr2'
firm_i <- 'verint'
d <- 3
ncpus <- 4
parallel <- "multicore"
data_file <- file.path(data_dir,sprintf('%s_d%s.rds',firm_i,d))
nets <- readRDS(data_file)
nPeriods <- 11 ## 5
if (!("fits" %in% ls())) fits <- list()
if (!(firm_i %in% names(fits)) ) fits[[firm_i]] <- list()
if (nPeriods < length(nets)) nets <- nets[(length(nets)-nPeriods+1):length(nets)]
cat("\n------------ estimating TERGM for:",firm_i,'--------------\n')
cat(sprintf("Using %s cores\n", detectCores()))
## make MMC nets list
mmc <- lapply(nets, function(net) as.matrix(net %n% 'mmc'))
cpc <- lapply(nets, function(net) as.matrix(net %n% 'coop'))
cpp <- lapply(nets, function(net) as.matrix(net %n% 'coop_past'))
cpa <- lapply(nets, function(net) as.matrix(net %n% 'coop') + as.matrix(net %n% 'coop_past') )
cossim <- lapply(nets, function(net) as.matrix(net %n% 'cat_cos_sim'))
centjoin <- lapply(nets, function(net) as.matrix(net %n% 'joint_cent_pow_n0_4'))
centratio <- lapply(nets, function(net) as.matrix(net %n% 'cent_ratio_pow_n0_4'))
shcomp <- lapply(nets, function(net) as.matrix(net %n% 'shared_competitor'))
shinv <- lapply(nets, function(net) as.matrix(net %n% 'shared_investor_nd'))
####################### DEFINE MODELS ###################################
m4 <- nets ~ edges + gwesp(0, fixed = T) + gwdegree(0, fixed=T) +
nodematch("ipo_status", diff = F) +
nodematch("state_code", diff = F) +
nodecov("age") + absdiff("age") +
##nodecov("employee_na_age") +
##nodecov("sales_na_0_mn") +
edgecov(cossim) +
edgecov(centjoin) +
##edgecov(shcomp) +
edgecov(shinv) +
edgecov(mmc) +
##edgecov(cpa) +
##edgecov(cpc) +
##edgecov(cpp) +
memory(type = "stability", lag = 1) +
timecov(transform = function(t) t) +
nodecov("genidx_multilevel") +
nodecov("cent_pow_n0_4") + absdiff("cent_pow_n0_4") +
cycle(3) + cycle(4) + cycle(5)
################################ end models#######################
##
# DEFINE MODEL and MODEL NAME TO COMPUTE
##
m_x <- 'm4'
##
# SET RESAMPLES
##
R <- 2000
## RUN TERGM
fits[[firm_i]][[m_x]] <- btergm(get(m_x), R=R, parallel = parallel, ncpus = ncpus)
## SAVE SERIALIZED
fits.file <- sprintf('/home/sdowning/compnet/results/amj_rnr2/fit_%s_pd%s_R%s_%s.rds', firm_i, nPeriods, R, m_x)
saveRDS(fits, file=fits.file)
## SAVE FORMATTED REGRESSION TABLE
html.file <- sprintf('/home/sdowning/compnet/results/amj_rnr2/%s_tergm_results_pd%s_R%s_%s.html', firm_i, nPeriods, R, m_x)
htmlreg(fits[[firm_i]], digits = 2, file=html.file)
#### SAVE GOODNESS OF FIT
##gf <- gof(fits[[firm_i]][[m_x]], nsim=1000,
## statistics=c(dsp, esp, deg, geodesic, rocpr, walktrap.modularity))
##gof.file <- sprintf('/home/sdowning/compnet/results/amj_rnr2/gof_%s_pd%s_R%s_%s.rds', firm_i, nPeriods, R, m_x)
##saveRDS(gf, file=gof.file)
cat('finished successfully.')
|
#' Calculate the degree of each node in the network
#'
#' The degree of a node is the sum of its in-degree (column sum) and its
#' out-degree (row sum).
#'
#' @param adjacency an adjacency matrix
#' @return a named vector of node degrees, sorted in decreasing order
#' @export
degree <- function(adjacency) {
  total_degree <- rowSums(adjacency) + colSums(adjacency)
  sort(total_degree, decreasing = TRUE)
}
#' Identify sink nodes in the network
#'
#' A sink is a node whose out-degree (total degree minus column sum) is zero.
#'
#' @param adjacency an adjacency matrix
#' @return a vector of node names
#' @export
sinks <- function(adjacency) {
  d <- degree(adjacency)
  out_degree <- d - colSums(adjacency)[names(d)]
  names(d)[out_degree == 0]
}
#' Identify source nodes in the network
#'
#' A source is a node whose in-degree (total degree minus row sum) is zero.
#'
#' @param adjacency an adjacency matrix
#' @return a vector of node names
#' @export
sources <- function(adjacency) {
  d <- degree(adjacency)
  in_degree <- d - rowSums(adjacency)[names(d)]
  names(d)[in_degree == 0]
}
#' Identify mediator nodes in the network
#'
#' A mediator is any node that is neither a sink nor a source.
#'
#' @param adjacency an adjacency matrix
#' @return a vector of node names
#' @export
mediators <- function(adjacency) {
  endpoints <- c(sinks(adjacency), sources(adjacency))
  nodes <- colnames(adjacency)
  nodes[!(nodes %in% endpoints)]
}
#' Retrieve all nodes in the Neighborhood of a given node
#'
#' This function mimics the behaviour of \code{neighborhood} in the
#' \code{igraph} package, but is backwards compatible with \code{R 2.5.1}
#'
#' @details
#' The neighborhood of a given order ‘o’ of a vertex ‘v’ includes all
#' vertices which are closer to ‘v’ than the order. Ie. order 0 is
#' always ‘v’ itself, order 1 is ‘v’ plus its immediate neighbors,
#' order 2 is order 1 plus the immediate neighbors of the vertices in
#' order 1, etc.
#'
#' @param node Character constant, Node you want to retrieve neighborhood of.
#' @param adjacency an adjacency matrix
#' @param order Integer giving the order of the neighborhood.
#' @param mode Character constant, it specifies how to use the direction of
#'        the edges if a directed graph is analyzed. For ‘out’ only the
#'        outgoing edges are followed, so all vertices reachable from
#'        the source vertex in at most ‘order’ steps are counted. For
#'        ‘"in"’ all vertices from which the source vertex is reachable
#'        in at most ‘order’ steps are counted. ‘"all"’ ignores the
#'        direction of the edges.
#' @return a vector of node names in the requested neighborhood.
#' @export
neighborhood <- function(node, adjacency, order, mode="all") {
  stopifnot(order >= 0)
  stopifnot(mode %in% c("in", "out", "all"))
  # is.matrix()/is.character() instead of class(x) == "...": since R 4.0.0 a
  # matrix has class c("matrix", "array"), so the old length-2 comparison
  # made stopifnot() reject every matrix.
  stopifnot(is.matrix(adjacency))
  stopifnot(is.character(node))
  if (order == 0) {
    return(node)
  }
  # Column `node` holds the edges pointing into it, row `node` the edges
  # leaving it (same orientation the original branches used).
  incoming <- names(which(adjacency[, node] == 1))
  outgoing <- names(which(adjacency[node, ] == 1))
  neighbors <- switch(mode,
    "in" = incoming,
    "out" = outgoing,
    "all" = c(incoming, outgoing)
  )
  # Recurse one order lower from each immediate neighbor; lapply (not
  # sapply) keeps the intermediate result a list regardless of how many
  # neighbors there are.
  unique(c(
    neighbors,
    unlist(lapply(neighbors, neighborhood, adjacency, order - 1, mode)),
    node
  ))
}
| /R/properties.R | no_license | sritchie73/networkTools | R | false | false | 3,054 | r | #' Calculate the degree of each node in the network
#'
#' @param adjacency an adjacency matrix
#' @return a named vector
#' @export
degree <- function(adjacency) {
sort(colSums(adjacency) + rowSums(adjacency), decreasing=TRUE)
}
#' Identify sink nodes in the network
#'
#' @param adjacency an adjacency matrix
#' @return a vector of node names
#' @export
sinks <- function(adjacency) {
d <- degree(adjacency)
names(d)[(d - colSums(adjacency)[names(d)]) == 0]
}
#' Identify source nodes in the network
#'
#' @param adjacency an adjacency matrix
#' @return a vector of node names
#' @export
sources <- function(adjacency) {
d <- degree(adjacency)
names(d)[(d - rowSums(adjacency)[names(d)]) == 0]
}
#' Identify mediator nodes in the network
#'
#' @param adjacency an adjacency matrix
#' @return a vector of node names
#' @export
mediators <- function(adjacency) {
sinks <- sinks(adjacency)
sources <- sources(adjacency)
colnames(adjacency)[!(colnames(adjacency) %in% c(sinks, sources))]
}
#' Retrieve all nodes in the Neighborhood of a given node
#'
#' This function mimics the behaviour of \code{neighborhood} in the
#' \code{igraph} package, but is backwards compatible with \code{R 2.5.1}
#'
#' @details
#' The neighborhood of a given order ‘o’ of a vertex ‘v’ includes all
#' vertices which are closer to ‘v’ than the order. Ie. order 0 is
#' always ‘v’ itself, order 1 is ‘v’ plus its immediate neighbors,
#' order 2 is order 1 plus the immediate neighbors of the vertices in
#' order 1, etc.
#'
#' @param node Character constant, Node you want to retrieve neighborhood of.
#' @param adjacency an adjacency matrix
#' @param order Integer giving the order of the neighborhood.
#' @param mode Character constant, it specifies how to use the direction of
#' the edges if a directed graph is analyzed. For ‘out’ only the
#' outgoing edges are followed, so all vertices reachable from
#' the source vertex in at most ‘order’ steps are counted. For
#' ‘"in"’ all vertices from which the source vertex is reachable
#' in at most ‘order’ steps are counted. ‘"all"’ ignores the
#' direction of the edges.
#' @return a vector of node names in the requested neighborhood.
#' @export
neighborhood <- function(node, adjacency, order, mode="all") {
stopifnot(order >= 0)
stopifnot(mode %in% c("in", "out", "all"))
stopifnot(class(adjacency) == "matrix")
stopifnot(class(node) == "character")
if (order == 0) {
node
} else {
if (mode == "in") {
neighbors <- names(adjacency[,node][adjacency[,node] == 1])
} else if (mode == "out") {
neighbors <- names(adjacency[node,][adjacency[node,] == 1])
} else {
neighbors <- c(
names(adjacency[,node][adjacency[,node] == 1]),
names(adjacency[node,][adjacency[node,] == 1])
)
}
unique(c(neighbors,
unlist(sapply(neighbors, neighborhood, adjacency, order-1, mode)),
node
))
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generateDataset.R
\name{generateDataset}
\alias{generateDataset}
\title{Generate a time series dataset}
\usage{
generateDataset()
}
\value{
\describe{
\item{X}{a matrix of 300 time series (300 rows and 256 columns)}
\item{col}{a vector, describing the class of each row (i.e. curve)}
}
}
\description{
The dataset composed by 300 curves of 256 time points, from 3 different groups.
}
| /man/generateDataset.Rd | no_license | Erwangf/funHDDC-wavelet | R | false | true | 462 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generateDataset.R
\name{generateDataset}
\alias{generateDataset}
\title{Generate a time series dataset}
\usage{
generateDataset()
}
\value{
\describe{
\item{X}{a matrix of 300 time series (300 rows and 256 columns)}
\item{col}{a vector, describing the class of each row (i.e. curve)}
}
}
\description{
The dataset composed by 300 curves of 256 time points, from 3 different groups.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/flame_generate_dataframe.R
\name{flame_generate_dataframe}
\alias{flame_generate_dataframe}
\title{Flame Database Generator}
\usage{
flame_generate_dataframe(a_list)
}
\arguments{
\item{a_list}{Flame data list}
}
\description{
Parses a list of flame values and then creates a tibble from this information.
}
\examples{
\dontrun{
flame_generate_dataframe(a_list)
}
}
| /man/flame_generate_dataframe.Rd | no_license | callowy/flames | R | false | true | 426 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/flame_generate_dataframe.R
\name{flame_generate_dataframe}
\alias{flame_generate_dataframe}
\title{Flame Database Generator}
\usage{
flame_generate_dataframe(a_list)
}
\arguments{
\item{a_list}{Flame data list}
}
\description{
Parses a list of flame values and then creates a tibble from this information.
}
\examples{
flame_generate_dataframe()
}
|
library(rtrek)
### Name: st_tiles
### Title: Return the url associated with a tile set
### Aliases: st_tiles
### ** Examples
st_tiles("galaxy1")
| /data/genthat_extracted_code/rtrek/examples/st_tiles.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 152 | r | library(rtrek)
### Name: st_tiles
### Title: Return the url associated with a tile set
### Aliases: st_tiles
### ** Examples
st_tiles("galaxy1")
|
library(data.table)

setwd("C:/Users/NNarasim/OneDrive/Temp/Coursera/04. Exploratory Data Analysis/RWorking")

# Read the raw power-consumption data; "?" encodes missing values in this file.
DT <- fread("./data/household_power_consumption.txt", sep=";", header=TRUE, na.strings="?")

# Keep only the two target days. %in% (not ==) is required here: with ==,
# the two-date vector is recycled against the column, so only alternating
# matching rows survive the filter.
febData <- as.data.frame(DT[Date %in% c("1/2/2007","2/2/2007"),])

# Build a proper datetime column first (while Date is still a string),
# then convert the date column itself.
febData$Time <- strptime(paste(febData$Date, febData$Time), "%d/%m/%Y %H:%M:%S")
febData$Date <- as.Date(febData$Date , "%d/%m/%Y")
febData$Global_active_power <- as.numeric(febData$Global_active_power)

# 2 x 2 panel of plots written straight to a png device.
png(filename="plot4.png", width=480, height=480)
par(mfrow = c(2,2))

# Panel 1: global active power over time
with(febData, plot(Time, Global_active_power, type="l", xlab = "", ylab = "Global Active Power"))

# Panel 2: voltage over time
with(febData, plot(Time, Voltage, type="l", xlab = "datetime", ylab = "Voltage"))

# Panel 3: the three sub-metering series overlaid on one set of axes
with(febData, plot(Time, Sub_metering_1, type="n", xlab = "", ylab = "Energy sub metering"))
with(febData, points(Time, Sub_metering_1, type = "l",col="black" ))
with(febData, points(Time, Sub_metering_2, type="l",col="red"))
with(febData, points(Time, Sub_metering_3, type="l",col="blue"))
legend("topright", legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), col=c("black","red","blue"), lty=1)

# Panel 4: global reactive power over time
with(febData, plot(Time, Global_reactive_power, type="l", xlab = "datetime", ylab = "Global_reactive_power"))

dev.off()
| /plot4.R | no_license | NNarasimulu/ExData_Plotting1 | R | false | false | 1,312 | r | library(data.table)
setwd("C:/Users/NNarasim/OneDrive/Temp/Coursera/04. Exploratory Data Analysis/RWorking")
require(data.table)
DT <- fread("./data/household_power_consumption.txt", sep=";", header=TRUE, na.strings="?")
febData <- as.data.frame(DT[Date == c("1/2/2007","2/2/2007"),])
febData$Time <- strptime(paste(febData$Date, febData$Time), "%d/%m/%Y %H:%M:%S")
febData$Date <- as.Date(febData$Date , "%d/%m/%Y")
febData$Global_active_power <- as.numeric(febData$Global_active_power)
png(filename="plot4.png", width=480, height=480)
par(mfrow = c(2,2))
#graph 1
with(febData, plot(Time, Global_active_power, type="l", xlab = "", ylab = "Global Active Power"))
#graph 2
with(febData, plot(Time, Voltage, type="l", xlab = "datetime", ylab = "Voltage"))
#graph 3
with(febData, plot(Time, Sub_metering_1, type="n", xlab = "", ylab = "Energy sub metering"))
with(febData, points(Time, Sub_metering_1, type = "l",col="black" ))
with(febData, points(Time, Sub_metering_2, type="l",col="red"))
with(febData, points(Time, Sub_metering_3, type="l",col="blue"))
legend("topright", legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), col=c("black","red","blue"), lty=1)
#graph 4
with(febData, plot(Time, Global_reactive_power, type="l", xlab = "datetime", ylab = "Global_reactive_power"))
dev.off()
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/parse-save.R
\name{parse_save.list}
\alias{parse_save.list}
\title{create a Parse object from a list}
\usage{
\method{parse_save}{list}(x, class_name, ...)
}
\arguments{
\item{x}{list to be saved}
\item{class_name}{class to save the object to}
\item{...}{extra arguments to pass on to parse_api_POST}
}
\description{
create a Parse object from a list
}
| /man/parse_save.list.Rd | no_license | dgrtwo/rparse | R | false | false | 442 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/parse-save.R
\name{parse_save.list}
\alias{parse_save.list}
\title{create a Parse object from a list}
\usage{
\method{parse_save}{list}(x, class_name, ...)
}
\arguments{
\item{x}{list to be saved}
\item{class_name}{class to save the object to}
\item{...}{extra arguments to pass on to parse_api_POST}
}
\description{
create a Parse object from a list
}
|
library(shiny)
library(data.table)
library(dplyr)
library(ggplot2)
library(plotly)
library(shinyWidgets)
library(shinydashboard)
#### 6.1 Estrtura página dashboard####
dados <- fread('dados_limpos.csv',encoding = 'UTF-8')
cabecalho <- dashboardHeader(title = "Dashboard PROCONs",
titleWidth = '250px')
barra_lateral <- dashboardSidebar(width = '250px')
corpo_pagina <- dashboardBody()
ui = dashboardPage(header = cabecalho,
sidebar = barra_lateral,
body = corpo_pagina)
## front-end (tela que será mostrada para o usuário)
ui2 = fluidPage(
## título da página
titlePanel("Dashboard PROCON"),
mainPanel(
## caixa de seleção
checkboxGroupInput(inputId = "select_UF",label = "Estado:",
choices = c('TODOS',unique(dados$UF)),selected = 'TODOS'),
## calendário para selecionar período
dateRangeInput(inputId = "data_abertura",label = "Data Abertura:",
start = min(as.Date(dados$DataAbertura)),#"2001-01-01",
end = max(as.Date(dados$DataAbertura))), #"2010-12-31"),
## seleção de descrição de assunto
selectizeInput(inputId = "assunto",label = "Descrição Assunto:",
choices = c('TODOS', unique(dados$DescricaoAssunto)),
selected = 'TODOS',multiple = T,options = list(maxItems = 5)),
## gráfico de linhas
plotlyOutput(outputId = 'data',width = '100%'),
## texto descritivo do gráfico de linhas
textOutput(outputId = "descData"),
## gráfico
plotlyOutput(outputId = 'uf'),
## texto descritivo do gráfico
textOutput(outputId = "descUf"),
## gráfico
plotlyOutput(outputId = 'atendida'),
## texto descritivo do gráfico
textOutput(outputId = "descAtendida"),
## gráfico
plotlyOutput(outputId = 'atendidaAno'),
## texto descritivo do gráfico
textOutput(outputId = "descAtendidaAno")
)
)
## back-end (o que o sistema irá executar para retornar para o usuário, front-end)
server = function(input, output, session) {
dados_selecionados <- reactive({
## filtro UF
if (!'TODOS' %in% input$select_UF ){
dados <- dados %>% filter(UF %in% input$select_UF)
}
## filtro ASSUNTO
if(!'TODOS' %in% input$assunto){
dados <- dados %>% filter(DescricaoAssunto %in% input$assunto)
}
## filtro DATA
dados <- dados %>% filter(as.Date(DataAbertura) >= input$data_abertura[1] &
as.Date(DataAbertura) <= input$data_abertura[2])
dados
})
## gráfico de linhas ano-mes
output$data <- renderPlotly({
ano_mes <- data.frame(table(format(as.Date(dados_selecionados()$DataAbertura),
'%Y-%m'))) %>% rename(Data = Var1, Qtd=Freq)
ano_mes$Data <- as.Date(paste(ano_mes$Data,'01',sep = '-'))
ggplotly(
ggplot(data = ano_mes, aes(Data, Qtd)) +
geom_line(group = 1) +
theme_bw() +
theme(axis.text.x = element_text(angle = 45,hjust = 1))+
ggtitle('Quantidade de Reclamações por Ano-Mês') +
scale_x_date(date_labels = '%b-%Y',breaks = '6 month')
)
})
## gráfico UF
output$uf <- renderPlotly({
ggplotly(
data.frame(table(dados$UF)) %>% rename(UF = Var1,Qtd = Freq) %>%
ggplot(aes(x = reorder(UF,Qtd),y = Qtd,
text=paste(" UF:", UF, "<br>", "QTD:",Qtd))) +
geom_bar(fill = 'blue',stat = 'identity') +
coord_flip() +
xlab('UF') + #ylab('Quantidade') +
theme_bw() +
ggtitle('Quantidade de Reclamações por UF'),
tooltip = "text"
)
})
## gráfico atendida
output$atendida <- renderPlotly({
ggplotly(
ggplot(dados) +
geom_bar(aes(Atendida),fill = c('red','green'),stat = 'count') +
ylab('Quantidade') +
theme_bw() +
ggtitle('Quantidade de Chmados Atendidos')
)
})
## gráfico atendida por ano
output$atendidaAno <- renderPlotly({
ggplotly(
data.frame(table(dados$anocalendario,dados$Atendida)) %>%
rename(Ano = Var1, Atendida = Var2, Qtd = Freq) %>%
ggplot() +
geom_bar(aes(x = Ano,y = Qtd, fill = Atendida),
stat = 'identity',position = position_dodge2()) +
theme_bw() +
ggtitle('Quantidade de Reclamações Atendidas(não) por Ano')
)
})
## retornando texto para cada campo em específico
output$descData <- renderText({
paste("Gráfico com a quantidade de reclamações feitas entre:",
min(dados_selecionados()$DataAbertura),'-',
max(dados_selecionados()$DataAbertura))
})
output$descUf <- renderText({
estados <- paste(unique(dados_selecionados()$UF),collapse = ', ')
paste("Gráfico com a quantidade de reclamações feitas por UF: ",estados)
})
output$descAtendida <- renderText({"Gráfico com a quantidade de reclamações atendidas e não atendidas"})
output$descAtendidaAno <- renderText({"Gráfico com a quantidade de reclamações atendidas e não atendidas por Ano"})
}
shinyApp(ui, server)
| /6_1_app.R | no_license | gabriellimagomes15/r_shiny | R | false | false | 5,221 | r | library(shiny)
library(data.table)
library(dplyr)
library(ggplot2)
library(plotly)
library(shinyWidgets)
library(shinydashboard)
#### 6.1 Estrtura página dashboard####
dados <- fread('dados_limpos.csv',encoding = 'UTF-8')
cabecalho <- dashboardHeader(title = "Dashboard PROCONs",
titleWidth = '250px')
barra_lateral <- dashboardSidebar(width = '250px')
corpo_pagina <- dashboardBody()
ui = dashboardPage(header = cabecalho,
sidebar = barra_lateral,
body = corpo_pagina)
## front-end (tela que será mostrada para o usuário)
ui2 = fluidPage(
## título da página
titlePanel("Dashboard PROCON"),
mainPanel(
## caixa de seleção
checkboxGroupInput(inputId = "select_UF",label = "Estado:",
choices = c('TODOS',unique(dados$UF)),selected = 'TODOS'),
## calendário para selecionar período
dateRangeInput(inputId = "data_abertura",label = "Data Abertura:",
start = min(as.Date(dados$DataAbertura)),#"2001-01-01",
end = max(as.Date(dados$DataAbertura))), #"2010-12-31"),
## seleção de descrição de assunto
selectizeInput(inputId = "assunto",label = "Descrição Assunto:",
choices = c('TODOS', unique(dados$DescricaoAssunto)),
selected = 'TODOS',multiple = T,options = list(maxItems = 5)),
## gráfico de linhas
plotlyOutput(outputId = 'data',width = '100%'),
## texto descritivo do gráfico de linhas
textOutput(outputId = "descData"),
## gráfico
plotlyOutput(outputId = 'uf'),
## texto descritivo do gráfico
textOutput(outputId = "descUf"),
## gráfico
plotlyOutput(outputId = 'atendida'),
## texto descritivo do gráfico
textOutput(outputId = "descAtendida"),
## gráfico
plotlyOutput(outputId = 'atendidaAno'),
## texto descritivo do gráfico
textOutput(outputId = "descAtendidaAno")
)
)
## back-end (o que o sistema irá executar para retornar para o usuário, front-end)
server = function(input, output, session) {
dados_selecionados <- reactive({
## filtro UF
if (!'TODOS' %in% input$select_UF ){
dados <- dados %>% filter(UF %in% input$select_UF)
}
## filtro ASSUNTO
if(!'TODOS' %in% input$assunto){
dados <- dados %>% filter(DescricaoAssunto %in% input$assunto)
}
## filtro DATA
dados <- dados %>% filter(as.Date(DataAbertura) >= input$data_abertura[1] &
as.Date(DataAbertura) <= input$data_abertura[2])
dados
})
## gráfico de linhas ano-mes
output$data <- renderPlotly({
ano_mes <- data.frame(table(format(as.Date(dados_selecionados()$DataAbertura),
'%Y-%m'))) %>% rename(Data = Var1, Qtd=Freq)
ano_mes$Data <- as.Date(paste(ano_mes$Data,'01',sep = '-'))
ggplotly(
ggplot(data = ano_mes, aes(Data, Qtd)) +
geom_line(group = 1) +
theme_bw() +
theme(axis.text.x = element_text(angle = 45,hjust = 1))+
ggtitle('Quantidade de Reclamações por Ano-Mês') +
scale_x_date(date_labels = '%b-%Y',breaks = '6 month')
)
})
## gráfico UF
output$uf <- renderPlotly({
ggplotly(
data.frame(table(dados$UF)) %>% rename(UF = Var1,Qtd = Freq) %>%
ggplot(aes(x = reorder(UF,Qtd),y = Qtd,
text=paste(" UF:", UF, "<br>", "QTD:",Qtd))) +
geom_bar(fill = 'blue',stat = 'identity') +
coord_flip() +
xlab('UF') + #ylab('Quantidade') +
theme_bw() +
ggtitle('Quantidade de Reclamações por UF'),
tooltip = "text"
)
})
## gráfico atendida
output$atendida <- renderPlotly({
ggplotly(
ggplot(dados) +
geom_bar(aes(Atendida),fill = c('red','green'),stat = 'count') +
ylab('Quantidade') +
theme_bw() +
ggtitle('Quantidade de Chmados Atendidos')
)
})
## gráfico atendida por ano
output$atendidaAno <- renderPlotly({
ggplotly(
data.frame(table(dados$anocalendario,dados$Atendida)) %>%
rename(Ano = Var1, Atendida = Var2, Qtd = Freq) %>%
ggplot() +
geom_bar(aes(x = Ano,y = Qtd, fill = Atendida),
stat = 'identity',position = position_dodge2()) +
theme_bw() +
ggtitle('Quantidade de Reclamações Atendidas(não) por Ano')
)
})
## retornando texto para cada campo em específico
output$descData <- renderText({
paste("Gráfico com a quantidade de reclamações feitas entre:",
min(dados_selecionados()$DataAbertura),'-',
max(dados_selecionados()$DataAbertura))
})
output$descUf <- renderText({
estados <- paste(unique(dados_selecionados()$UF),collapse = ', ')
paste("Gráfico com a quantidade de reclamações feitas por UF: ",estados)
})
output$descAtendida <- renderText({"Gráfico com a quantidade de reclamações atendidas e não atendidas"})
output$descAtendidaAno <- renderText({"Gráfico com a quantidade de reclamações atendidas e não atendidas por Ano"})
}
shinyApp(ui, server)
|
#QBA random forests
#Informative dataset #1
#Non-differential misclassification of predictor for simulated data to assess prediction accuracy
library(foreign)
library(randomForest)
library(pROC)
#Get arguments from the command line: argv[1] = sensitivity (se) and
#argv[2] = specificity (sp) of the misclassification process.
argv <- commandArgs(TRUE)
# Check if the command line is not empty and convert values to numerical values
if (length(argv) > 0){
  myse <- as.numeric( argv[1] )
  mysp <- as.numeric( argv[2] )
} else {
  # Abort when run without arguments; uncomment the defaults below for
  # interactive use instead.
  stop("Please input parameters")
  #myse <- 0.95
  #mysp <- 0.95
}
################################# Non-differential misclassification of all predictors #################################
dat.nondiff.misc.pred.func <- function(i, se, sp){
  ## One simulation replicate:
  ##  1. simulate n binary predictors x1-x4 and outcome y,
  ##  2. misclassify every predictor non-differentially with sensitivity
  ##     `se` and specificity `sp`,
  ##  3. fit a random forest of y on the misclassified predictors,
  ##  4. return variable importance plus performance metrics.
  ## `i` is the replicate index (unused; required by sapply).
  n=10000
  x1=rbinom(n, 1, 0.10)
  x2=rbinom(n, 1, 0.35+0.1*x1)
  x3=rbinom(n, 1, 0.20+0.15*x2)
  x4=rbinom(n, 1, 0.25+0.05*x3)
  y=rbinom(n, 1, 0.05+0.05*x1+0.38*x2+0.35*x3+0.17*x4)
  dat <- as.data.frame(cbind(x1, x2, x3, x4, y))
  dat <- data.frame(lapply(dat, factor))

  ## Non-differential misclassification of one binary variable: true 1s are
  ## observed as 1 with probability se; true 0s with probability 1 - sp.
  ## Replaces four copy-pasted per-variable blocks in the original.
  misclassify <- function(x, se, sp) {
    x.new <- rep(NA_integer_, length(x))
    x.new[x == 1] <- rbinom(sum(x == 1), 1, se)
    x.new[x == 0] <- rbinom(sum(x == 0), 1, 1 - sp)
    x.new
  }

  #Data set with misclassified predictors and the true outcome
  dat.nondiff.pred2 <- data.frame(
    x1.new = misclassify(dat$x1, se, sp),
    x2.new = misclassify(dat$x2, se, sp),
    x3.new = misclassify(dat$x3, se, sp),
    x4.new = misclassify(dat$x4, se, sp),
    y = dat$y
  )
  dat.nondiff.pred2 <- data.frame(lapply(dat.nondiff.pred2, factor))

  #Run random forest with nondiff. misclassification of predictors;
  #sampsize balances the classes by drawing minority-class-sized samples.
  rf.dat.nondiff.pred <- randomForest(y ~., data=dat.nondiff.pred2, importance=TRUE,
                                      sampsize=c(length(which(dat.nondiff.pred2$y==1)), length(which(dat.nondiff.pred2$y==1))))
  #Variable importance (type 1 = mean decrease in accuracy)
  varimp <- importance(rf.dat.nondiff.pred, type=1)
  #AUC of the out-of-bag votes against the true outcome
  rf.dat.nondiff.pred.roc <- roc(dat$y, rf.dat.nondiff.pred$votes[,2])
  auc <- auc(rf.dat.nondiff.pred.roc)
  #Other performance metrics from the OOB confusion matrix
  confusion.tab <- rf.dat.nondiff.pred$confusion
  accuracy <- (confusion.tab[1,1] + confusion.tab[2,2]) / (confusion.tab[1,1] + confusion.tab[1,2] + confusion.tab[2,1] + confusion.tab[2,2])
  sensitivity <- confusion.tab[2,2] / (confusion.tab[2,1] + confusion.tab[2,2])
  specificity <- confusion.tab[1,1] / (confusion.tab[1,1] + confusion.tab[1,2])
  ppv <- confusion.tab[2,2] / (confusion.tab[2,2] + confusion.tab[1,2])
  npv <- confusion.tab[1,1] / (confusion.tab[1,1] + confusion.tab[2,1])
  #Combine into one column per replicate, returned as one row
  results <- rbind(varimp, auc, accuracy, sensitivity, specificity, ppv, npv)
  t(results)
}
#Repeat the whole process
dat.nondiff.misc.pred.func.result <- t(sapply(1:10000, dat.nondiff.misc.pred.func, se=myse,sp=mysp)) | /Informative dataset #1 - original and misclassified/informative dataset 1 misclassified nondiff predictors.R | no_license | jiangtammy/Quantitative-Bias-Analysis | R | false | false | 3,456 | r | #QBA random forests
# Informative dataset #1
# Non-differential misclassification of predictors in simulated data, used
# to assess prediction accuracy.
library(foreign)
library(randomForest)
library(pROC)
# Sensitivity / specificity of the misclassification process come from the
# command line: Rscript <script> <sensitivity> <specificity>
argv <- commandArgs(TRUE)
if (length(argv) == 0) {
  # Abort early when the parameters are missing (interactive defaults kept
  # below for reference):
  #myse <- 0.95
  #mysp <- 0.95
  stop("Please input parameters")
}
myse <- as.numeric(argv[1])
mysp <- as.numeric(argv[2])
################################# Non-differential misclassification of all predictors #################################
# Simulates one data set from the "informative dataset #1" model, applies
# non-differential misclassification (the same sensitivity `se` and
# specificity `sp` to every predictor, independent of the outcome), fits a
# random forest on the misclassified predictors, and returns the variable
# importances together with out-of-bag (OOB) performance metrics.
#
# Args:
#   i  - iteration index from sapply(); not used inside the function.
#   se - sensitivity of the misclassification process, P(observed 1 | true 1).
#   sp - specificity of the misclassification process, P(observed 0 | true 0).
# Returns a 1-row matrix: importances of x1.new..x4.new, AUC, accuracy,
# sensitivity, specificity, PPV, NPV.
dat.nondiff.misc.pred.func <- function(i, se, sp){
# True data-generating process: four sequentially dependent binary
# predictors and a binary outcome y.
n=10000
x1=rbinom(n, 1, 0.10)
x2=rbinom(n, 1, 0.35+0.1*x1)
x3=rbinom(n, 1, 0.20+0.15*x2)
x4=rbinom(n, 1, 0.25+0.05*x3)
y=rbinom(n, 1, 0.05+0.05*x1+0.38*x2+0.35*x3+0.17*x4)
dat <- as.data.frame(cbind(x1, x2, x3, x4, y))
dat <- data.frame(lapply(dat, factor))
dat.nondiff.pred <- dat
# Misclassify each predictor: true 1s stay 1 with probability se, true 0s
# become 1 with probability 1-sp. The identical process for every predictor,
# independent of y, makes the misclassification non-differential.
dat.nondiff.pred$x1.new <- NA
dat.nondiff.pred$x1.new[dat.nondiff.pred$x1==1] <- rbinom(length(which(dat.nondiff.pred$x1==1)), 1, se)
dat.nondiff.pred$x1.new[dat.nondiff.pred$x1==0] <- rbinom(length(which(dat.nondiff.pred$x1==0)), 1, 1-sp)
dat.nondiff.pred$x2.new <- NA
dat.nondiff.pred$x2.new[dat.nondiff.pred$x2==1] <- rbinom(length(which(dat.nondiff.pred$x2==1)), 1, se)
dat.nondiff.pred$x2.new[dat.nondiff.pred$x2==0] <- rbinom(length(which(dat.nondiff.pred$x2==0)), 1, 1-sp)
dat.nondiff.pred$x3.new <- NA
dat.nondiff.pred$x3.new[dat.nondiff.pred$x3==1] <- rbinom(length(which(dat.nondiff.pred$x3==1)), 1, se)
dat.nondiff.pred$x3.new[dat.nondiff.pred$x3==0] <- rbinom(length(which(dat.nondiff.pred$x3==0)), 1, 1-sp)
dat.nondiff.pred$x4.new <- NA
dat.nondiff.pred$x4.new[dat.nondiff.pred$x4==1] <- rbinom(length(which(dat.nondiff.pred$x4==1)), 1, se)
dat.nondiff.pred$x4.new[dat.nondiff.pred$x4==0] <- rbinom(length(which(dat.nondiff.pred$x4==0)), 1, 1-sp)
#Subset data for misclassified variables and the other variables of interest
vars <- c("x1.new", "x2.new", "x3.new", "x4.new", "y")
dat.nondiff.pred2 <- dat.nondiff.pred[vars]
dat.nondiff.pred2 <- data.frame(lapply(dat.nondiff.pred2, factor))
#Run random forest for misclassified data set #1 with nondiff misclass. of predictor
# sampsize balances the bootstrap samples on the case count (y == 1).
rf.dat.nondiff.pred <- randomForest(y ~., data=dat.nondiff.pred2, importance=T, sampsize=c(length(which(dat.nondiff.pred2$y==1)), length(which(dat.nondiff.pred2$y==1))))
#Varimp (type = 1: permutation importance, mean decrease in accuracy)
varimp <- importance(rf.dat.nondiff.pred, type=1)
#AUC from the OOB class votes against the (correctly measured) outcome
rf.dat.nondiff.pred.roc <- roc(dat$y, rf.dat.nondiff.pred$votes[,2])
auc <- auc(rf.dat.nondiff.pred.roc)
#Other Performance metrics, taken from the OOB confusion matrix
# (indexing below assumes rows = observed class, columns = predicted class,
# which is randomForest's convention)
confusion.tab <- rf.dat.nondiff.pred$confusion
accuracy <- (confusion.tab[1,1] + confusion.tab[2,2]) / (confusion.tab[1,1] + confusion.tab[1,2] + confusion.tab[2,1] + confusion.tab[2,2])
sensitivity <- confusion.tab[2,2] / (confusion.tab[2,1] + confusion.tab[2,2])
specificity <- confusion.tab[1,1] / (confusion.tab[1,1] + confusion.tab[1,2])
ppv <- confusion.tab[2,2] / (confusion.tab[2,2] + confusion.tab[1,2])
npv <- confusion.tab[1,1] / (confusion.tab[1,1] + confusion.tab[2,1])
#Combine into one column, then transpose to a single result row
results <- rbind(varimp, auc, accuracy, sensitivity, specificity, ppv, npv)
t(results)
}
#Repeat the whole process
dat.nondiff.misc.pred.func.result <- t(sapply(1:10000, dat.nondiff.misc.pred.func, se=myse,sp=mysp)) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bedfile_tests.R
\name{file_is_valid_bed_vectorized}
\alias{file_is_valid_bed_vectorized}
\title{Are files real, valid beds}
\usage{
file_is_valid_bed_vectorized(filepaths, genomefile, warn = TRUE)
}
\arguments{
\item{filepaths}{vector of paths to putative bedfiles (character)}
\item{genomefile}{path to bedtools genome file. To make one, see: https://www.biostars.org/p/70795/ (string)}
\item{warn}{emit warnings when a file fails validation (logical; default \code{TRUE})}
}
\value{
A logical vector with one element per entry in \code{filepaths}, indicating whether each file appears to be a real, valid bed file.
}
\description{
Are files real, valid beds
}
| /man/file_is_valid_bed_vectorized.Rd | permissive | selkamand/bedutilsr | R | false | true | 523 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bedfile_tests.R
\name{file_is_valid_bed_vectorized}
\alias{file_is_valid_bed_vectorized}
\title{Are files real, valid beds}
\usage{
file_is_valid_bed_vectorized(filepaths, genomefile, warn = TRUE)
}
\arguments{
\item{filepaths}{vector of paths to putative bedfiles (character)}
\item{genomefile}{path to bedtools genome file. To make one, see: https://www.biostars.org/p/70795/ (string)}
}
\value{
}
\description{
Are files real, valid beds
}
|
partido_analise <- tabPanel(title = "Análise Partidária",
value = "partidos",
br(), hr(),
column(width = 4,
column(width = 6,
pickerInput(inputId = "partido_ano",
label = "Ano",
choices = anos,
selected = 2014,
options = list(`live-search` = TRUE))
),
column(width = 6,
pickerInput(inputId = "partido_cargo",
label = "Cargo",
choices = cargos,
selected = 1,
options = list(`live-search` = TRUE))
)
),
column(width = 8,
br(),
actionBttn(inputId = "partidos_gerar_visualizacoes",
label = "Selecionar",
style = "fill",
color = "success",
icon = icon("check"))
),
##-- Outputs ----
column(width = 12,
conditionalPanel(condition = "input.partidos_gerar_visualizacoes > 0",
hr(),
HTML("<center>"),
pickerInput(inputId = "partido_partido_donuts",
label = "Partido",
choices = levels(factor(x = c("Todos os partidos", partidos),
levels = c("Todos os partidos", partidos))),
selected = "Todos os partidos",
options = list(`live-search` = TRUE,
`none-selected-text` = "Nenhum partido selecionado")),
HTML("</center>")
),
column(width = 4,
conditionalPanel(condition = "input.partidos_gerar_visualizacoes > 0",
HTML("<center><h1>Proporção de gênero</h1></center>"),
column(width = 12,
withSpinner(plotlyOutput("donut_sexo"), type = 6)
)
)
),
column(width = 4,
conditionalPanel(condition = "input.partidos_gerar_visualizacoes > 0",
HTML("<center><h1>Proporção de raça</h1></center>"),
column(width = 12,
withSpinner(plotlyOutput("donut_raca"), type = 6)
)
)
),
column(width = 4,
conditionalPanel(condition = "input.partidos_gerar_visualizacoes > 0",
HTML("<center><h1>Grau de instrução</h1></center>"),
column(width = 12,
withSpinner(dataTableOutput("tabela"), type = 6)
)
)
)
)#,
# column(width = 4, offset = 4,
# conditionalPanel(condition = "input.partidos_gerar_visualizacoes > 0",
# hr(),
# HTML("<center>"),
# pickerInput(inputId = "partido_partido_mapa",
# label = "Partido",
# choices = levels(factor(x = c("Todos os partidos", partidos),
# levels = c("Todos os partidos", partidos))),
# selected = "Todos os partidos",
# options = list(`live-search` = TRUE,
# `none-selected-text` = "Nenhum partido selecionado")),
# HTML("</center>"),
# HTML("<center><h1>Mapa de Candidatos</h1></center>"),
# column(width = 12,
# withSpinner(leafletOutput("mapa_cand"), type = 6)
# )
# )
# )
) | /tabs/ui/partidos/partidos_analise.R | no_license | voronoys/voronoys-app-dev | R | false | false | 6,366 | r | partido_analise <- tabPanel(title = "Análise Partidária",
value = "partidos",
br(), hr(),
column(width = 4,
column(width = 6,
pickerInput(inputId = "partido_ano",
label = "Ano",
choices = anos,
selected = 2014,
options = list(`live-search` = TRUE))
),
column(width = 6,
pickerInput(inputId = "partido_cargo",
label = "Cargo",
choices = cargos,
selected = 1,
options = list(`live-search` = TRUE))
)
),
column(width = 8,
br(),
actionBttn(inputId = "partidos_gerar_visualizacoes",
label = "Selecionar",
style = "fill",
color = "success",
icon = icon("check"))
),
##-- Outputs ----
column(width = 12,
conditionalPanel(condition = "input.partidos_gerar_visualizacoes > 0",
hr(),
HTML("<center>"),
pickerInput(inputId = "partido_partido_donuts",
label = "Partido",
choices = levels(factor(x = c("Todos os partidos", partidos),
levels = c("Todos os partidos", partidos))),
selected = "Todos os partidos",
options = list(`live-search` = TRUE,
`none-selected-text` = "Nenhum partido selecionado")),
HTML("</center>")
),
column(width = 4,
conditionalPanel(condition = "input.partidos_gerar_visualizacoes > 0",
HTML("<center><h1>Proporção de gênero</h1></center>"),
column(width = 12,
withSpinner(plotlyOutput("donut_sexo"), type = 6)
)
)
),
column(width = 4,
conditionalPanel(condition = "input.partidos_gerar_visualizacoes > 0",
HTML("<center><h1>Proporção de raça</h1></center>"),
column(width = 12,
withSpinner(plotlyOutput("donut_raca"), type = 6)
)
)
),
column(width = 4,
conditionalPanel(condition = "input.partidos_gerar_visualizacoes > 0",
HTML("<center><h1>Grau de instrução</h1></center>"),
column(width = 12,
withSpinner(dataTableOutput("tabela"), type = 6)
)
)
)
)#,
# column(width = 4, offset = 4,
# conditionalPanel(condition = "input.partidos_gerar_visualizacoes > 0",
# hr(),
# HTML("<center>"),
# pickerInput(inputId = "partido_partido_mapa",
# label = "Partido",
# choices = levels(factor(x = c("Todos os partidos", partidos),
# levels = c("Todos os partidos", partidos))),
# selected = "Todos os partidos",
# options = list(`live-search` = TRUE,
# `none-selected-text` = "Nenhum partido selecionado")),
# HTML("</center>"),
# HTML("<center><h1>Mapa de Candidatos</h1></center>"),
# column(width = 12,
# withSpinner(leafletOutput("mapa_cand"), type = 6)
# )
# )
# )
) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ustring.R
\name{text_to_utf16}
\alias{text_to_utf16}
\title{Convert a UTF-8 text to UTF-16 ustring.}
\usage{
text_to_utf16(text, endian = "big")
}
\arguments{
\item{text}{a length-one character vector containing the UTF-8 text to convert}
\item{endian}{byte order of the UTF-16 output: \code{"big"} (the default) or \code{"little"}}
}
\description{
Convert a UTF-8 text to UTF-16 ustring.
}
| /man/text_to_utf16.Rd | permissive | randy3k/ustring | R | false | true | 372 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ustring.R
\name{text_to_utf16}
\alias{text_to_utf16}
\title{Convert a UTF-8 text to UTF-16 ustring.}
\usage{
text_to_utf16(text, endian = "big")
}
\arguments{
\item{text}{a scalar character}
\item{endian}{little endian or big endian?}
}
\description{
Convert a UTF-8 text to UTF-16 ustring.
}
|
# Three progressively refined line charts of the number of cat vs dog lovers
# over time. Assumes ggplot2 is attached and a long-format data frame
# `data_time_long` with columns: year, category ("value_cat"/"value_dog"),
# value, and -- for the third plot -- label. Values are divided by 1e6 so the
# y axis reads in millions.
# NOTE(review): `size` in geom_line() is deprecated since ggplot2 3.4 in
# favour of `linewidth` -- update when the project upgrades.
# Default line plot
ggplot(data_time_long, aes(x = year, y = value / 1000000)) +
  geom_line(aes(color = category)) +
  labs(title = "Number of CAT and DOG lovers in 2000 - 2020") +
  ylab("'000 000 of respondents")
# Basic line plot with 2 lines
# Adds thicker lines, manual colours with legend labels, and a minimalist
# theme (no panel grid, no axis titles, muted gray axis text).
ggplot(data_time_long, aes(x = year, y = value / 1000000)) +
  geom_line(aes(color = category), size = 1.5) +
  scale_colour_manual(name = "Animal",
                      labels = c(value_dog = "Dog", value_cat = "Cat"),
                      values = c(value_dog = "royalblue4", value_cat = "deeppink4")) +
  labs(title = "Number of CAT and DOG lovers in 2000 - 2020",
       subtitle = "'000 000 of respondents") +
  theme(
    plot.title = element_text(size = 18, margin = margin(10, 0, 0, 0)),
    plot.subtitle = element_text(size = 12, margin = margin(10, 0, 30, 0), color = "gray"),
    panel.background = element_rect(fill = NA),
    panel.grid.major = element_blank(),
    axis.line = element_line(color = "gray35"),
    axis.ticks = element_blank(),
    axis.title = element_blank(),
    axis.text = element_text(size = 10, color = "gray35"),
    axis.text.x = element_text(margin = margin(5, 0, 0, 0)),
    axis.text.y = element_text(margin = margin(0, 5, 0, 0)),
    legend.key = element_rect(fill = NA)
  )
# Line plot with 2 lines and labels
# Replaces the legend with direct labels on the lines (geom_text) and widens
# the right plot margin so labels are not clipped.
ggplot(data_time_long, aes(x = year, y = value / 1000000)) +
  geom_line(aes(color = category), size = 1.5) +
  geom_text(aes(label = label, color = category), hjust = 1.2, vjust = 1.2) +
  scale_colour_manual(values = c(value_dog = "royalblue4", value_cat = "deeppink4")) +
  labs(title = "Number of CAT and DOG lovers in 2000 - 2020",
       subtitle = "'000 000 of respondents") +
  theme(
    plot.title = element_text(size = 18, margin = margin(10, 0, 0, 0)),
    plot.subtitle = element_text(size = 12, margin = margin(10, 0, 30, 0), color = "gray"),
    plot.margin = margin(0, 50, 10, 10),
    panel.background = element_rect(fill = NA),
    panel.grid.major = element_blank(),
    axis.line = element_line(color = "gray35"),
    axis.ticks = element_blank(),
    axis.title = element_blank(),
    axis.text = element_text(size = 10, color = "gray35"),
    axis.text.x = element_text(margin = margin(5, 0, 0, 0)),
    axis.text.y = element_text(margin = margin(0, 5, 0, 0)),
    legend.position = "none"
  )
| /05_line_plot_labels.R | no_license | axc836/data_visualisation | R | false | false | 2,322 | r | # Default line plot
ggplot(data_time_long, aes(x = year, y = value / 1000000)) +
geom_line(aes(color = category)) +
labs(title = "Number of CAT and DOG lovers in 2000 - 2020") +
ylab("'000 000 of respondents")
# Basic line plot with 2 lines
ggplot(data_time_long, aes(x = year, y = value / 1000000)) +
geom_line(aes(color = category), size = 1.5) +
scale_colour_manual(name = "Animal",
labels = c(value_dog = "Dog", value_cat = "Cat"),
values = c(value_dog = "royalblue4", value_cat = "deeppink4")) +
labs(title = "Number of CAT and DOG lovers in 2000 - 2020",
subtitle = "'000 000 of respondents") +
theme(
plot.title = element_text(size = 18, margin = margin(10, 0, 0, 0)),
plot.subtitle = element_text(size = 12, margin = margin(10, 0, 30, 0), color = "gray"),
panel.background = element_rect(fill = NA),
panel.grid.major = element_blank(),
axis.line = element_line(color = "gray35"),
axis.ticks = element_blank(),
axis.title = element_blank(),
axis.text = element_text(size = 10, color = "gray35"),
axis.text.x = element_text(margin = margin(5, 0, 0, 0)),
axis.text.y = element_text(margin = margin(0, 5, 0, 0)),
legend.key = element_rect(fill = NA)
)
# Line plot with 2 lines and labels
ggplot(data_time_long, aes(x = year, y = value / 1000000)) +
geom_line(aes(color = category), size = 1.5) +
geom_text(aes(label = label, color = category), hjust = 1.2, vjust = 1.2) +
scale_colour_manual(values = c(value_dog = "royalblue4", value_cat = "deeppink4")) +
labs(title = "Number of CAT and DOG lovers in 2000 - 2020",
subtitle = "'000 000 of respondents") +
theme(
plot.title = element_text(size = 18, margin = margin(10, 0, 0, 0)),
plot.subtitle = element_text(size = 12, margin = margin(10, 0, 30, 0), color = "gray"),
plot.margin = margin(0, 50, 10, 10),
panel.background = element_rect(fill = NA),
panel.grid.major = element_blank(),
axis.line = element_line(color = "gray35"),
axis.ticks = element_blank(),
axis.title = element_blank(),
axis.text = element_text(size = 10, color = "gray35"),
axis.text.x = element_text(margin = margin(5, 0, 0, 0)),
axis.text.y = element_text(margin = margin(0, 5, 0, 0)),
legend.position = "none"
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Input.r
\name{LstepCE1}
\alias{LstepCE1}
\title{A management procedure that incrementally adjusts the TAC according to the
mean length of recent catches.}
\usage{
LstepCE1(x, Data, reps = 100, yrsmth = 5, xx = 0, stepsz = 0.05,
llim = c(0.96, 0.98, 1.05))
}
\arguments{
\item{x}{A position in data-limited methods data object}
\item{Data}{A data-limited methods data object}
\item{reps}{The number of effort samples}
\item{yrsmth}{Years over which to smooth recent estimates of surplus
production}
\item{xx}{Parameter controlling the fraction of mean catch to start using in
first year}
\item{stepsz}{Parameter controlling the size of the effort update increment.}
\item{llim}{A vector of length reference points that determine the
conditions for increasing, maintaining or reducing the effort.}
}
\value{
A numeric vector of input controls
}
\description{
An effort-based version of the least biologically precautionary of the four
adaptive length-based MPs proposed by Geromont and Butterworth (2014), as
tested by Carruthers et al. (2015).
}
\author{
T. Carruthers
}
| /man/LstepCE1.Rd | no_license | Lijiuqi/DLMtool | R | false | true | 1,137 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Input.r
\name{LstepCE1}
\alias{LstepCE1}
\title{A management procedure that incrementally adjusts the TAC according to the
mean length of recent catches.}
\usage{
LstepCE1(x, Data, reps = 100, yrsmth = 5, xx = 0, stepsz = 0.05,
llim = c(0.96, 0.98, 1.05))
}
\arguments{
\item{x}{A position in data-limited methods data object}
\item{Data}{A data-limited methods data object}
\item{reps}{The number of effort samples}
\item{yrsmth}{Years over which to smooth recent estimates of surplus
production}
\item{xx}{Parameter controlling the fraction of mean catch to start using in
first year}
\item{stepsz}{Parameter controlling the size of the effort update increment.}
\item{llim}{A vector of length reference points that determine the
conditions for increasing, maintaining or reducing the effort.}
}
\value{
A numeric vector of input controls
}
\description{
A effort-based version of least biologically precautionary of four adaptive
length-based MPs proposed by Geromont and Butterworth 2014. Tested by
Carruthers et al. 2015
}
\author{
T. Carruthers
}
|
# Exponential random graph model (ERGM) of the Grey's Anatomy
# relationship network.
# December 16, 2018.
# Luis Da Silva.
library(ergm)

# Adjacency (socio-)matrix of relationships between pairs of characters.
ga.mat <- as.matrix(read.table("Grey's Anatomy - sociomat.tsv", sep = "\t",
                               header = TRUE, row.names = 1, quote = "\""))
# check it:
ga.mat

# Node-level attributes for each character.
ga.atts <- read.table("Grey's Anatomy - attributes.tsv", sep = "\t",
                      header = TRUE, quote = "\"",
                      stringsAsFactors = FALSE, strip.white = TRUE, as.is = TRUE)
# check it and familiarise yourself with the attributes available:
ga.atts
# Recode sex as a logical dummy via the model matrix (drop the intercept).
ga.atts$sex <- as.logical(model.matrix(~ sex, data = ga.atts)[, -1])

# Build the undirected network object carrying the vertex attributes.
ga.net <- network(ga.mat, vertex.attr = ga.atts,
                  vertex.attrnames = colnames(ga.atts),
                  directed = FALSE, hyper = FALSE, loops = FALSE,
                  multiple = FALSE, bipartite = FALSE)
# check it:
ga.net

# Visualise the network, colouring nodes by gender and labelling with names.
plot(ga.net,
     vertex.col = c("blue", "red")[1 + (get.vertex.attribute(ga.net, "sex") == FALSE)],
     label = get.vertex.attribute(ga.net, "name"),
     label.cex = .7)  # label.cex determines the label size

# ERGM: edge density, homophily on sex and on position, and the count of
# degree-1 nodes.
model1 <- ergm(ga.net ~ edges + nodematch("sex") + nodematch("position") + degree(c(1)))
summary(model1)

# simulation ----
# Draw 10 networks from the fitted model and inspect one of them.
model1.sim <- simulate(model1, nsim = 10)
class(model1.sim)
summary(model1.sim)
plot(model1.sim[[3]],
     vertex.col = c("blue", "red")[1 + (get.vertex.attribute(ga.net, "sex") == FALSE)])

# Goodness of Fit ----
# Compare the observed degree distribution against the model's.
model1.gof <- gof(model1 ~ degree)
model1.gof
par(mfrow = c(2, 1))  # 2-row layout for the GOF panels
plot(model1.gof)
dev.off()
| /Social Network Analysis/Greys anatomy social networks.R | permissive | fitrialif/Playground | R | false | false | 1,645 | r | # December 16, 2018.
# Luis Da Silva.
library(ergm)
# Start by reading in the adjancecy matrix showing relationships between pairs of characters
ga.mat<-as.matrix(read.table("Grey's Anatomy - sociomat.tsv", sep="\t",
header=T, row.names=1, quote="\""))
# check it:
ga.mat
# Next import the attribute file
ga.atts <- read.table("Grey's Anatomy - attributes.tsv", sep="\t",
header=T, quote="\"",
stringsAsFactors=F, strip.white=T, as.is=T)
# check it and familiarise yourself with the attributes available:
ga.atts
ga.atts$sex = as.logical(model.matrix(~ sex, data=ga.atts)[,-1])
# create the network object to use for the coursework tasks
ga.net <- network(ga.mat, vertex.attr=ga.atts,
vertex.attrnames=colnames(ga.atts),
directed=F, hyper=F, loops=F, multiple=F, bipartite=F)
# check it:
ga.net
# Visualise the network, colour nodes based gender and include labels (names of characters)
plot(ga.net, vertex.col=c("blue","red")[1+(get.vertex.attribute(ga.net, "sex")==0)],
label=get.vertex.attribute(ga.net, "name"), label.cex=.7) # label.cex determines the label size
# ERGM
model1 <- ergm(ga.net~edges+nodematch("sex")+nodematch("position")+degree(c(1)))
summary(model1)
# simulation----
model1.sim <- simulate(model1,nsim=10)
class(model1.sim)
summary(model1.sim)
plot(model1.sim[[3]], vertex.col=c("blue","red")[1+(get.vertex.attribute(ga.net, "sex")==0)])
# Goodness of Fit----
model1.gof <- gof(model1~degree)
model1.gof
par(mfrow=c(2,1)) # Separate the plot window into a 2 by 1 orientation
plot(model1.gof)
dev.off()
|
# Sampling-distribution demo: draw repeated samples of size 4 from a small
# population of 10 exam scores and compare the distribution of the sample
# means with the population mean and standard deviation.
student <- 1:10
score <- c(85, 61, 85, 67, 74, 72, 70, 75, 59, 66)
population <- data.frame(student, score)
population  # print the population table

u <- mean(population$score)
cat("Mean is ", u)
SD <- sd(population$score)
cat("Standard Deviation is ", SD)

n <- 4           # size of each sample
n_samples <- 10  # number of repeated samples
# Preallocate the result vector instead of growing it with c() inside the
# loop; also avoid masking base::mean() and base::sample() with locals.
sample_means <- numeric(n_samples)
for (i in seq_len(n_samples)) {
  drawn <- sample(population$score, n, replace = FALSE)
  sample_means[i] <- mean(drawn)
  cat("Mean of sample :", drawn, " is ", sample_means[i], "\n")
}
sample_means  # print all sample means

par(mar = rep(2, 4))
color <- c("red", "blue", "red", "orange", "black")  # recycled across bars
hist(sample_means, plot = TRUE, main = "Histogram of values of mean",
     border = "black", col = color)
lines(density(sample_means))  # density curve over the histogram
aproxmean <- mean(sample_means)
aproxSD <- sd(sample_means)
cat("The approximate mean and std deviation are: ", aproxmean, " ", aproxSD)
| /4)Population.R | no_license | Dhirajdgandhi/RStudio-Basics | R | false | false | 712 | r | student<-c(1:10)
score<-c(85,61,85,67,74,72,70,75,59,66)
population<-data.frame(student,score)
population
u<-mean(population$score)
cat("Mean is ",u)
SD<-sd(population$score)
cat("Standard Deviation is ",SD)
n<-4
mean<-c()
for(i in 1:10){
sample<-sample(population$score,n,replace=FALSE)
mean<-c(mean,mean(sample))
cat("Mean of sample :",sample," is ",mean[i],"\n")
}
mean
par(mar = rep(2, 4))
color=c("red","blue","red","orange","black")
hist(mean,plot=TRUE,main="Histogram of values of mean",border="black",col=color)
lines(density(mean)) #lines to the histogram
aproxmean=mean(mean)
aproxSD=sd(mean)
cat("The approximate mean and std deviation are: ",aproxmean," ",aproxSD)
|
# Build the regional map dataset used by the app: read 2017 emission values
# per region from Excel, load GADM level-1 boundaries for each country,
# harmonise region names, fuzzy-join the values onto the geometries, and
# save the result as mapdata.rds.
rm(list = ls())  # NOTE(review): wipes the workspace; acceptable in a one-off build script
setwd(dirname(rstudioapi::getSourceEditorContext()$path))

require(xlsx)
require(tidyverse)

# Emission table: data starts at spreadsheet row 6; the first row read is a
# sub-header and is dropped.
df <- read.xlsx2('emisie.xlsx', startRow = 6, endRow = 181, sheetIndex = 1)
df <- df[-1, ]
df2 <- data.frame(Country = df$Year, NUTS2 = df$X., value = df$X2017)
# The country name appears only on the first row of each country block:
# blank cells -> NA, then fill() downwards; drop the per-country totals.
df2[df2$Country == '', 1] <- NA
df2 <- df2 %>% fill(Country) %>% filter(NUTS2 != '-Total-')
df2$value <- as.numeric(as.character(df2$value))
df2$Country <- as.character(df2$Country)
df2$NUTS2 <- trimws(as.character(df2$NUTS2))
# Ilfov has no row of its own in the source table; reuse the Bucuresti value.
# BUG FIX: the original c("Romania", 'Ilfov', <numeric>) coerced everything
# to character, and rbind() then silently converted the numeric `value`
# column to character. Bind a typed one-row data frame instead.
extra <- data.frame(Country = "Romania", NUTS2 = 'Ilfov',
                    value = df2$value[df2$NUTS2 == 'Bucuresti'],
                    stringsAsFactors = FALSE)
df2 <- rbind(df2, extra)

# GADM level-1 boundaries (download lines kept for provenance; the .rds
# files are expected next to this script).
#download.file("https://biogeo.ucdavis.edu/data/gadm3.6/Rsf/gadm36_AUT_1_sf.rds",
#              "AUT_adm1.rds", mode = "wb")
at <- readRDS("AUT_adm1.rds")
#download.file("https://biogeo.ucdavis.edu/data/gadm3.6/Rsf/gadm36_CZE_1_sf.rds",
#              "CZK_adm1.rds", mode = "wb")
cz <- readRDS("CZK_adm1.rds")
# Collapse Czech kraje into the coarser regions used by the emission table.
# NOTE(review): index-based grouping assumes a fixed row order in the GADM
# file -- confirm against the source data.
cz$NAME_1[c(3, 10)] <- 'Zapadocesky'
cz$NAME_1[c(4, 5, 9)] <- 'Vychodocesky'
cz$NAME_1[c(6, 13)] <- 'Severocesky'
cz$NAME_1[c(7, 8, 14)] <- 'Severomoravsky'
cz$NAME_1[c(11)] <- 'Praha'
#download.file("https://biogeo.ucdavis.edu/data/gadm3.6/Rsf/gadm36_HUN_1_sf.rds",
#              "HUN_adm1.rds", mode = "wb")
hu <- readRDS("HUN_adm1.rds")
#download.file("https://biogeo.ucdavis.edu/data/gadm3.6/Rsf/gadm36_POL_1_sf.rds",
#              "PLN_adm1.rds", mode = "wb")
pl <- readRDS("PLN_adm1.rds")
pl$NAME_1[pl$NAME_1 == "Zachodniopomorskie"] <- "Zachodnio-Pomorskie"
#download.file("https://biogeo.ucdavis.edu/data/gadm3.6/Rsf/gadm36_SVK_1_sf.rds",
#              "SVK_adm1.rds", mode = "wb")
sk <- readRDS("SVK_adm1.rds")
# Slovakia: use the ASCII variant names; Presov's variant is missing upstream.
sk$VARNAME_1[is.na(sk$VARNAME_1)] <- 'Presov'
sk$NAME_1 <- sk$VARNAME_1
#download.file("https://biogeo.ucdavis.edu/data/gadm3.6/Rsf/gadm36_SVN_1_sf.rds",
#              "SVN_adm1.rds", mode = "wb")
sv <- readRDS("SVN_adm1.rds")
#download.file("https://biogeo.ucdavis.edu/data/gadm3.6/Rsf/gadm36_HRV_1_sf.rds",
#              "HRV_adm1.rds", mode = "wb")
hr <- readRDS("HRV_adm1.rds")
# Croatia: align county variant names with the emission table's spellings.
hr$VARNAME_1[is.na(hr$VARNAME_1)] <- 'Grad zagreb'
hr$VARNAME_1[hr$VARNAME_1 == 'Slavonski Brod-Posavina'] <- 'Slavonski Brod-Posav'
hr$VARNAME_1[hr$VARNAME_1 == 'Split-Dalmacia'] <- 'Split-Dalmatija'
hr$VARNAME_1[hr$VARNAME_1 == 'Varaždin|Varasd'] <- 'varazdin'
hr$NAME_1 <- hr$VARNAME_1
#download.file("https://biogeo.ucdavis.edu/data/gadm3.6/Rsf/gadm36_ROU_1_sf.rds",
#              "ROU_adm1.rds", mode = "wb")
ro <- readRDS("ROU_adm1.rds")
ro$NAME_1[ro$NAME_1 == 'Bucharest'] <- 'Bucuresti'
#download.file("https://biogeo.ucdavis.edu/data/gadm3.6/Rsf/gadm36_SRB_1_sf.rds",
#              "SRB_adm1.rds", mode = "wb")
sr <- readRDS("SRB_adm1.rds")
#download.file("https://biogeo.ucdavis.edu/data/gadm3.6/Rsf/gadm36_XKO_1_sf.rds",
#              "XKO_adm1.rds", mode = "wb")
ko <- readRDS("XKO_adm1.rds")

# Stack all countries and keep only the columns needed for the join/plot.
dfm <- rbind(at, cz, hr, hu, pl, ro, sk, sr, sv, ko)
dfm2 <- dfm %>% select(GID_0, NAME_0, NAME_1, geometry)

require(fuzzyjoin)
library(stringi)
# Strip diacritics and lower-case both join keys so the fuzzy match
# compares comparable ASCII strings.
dfm2$NAME_1 <- stri_trans_general(dfm2$NAME_1, "Latin-ASCII")
#dfm2$NAME_1[27]<- 'Istra'
df2$NUTS2 <- tolower(df2$NUTS2)  # already trimmed above
dfm2$NAME_1 <- tolower(dfm2$NAME_1)
# Left join allowing edit distance 1 between the emission region name and
# the GADM region name.
pok1 <- df2 %>% stringdist_left_join(., dfm2, by = c('NUTS2' = 'NAME_1'), max_dist = 1)
pok2 <- pok1[!is.na(pok1$NAME_1), ]  # drop emission rows with no map match
# Interactive sanity check of the Croatian matches.
aaa <- pok2 %>% filter(Country == 'Croatia') %>% select(-geometry)
#pok2 <- distinct(pok2)
saveRDS(pok2, 'mapdata.rds')
| /data.R | no_license | kubocery/mapy | R | false | false | 3,532 | r | rm(list=ls())
setwd(dirname(rstudioapi::getSourceEditorContext()$path))
require(xlsx)
require(tidyverse)
df <- read.xlsx2('emisie.xlsx', startRow = 6, endRow = 181, sheetIndex = 1)
df <- df[-1,]
df2 <- data.frame(Country = df$Year, NUTS2 = df$X., value = df$X2017)
df2[df2$Country=='',1]<-NA
df2 <- df2 %>% fill(Country) %>% filter(NUTS2 !='-Total-')
df2$value <- as.numeric(as.character(df2$value))
df2$Country <- as.character(df2$Country)
df2$NUTS2 <- trimws(as.character(df2$NUTS2))
extra <- c("Romania", 'Ilfov', df2$value[df2$NUTS2=='Bucuresti'])
df2 <- rbind(df2,extra)
#download.file("https://biogeo.ucdavis.edu/data/gadm3.6/Rsf/gadm36_AUT_1_sf.rds",
# "AUT_adm1.rds", mode = "wb")
at = readRDS("AUT_adm1.rds")
#download.file("https://biogeo.ucdavis.edu/data/gadm3.6/Rsf/gadm36_CZE_1_sf.rds",
# "CZK_adm1.rds", mode = "wb")
cz = readRDS("CZK_adm1.rds")
cz$NAME_1[c(3,10)] <- 'Zapadocesky'
cz$NAME_1[c(4,5,9)]<- 'Vychodocesky'
cz$NAME_1[c(6,13)]<- 'Severocesky'
cz$NAME_1[c(7,8,14)]<- 'Severomoravsky'
cz$NAME_1[c(11)]<- 'Praha'
#download.file("https://biogeo.ucdavis.edu/data/gadm3.6/Rsf/gadm36_HUN_1_sf.rds",
# "HUN_adm1.rds", mode = "wb")
hu = readRDS("HUN_adm1.rds")
# Builds a combined admin-level-1 (NUTS2-like) map for central/eastern
# Europe from GADM 3.6 shapefiles and fuzzy-joins it to a statistics
# table `df2` (defined earlier in this script; also `at`, `cz`, `hu`).
# Region names are manually harmonized so that the string-distance join
# against df2$NUTS2 succeeds.
#download.file("https://biogeo.ucdavis.edu/data/gadm3.6/Rsf/gadm36_POL_1_sf.rds",
# "PLN_adm1.rds", mode = "wb")
pl = readRDS("PLN_adm1.rds")
# rename to match the spelling used in the statistics table
pl$NAME_1[pl$NAME_1=="Zachodniopomorskie"] <- "Zachodnio-Pomorskie"
#download.file("https://biogeo.ucdavis.edu/data/gadm3.6/Rsf/gadm36_SVK_1_sf.rds",
# "SVK_adm1.rds", mode = "wb")
sk = readRDS("SVK_adm1.rds")
# Slovakia: use the ASCII variant names; fill the one missing entry
sk$VARNAME_1[is.na(sk$VARNAME_1)]<-'Presov'
sk$NAME_1 <- sk$VARNAME_1
#download.file("https://biogeo.ucdavis.edu/data/gadm3.6/Rsf/gadm36_SVN_1_sf.rds",
# "SVN_adm1.rds", mode = "wb")
sv = readRDS("SVN_adm1.rds")
#download.file("https://biogeo.ucdavis.edu/data/gadm3.6/Rsf/gadm36_HRV_1_sf.rds",
# "HRV_adm1.rds", mode = "wb")
hr = readRDS("HRV_adm1.rds")
# NOTE(review): `hro` is never used below — presumably left over from
# inspecting the attribute table; confirm before deleting.
hro <- hr %>% select(-geometry)
# Croatia: harmonize county names to match df2$NUTS2
hr$VARNAME_1[is.na(hr$VARNAME_1)]<- 'Grad zagreb'
hr$VARNAME_1[hr$VARNAME_1=='Slavonski Brod-Posavina']<- 'Slavonski Brod-Posav'
hr$VARNAME_1[hr$VARNAME_1=='Split-Dalmacia']<- 'Split-Dalmatija'
hr$VARNAME_1[hr$VARNAME_1=='Varaždin|Varasd']<- 'varazdin'
hr$NAME_1 <- hr$VARNAME_1
#download.file("https://biogeo.ucdavis.edu/data/gadm3.6/Rsf/gadm36_ROU_1_sf.rds",
# "ROU_adm1.rds", mode = "wb")
ro = readRDS("ROU_adm1.rds")
ro$NAME_1[ro$NAME_1=='Bucharest']<- 'Bucuresti'
#download.file("https://biogeo.ucdavis.edu/data/gadm3.6/Rsf/gadm36_SRB_1_sf.rds",
# "SRB_adm1.rds", mode = "wb")
sr = readRDS("SRB_adm1.rds")
#download.file("https://biogeo.ucdavis.edu/data/gadm3.6/Rsf/gadm36_XKO_1_sf.rds",
# "XKO_adm1.rds", mode = "wb")
ko = readRDS("XKO_adm1.rds")
# stack all countries; keep only the join keys and the geometry
dfm <- rbind(at,cz,hr,hu,pl,ro,sk,sr,sv,ko)
dfm2 <- dfm %>% select(GID_0, NAME_0, NAME_1, geometry)
require(fuzzyjoin)
library(stringi)
# strip diacritics so the fuzzy match works on plain ASCII
dfm2$NAME_1 <- stri_trans_general(dfm2$NAME_1, "Latin-ASCII")
df2$NUTS2 <- trimws(df2$NUTS2)
#dfm2$NAME_1[27]<- 'Istra'
# case-insensitive matching: lower-case both sides
df2$NUTS2 <- tolower(df2$NUTS2)
dfm2$NAME_1 <- tolower(dfm2$NAME_1)
# NOTE(review): `pok` is unused below — looks like a leftover inspection step.
pok <- dplyr::select(dfm2, -geometry)
# fuzzy join (edit distance <= 1) of the statistics table to the map regions
pok1 <- df2 %>% stringdist_left_join(.,dfm2, by = c('NUTS2' = 'NAME_1' ),max_dist = 1)
# drop rows that found no matching region
pok2 <- pok1[!is.na(pok1$NAME_1),]
# NOTE(review): `aaa` (Croatia sanity check) is unused afterwards — confirm.
pok2 %>% filter(Country=='Croatia') %>% select(-geometry)->aaa
#pok2 <- distinct(pok2)
saveRDS(pok2,'mapdata.rds')
|
#' @include internal.R ConservationProblem-proto.R
NULL
#' Evaluate solution connectivity
#'
#' Calculate the connectivity held within a solution to a conservation
#' planning [problem()].
#' This summary statistic evaluates the connectivity of a solution using
#' pair-wise connectivity values between combinations of planning units.
#'
#' @inheritParams add_connectivity_penalties
#' @inheritParams eval_cost_summary
#'
#' @details
#' This summary statistic is comparable to the `Connectivity_In` metric
#' reported by the
#' [*Marxan* software](https://marxansolutions.org) (Ball *et al.* 2009).
#' It is calculated using the same equations used to penalize solutions
#' with connectivity data (i.e. [add_connectivity_penalties()]).
#' Specifically, it is calculated as the sum of the pair-wise connectivity
#' values in the argument to `data`, weighted by the value of the planning
#' units in the solution.
#'
#' @inheritSection eval_cost_summary Solution format
#' @inheritSection add_connectivity_penalties Data format
#'
#' @return
#' [tibble::tibble()] object describing the connectivity of the
#' solution.
#' It contains the following columns:
#'
#' \describe{
#'
#' \item{summary}{`character` description of the summary statistic.
#' The statistic associated with the `"overall"` value
#' in this column is calculated using the entire solution
#' (including all management zones if there are multiple zones).
#' If multiple management zones are present, then summary statistics
#' are also provided for each zone separately
#' (indicated using zone names).}
#'
#' \item{connectivity}{`numeric` connectivity value.
#' Greater values correspond to solutions associated with greater
#' connectivity.
#' Thus conservation planning exercises typically prefer solutions
#' with greater values.}
#'
#' }
#'
#' @references
#' Ball IR, Possingham HP, and Watts M (2009) *Marxan and relatives:
#' Software for spatial conservation prioritisation* in Spatial conservation
#' prioritisation: Quantitative methods and computational tools. Eds Moilanen
#' A, Wilson KA, and Possingham HP. Oxford University Press, Oxford, UK.
#'
#' @seealso [problem()], [summaries], [add_connectivity_penalties()].
#'
#' @examples
#' \dontrun{
#' # set seed for reproducibility
#' set.seed(500)
#'
#' # load data
#' data(sim_pu_raster, sim_pu_sf, sim_features,
#' sim_pu_zones_sf, sim_features_zones)
#'
#' # build minimal conservation problem with raster data
#' p1 <- problem(sim_pu_raster, sim_features) %>%
#' add_min_set_objective() %>%
#' add_relative_targets(0.1) %>%
#' add_binary_decisions() %>%
#' add_default_solver(verbose = FALSE)
#'
#' # solve the problem
#' s1 <- solve(p1)
#'
#' # print solution
#' print(s1)
#'
#' # plot solution
#' plot(s1, main = "solution", axes = FALSE, box = FALSE)
#'
#' # simulate a connectivity matrix to describe the relative strength
#' # of connectivity between different planning units
#' # for brevity, we will use cost data here so that pairs
#' # of adjacent planning units with higher cost values will have a
#' # higher connectivity value
#' # (but see ?connectivity_matrix for more information)
#' cm1 <- connectivity_matrix(sim_pu_raster, sim_pu_raster)
#'
#' # calculate connectivity associated with the solution
#' r1 <- eval_connectivity_summary(p1, s1, data = cm1)
#' print(r1)
#'
#' # build minimal conservation problem with polygon (sf) data
#' p2 <- problem(sim_pu_sf, sim_features, cost_column = "cost") %>%
#' add_min_set_objective() %>%
#' add_relative_targets(0.1) %>%
#' add_binary_decisions() %>%
#' add_default_solver(verbose = FALSE)
#'
#' # solve the problem
#' s2 <- solve(p2)
#'
#' # print first six rows of the attribute table
#' print(head(s2))
#'
#' # plot solution
#' plot(s2[, "solution_1"])
#'
#' # simulate connectivity matrix
#' # here, we will generate connectivity values randomly
#' # between all pairs of planning units
#' cm2 <- matrix(runif(nrow(sim_pu_sf) ^ 2), nrow = nrow(sim_pu_sf))
#'
#' # calculate connectivity associated with the solution
#' r2 <- eval_connectivity_summary(p2, s2[, "solution_1"], data = cm2)
#' print(r2)
#'
#' # build multi-zone conservation problem with polygon (sf) data
#' p3 <- problem(sim_pu_zones_sf, sim_features_zones,
#' cost_column = c("cost_1", "cost_2", "cost_3")) %>%
#' add_min_set_objective() %>%
#' add_relative_targets(matrix(runif(15, 0.1, 0.2), nrow = 5,
#' ncol = 3)) %>%
#' add_binary_decisions() %>%
#' add_default_solver(verbose = FALSE)
#'
#' # solve the problem
#' s3 <- solve(p3)
#'
#' # print first six rows of the attribute table
#' print(head(s3))
#'
#' # create new column representing the zone id that each planning unit
#' # was allocated to in the solution
#' s3$solution <- category_vector(
#' s3[, c("solution_1_zone_1", "solution_1_zone_2", "solution_1_zone_3")])
#' s3$solution <- factor(s3$solution)
#'
#' # plot solution
#' plot(s3[, "solution"])
#'
#' # simulate connectivity matrix
#' # here, we will add a new column to sim_pu_zones_sf with
#' # randomly simulated values and create a connectivity matrix
#' # based on the average simulated values of adjacent planning units
#' sim_pu_zones_sf$con <- runif(nrow(sim_pu_zones_sf))
#' cm3 <- connectivity_matrix(sim_pu_zones_sf, "con")
#'
#' # calculate connectivity associated with the solution
#' r3 <- eval_connectivity_summary(
#' p3, s3[, c("solution_1_zone_1", "solution_1_zone_2", "solution_1_zone_3")],
#' data = cm3)
#' print(r3)
#'
#' }
#' @name eval_connectivity_summary
#'
#' @exportMethod eval_connectivity_summary
#'
#' @aliases eval_connectivity_summary,ConservationProblem,ANY,ANY,Matrix-method eval_connectivity_summary,ConservationProblem,ANY,ANY,matrix-method eval_connectivity_summary,ConservationProblem,ANY,ANY,dgCMatrix-method eval_connectivity_summary,ConservationProblem,ANY,ANY,data.frame-method eval_connectivity_summary,ConservationProblem,ANY,ANY,array-method
NULL
# Generic: dispatches on the class of `data` (matrix / Matrix / data.frame /
# dgCMatrix / array). `zones` defaults to the identity matrix, i.e. only
# within-zone connectivity contributes to the summary.
#' @export
methods::setGeneric("eval_connectivity_summary",
signature = methods::signature("x", "solution", "zones", "data"),
function(x, solution, zones = diag(number_of_zones(x)), data)
standardGeneric("eval_connectivity_summary"))
# matrix method: coerce the dense matrix to sparse and re-dispatch.
#' @name eval_connectivity_summary
#' @usage \S4method{eval_connectivity_summary}{ConservationProblem,ANY,ANY,matrix}(x, solution, zones, data)
#' @rdname eval_connectivity_summary
methods::setMethod("eval_connectivity_summary",
methods::signature("ConservationProblem", "ANY", "ANY", "matrix"),
function(x, solution, zones, data) {
eval_connectivity_summary(x, solution, zones, methods::as(data, "dgCMatrix"))
})
# Matrix method: same coercion for any other Matrix subclass.
#' @name eval_connectivity_summary
#' @usage \S4method{eval_connectivity_summary}{ConservationProblem,ANY,ANY,Matrix}(x, solution, zones, data)
#' @rdname eval_connectivity_summary
methods::setMethod("eval_connectivity_summary",
methods::signature("ConservationProblem", "ANY", "ANY", "Matrix"),
function(x, solution, zones, data) {
eval_connectivity_summary(x, solution, zones, methods::as(data, "dgCMatrix"))
})
# data.frame method: interpret the data as Marxan-style boundary data and
# convert it to a matrix before re-dispatching.
#' @name eval_connectivity_summary
#' @usage \S4method{eval_connectivity_summary}{ConservationProblem,ANY,ANY,data.frame}(x, solution, zones, data)
#' @rdname eval_connectivity_summary
methods::setMethod("eval_connectivity_summary",
methods::signature("ConservationProblem", "ANY", "ANY", "data.frame"),
function(x, solution, zones, data) {
eval_connectivity_summary(
x, solution, zones, marxan_boundary_data_to_matrix(x, data))
})
# dgCMatrix method: the workhorse for two-dimensional connectivity data.
# Validates the inputs, scales the connectivity matrix by each zone-pair
# weight, and delegates to the internal summary routine.
#' @name eval_connectivity_summary
#' @usage \S4method{eval_connectivity_summary}{ConservationProblem,ANY,ANY,dgCMatrix}(x, solution, zones, data)
#' @rdname eval_connectivity_summary
methods::setMethod("eval_connectivity_summary",
methods::signature("ConservationProblem", "ANY", "ANY", "dgCMatrix"),
function(x, solution, zones, data) {
# assert valid arguments: square zones matrix with values in [-1, 1],
# square connectivity matrix with one row/column per total unit
assertthat::assert_that(
inherits(x, "ConservationProblem"),
inherits(zones, c("matrix", "Matrix")),
nrow(zones) == ncol(zones), is.numeric(as.vector(zones)),
all(is.finite(as.vector(zones))),
is.numeric(data@x), ncol(data) == nrow(data),
max(zones) <= 1, min(zones) >= -1,
number_of_total_units(x) == ncol(data),
number_of_zones(x) == ncol(zones),
all(is.finite(data@x)))
# coerce zones to matrix
zones <- as.matrix(zones)
# restrict the connectivity data to rows/columns that are planning units
indices <- x$planning_unit_indices()
data <- data[indices, indices, drop = FALSE]
# convert zones & dgCMatrix data to a nested list of sparse matrices,
# m[[z1]][[z2]] = data scaled by the (z1, z2) zone weight
m <- list()
for (z1 in seq_len(ncol(zones))) {
m[[z1]] <- list()
for (z2 in seq_len(nrow(zones))) {
m[[z1]][[z2]] <- data * zones[z1, z2]
}
}
# calculate connectivity
internal_eval_connectivity_summary(
x, planning_unit_solution_status(x, solution), m, data)
})
# array method: four-dimensional data (unit x unit x zone x zone) already
# encode the zone weighting, so `zones` must be NULL here.
#' @name eval_connectivity_summary
#' @usage \S4method{eval_connectivity_summary}{ConservationProblem,ANY,ANY,array}(x, solution, zones, data)
#' @rdname eval_connectivity_summary
methods::setMethod("eval_connectivity_summary",
methods::signature("ConservationProblem", "ANY", "ANY", "array"),
function(x, solution, zones, data) {
# assert valid arguments: dims 1-2 span all total units, dims 3-4 the zones
assertthat::assert_that(inherits(x, "ConservationProblem"),
is.null(zones),
is.array(data), length(dim(data)) == 4,
dim(data)[1] == number_of_total_units(x),
dim(data)[2] == number_of_total_units(x),
dim(data)[3] == number_of_zones(x),
dim(data)[4] == number_of_zones(x),
all(is.finite(data)))
# generate indices for units that are planning units
indices <- x$planning_unit_indices()
# convert array to list of list of sparseMatrix objects, subset to
# planning units: m[[z1]][[z2]] = slice for the (z1, z2) zone pair
m <- list()
for (z1 in seq_len(dim(data)[3])) {
m[[z1]] <- list()
for (z2 in seq_len(dim(data)[4])) {
m[[z1]][[z2]] <-
methods::as(data[indices, indices, z1, z2], "dgCMatrix")
}
}
# calculate connectivity; no unscaled matrix available, hence NULL
internal_eval_connectivity_summary(
x, planning_unit_solution_status(x, solution), m, NULL)
})
internal_eval_connectivity_summary <- function(
  x, solution, zone_scaled_data, data) {
  # Internal worker shared by all eval_connectivity_summary() methods.
  # `solution` is a planning-unit x zone status matrix,
  # `zone_scaled_data` a nested list of dgCMatrix objects (one per zone
  # pair), and `data` the unscaled connectivity matrix (NULL when the
  # input was a 4-dimensional array).
  assertthat::assert_that(
    inherits(x, "ConservationProblem"),
    is.matrix(solution),
    is.list(zone_scaled_data),
    inherits(data, c("dgCMatrix", "NULL")))
  # treat NA / non-finite statuses as "not selected"
  solution[!is.finite(solution)] <- 0
  # overall connectivity across all zones
  overall <- rcpp_connectivity(zone_scaled_data, solution)
  if (number_of_zones(x) == 1) {
    # single zone: the overall value is the whole story
    result <- tibble::tibble(summary = "overall", connectivity = overall)
  } else {
    # multiple zones: additionally report each zone on its own
    per_zone <- vapply(
      seq_len(ncol(solution)), FUN.VALUE = numeric(1),
      function(i) {
        # use the unscaled data when available, otherwise fall back to
        # the within-zone scaled matrix for the i'th zone
        if (is.null(data)) {
          zmat <- methods::as(zone_scaled_data[[i]][[i]], "dgCMatrix")
        } else {
          zmat <- data
        }
        rcpp_connectivity(list(list(zmat)), solution[, i, drop = FALSE])
      })
    result <- tibble::tibble(
      summary = c("overall", zone_names(x)),
      connectivity = c(overall, per_zone))
  }
  result
}
| /R/eval_connectivity_summary.R | no_license | diminera/prioritizr | R | false | false | 11,358 | r | #' @include internal.R ConservationProblem-proto.R
NULL
#' Evaluate solution connectivity
#'
#' Calculate the connectivity held within a solution to a conservation
#' planning [problem()].
#' This summary statistic evaluates the connectivity of a solution using
#' pair-wise connectivity values between combinations of planning units.
#'
#' @inheritParams add_connectivity_penalties
#' @inheritParams eval_cost_summary
#'
#' @details`
#' This summary statistic is comparable to the `Connectivity_In` metric
#' reported by the
#' [*Marxan* software](https://marxansolutions.org) (Ball *et al.* 2009).
#' It is calculated using the same equations used to penalize solutions
#' with connectivity data (i.e. [add_connectivity_penalties()]).
#' Specifically, it is calculated as the sum of the pair-wise connectivity
#' values in the argument to `data`, weighted by the value of the planning
#' units in the solution.
#'
#' @inheritSection eval_cost_summary Solution format
#' @inheritSection add_connectivity_penalties Data format
#'
#' @return
#' [tibble::tibble()] object describing the connectivity of the
#' solution.
#' It contains the following columns:
#'
#' \describe{
#'
#' \item{summary}{`character` description of the summary statistic.
#' The statistic associated with the `"overall"` value
#' in this column is calculated using the entire solution
#' (including all management zones if there are multiple zones).
#' If multiple management zones are present, then summary statistics
#' are also provided for each zone separately
#' (indicated using zone names).}
#'
#' \item{connectivity}{`numeric` connectivity value.
#' Greater values correspond to solutions associated with greater
#' connectivity.
#' Thus conservation planning exercises typically prefer solutions
#' with greater values.}
#'
#' }
#'
#' @references
#' Ball IR, Possingham HP, and Watts M (2009) *Marxan and relatives:
#' Software for spatial conservation prioritisation* in Spatial conservation
#' prioritisation: Quantitative methods and computational tools. Eds Moilanen
#' A, Wilson KA, and Possingham HP. Oxford University Press, Oxford, UK.
#'
#' @seealso [problem()], [summaries], [add_connectivity_penalties()].
#'
#' @examples
#' \dontrun{
#' # set seed for reproducibility
#' set.seed(500)
#'
#' # load data
#' data(sim_pu_raster, sim_pu_sf, sim_features,
#' sim_pu_zones_sf, sim_features_zones)
#'
#' # build minimal conservation problem with raster data
#' p1 <- problem(sim_pu_raster, sim_features) %>%
#' add_min_set_objective() %>%
#' add_relative_targets(0.1) %>%
#' add_binary_decisions() %>%
#' add_default_solver(verbose = FALSE)
#'
#' # solve the problem
#' s1 <- solve(p1)
#'
#' # print solution
#' print(s1)
#'
#' # plot solution
#' plot(s1, main = "solution", axes = FALSE, box = FALSE)
#'
#' # simulate a connectivity matrix to describe the relative strength
#' # of connectivity between different planning units
#' # for brevity, we will use cost data here so that pairs
#' # of adjacent planning units with higher cost values will have a
#' # higher connectivity value
#' # (but see ?connectivity_matrix for more information)
#' cm1 <- connectivity_matrix(sim_pu_raster, sim_pu_raster)
#'
#' # calculate connectivity associated with the solution
#' r1 <- eval_connectivity_summary(p1, s1, data = cm1)
#' print(r1)
#'
#' # build minimal conservation problem with polygon (sf) data
#' p2 <- problem(sim_pu_sf, sim_features, cost_column = "cost") %>%
#' add_min_set_objective() %>%
#' add_relative_targets(0.1) %>%
#' add_binary_decisions() %>%
#' add_default_solver(verbose = FALSE)
#'
#' # solve the problem
#' s2 <- solve(p2)
#'
#' # print first six rows of the attribute table
#' print(head(s2))
#'
#' # plot solution
#' plot(s2[, "solution_1"])
#'
#' # simulate connectivity matrix
#' # here, we will generate connectivity values randomly
#' # between all pairs of planning units
#' cm2 <- matrix(runif(nrow(sim_pu_sf) ^ 2), nrow = nrow(sim_pu_sf))
#'
#' # calculate connectivity associated with the solution
#' r2 <- eval_connectivity_summary(p2, s2[, "solution_1"], data = cm2)
#' print(r2)
#'
#' # build multi-zone conservation problem with polygon (sf) data
#' p3 <- problem(sim_pu_zones_sf, sim_features_zones,
#' cost_column = c("cost_1", "cost_2", "cost_3")) %>%
#' add_min_set_objective() %>%
#' add_relative_targets(matrix(runif(15, 0.1, 0.2), nrow = 5,
#' ncol = 3)) %>%
#' add_binary_decisions() %>%
#' add_default_solver(verbose = FALSE)
#'
#' # solve the problem
#' s3 <- solve(p3)
#'
#' # print first six rows of the attribute table
#' print(head(s3))
#'
#' # create new column representing the zone id that each planning unit
#' # was allocated to in the solution
#' s3$solution <- category_vector(
#' s3[, c("solution_1_zone_1", "solution_1_zone_2", "solution_1_zone_3")])
#' s3$solution <- factor(s3$solution)
#'
#' # plot solution
#' plot(s3[, "solution"])
#'
#' # simulate connectivity matrix
#' # here, we will add a new column to sim_pu_zones_sf with
#' # randomly simulated values and create a connectivity matrix
#' # based on the average simulated values of adjacent planning units
#' sim_pu_zones_sf$con <- runif(nrow(sim_pu_zones_sf))
#' cm3 <- connectivity_matrix(sim_pu_zones_sf, "con")
#'
#' # calculate connectivity associated with the solution
#' r3 <- eval_connectivity_summary(
#' p3, s3[, c("solution_1_zone_1", "solution_1_zone_2", "solution_1_zone_3")],
#' data = cm3)
#' print(r3)
#'
#' }
#' @name eval_connectivity_summary
#'
#' @exportMethod eval_connectivity_summary
#'
#' @aliases eval_connectivity_summary,ConservationProblem,ANY,ANY,Matrix-method eval_connectivity_summary,ConservationProblem,ANY,ANY,matrix-method eval_connectivity_summary,ConservationProblem,ANY,ANY,dgCMatrix-method eval_connectivity_summary,ConservationProblem,ANY,ANY,data.frame-method eval_connectivity_summary,ConservationProblem,ANY,ANY,array-method
NULL
#' @export
methods::setGeneric("eval_connectivity_summary",
signature = methods::signature("x", "solution", "zones", "data"),
function(x, solution, zones = diag(number_of_zones(x)), data)
standardGeneric("eval_connectivity_summary"))
#' @name eval_connectivity_summary
#' @usage \S4method{eval_connectivity_summary}{ConservationProblem,ANY,ANY,matrix}(x, solution, zones, data)
#' @rdname eval_connectivity_summary
methods::setMethod("eval_connectivity_summary",
methods::signature("ConservationProblem", "ANY", "ANY", "matrix"),
function(x, solution, zones, data) {
eval_connectivity_summary(x, solution, zones, methods::as(data, "dgCMatrix"))
})
#' @name eval_connectivity_summary
#' @usage \S4method{eval_connectivity_summary}{ConservationProblem,ANY,ANY,Matrix}(x, solution, zones, data)
#' @rdname eval_connectivity_summary
methods::setMethod("eval_connectivity_summary",
methods::signature("ConservationProblem", "ANY", "ANY", "Matrix"),
function(x, solution, zones, data) {
eval_connectivity_summary(x, solution, zones, methods::as(data, "dgCMatrix"))
})
#' @name eval_connectivity_summary
#' @usage \S4method{eval_connectivity_summary}{ConservationProblem,ANY,ANY,data.frame}(x, solution, zones, data)
#' @rdname eval_connectivity_summary
methods::setMethod("eval_connectivity_summary",
methods::signature("ConservationProblem", "ANY", "ANY", "data.frame"),
function(x, solution, zones, data) {
eval_connectivity_summary(
x, solution, zones, marxan_boundary_data_to_matrix(x, data))
})
#' @name eval_connectivity_summary
#' @usage \S4method{eval_connectivity_summary}{ConservationProblem,ANY,ANY,dgCMatrix}(x, solution, zones, data)
#' @rdname eval_connectivity_summary
methods::setMethod("eval_connectivity_summary",
methods::signature("ConservationProblem", "ANY", "ANY", "dgCMatrix"),
function(x, solution, zones, data) {
# assert valid arguments
assertthat::assert_that(
inherits(x, "ConservationProblem"),
inherits(zones, c("matrix", "Matrix")),
nrow(zones) == ncol(zones), is.numeric(as.vector(zones)),
all(is.finite(as.vector(zones))),
is.numeric(data@x), ncol(data) == nrow(data),
max(zones) <= 1, min(zones) >= -1,
number_of_total_units(x) == ncol(data),
number_of_zones(x) == ncol(zones),
all(is.finite(data@x)))
# coerce zones to matrix
zones <- as.matrix(zones)
indices <- x$planning_unit_indices()
data <- data[indices, indices, drop = FALSE]
# convert zones & dgCMatrix data to list of sparse matrices
m <- list()
for (z1 in seq_len(ncol(zones))) {
m[[z1]] <- list()
for (z2 in seq_len(nrow(zones))) {
m[[z1]][[z2]] <- data * zones[z1, z2]
}
}
# calculate connectivity
internal_eval_connectivity_summary(
x, planning_unit_solution_status(x, solution), m, data)
})
#' @name eval_connectivity_summary
#' @usage \S4method{eval_connectivity_summary}{ConservationProblem,ANY,ANY,array}(x, solution, zones, data)
#' @rdname eval_connectivity_summary
methods::setMethod("eval_connectivity_summary",
methods::signature("ConservationProblem", "ANY", "ANY", "array"),
function(x, solution, zones, data) {
# assert valid arguments
assertthat::assert_that(inherits(x, "ConservationProblem"),
is.null(zones),
is.array(data), length(dim(data)) == 4,
dim(data)[1] == number_of_total_units(x),
dim(data)[2] == number_of_total_units(x),
dim(data)[3] == number_of_zones(x),
dim(data)[4] == number_of_zones(x),
all(is.finite(data)))
# generate indices for units that are planning units
indices <- x$planning_unit_indices()
# convert array to list of list of sparseMatrix objects
m <- list()
for (z1 in seq_len(dim(data)[3])) {
m[[z1]] <- list()
for (z2 in seq_len(dim(data)[4])) {
m[[z1]][[z2]] <-
methods::as(data[indices, indices, z1, z2], "dgCMatrix")
}
}
# calculate connectivity
internal_eval_connectivity_summary(
x, planning_unit_solution_status(x, solution), m, NULL)
})
internal_eval_connectivity_summary <- function(
x, solution, zone_scaled_data, data) {
# assert valid arguments
assertthat::assert_that(
inherits(x, "ConservationProblem"),
is.matrix(solution),
is.list(zone_scaled_data),
inherits(data, c("dgCMatrix", "NULL")))
# manually coerce NA values in solution to 0
solution[!is.finite(solution)] <- 0
# calculate overall connectivity
v <- rcpp_connectivity(zone_scaled_data, solution)
# main calculations
if (number_of_zones(x) == 1) {
## store result for single zone
out <- tibble::tibble(summary = "overall", connectivity = v)
} else {
## calculate connectivity for each zone separately
zv <- vapply(seq_len(ncol(solution)), FUN.VALUE = numeric(1), function(z) {
## prepare data the z'th zone
if (is.null(data)) {
zd <- methods::as(zone_scaled_data[[z]][[z]], "dgCMatrix")
} else {
zd <- data
}
## calculate connectivity
rcpp_connectivity(list(list(zd)), solution[, z, drop = FALSE])
})
## store results for multiple zones
out <- tibble::tibble(
summary = c("overall", zone_names(x)), connectivity = c(v, zv))
}
# return result
out
}
|
\name{cacc.cut.points}
\alias{cacc.cut.points}
\title{Compute cut points.}
\usage{
cacc.cut.points(values, classes)
}
\arguments{
\item{values}{vector with the continuous values to
discretize.}
\item{classes}{vector with the classes associated with
the values in \code{values}.}
}
\value{
Vector with the cut points.
}
\description{
Compute the cut points for discretizing the given continuous values, guided by their associated class labels.
}
| /man/cacc.cut.points.Rd | no_license | b3aver/uarray | R | false | false | 396 | rd | \name{cacc.cut.points}
\alias{cacc.cut.points}
\title{Compute cut points.}
\usage{
cacc.cut.points(values, classes)
}
\arguments{
\item{values}{vector with the continuous values to
discretize.}
\item{classes}{vector with the classes associated with
the values in \code{values}.}
}
\value{
Vector with the cut points.
}
\description{
Compute the cut points for the given values.
}
|
# Network-dependent check: verifies that the Baltimore open-data API base
# URL used by the package still resolves.
test_that("urls are valid", {
# url_exists() is an unexported helper; NOTE(review): presumably it
# returns FALSE (rather than erroring) for unreachable hosts — confirm.
baseurl <- "https://data.baltimorecity.gov/api/views/dz54-2aru/"
testthat::expect_true(externalrdata:::url_exists(baseurl))
})
| /tests/testthat/test-get.R | no_license | jsta/externalrdata | R | false | false | 161 | r | test_that("urls are valid", {
baseurl <- "https://data.baltimorecity.gov/api/views/dz54-2aru/"
testthat::expect_true(externalrdata:::url_exists(baseurl))
})
|
# Maps each diviz output name to the XMCDA root tag used when serializing
# that file. Only one of the two lists applies per run, depending on the
# XMCDA version of the description the file was generated from.
# TODO depending on whether the file was generated from a description based on
# XMCDA v2 or v3, only one list is correct, either XMCDA_v2_TAG_FOR_FILENAME
# or XMCDA_v3_TAG_FOR_FILENAME: check them to determine which one should be
# adapted.
XMCDA_v2_TAG_FOR_FILENAME <- list(
# output name -> XMCDA v2 tag
necessaryDominance = "alternativesComparisons",
possibleDominance = "alternativesComparisons",
messages = "methodMessages"
)
XMCDA_v3_TAG_FOR_FILENAME <- list(
# output name -> XMCDA v3 tag
necessaryDominance = "alternativesMatrix",
possibleDominance = "alternativesMatrix",
messages = "programExecutionResult"
)
# Look up the XMCDA v3 root tag for the given output file name
# (NULL if the name is unknown).
xmcda_v3_tag <- function(outputName) {
  XMCDA_v3_TAG_FOR_FILENAME[[outputName]]
}
# Look up the XMCDA v2 root tag for the given output file name
# (NULL if the name is unknown).
xmcda_v2_tag <- function(outputName) {
  XMCDA_v2_TAG_FOR_FILENAME[[outputName]]
}
# Convert a preference-relation matrix into XMCDA alternativesMatrix objects.
#
# Args:
#   results: character matrix whose [i, j] entry is 'N' (necessary
#     preference, which implies possible preference) or 'P' (possible
#     preference only); any other value records no relation. Rows/columns
#     follow the order of the IDs in `alternatives`.
#   alternatives: Java (rJava) object holding the XMCDA alternatives; the
#     i'th row/column of `results` corresponds to the i'th alternative ID.
#   programExecutionResult: unused; kept for interface compatibility with
#     the other diviz output handlers.
# Returns:
#   list with XMCDA objects `necessaryDominance` and `possibleDominance`.
convert <- function(results, alternatives, programExecutionResult) {
  necessaryResults <- .jnew("org/xmcda/XMCDA")
  possibleResults <- .jnew("org/xmcda/XMCDA")
  nMatrix <- J("org.xmcda.Factory")$alternativesMatrix()
  pMatrix <- J("org.xmcda.Factory")$alternativesMatrix()
  preferenceMatrix <- results
  # every stored relation carries the same qualified value 1 ("holds")
  int1 <- .jnew("java/lang/Integer", as.integer(1))
  qValues <- .jnew("org/xmcda/QualifiedValues")
  qValue <- .jnew("org/xmcda/QualifiedValue", .jcast(int1))
  qValues$add(qValue)
  # seq_len() is safe for empty matrices (1:nrow would yield c(1, 0))
  for (i in seq_len(nrow(preferenceMatrix))) {
    for (j in seq_len(ncol(preferenceMatrix))) {
      initial <- .jcast(alternatives$get(alternatives$getIDs()$get(as.integer(i - 1))))
      terminal <- .jcast(alternatives$get(alternatives$getIDs()$get(as.integer(j - 1))))
      if (preferenceMatrix[i, j] == 'N') {
        # necessary preference implies possible preference
        nMatrix$put(.jnew("org/xmcda/utils/Coord", initial, terminal), qValues)
        pMatrix$put(.jnew("org/xmcda/utils/Coord", initial, terminal), qValues)
      } else if (preferenceMatrix[i, j] == 'P') {
        pMatrix$put(.jnew("org/xmcda/utils/Coord", initial, terminal), qValues)
      }
    }
  }
  necessaryResults$alternativesMatricesList$add(nMatrix)
  possibleResults$alternativesMatricesList$add(pMatrix)
  list(necessaryDominance = necessaryResults, possibleDominance = possibleResults)
}
| /ImpreciseDEAValueADD/ImpreciseDEAValueADDPreferenceRelations/src/outputsHandler.R | no_license | alabijak/diviz_DEA | R | false | false | 2,197 | r | # TODO depending on whether the file was generated from a description based on
# XMCDA v2 or v3, only one list is correct, either XMCDA_v2_TAG_FOR_FILENAME
# or XMCDA_v3_TAG_FOR_FILENAME: check them to determine which one should be
# adapted.
XMCDA_v2_TAG_FOR_FILENAME <- list(
# output name -> XMCDA v2 tag
necessaryDominance = "alternativesComparisons",
possibleDominance = "alternativesComparisons",
messages = "methodMessages"
)
XMCDA_v3_TAG_FOR_FILENAME <- list(
# output name -> XMCDA v3 tag
necessaryDominance = "alternativesMatrix",
possibleDominance = "alternativesMatrix",
messages = "programExecutionResult"
)
xmcda_v3_tag <- function(outputName){
return (XMCDA_v3_TAG_FOR_FILENAME[[outputName]])
}
xmcda_v2_tag <- function(outputName){
return (XMCDA_v2_TAG_FOR_FILENAME[[outputName]])
}
convert <- function(results, alternatives, programExecutionResult) {
necessaryResults <-.jnew("org/xmcda/XMCDA")
possibleResults <-.jnew("org/xmcda/XMCDA")
nMatrix <-J("org.xmcda.Factory")$alternativesMatrix() #.jnew("org/xmcda/AlternativesMatrix")
pMatrix <- J("org.xmcda.Factory")$alternativesMatrix()
preferenceMatrix <- results
int1 <-.jnew("java/lang/Integer", as.integer(1))
qValues <- .jnew("org/xmcda/QualifiedValues")
qValue <- .jnew("org/xmcda/QualifiedValue", .jcast(int1))
qValues$add(qValue)
for(i in 1:nrow(preferenceMatrix))
{
for(j in 1:ncol(preferenceMatrix))
{
initial <- .jcast(alternatives$get(alternatives$getIDs()$get(as.integer(i - 1))))
terminal <- .jcast(alternatives$get(alternatives$getIDs()$get(as.integer(j - 1))))
if(preferenceMatrix[i, j] == 'N')
{
nMatrix$put(.jnew("org/xmcda/utils/Coord", initial, terminal), qValues)
pMatrix$put(.jnew("org/xmcda/utils/Coord", initial, terminal), qValues)
}
else if(preferenceMatrix[i, j] == 'P')
{
pMatrix$put(.jnew("org/xmcda/utils/Coord", initial, terminal), qValues)
}
}
}
necessaryResults$alternativesMatricesList$add(nMatrix)
possibleResults$alternativesMatricesList$add(pMatrix)
return(list(necessaryDominance = necessaryResults, possibleDominance=possibleResults))
}
|
# This tests the functionality of modelGeneCV2.
# library(testthat); library(scran); source("test-model-cv2.R")
# Shared fixture: a 1000-gene x 200-cell negative-binomial count matrix
# plus its (non-log) normalized counterpart.
set.seed(20001)
ncells <- 200
ngenes <- 1000
means <- 2^runif(ngenes, -1, 5)
dummy <- matrix(rnbinom(ngenes*ncells, mu=means, size=5), ncol=ncells, nrow=ngenes)
rownames(dummy) <- paste0("X", seq_len(ngenes))
library(scater)
dummy2 <- normalizeCounts(dummy, log=FALSE)
# Baseline: with no blocking, means/totals come straight from the
# normalized counts, the trend is evaluated at each mean, and the
# p-value ordering must follow the ratio (larger ratio = smaller p).
test_that("modelGeneCV2 works correctly without blocking", {
out <- modelGeneCV2(dummy)
expect_equal(out$mean, rowMeans(dummy2))
expect_equal(out$total, DelayedMatrixStats::rowVars(dummy2)/out$mean^2)
expect_equal(out$trend, metadata(out)$trend(out$mean))
expect_equal(out$ratio, out$total/out$trend)
expect_equal(order(out$p.value), order(out$ratio, decreasing=TRUE))
})
# Supplying size factors should be equivalent to rescaling the counts
# directly (with mean-centered factors) and using unit size factors.
test_that("modelGeneCV2 responds to size factors", {
sf <- runif(ncells)
out <- modelGeneCV2(dummy, size.factors=sf)
ref <- modelGeneCV2(t(t(dummy)/sf*mean(sf)), size.factors=rep(1, ncells))
expect_equal(out, ref)
})
# Blocked analysis (equal weighting): per-block statistics must match a
# standalone run on each block, and the combined statistics must be the
# unweighted geometric means across blocks.
test_that("modelGeneCV2 works correctly with blocking, no weighting", {
block <- sample(LETTERS[1:5], ncells, replace=TRUE)
out <- modelGeneCV2(dummy, block=block)
accumulated.mean <- accumulated.total <- accumulated.trend <- 0
for (i in unique(block)) {
current <- i==block
ref <- modelGeneCV2(dummy2[,current], size.factors=rep(1, sum(current)))
subout <- out$per.block[[i]]
expect_identical(ref$mean, subout$mean)
expect_identical(ref$total, subout$total)
expect_identical(ref$trend, subout$trend)
expect_identical(ref$ratio, subout$ratio)
expect_identical(ref$p.value, subout$p.value)
# accumulate logs for the geometric-mean check below
accumulated.mean <- accumulated.mean + log(ref$mean)
accumulated.total <- accumulated.total + log(ref$total)
accumulated.trend <- accumulated.trend + log(ref$trend)
}
# Check combining statistics works correctly.
n <- length(unique(block))
expect_equal(out$mean, exp(accumulated.mean/n))
expect_equal(out$total, exp(accumulated.total/n))
expect_equal(out$trend, exp(accumulated.trend/n))
expect_equal(out$ratio, out$total/out$trend)
all.p <- lapply(out$per.block, "[[", i="p.value")
expect_equal(out$p.value, do.call(combinePValues, all.p))
# Responds to choice of method.
out2 <- modelGeneCV2(dummy, block=block, method="z")
all.p <- lapply(out2$per.block, "[[", i="p.value")
expect_equal(out2$p.value, do.call(combinePValues, c(all.p, list(method='z'))))
})
# Blocked analysis with equiweight=FALSE: combined statistics must be the
# geometric means weighted by the number of cells per block, and combined
# p-values must use those same weights.
test_that("modelGeneCV2 works correctly with blocking and weighting", {
block <- sample(LETTERS[1:5], ncells, replace=TRUE)
out <- modelGeneCV2(dummy, block=block, equiweight=FALSE)
accumulated.mean <- accumulated.total <- accumulated.trend <- 0
for (i in unique(block)) {
current <- i==block
ref <- modelGeneCV2(dummy2[,current], size.factors=rep(1, sum(current)))
subout <- out$per.block[[i]]
expect_identical(ref$mean, subout$mean)
expect_identical(ref$total, subout$total)
expect_identical(ref$trend, subout$trend)
expect_identical(ref$ratio, subout$ratio)
expect_identical(ref$p.value, subout$p.value)
# weight each block's contribution by its number of cells
n <- sum(i==block)
accumulated.mean <- accumulated.mean + log(ref$mean) * n
accumulated.total <- accumulated.total + log(ref$total) * n
accumulated.trend <- accumulated.trend + log(ref$trend) * n
}
# Check combining statistics works correctly.
n <- length(block)
expect_equal(out$mean, exp(accumulated.mean/n))
expect_equal(out$total, exp(accumulated.total/n))
expect_equal(out$trend, exp(accumulated.trend/n))
expect_equal(out$ratio, out$total/out$trend)
all.p <- lapply(out$per.block, "[[", i="p.value")
expect_equal(out$p.value, do.call(combinePValues, all.p))
# Responds to choice of method with weighting.
out2 <- modelGeneCV2(dummy, block=block, method="z", equiweight=FALSE)
all.p <- lapply(out2$per.block, "[[", i="p.value")
w <- countMatches(names(all.p), block)
expect_equal(out2$p.value, do.call(combinePValues, c(all.p, list(method='z', weights=w))))
})
# Blocks with too few cells (no residual d.f.) should be silently dropped,
# so the result matches an analysis on the remaining cells only; an error
# is raised only when no block has any residual d.f.
test_that("modelGeneCV2 handles blocks with no residual d.f.", {
out <- modelGeneCV2(dummy2, size.factors=rep(1, ncells), block=rep(1:2, c(1, ncells-1)))
# size.factors must have one entry per column of the supplied matrix
# (the originals passed ncells factors for ncells-1 / ncells-2 columns).
ref <- modelGeneCV2(dummy2[,-1], size.factors=rep(1, ncells-1))
expect_equal(out$mean, ref$mean)
expect_equal(out$total, ref$total)
out <- modelGeneCV2(dummy, size.factors=rep(1, ncells), block=rep(1:3, c(1, 1, ncells-2)))
ref <- modelGeneCV2(dummy[,-c(1,2)], size.factors=rep(1, ncells-2))
expect_equal(out$mean, ref$mean)
expect_equal(out$total, ref$total)
expect_error(modelGeneCV2(dummy[,1,drop=FALSE]), "no residual d.f. in any level")
})
# subset.row restricts which genes are reported; subset.fit restricts
# which genes drive the trend while all genes are still reported.
test_that("modelGeneCV2 works with subsetting options", {
chosen <- sample(ngenes, ngenes/2)
out <- modelGeneCV2(dummy, subset.row=chosen)
ref <- modelGeneCV2(dummy[chosen,])
expect_equal(out, ref)
# Subsetting by fit works.
out2 <- modelGeneCV2(dummy, subset.fit=chosen, size.factors=librarySizeFactors(dummy, chosen))
expect_identical(rownames(out2), rownames(dummy))
expect_equal(out2[chosen,1:5], ref[,1:5])
# Zero-length subsetting.
empty <- modelGeneCV2(dummy, subset.row=integer(0), subset.fit=chosen, size.factors=librarySizeFactors(dummy))
expect_identical(nrow(empty), 0L)
expect_error(modelGeneCV2(dummy, subset.fit=integer(0)), "need at least 2 points")
})
test_that("modelGeneCV2 works with SingleCellExperiment objects", {
    # An SCE with a 'counts' assay should behave like the raw matrix.
    sce <- SingleCellExperiment(list(counts=dummy))
    expect_equal(modelGeneCV2(sce), modelGeneCV2(dummy))
    # Size factors stored in the SCE are picked up automatically.
    sizeFactors(sce) <- runif(ncol(sce))
    expect_equal(modelGeneCV2(sce), modelGeneCV2(dummy, sizeFactors(sce)))
    # Alternative assay names are respected via assay.type=.
    sce <- SingleCellExperiment(list(whee=dummy))
    expect_equal(modelGeneCV2(sce, assay.type="whee"), modelGeneCV2(dummy))
})
#######################################
#######################################
#######################################
set.seed(201001)
# Simulate a negative binomial count matrix (1000 genes x 200 cells) with
# gene means drawn log2-uniformly on [-1, 5) and NB size (dispersion) of 5.
ncells <- 200
ngenes <- 1000
means <- 2^runif(ngenes, -1, 5)
dummy <- matrix(rnbinom(ngenes*ncells, mu=means, size=5), ncol=ncells, nrow=ngenes)
rownames(dummy) <- paste0("X", seq_len(ngenes))
# Matching set of 100 simulated spike-in transcripts, generated the same way.
nspikes <- 100
smeans <- 2^runif(nspikes, -1, 5)
spikes <- matrix(rnbinom(nspikes*ncells, mu=smeans, size=5), ncol=ncells, nrow=nspikes)
rownames(spikes) <- paste0("X", seq_len(nspikes))
# Library-size-normalized (non-log) matrices, used as references in the
# modelGeneCV2WithSpikes tests below.
normdummy <- scater::normalizeCounts(dummy, log=FALSE)
normspikes <- scater::normalizeCounts(spikes, log=FALSE)
test_that("modelGeneCV2WithSpikes works correctly in the basic case", {
    out <- modelGeneCV2WithSpikes(dummy, spikes)
    # Gene-level mean/total should match a plain CV2 analysis of the
    # normalized counts.
    ref <- modelGeneCV2(normdummy)
    expect_equal(out$mean, ref$mean)
    expect_equal(out$total, ref$total)
    # Spike-in means and CV2 reported in the metadata should be computed
    # from the normalized spike-in counts.
    expect_equal(metadata(out)$mean, rowMeans(normspikes))
    expect_equal(metadata(out)$cv2, DelayedMatrixStats::rowVars(normspikes)/metadata(out)$mean^2)
    # The trend is fitted to the spike-in statistics and then evaluated at
    # the gene-level means.
    fit <- fitTrendCV2(metadata(out)$mean, metadata(out)$cv2, ncells)
    expect_identical(fit$std.dev, metadata(out)$std.dev)
    expect_equal(out$trend, fit$trend(ref$mean))
    expect_equal(out$ratio, out$total/out$trend)
})
test_that("modelGeneCV2WithSpikes works correctly with blocking", {
    block <- sample(LETTERS[1:5], ncells, replace=TRUE)
    out <- modelGeneCV2WithSpikes(dummy, spikes, block=block)
    # Combined mean/total should agree with a blocked CV2 analysis of the
    # normalized counts.
    ref <- modelGeneCV2(normdummy, block=block)
    expect_equal(out$mean, ref$mean)
    expect_equal(out$total, ref$total)
    accumulated.mean <- accumulated.total <- accumulated.trend <- 0
    sf1 <- scater::librarySizeFactors(dummy)
    sf2 <- scater::librarySizeFactors(spikes)
    # Each per-block result should match a standalone analysis of that block,
    # with spike-in size factors re-centered to the gene size factors.
    for (i in unique(block)) {
        current <- i==block
        # Forcibly avoid auto-centering of size.factors, to use as a reference here.
        ssf1 <- sf1[current]
        ssf2 <- sf2[current]
        ssf2 <- ssf2/mean(ssf2) * mean(ssf1)
        ref <- modelGeneCV2WithSpikes(t(t(dummy[,current])/ssf1),
            size.factors=rep(1, sum(current)),
            spikes=t(t(spikes[,current])/ssf2),
            spike.size.factors=rep(1, sum(current)))
        subout <- out$per.block[[i]]
        expect_equal(ref$mean, subout$mean)
        expect_equal(ref$total, subout$total)
        expect_equal(ref$trend, subout$trend)
        expect_equal(ref$ratio, subout$ratio)
        expect_equal(ref$p.value, subout$p.value)
        # Accumulate log-statistics to reconstruct the combined values below.
        accumulated.mean <- accumulated.mean + log(ref$mean)
        accumulated.total <- accumulated.total + log(ref$total)
        accumulated.trend <- accumulated.trend + log(ref$trend)
    }
    # Check combining statistics works correctly.
    # Combined statistics should be the geometric mean across blocks.
    n <- length(unique(block))
    expect_equal(out$mean, exp(accumulated.mean/n))
    expect_equal(out$total, exp(accumulated.total/n))
    expect_equal(out$trend, exp(accumulated.trend/n))
    expect_equal(out$ratio, out$total/out$trend)
    all.p <- lapply(out$per.block, "[[", i="p.value")
    expect_equal(out$p.value, do.call(combinePValues, all.p))
})
test_that("modelGeneCV2WithSpikes centers size factors correctly", {
    # Without blocking.
    # Supplying uncentered size factors should be equivalent to dividing the
    # counts by the mean-centered factors and using unit size factors.
    sf1 <- 2^rnorm(ncells, 0.05)
    sf2 <- 2^rnorm(ncells, 0.05)
    out <- modelGeneCV2WithSpikes(dummy, size.factors=sf1, spikes=spikes, spike.size.factors=sf2)
    msf1 <- sf1/mean(sf1)
    msf2 <- sf2/mean(sf2)
    ref <- modelGeneCV2WithSpikes(t(t(dummy)/msf1), size.factors=rep(1, ncells),
        spikes=t(t(spikes)/msf2), spike.size.factors=rep(1, ncells))
    expect_equal(ref$mean, out$mean)
    expect_equal(ref$total, out$total)
    expect_equal(ref$trend, out$trend)
    expect_equal(ref$ratio, out$ratio)
    expect_equal(ref$p.value, out$p.value)
    # With blocking.
    # Within each block, spike-in size factors should be re-centered relative
    # to the gene size factors of that block.
    block <- sample(LETTERS[1:5], ncells, replace=TRUE)
    out <- modelGeneCV2WithSpikes(dummy, size.factors=sf1, spikes=spikes, spike.size.factors=sf2, block=block)
    for (i in unique(block)) {
        current <- i==block
        ssf1 <- msf1[current]
        ssf2 <- msf2[current]
        ssf2 <- ssf2/mean(ssf2) * mean(ssf1)
        ref <- modelGeneCV2WithSpikes(t(t(dummy[,current])/ssf1),
            size.factors=rep(1, sum(current)),
            spikes=t(t(spikes[,current])/ssf2),
            spike.size.factors=rep(1, sum(current)))
        subout <- out$per.block[[i]]
        expect_equal(ref$mean, subout$mean)
        expect_equal(ref$total, subout$total)
        expect_equal(ref$trend, subout$trend)
        expect_equal(ref$ratio, subout$ratio)
        expect_equal(ref$p.value, subout$p.value)
    }
})
# BUG FIX: this test reused the name of an earlier test ("modelGeneCV2 works
# with SingleCellExperiment objects") even though it actually exercises
# modelGeneCV2WithSpikes(); the duplicate name made failures ambiguous.
test_that("modelGeneCV2WithSpikes works with SingleCellExperiment objects", {
    # Spike-ins supplied via an altExp should match raw-matrix input.
    X <- SingleCellExperiment(list(counts=dummy))
    altExp(X, "spikes") <- SingleCellExperiment(list(counts=spikes))
    expect_equal(modelGeneCV2WithSpikes(X, spikes="spikes"), modelGeneCV2WithSpikes(dummy, spikes))
    # Custom assay names apply to both the main and alternative experiments.
    X <- SingleCellExperiment(list(whee=dummy))
    altExp(X, "spikes") <- SingleCellExperiment(list(whee=spikes))
    expect_equal(modelGeneCV2WithSpikes(X, "spikes", assay.type="whee"), modelGeneCV2WithSpikes(dummy, spikes))
    # Size factors stored on either experiment are used automatically.
    X <- SingleCellExperiment(list(whee=dummy))
    sizeFactors(X) <- sf1 <- 2^rnorm(ncells, 0.1)
    altExp(X, "spikes") <- SingleCellExperiment(list(whee=spikes))
    sizeFactors(altExp(X)) <- sf2 <- 2^rnorm(ncells, 0.1)
    expect_equal(modelGeneCV2WithSpikes(X, "spikes", assay.type="whee"), modelGeneCV2WithSpikes(dummy, size.factors=sf1, spikes, spike.size.factors=sf2))
})
| /tests/testthat/test-model-cv2.R | no_license | piyushjo15/scran | R | false | false | 11,414 | r | # This tests the functionality of modelGeneCV2.
# library(testthat); library(scran); source("test-model-cv2.R")
set.seed(20001)
ncells <- 200
ngenes <- 1000
means <- 2^runif(ngenes, -1, 5)
dummy <- matrix(rnbinom(ngenes*ncells, mu=means, size=5), ncol=ncells, nrow=ngenes)
rownames(dummy) <- paste0("X", seq_len(ngenes))
library(scater)
dummy2 <- normalizeCounts(dummy, log=FALSE)
test_that("modelGeneCV2 works correctly without blocking", {
out <- modelGeneCV2(dummy)
expect_equal(out$mean, rowMeans(dummy2))
expect_equal(out$total, DelayedMatrixStats::rowVars(dummy2)/out$mean^2)
expect_equal(out$trend, metadata(out)$trend(out$mean))
expect_equal(out$ratio, out$total/out$trend)
expect_equal(order(out$p.value), order(out$ratio, decreasing=TRUE))
})
test_that("modelGeneCV2 responds to size factors", {
sf <- runif(ncells)
out <- modelGeneCV2(dummy, size.factors=sf)
ref <- modelGeneCV2(t(t(dummy)/sf*mean(sf)), size.factors=rep(1, ncells))
expect_equal(out, ref)
})
test_that("modelGeneCV2 works correctly with blocking, no weighting", {
block <- sample(LETTERS[1:5], ncells, replace=TRUE)
out <- modelGeneCV2(dummy, block=block)
accumulated.mean <- accumulated.total <- accumulated.trend <- 0
for (i in unique(block)) {
current <- i==block
ref <- modelGeneCV2(dummy2[,current], size.factors=rep(1, sum(current)))
subout <- out$per.block[[i]]
expect_identical(ref$mean, subout$mean)
expect_identical(ref$total, subout$total)
expect_identical(ref$trend, subout$trend)
expect_identical(ref$ratio, subout$ratio)
expect_identical(ref$p.value, subout$p.value)
accumulated.mean <- accumulated.mean + log(ref$mean)
accumulated.total <- accumulated.total + log(ref$total)
accumulated.trend <- accumulated.trend + log(ref$trend)
}
# Check combining statistics works correctly.
n <- length(unique(block))
expect_equal(out$mean, exp(accumulated.mean/n))
expect_equal(out$total, exp(accumulated.total/n))
expect_equal(out$trend, exp(accumulated.trend/n))
expect_equal(out$ratio, out$total/out$trend)
all.p <- lapply(out$per.block, "[[", i="p.value")
expect_equal(out$p.value, do.call(combinePValues, all.p))
# Responds to choice of method.
out2 <- modelGeneCV2(dummy, block=block, method="z")
all.p <- lapply(out2$per.block, "[[", i="p.value")
expect_equal(out2$p.value, do.call(combinePValues, c(all.p, list(method='z'))))
})
test_that("modelGeneCV2 works correctly with blocking and weighting", {
block <- sample(LETTERS[1:5], ncells, replace=TRUE)
out <- modelGeneCV2(dummy, block=block, equiweight=FALSE)
accumulated.mean <- accumulated.total <- accumulated.trend <- 0
for (i in unique(block)) {
current <- i==block
ref <- modelGeneCV2(dummy2[,current], size.factors=rep(1, sum(current)))
subout <- out$per.block[[i]]
expect_identical(ref$mean, subout$mean)
expect_identical(ref$total, subout$total)
expect_identical(ref$trend, subout$trend)
expect_identical(ref$ratio, subout$ratio)
expect_identical(ref$p.value, subout$p.value)
n <- sum(i==block)
accumulated.mean <- accumulated.mean + log(ref$mean) * n
accumulated.total <- accumulated.total + log(ref$total) * n
accumulated.trend <- accumulated.trend + log(ref$trend) * n
}
# Check combining statistics works correctly.
n <- length(block)
expect_equal(out$mean, exp(accumulated.mean/n))
expect_equal(out$total, exp(accumulated.total/n))
expect_equal(out$trend, exp(accumulated.trend/n))
expect_equal(out$ratio, out$total/out$trend)
all.p <- lapply(out$per.block, "[[", i="p.value")
expect_equal(out$p.value, do.call(combinePValues, all.p))
# Responds to choice of method with weighting.
out2 <- modelGeneCV2(dummy, block=block, method="z", equiweight=FALSE)
all.p <- lapply(out2$per.block, "[[", i="p.value")
w <- countMatches(names(all.p), block)
expect_equal(out2$p.value, do.call(combinePValues, c(all.p, list(method='z', weights=w))))
})
test_that("modelGeneCV2 handles blocks with no residual d.f.", {
out <- modelGeneCV2(dummy2, size.factors=rep(1, ncells), block=rep(1:2, c(1, ncells-1)))
ref <- modelGeneCV2(dummy2[,-1], size.factors=rep(1, ncells))
expect_equal(out$mean, ref$mean)
expect_equal(out$total, ref$total)
out <- modelGeneCV2(dummy, size.factors=rep(1, ncells), block=rep(1:3, c(1, 1, ncells-2)))
ref <- modelGeneCV2(dummy[,-c(1,2)], size.factors=rep(1, ncells))
expect_equal(out$mean, ref$mean)
expect_equal(out$total, ref$total)
expect_error(modelGeneCV2(dummy[,1,drop=FALSE]), "no residual d.f. in any level")
})
test_that("modelGeneCV2 works with subsetting options", {
chosen <- sample(ngenes, ngenes/2)
out <- modelGeneCV2(dummy, subset.row=chosen)
ref <- modelGeneCV2(dummy[chosen,])
expect_equal(out, ref)
# Subsetting by fit works.
out2 <- modelGeneCV2(dummy, subset.fit=chosen, size.factors=librarySizeFactors(dummy, chosen))
expect_identical(rownames(out2), rownames(dummy))
expect_equal(out2[chosen,1:5], ref[,1:5])
# Zero-length subsetting.
empty <- modelGeneCV2(dummy, subset.row=integer(0), subset.fit=chosen, size.factors=librarySizeFactors(dummy))
expect_identical(nrow(empty), 0L)
expect_error(modelGeneCV2(dummy, subset.fit=integer(0)), "need at least 2 points")
})
test_that("modelGeneCV2 works with SingleCellExperiment objects", {
X <- SingleCellExperiment(list(counts=dummy))
expect_equal(modelGeneCV2(X), modelGeneCV2(dummy))
sizeFactors(X) <- runif(ncol(X))
expect_equal(modelGeneCV2(X), modelGeneCV2(dummy, sizeFactors(X)))
X <- SingleCellExperiment(list(whee=dummy))
expect_equal(modelGeneCV2(X, assay.type="whee"), modelGeneCV2(dummy))
})
#######################################
#######################################
#######################################
set.seed(201001)
ncells <- 200
ngenes <- 1000
means <- 2^runif(ngenes, -1, 5)
dummy <- matrix(rnbinom(ngenes*ncells, mu=means, size=5), ncol=ncells, nrow=ngenes)
rownames(dummy) <- paste0("X", seq_len(ngenes))
nspikes <- 100
smeans <- 2^runif(nspikes, -1, 5)
spikes <- matrix(rnbinom(nspikes*ncells, mu=smeans, size=5), ncol=ncells, nrow=nspikes)
rownames(spikes) <- paste0("X", seq_len(nspikes))
normdummy <- scater::normalizeCounts(dummy, log=FALSE)
normspikes <- scater::normalizeCounts(spikes, log=FALSE)
test_that("modelGeneCV2WithSpikes works correctly in the basic case", {
out <- modelGeneCV2WithSpikes(dummy, spikes)
ref <- modelGeneCV2(normdummy)
expect_equal(out$mean, ref$mean)
expect_equal(out$total, ref$total)
expect_equal(metadata(out)$mean, rowMeans(normspikes))
expect_equal(metadata(out)$cv2, DelayedMatrixStats::rowVars(normspikes)/metadata(out)$mean^2)
fit <- fitTrendCV2(metadata(out)$mean, metadata(out)$cv2, ncells)
expect_identical(fit$std.dev, metadata(out)$std.dev)
expect_equal(out$trend, fit$trend(ref$mean))
expect_equal(out$ratio, out$total/out$trend)
})
test_that("modelGeneCV2WithSpikes works correctly with blocking", {
block <- sample(LETTERS[1:5], ncells, replace=TRUE)
out <- modelGeneCV2WithSpikes(dummy, spikes, block=block)
ref <- modelGeneCV2(normdummy, block=block)
expect_equal(out$mean, ref$mean)
expect_equal(out$total, ref$total)
accumulated.mean <- accumulated.total <- accumulated.trend <- 0
sf1 <- scater::librarySizeFactors(dummy)
sf2 <- scater::librarySizeFactors(spikes)
for (i in unique(block)) {
current <- i==block
# Forcibly avoid auto-centering of size.factors, to use as a reference here.
ssf1 <- sf1[current]
ssf2 <- sf2[current]
ssf2 <- ssf2/mean(ssf2) * mean(ssf1)
ref <- modelGeneCV2WithSpikes(t(t(dummy[,current])/ssf1),
size.factors=rep(1, sum(current)),
spikes=t(t(spikes[,current])/ssf2),
spike.size.factors=rep(1, sum(current)))
subout <- out$per.block[[i]]
expect_equal(ref$mean, subout$mean)
expect_equal(ref$total, subout$total)
expect_equal(ref$trend, subout$trend)
expect_equal(ref$ratio, subout$ratio)
expect_equal(ref$p.value, subout$p.value)
accumulated.mean <- accumulated.mean + log(ref$mean)
accumulated.total <- accumulated.total + log(ref$total)
accumulated.trend <- accumulated.trend + log(ref$trend)
}
# Check combining statistics works correctly.
n <- length(unique(block))
expect_equal(out$mean, exp(accumulated.mean/n))
expect_equal(out$total, exp(accumulated.total/n))
expect_equal(out$trend, exp(accumulated.trend/n))
expect_equal(out$ratio, out$total/out$trend)
all.p <- lapply(out$per.block, "[[", i="p.value")
expect_equal(out$p.value, do.call(combinePValues, all.p))
})
test_that("modelGeneCV2WithSpikes centers size factors correctly", {
# Without blocking.
sf1 <- 2^rnorm(ncells, 0.05)
sf2 <- 2^rnorm(ncells, 0.05)
out <- modelGeneCV2WithSpikes(dummy, size.factors=sf1, spikes=spikes, spike.size.factors=sf2)
msf1 <- sf1/mean(sf1)
msf2 <- sf2/mean(sf2)
ref <- modelGeneCV2WithSpikes(t(t(dummy)/msf1), size.factors=rep(1, ncells),
spikes=t(t(spikes)/msf2), spike.size.factors=rep(1, ncells))
expect_equal(ref$mean, out$mean)
expect_equal(ref$total, out$total)
expect_equal(ref$trend, out$trend)
expect_equal(ref$ratio, out$ratio)
expect_equal(ref$p.value, out$p.value)
# With blocking.
block <- sample(LETTERS[1:5], ncells, replace=TRUE)
out <- modelGeneCV2WithSpikes(dummy, size.factors=sf1, spikes=spikes, spike.size.factors=sf2, block=block)
for (i in unique(block)) {
current <- i==block
ssf1 <- msf1[current]
ssf2 <- msf2[current]
ssf2 <- ssf2/mean(ssf2) * mean(ssf1)
ref <- modelGeneCV2WithSpikes(t(t(dummy[,current])/ssf1),
size.factors=rep(1, sum(current)),
spikes=t(t(spikes[,current])/ssf2),
spike.size.factors=rep(1, sum(current)))
subout <- out$per.block[[i]]
expect_equal(ref$mean, subout$mean)
expect_equal(ref$total, subout$total)
expect_equal(ref$trend, subout$trend)
expect_equal(ref$ratio, subout$ratio)
expect_equal(ref$p.value, subout$p.value)
}
})
test_that("modelGeneCV2 works with SingleCellExperiment objects", {
X <- SingleCellExperiment(list(counts=dummy))
altExp(X, "spikes") <- SingleCellExperiment(list(counts=spikes))
expect_equal(modelGeneCV2WithSpikes(X, spikes="spikes"), modelGeneCV2WithSpikes(dummy, spikes))
X <- SingleCellExperiment(list(whee=dummy))
altExp(X, "spikes") <- SingleCellExperiment(list(whee=spikes))
expect_equal(modelGeneCV2WithSpikes(X, "spikes", assay.type="whee"), modelGeneCV2WithSpikes(dummy, spikes))
X <- SingleCellExperiment(list(whee=dummy))
sizeFactors(X) <- sf1 <- 2^rnorm(ncells, 0.1)
altExp(X, "spikes") <- SingleCellExperiment(list(whee=spikes))
sizeFactors(altExp(X)) <- sf2 <- 2^rnorm(ncells, 0.1)
expect_equal(modelGeneCV2WithSpikes(X, "spikes", assay.type="whee"), modelGeneCV2WithSpikes(dummy, size.factors=sf1, spikes, spike.size.factors=sf2))
})
|
## call from main.R
make_AGP_sub <- function(n) {
  # Build a simulator model object that, for each of nsim simulations, draws a
  # random subsample of n observations from the AGP test OTU table.
  # Relies on globals defined elsewhere (presumably main.R): otu_train,
  # phy_test, otu_test -- TODO confirm they are in scope at call time.
  #
  # n: number of samples to draw per simulated dataset.
  new_model(name = "AGP",
            label = sprintf("n = %s", n),
            params = list(n = n),
            simulate = function(n, nsim) {
              if (n > nrow(otu_train)) stop('max sample size exceeded')
              # NOTE(review): removed the unused local 'pcoef'
              # (n/sum(runtab[test])) -- it was computed but never used.
              # seq_len() avoids the 1:0 trap when nsim == 0.
              lapply(seq_len(nsim), function(i) {
                ## subset equal proportions of each dataset
                subsamp <- sample(seq_len(nsamples(phy_test)), n)
                list(X = otu_test[subsamp, ])
              })
            }
  )
}
| /public/qiita/AGP/simulator/model_functions.R | no_license | zdk123/SpiecEasiSLR_manuscript | R | false | false | 557 | r | ## call from main.R
make_AGP_sub <- function(n) {
new_model(name = "AGP",
label = sprintf("n = %s", n),
params = list(n=n),
simulate = function(n, nsim) {
if (n>nrow(otu_train)) stop('max sample size exceeded')
pcoef <- n/sum(runtab[test])
lapply(1:nsim, function(i) {
## subset equal proportions of each dataset
subsamp <- sample(1:nsamples(phy_test), n)
list(X=otu_test[subsamp,])
})
}
)
}
|
library(rstan)
library(dplyr)
library(survival)
library(splines2)
library(statmod)
library(abind)
library(caret)
# create folder for fits
if (!dir.exists('fits')) {dir.create('fits')}
### setup data
mice <- read.csv('datasets/schultz_data.csv', header = TRUE, sep = ",")
mice.surv <- read.csv('datasets/schultz_surv_data.csv', header = TRUE, sep = ",")
mice$sex <- as.factor(mice$sex)
mice$real.age <- mice$time + mice$baseline.age
mice$prepair <- (mice$repair/mice$n)/mice$delta.t
mice$pdamage <- (mice$damage/(mice$N - mice$n))/mice$delta.t
mice <- na.omit(mice)
mice.surv <- na.omit(mice.surv)
mice$event.time <- mice$death.age - mice$baseline.age
mice.surv$event.time <- mice.surv$death.age - mice.surv$baseline.age
mice.surv <- mice.surv[mice.surv$mouse %in% unique(mice$mouse),]
# Build counting-process (tstart, tstop] survival data: first attach the death
# event, then merge in the time-dependent covariates f, n and real.age.
mice.surv2 <- tmerge(mice.surv, mice.surv, id=mouse, endpt = event(event.time, status))
mice.surv.long <- tmerge(mice.surv2, mice, id=mouse, f = tdc(time, f), n = tdc(time, n),
                         real.age = tdc(time, real.age))
# NOTE(review): removed three self-assignments (tstart, tstop, event.time)
# that were no-ops.
mice.surv.long$sex <- as.factor(mice.surv.long$sex)
### create stan input
mice.X <- mice[,c('time', 'f', 'baseline.age')]
mice.Z <- mice[,c('time', 'f')]
mice.Z$intercept <- 1
mice.Z <- mice.Z[,c('intercept', 'time')]
# scale
age.mean <- mean(mice.X$baseline.age)
age.sd <- sd(mice.X$baseline.age)
time.mean <- mean(mice.X$time)
time.sd <- sd(mice.X$time)
f.mean <- mean(mice.X$f)
f.sd <- sd(mice.X$f)
std_scale <- function(x, m, s) return ((x-m)/s)
inv_scale <- function(x, m, s) return (s*x + m)
# standardize
normMice <- preProcess(mice.X)
mice.X <- predict(normMice, mice.X)
mice.Z$time <- (mice.Z$time - (normMice$mean)['time'])/(normMice$std)['time']
mice.repair <- mice[,'repair']
mice.damage <- mice[,'damage']
mice.numdef <- mice[,'n']
mice.offsets <- mice[,c('n', 'N', 'delta.t')]
mice.index <- as.integer(as.numeric(factor(mice$mouse)))
mice.surv.X <- mice.surv.long[,c('f',
'baseline.age', 'tstart', 'tstop')]
mice.start.age <- mice.surv.long$tstart
mice.death.age <- mice.surv.long$tstop
mice.status <- as.numeric(mice.surv.long$endpt)
dt <- mice.surv.X$tstop - mice.surv.X$tstart
mice.surv.X <- mice.surv.X[,c('f', 'baseline.age')]
normSurvMice <- preProcess(mice.surv.X)
mice.surv.X.normed <- predict(normSurvMice, mice.surv.X)
mice.surv.X <- mice.surv.X.normed
# create splines
mice.start.age <- std_scale(mice.start.age, time.mean, time.sd)
mice.death.age <- std_scale(mice.death.age, time.mean, time.sd)
num_knots <- 15 # number of knots for fitting
spline_degree <- 3
knots <- unname(quantile(mice.death.age, probs=seq(from=0.05, to=0.95, length.out = num_knots)))
boundary.knots <- c(min(mice.start.age), max(mice.death.age))
msMat <- mSpline(mice.death.age, knots = knots, degree = spline_degree, Boundary.knots = boundary.knots, intercept = FALSE)
num_basis <- dim(msMat)[2]
# Gauss-Legendre quadrature (5 nodes) used to numerically integrate the
# baseline hazard spline over each (tstart, tstop] interval.
quad <- gauss.quad(5, kind = "legendre", alpha = 0, beta = 0)
quad.weights <- quad$weights
quad.nodes <- quad$nodes
# Evaluate the M-spline basis at the quadrature nodes of every interval.
# Preallocate the list instead of growing it inside the loop; seq_len()
# guards against the 1:0 trap on empty input.
quadMat <- vector("list", nrow(msMat))
for (i in seq_len(nrow(msMat))) {
  # Affine map of the nodes from [-1, 1] onto [tstart_i, tstop_i].
  delta <- (mice.death.age[i] - mice.start.age[i]) / 2
  average <- (mice.death.age[i] + mice.start.age[i]) / 2
  x <- delta * quad.nodes + average
  quadMat[[i]] <- mSpline(x, knots = knots, degree = spline_degree, Boundary.knots = boundary.knots, intercept = FALSE)
}
# Bind into an (intervals x nodes x basis) array for Stan.
quadMat <- abind(quadMat, along = 0)
# compute size of each mouse time-series
index.size <- rep(0, length(unique(mice$mouse)))
for(i in 1:length(unique(mice$mouse))) {
index.size[i] <- length(mice.index[mice.index == i])
}
data <- list(m = length(unique(mice.index)),
n = length(mice$repair),
p = length(names(mice.X)),
ps = length(names(mice.surv.X)),
q = length(names(mice.Z)),
N = 116,
X = mice.X,
surv_X = mice.surv.X,
Z = mice.Z,
index = mice.index,
index_size = index.size,
offsets = mice.offsets,
repair = mice.repair,
damage = mice.damage,
Tstart = mice.start.age,
T = mice.death.age,
status = mice.status,
num_basis = num_basis,
num_quad = 5,
quad_splines = quadMat,
quad_nodes = quad.nodes,
quad_weights = quad.weights,
msplines = msMat)
rstan_options(auto_write = TRUE)
fit <- stan(file = 'models/joint_long.stan', data = data, chains=4, iter = 10000, cores = 4, verbose=TRUE, warmup=4000)
saveRDS(fit, 'fits/mouse_3.rds')
| /fit_mouse_3.r | permissive | Spencerfar/aging-damagerepair | R | false | false | 4,688 | r | library(rstan)
library(dplyr)
library(survival)
library(splines2)
library(statmod)
library(abind)
library(caret)
# create folder for fits
if (!dir.exists('fits')) {dir.create('fits')}
### setup data
mice <- read.csv('datasets/schultz_data.csv', header = TRUE, sep = ",")
mice.surv <- read.csv('datasets/schultz_surv_data.csv', header = TRUE, sep = ",")
mice$sex <- as.factor(mice$sex)
mice$real.age <- mice$time + mice$baseline.age
mice$prepair <- (mice$repair/mice$n)/mice$delta.t
mice$pdamage <- (mice$damage/(mice$N - mice$n))/mice$delta.t
mice <- na.omit(mice)
mice.surv <- na.omit(mice.surv)
mice$event.time <- mice$death.age - mice$baseline.age
mice.surv$event.time <- mice.surv$death.age - mice.surv$baseline.age
mice.surv <- mice.surv[mice.surv$mouse %in% unique(mice$mouse),]
# Build counting-process (tstart, tstop] survival data: first attach the death
# event, then merge in the time-dependent covariates f, n and real.age.
mice.surv2 <- tmerge(mice.surv, mice.surv, id=mouse, endpt = event(event.time, status))
mice.surv.long <- tmerge(mice.surv2, mice, id=mouse, f = tdc(time, f), n = tdc(time, n),
                         real.age = tdc(time, real.age))
# NOTE(review): removed three self-assignments (tstart, tstop, event.time)
# that were no-ops.
mice.surv.long$sex <- as.factor(mice.surv.long$sex)
### create stan input
mice.X <- mice[,c('time', 'f', 'baseline.age')]
mice.Z <- mice[,c('time', 'f')]
mice.Z$intercept <- 1
mice.Z <- mice.Z[,c('intercept', 'time')]
# scale
age.mean <- mean(mice.X$baseline.age)
age.sd <- sd(mice.X$baseline.age)
time.mean <- mean(mice.X$time)
time.sd <- sd(mice.X$time)
f.mean <- mean(mice.X$f)
f.sd <- sd(mice.X$f)
std_scale <- function(x, m, s) return ((x-m)/s)
inv_scale <- function(x, m, s) return (s*x + m)
# standardize
normMice <- preProcess(mice.X)
mice.X <- predict(normMice, mice.X)
mice.Z$time <- (mice.Z$time - (normMice$mean)['time'])/(normMice$std)['time']
mice.repair <- mice[,'repair']
mice.damage <- mice[,'damage']
mice.numdef <- mice[,'n']
mice.offsets <- mice[,c('n', 'N', 'delta.t')]
mice.index <- as.integer(as.numeric(factor(mice$mouse)))
mice.surv.X <- mice.surv.long[,c('f',
'baseline.age', 'tstart', 'tstop')]
mice.start.age <- mice.surv.long$tstart
mice.death.age <- mice.surv.long$tstop
mice.status <- as.numeric(mice.surv.long$endpt)
dt <- mice.surv.X$tstop - mice.surv.X$tstart
mice.surv.X <- mice.surv.X[,c('f', 'baseline.age')]
normSurvMice <- preProcess(mice.surv.X)
mice.surv.X.normed <- predict(normSurvMice, mice.surv.X)
mice.surv.X <- mice.surv.X.normed
# create splines
mice.start.age <- std_scale(mice.start.age, time.mean, time.sd)
mice.death.age <- std_scale(mice.death.age, time.mean, time.sd)
num_knots <- 15 # number of knots for fitting
spline_degree <- 3
knots <- unname(quantile(mice.death.age, probs=seq(from=0.05, to=0.95, length.out = num_knots)))
boundary.knots <- c(min(mice.start.age), max(mice.death.age))
msMat <- mSpline(mice.death.age, knots = knots, degree = spline_degree, Boundary.knots = boundary.knots, intercept = FALSE)
num_basis <- dim(msMat)[2]
# Gauss-Legendre quadrature (5 nodes) used to numerically integrate the
# baseline hazard spline over each (tstart, tstop] interval.
quad <- gauss.quad(5, kind = "legendre", alpha = 0, beta = 0)
quad.weights <- quad$weights
quad.nodes <- quad$nodes
# Evaluate the M-spline basis at the quadrature nodes of every interval.
# Preallocate the list instead of growing it inside the loop; seq_len()
# guards against the 1:0 trap on empty input.
quadMat <- vector("list", nrow(msMat))
for (i in seq_len(nrow(msMat))) {
  # Affine map of the nodes from [-1, 1] onto [tstart_i, tstop_i].
  delta <- (mice.death.age[i] - mice.start.age[i]) / 2
  average <- (mice.death.age[i] + mice.start.age[i]) / 2
  x <- delta * quad.nodes + average
  quadMat[[i]] <- mSpline(x, knots = knots, degree = spline_degree, Boundary.knots = boundary.knots, intercept = FALSE)
}
# Bind into an (intervals x nodes x basis) array for Stan.
quadMat <- abind(quadMat, along = 0)
# compute size of each mouse time-series
index.size <- rep(0, length(unique(mice$mouse)))
for(i in 1:length(unique(mice$mouse))) {
index.size[i] <- length(mice.index[mice.index == i])
}
data <- list(m = length(unique(mice.index)),
n = length(mice$repair),
p = length(names(mice.X)),
ps = length(names(mice.surv.X)),
q = length(names(mice.Z)),
N = 116,
X = mice.X,
surv_X = mice.surv.X,
Z = mice.Z,
index = mice.index,
index_size = index.size,
offsets = mice.offsets,
repair = mice.repair,
damage = mice.damage,
Tstart = mice.start.age,
T = mice.death.age,
status = mice.status,
num_basis = num_basis,
num_quad = 5,
quad_splines = quadMat,
quad_nodes = quad.nodes,
quad_weights = quad.weights,
msplines = msMat)
rstan_options(auto_write = TRUE)
fit <- stan(file = 'models/joint_long.stan', data = data, chains=4, iter = 10000, cores = 4, verbose=TRUE, warmup=4000)
saveRDS(fit, 'fits/mouse_3.rds')
|
#' Plots for Top Variant and Region Inclusions
#' @description This function allows the user to create image plots of the top variants and top Regions
#' (any user specified set of variants such as pathways or genes) included in the top models.
#' Variants and Regions are ordered based on marginal BF and regional BF which are plotted on the right axis.
#' The width of the inclusion blocks is proportional to the posterior probability of each model that includes the variant or region.
#'
#' @param x an object of class 'summary.bvs'
#' @param type specifies whether to plot the top variants ("s") or the top regions ("r")
#' @param num_models the number of top models to place on the x-axis
#' @param num_snps if type = "s", the number of the top variants to place on the y-axis
#' @param num_regions if type = "r", the number of the top regions to place on the y-axis
#' @param plot_coef only used for rare variant analysis when rare = TRUE and there are not
#' multiple regions. If plot_coef = TRUE, the log(OR) of the risk index for the top models is plotted on the x-axis
#' @param true_coef (optional) vector of the true odds ratios of each of the variants to plot on the
#' y-axis (i.e. if results are from a simulation)
#' @param regions (optional) string vector with the region name for each of the variants. By default, region
#' names are used from the 'summary.bvs' x. Using this argument will overwrite the names in the "summary.bvs" x.
#' @param prop_cases (optional) \eqn{p x 2} matrix giving the number of cases that have the
#' variant in column 1 and the number of controls with the variant in column 2.
#' If specified, these counts will be reported on the right axis under each variants marginal BF.
#' @param main optional string variable giving the title of the plot
#' @param ... additional arguments as required by plot S3 x
#' @importFrom graphics abline axis image par
#' @export
plot.summary.bvs <- function(x,
                             type = c("s", "r"),
                             num_models = 100,
                             num_snps = 20,
                             num_regions = 20,
                             plot_coef = FALSE,
                             true_coef = NULL,
                             regions = NULL,
                             prop_cases=NULL,
                             main = NULL, ...) {

    # check type
    type <- match.arg(type)
    if (type == "r" && is.null(x$region_level$active_region)) {
        stop("Error: type = 'r', but active_region = NULL in bvs.summary x")
    }

    active <- x$model_level$active_mat
    post_prob <- x$model_level$post_prob
    model_id <- x$model_level$model_id

    # Drop the null model (row with no variants included), if present.
    # BUG FIX: when no null model exists, which() returns integer(0) and
    # negative indexing with it would drop *all* rows, so guard on length.
    null_ind <- which(rowSums(active) == 0)
    if (length(null_ind) > 0) {
        model_id <- model_id[-null_ind]
        active <- active[-null_ind, , drop = FALSE]
        post_prob <- post_prob[-null_ind]
    }

    num_snps <- min(ncol(active), num_snps)
    num_models <- min(nrow(active) - 1, num_models)

    # Rank models by posterior probability; keep IDs for the top num_models.
    model_order <- order(post_prob, decreasing = TRUE)
    model_id <- model_id[model_order][seq_len(num_models)]
    post_prob <- post_prob[model_order]
    active <- active[model_order, , drop = FALSE]

    if (!is.null(x$region_level$active_region)) {
        active_region <- x$region_level$active_region
        if (length(null_ind) > 0) {
            active_region <- active_region[-null_ind, , drop = FALSE]
        }
        active_region <- active_region[model_order, , drop = FALSE]
        regionnames <- colnames(active_region)
        if (!is.null(regions)) {
            warning("Note: Overwriting regions in bvs.summary x with values provided for regions = argument.")
        } else {
            regions <- x$model_info$regions
        }
    }

    if (plot_coef) {
        # BUG FIX: the original tested ncol(coef) before 'coef' was assigned,
        # which picked up stats::coef (a function) and errored at runtime.
        # Extract the coefficient matrix from the summary object first.
        coef_mat <- x$model_level$coef
        if (ncol(coef_mat) != 1) {
            warning("Note: Coercing plot_coef to FALSE because ncol(coef) > 1.
            To use plot_coef, check that your model has rare = TRUE and does not have multiple regions.")
            plot_coef <- FALSE
        } else {
            if (length(null_ind) > 0) {
                # BUG FIX: drop = FALSE keeps the single column as a matrix so
                # the subsequent row-reordering with [model_order, ] is valid.
                coef_mat <- coef_mat[-null_ind, , drop = FALSE]
            }
            coef <- drop(coef_mat[model_order, , drop = FALSE])
        }
    }

    # create title, column / row labels, and matrix with color values
    if (type == "s") {
        nvar <- num_snps
        bf <- x$marg_bf
        order_top <- order(bf, decreasing = TRUE)[seq_len(nvar)]
        bf <- bf[order_top]
        rownms <- paste(colnames(active)[order_top], regions[order_top], sep = "\n")
        color_matrix <- active[seq_len(num_models), order_top, drop = FALSE] + 2
        if (plot_coef) {
            # BUG FIX: the prop_cases warning was unreachable in the original
            # (it sat inside the !plot_coef branch); emit it here instead.
            if (!is.null(prop_cases)) {
                warning("prop_cases cannot be used when plot_coef == TRUE, only plotting log(OR)")
            }
            # Only advertise a true OR when one was supplied.
            if (!is.null(true_coef)) {
                prob_labels <- paste("Marg BF:", round(bf, 2), "\nTrue OR:", round(true_coef[order_top], 2), sep = "")
            } else {
                prob_labels <- paste("Marg BF:", round(bf, 2))
            }
        } else if (!is.null(prop_cases)) {
            prob_labels <- paste("Marg BF:", round(bf, 2), "\nCases: ", prop_cases[order_top, 1], " Controls: ", prop_cases[order_top, 2],sep = "")
        } else {
            prob_labels <- paste("Marg BF:", round(bf, 2))
        }
        if (is.null(main)) {
            main <- paste("SNP Inclusions of Top Models \nGlobal BF =", round(x$global_bf, 1))
        }
    } else {
        nvar <- min(length(regionnames), num_regions)
        bf_region <- x$region_level$region_bf
        order_top <- order(bf_region, decreasing = TRUE)[seq_len(nvar)]
        bf_region <- bf_region[order_top]
        rownms <- colnames(active_region)[order_top]
        active_region[active_region > 1] <- 1
        color_matrix <- active_region[seq_len(num_models), order_top, drop = FALSE] + 2
        if (!is.null(true_coef)) {
            prob_labels <- paste("Region BF:", round(bf_region, 2), " \nTrue OR:", round(true_coef[order_top], 2), sep = "")
        } else {
            prob_labels <- paste("Region BF:", round(bf_region, 2))
        }
        if (is.null(main)) {
            main <- paste("Region Inclusions of Top Models \nGlobal BF =", round(x$global_bf, 1))
        }
    }

    # create plot
    # BUG FIX: the original restored with par(mar = keep.mar), passing a
    # *list* to mar (an error), and never restored las/ps/font. on.exit()
    # restores all modified parameters even if plotting fails.
    old_par <- par(las = 1, mar = c(8, 12, 5, 12), ps = 10, font = 2)
    on.exit(par(old_par), add = TRUE)

    # x-axis positions: cumulative normalized posterior probability, so each
    # model's block width is proportional to its posterior probability.
    prob_axis <- post_prob[seq_len(num_models)] / sum(post_prob[seq_len(num_models)])
    prob_axis_cum <- cumsum(prob_axis)
    clr <- c("#FFFFFF", "#A020F0", "#0000CD")
    if (plot_coef) {
        xlab = "log(OR) (Models Ranked by Post. Prob)"
    } else {
        xlab = "Model ID (Models Ranked by Post. Prob)"
    }
    image(x = c(0, prob_axis_cum),
          y = 1:nvar,
          z = color_matrix,
          col = clr,
          xlab = xlab,
          ylab = "",
          xaxt = "n",
          yaxt ="n",
          xlim = c(0, 1),
          main = main)
    abline(v = prob_axis_cum[prob_axis > 0.01], col = "white")
    # Tick positions: midpoints of each model's block.
    xat <- (prob_axis_cum + c(0, prob_axis_cum[-num_models])) / 2
    if (plot_coef) {
        # BUG FIX: 'coef' covers all retained models and can be longer than
        # num_models; axis() requires 'at' and 'labels' of equal length.
        beta.labels <- round(coef[seq_len(num_models)], 1)
        if (num_models > 5) {
            beta.labels[6:num_models] <- NA
        }
        axis(1, at = xat, labels = beta.labels)
    } else {
        axis(1, at = xat, labels = model_id)
    }
    axis(2, at = 1:nvar, labels = rownms)
    axis(4, at = 1:nvar, labels = prob_labels)
}
| /R/bvs_plot.R | no_license | USCbiostats/bvs | R | false | false | 6,987 | r | #' Plots for Top Variant and Region Inclusions
#' @description This function allows the user to create image plots of the top variants and top Regions
#' (any user specified set of variants such as pathways or genes) included in the top models.
#' Variants and Regions are ordered based on marginal BF and regional BF which are plotted on the right axis.
#' The width of the inclusion blocks is proportional to the posterior probability of each model that includes the variant or region.
#'
#' @param x an object of class 'summary.bvs'
#' @param type specifies whether to plot the top variants ("s") or the top regions ("r")
#' @param num_models the number of top models to place on the x-axis
#' @param num_snps if type = "s", the number of the top variants to place on the y-axis
#' @param num_regions if type = "r", the number of the top regions to place on the y-axis
#' @param plot_coef only used for rare variant analysis when rare = TRUE and there are not
#' multiple regions. If plot_coef = TRUE, the log(OR) of the risk index for the top models is plotted on the x-axis
#' @param true_coef (optional) vector of the true odds ratios of each of the variants to plot on the
#' y-axis (i.e. if results are from a simulation)
#' @param regions (optional) string vector with the region name for each of the variants. By default, region
#' names are used from the 'summary.bvs' x. Using this argument will overwrite the names in the "summary.bvs" x.
#' @param prop_cases (optional) \eqn{p x 2} matrix giving the number of cases that have the
#' variant in column 1 and the number of controls with the variant in column 2.
#' If specified, these counts will be reported on the right axis under each variants marginal BF.
#' @param main optional string variable giving the title of the plot
#' @param ... additional arguments as required by plot S3 x
#' @importFrom graphics abline axis image par
#' @export
plot.summary.bvs <- function(x,
                             type = c("s", "r"),
                             num_models = 100,
                             num_snps = 20,
                             num_regions = 20,
                             plot_coef = FALSE,
                             true_coef = NULL,
                             regions = NULL,
                             prop_cases=NULL,
                             main = NULL, ...) {

    # Validate the plot type; region plots require region-level results.
    type <- match.arg(type)
    if (type == "r" && is.null(x$region_level$active_region)) {
        stop("Error: type = 'r', but active_region = NULL in bvs.summary x")
    }

    # Drop the null model (no variants included) before ranking models.
    active <- x$model_level$active_mat
    post_prob <- x$model_level$post_prob
    null_ind <- which(rowSums(active) == 0)
    null_post <- post_prob[null_ind]
    model_id <- x$model_level$model_id[-null_ind]
    active <- active[-null_ind, , drop = FALSE]
    post_prob <- post_prob[-null_ind]

    # Cap requested counts at what the data actually contains.
    num_snps <- min(ncol(active), num_snps)
    num_models <- min(nrow(active) - 1, num_models)

    # Rank the remaining models by posterior probability (highest first).
    model_order <- order(post_prob, decreasing = TRUE)
    model_id <- model_id[model_order][1:num_models]
    post_prob <- post_prob[model_order]
    active <- active[model_order, , drop = FALSE]

    if (!is.null(x$region_level$active_region)) {
        active_region <- x$region_level$active_region[-null_ind, ][model_order, , drop = FALSE]
        regionnames <- colnames(active_region)
        if (!is.null(regions)) {
            warning("Note: Overwriting regions in bvs.summary x with values provided for regions = argument.")
        } else {
            regions <- x$model_info$regions
        }
    }

    if (plot_coef) {
        # BUG FIX: the original tested ncol(coef) before `coef` was assigned,
        # so the name resolved to stats::coef (a function) and the check
        # errored whenever plot_coef = TRUE. Test the stored coefficient
        # matrix on the summary object instead.
        if (ncol(x$model_level$coef) != 1) {
            warning("Note: Coercing plot_coef to FALSE because ncol(coef) > 1.
             To use plot_coef, check that your model has rare = TRUE and does not have multiple regions.")
            plot_coef <- FALSE
        } else {
            coef <- drop(x$model_level$coef[-null_ind, ][model_order, ])
        }
    }

    # create title, column / row labels, and matrix with color values
    if (type == "s") {
        nvar <- num_snps
        bf <- x$marg_bf
        order_top <- order(bf, decreasing = TRUE)[1:nvar]
        bf <- bf[order_top]
        rownms <- paste(colnames(active)[order_top], regions[order_top], sep = "\n")
        color_matrix <- active[1:num_models, order_top, drop = FALSE] + 2
        if (plot_coef) {
            # BUG FIX: this warning was previously nested inside the
            # `else if (!is.null(prop_cases))` branch, where plot_coef is
            # always FALSE, making it unreachable. Warn here instead when
            # both options are supplied.
            if (!is.null(prop_cases)) {
                warning("prop_cases cannot be used when plot_coef == TRUE, only plotting log(OR)")
            }
            prob_labels <- paste("Marg BF:", round(bf, 2), "\nTrue OR:", round(true_coef[order_top], 2), sep = "")
        } else if (!is.null(prop_cases)) {
            prob_labels <- paste("Marg BF:", round(bf, 2), "\nCases: ", prop_cases[order_top, 1], " Controls: ", prop_cases[order_top, 2],sep = "")
        } else {
            prob_labels <- paste("Marg BF:", round(bf, 2))
        }
        if (is.null(main)) {
            main <- paste("SNP Inclusions of Top Models \nGlobal BF =", round(x$global_bf, 1))
        }
    } else {
        nvar <- min(length(regionnames), num_regions)
        bf_region <- x$region_level$region_bf
        order_top <- order(bf_region, decreasing = TRUE)[1:nvar]
        bf_region <- bf_region[order_top]
        rownms <- colnames(active_region)[order_top]
        # Collapse counts > 1 so the color matrix is binary (in / out).
        active_region[active_region > 1] <- 1
        color_matrix <- active_region[1:num_models, order_top] + 2
        if (!is.null(true_coef)) {
            prob_labels <- paste("Region BF:", round(bf_region, 2), " \nTrue OR:", round(true_coef[order_top], 2), sep = "")
        } else {
            prob_labels <- paste("Region BF:", round(bf_region, 2))
        }
        if (is.null(main)) {
            main <- paste("Region Inclusions of Top Models \nGlobal BF =", round(x$global_bf, 1))
        }
    }

    # create plot; remember the incoming margins so they can be restored.
    keep.mar <- par(mar = c(5, 6, 4, 2) + 0.1)
    par(las = 1, mar = c(8, 12, 5, 12), ps = 10, font = 2)
    # Column widths proportional to each model's (renormalized) posterior.
    prob_axis <- post_prob[1:num_models] / sum(post_prob[1:num_models])
    prob_axis_cum <- cumsum(prob_axis)
    clr <- c("#FFFFFF", "#A020F0", "#0000CD")
    if (plot_coef) {
        xlab = "log(OR) (Models Ranked by Post. Prob)"
    } else {
        xlab = "Model ID (Models Ranked by Post. Prob)"
    }
    image(x = c(0, prob_axis_cum),
          y = 1:nvar,
          z = color_matrix,
          col = clr,
          xlab = xlab,
          ylab = "",
          xaxt = "n",
          yaxt ="n",
          xlim = c(0, 1),
          main = main)
    # Separate visually-wide models with white divider lines.
    abline(v = prob_axis_cum[prob_axis > 0.01], col = "white")
    # Tick positions at the midpoint of each model's block.
    xat <- (prob_axis_cum + c(0, prob_axis_cum[-num_models])) / 2
    if (plot_coef) {
        beta.labels <- round(coef, 1)
        if (num_models > 5) {
            beta.labels[6:num_models] <- NA
        }
        axis(1, at = xat,labels = beta.labels)
    } else {
        axis(1, at = xat, labels = model_id)
    }
    axis(2, at = 1:nvar, labels = rownms)
    axis(4, at = 1:nvar, labels = prob_labels)
    # BUG FIX: par(mar = keep.mar) assigned a named list to `mar`;
    # par(keep.mar) is the correct way to restore saved parameters.
    par(keep.mar)
}
|
## Plot 4: total US coal-related PM2.5 emissions by source type and year.
## Assumes NEI (emission records) and SCC (source classification table) are
## already loaded and ggplot2 is attached -- TODO confirm against full script.
names(SCC)
NEI$type
# Select source classification codes whose short name mentions "coal"
# (case-insensitive match).
SCCcoal <- SCC[grepl("coal", SCC$Short.Name, ignore.case = T),]
# Keep only the emission records tied to those coal-related codes.
NEIcoal <- NEI[NEI$SCC %in% SCCcoal$SCC,]
# Total emissions per year and source type.
totalCoal <- aggregate(Emissions ~ year + type, NEIcoal, sum)
# Render the line/point plot to a 480x480 PNG device.
png("plot4.png", width = 480, height = 480)
ggplot(totalCoal, aes(year, Emissions, col = type)) +
    geom_line() +
    geom_point() +
    ggtitle(expression("Total US" ~ PM[2.5] ~ "Coal Emission by Type and Year")) +
    xlab("Year") +
    ylab(expression("US " ~ PM[2.5] ~ "Coal Emission")) +
    scale_colour_discrete(name = "Type of sources") +
    theme(legend.title = element_text(face = "bold"))
dev.off() | /plot4.r | no_license | pengguo-01/EPA-National-Emission-Inventory | R | false | false | 599 | r | ##
# Duplicate copy of the plot4 script (dataset text column).
# Assumes NEI and SCC are loaded and ggplot2 is attached -- TODO confirm.
names(SCC)
NEI$type
# Coal-related source classification codes (case-insensitive name match).
SCCcoal <- SCC[grepl("coal", SCC$Short.Name, ignore.case = T),]
# Emission records restricted to those coal-related codes.
NEIcoal <- NEI[NEI$SCC %in% SCCcoal$SCC,]
# Total emissions per year and source type.
totalCoal <- aggregate(Emissions ~ year + type, NEIcoal, sum)
png("plot4.png", width = 480, height = 480)
ggplot(totalCoal, aes(year, Emissions, col = type)) +
    geom_line() +
    geom_point() +
    ggtitle(expression("Total US" ~ PM[2.5] ~ "Coal Emission by Type and Year")) +
    xlab("Year") +
    ylab(expression("US " ~ PM[2.5] ~ "Coal Emission")) +
    scale_colour_discrete(name = "Type of sources") +
    theme(legend.title = element_text(face = "bold"))
dev.off() |
## Finding the inverse of a square matrix is very expensive.
## If the matrix inverse has been already computed, it can be cached
## instead of being computed repeatedly. The following functions achieve this
## Create a matrix which can cache its inverse
# Build a cache-aware wrapper around a matrix.
#
# Returns a list of four closures sharing one environment:
#   set(y)            -- replace the stored matrix and drop any cached inverse
#   get()             -- return the stored matrix
#   setInverse(inverse) -- store a computed inverse in the cache
#   getInverse()      -- return the cached inverse, or NULL if not yet set
makeCacheMatrix <- function(x = matrix()) {
  cached_inv <- NULL
  set <- function(y) {
    # A new matrix invalidates whatever inverse was cached.
    x <<- y
    cached_inv <<- NULL
  }
  get <- function() {
    x
  }
  setInverse <- function(inverse) {
    cached_inv <<- inverse
  }
  getInverse <- function() {
    cached_inv
  }
  list(
    set = set,
    get = get,
    setInverse = setInverse,
    getInverse = getInverse
  )
}
## Return a matrix that is the inverse of matrix 'x'
## If the inverse has already been computed, return the cached inverse
# Return the inverse of a cache-aware matrix created by makeCacheMatrix.
# Serves the memoized inverse when available; otherwise computes it with
# solve() (forwarding `...`), stores it in the cache, and returns it.
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  if (!is.null(cached)) {
    # Cache hit: skip the expensive solve() call entirely.
    message("Now, getting cached data ...")
    return(cached)
  }
  # Cache miss: invert, memoize, return.
  message("This is the first computation.")
  inv <- solve(x$get(), ...)
  x$setInverse(inv)
  inv
}
## To test use the following commands
## A <- matrix(c(1, 2, -3, -1), nrow=2, ncol=2)
## m <- makeCacheMatrix(A)
## cacheSolve(m)
| /cachematrix.R | no_license | git-skc/ProgrammingAssignment2 | R | false | false | 1,130 | r | ## Finding the inverse of a square matrix is very expensive.
## If the matrix inverse has been already computed, it can be cached
## instead of being computed repeatedly. The following functions achieve this
## Create a matrix which can cache its inverse
# Create a "matrix" object that can cache its inverse.
# `x` holds the matrix and `m` holds the cached inverse (NULL = not computed);
# the returned closures all share this function's environment.
makeCacheMatrix <- function(x = matrix()) {
        # m caches the inverse of x; NULL means "not yet computed"
        m <- NULL
        set <- function(y) {
                x <<- y
                # invalidate the cached inverse whenever the matrix changes
                m <<- NULL
        }
        get <- function() x
        setInverse <- function(inverse) m <<- inverse
        getInverse <- function() m
        # expose the four closures as a named list
        list(set = set, get = get,
             setInverse = setInverse,
             getInverse = getInverse)
}
## Return a matrix that is the inverse of matrix 'x'
## If the inverse has already been computed, return the cached inverse
# Return the inverse of the special "matrix" created by makeCacheMatrix.
# Computes the inverse with solve() on the first call (forwarding `...`)
# and serves the cached result on every subsequent call.
cacheSolve <- function(x, ...) {
        m <- x$getInverse()
        if(!is.null(m)) {
                # cache hit: skip the expensive solve()
                message("Now, getting cached data ...")
                return(m)
        }
        message("This is the first computation.")
        data <- x$get()
        m <- solve(data, ...)
        # memoize the inverse for future calls
        x$setInverse(m)
        m
}
## To test use the following commands
## A <- matrix(c(1, 2, -3, -1), nrow=2, ncol=2)
## m <- makeCacheMatrix(A)
## cacheSolve(m)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/smcfcs.r
\docType{data}
\name{ex_logisticquad}
\alias{ex_logisticquad}
\title{Simulated example data with binary outcome and quadratic covariate effects}
\format{A data frame with 1000 rows and 5 variables:
\describe{
\item{y}{Binary outcome}
\item{z}{Fully observed covariate, with linear effect on outcome (on log odds scale)}
\item{x}{Partially observed normally distributed covariate, with quadratic effect on outcome (on log odds scale)}
\item{xsq}{The square of x, which thus has missing values also}
\item{v}{An auxiliary variable (i.e. not contained in the substantive model)}
}}
\usage{
ex_logisticquad
}
\description{
A dataset containing simulated data where the binary outcome depends quadratically
on a partially observed covariate.
}
\keyword{datasets}
| /man/ex_logisticquad.Rd | no_license | guhjy/smcfcs | R | false | true | 856 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/smcfcs.r
\docType{data}
\name{ex_logisticquad}
\alias{ex_logisticquad}
\title{Simulated example data with binary outcome and quadratic covariate effects}
\format{A data frame with 1000 rows and 5 variables:
\describe{
\item{y}{Binary outcome}
\item{z}{Fully observed covariate, with linear effect on outcome (on log odds scale)}
\item{x}{Partially observed normally distributed covariate, with quadratic effect on outcome (on log odds scale)}
\item{xsq}{The square of x, which thus has missing values also}
\item{v}{An auxiliary variable (i.e. not contained in the substantive model)}
}}
\usage{
ex_logisticquad
}
\description{
A dataset containing simulated data where the binary outcome depends quadratically
on a partially observed covariate.
}
\keyword{datasets}
|
# Acquire -----------------------------------------------------------------
#' @title Acquire Twitter Tweets
#'
#' @description Function will enable a user to access the Twitter API through the
#' [Twitter Developers Account](https://dev.twitter.com/) site.
#' Once a user has a Twitter developers account and has received their individual consumer key,
#' consumer secret key, access token, and access secret key, they can
#' acquire tweets based on a list of hashtags and a requested number of entries per hashtag.
#' @param consumer_key Twitter Application management consumer key.
#' @param consumer_secret Twitter Application management consumer secret key.
#' @param access_token Twitter Application management access token.
#' @param access_secret Twitter Application management access secret key.
#' @param HT A single hashtag or a list of hashtags the user has specified.
#' @param num_tweets Number of tweets to be acquired per each hashtag.
#' @param file_name User desired output .RData file name.
#' @param distinct Logical. If distinct = TRUE, the function removes multiple tweets that originate from the same twitter id at the exact same time.
#'
#' @importFrom twitteR setup_twitter_oauth twListToDF searchTwitter
#' @importFrom dplyr mutate distinct quo
#' @importFrom purrr map_df
#'
#' @return A DataFrame.
#'
#' @examples
#' \dontrun{
#' consumer_key <- "XXXXXXXXXXXXXXXXXXXXXXXXX"
#' consumer_secret <- "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
#' access_token <- "XXXXXXXXXXXXXXXXXX-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
#' access_secret <- "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
#'
#' hashtags <- c("#job", "#Friday", "#fail", "#icecream", "#random", "#kitten", "#airline")
#'
#' Acquire(consumer_key = consumer_key,
#' consumer_secret = consumer_secret,
#' access_token = access_token,
#' access_secret = access_secret,
#' HT = hashtags,
#' num_tweets = 10,
#' file_name = "test_tweets.RData",
#' distinct = TRUE)
#'
#' load("test_tweets.RData")
#' }
#'
#' @export
Acquire <- function(consumer_key, consumer_secret, access_token, access_secret, HT, num_tweets, file_name, distinct = TRUE) {
  # Cache the OAuth token so the user is not re-prompted every session.
  options(httr_oauth_cache = TRUE)
  # Bind column names for non-standard evaluation (quiets R CMD check).
  screenName <- dplyr::quo(screenName)
  created <- dplyr::quo(created)
  key <- dplyr::quo(key)

  twitteR::setup_twitter_oauth(consumer_key,
                               consumer_secret,
                               access_token,
                               access_secret)

  # One English-language search per hashtag, tagged with the hashtag
  # (leading "#" stripped) so the source of each tweet is retained.
  twitter_data <- list()
  for (i in HT) {
    twitter_data[[i]] <- twitteR::twListToDF(twitteR::searchTwitter(i,
                                                                    n = num_tweets,
                                                                    lang = "en")) %>%
      dplyr::mutate(hashtag = substr(i, 2, nchar(i)))
  }

  # Combine all searches; `key` (screen name + timestamp) identifies a tweet.
  raw_tweets <- purrr::map_df(twitter_data, rbind) %>%
    dplyr::mutate(key = paste(screenName, created))
  # BUG FIX: `distinct` was previously passed to `.keep_all`, so duplicate
  # removal always ran, and distinct = FALSE silently dropped every column
  # except `key`. De-duplicate only when requested, always keeping all columns.
  if (distinct) {
    raw_tweets <- dplyr::distinct(raw_tweets, key, .keep_all = TRUE)
  }
  save(raw_tweets, file = file_name)
}
# Explore -----------------------------------------------------------------
#' @title Tidy Twitter Data
#'
#' @description Function to tidy Twitter data: strips links, retweet markers,
#' punctuation, and emoticons while maintaining the actual tweet text, then
#' tokenizes each tweet into one word ("Token") per row with stop words removed.
#'
#' @param DataFrame DataFrame of Twitter Data.
#'
#' @importFrom dplyr mutate filter quo
#' @importFrom stringr str_replace_all
#' @importFrom tidytext unnest_tokens
#' @importFrom plyr rename
#'
#' @return A Tidy DataFrame with one row per token (column "Token").
#'
#' @examples
#' \dontrun{
#' library(SAoTD)
#' data <- raw_tweets
#' tidy_data <- Tidy(DataFrame = data)
#' tidy_data
#' }
#'
#' @export
Tidy <- function(DataFrame) {
  # Bind column names for non-standard evaluation (quiets R CMD check).
  text <- dplyr::quo(text)
  cleantext <- dplyr::quo(cleantext)
  word <- dplyr::quo(word)
  # NOTE(review): reg_words is defined but never used below -- leftover?
  reg_words <- "([^A-Za-z_\\d#@']|'(?![A-Za-z_\\d#@]))"
  # The cleaning steps below are order-sensitive: links are removed before
  # the "http" fragment sweep, and punctuation before the emoji filter.
  TD_Tidy <- DataFrame %>%
    dplyr::mutate(cleantext = stringr::str_replace_all(text, "https://t.co/[A-Za-z\\d]+|http://[A-Za-z\\d]+|&|<|>|RT|https", "")) %>%
    dplyr::mutate(cleantext = stringr::str_replace_all(cleantext, "#", "")) %>%
    dplyr::mutate(cleantext = stringr::str_replace_all(cleantext, "http", "")) %>%
    dplyr::mutate(cleantext = stringr::str_replace_all(cleantext, "RT", "")) %>% # Remove retweet note
    dplyr::mutate(cleantext = stringr::str_replace_all(cleantext, "[:punct:]", "")) %>%
    dplyr::mutate(cleantext = stringr::str_replace_all(cleantext, "[^[:alnum:]///' ]", "")) %>% # Remove Emojis
    tidytext::unnest_tokens(output = word, input = cleantext, token = "words", drop = TRUE) %>%
    dplyr::filter(!word %in% tidytext::stop_words$word) %>%
    plyr::rename(c("word" = "Token"))
  return(TD_Tidy)
}
#' @title Merge Terms
#'
#' @description Function to merge terms within a dataframe and prevent redundancy in the analysis.
#' For example many users may refer to the same entity in multiple different ways:
#' President Trump, The U.S. President, POTUS, Trump, President Donald Trump, Donald Trump, etc.
#' While each entry is different, they all refer to the same individual. Using Merge Terms will allow all be converted into a single term.
#'
#' @param DataFrame DataFrame of Twitter Data.
#' @param term Term selected for merging.
#' @param term_replacement Desired replacement term.
#'
#' @return A Tidy DataFrame.
#'
#' @examples
#' \dontrun{
#' library(SAoTD)
#' data <- raw_tweets
#' data <- Merge.Terms(DataFrame = data,
#' term = "ice cream",
#' term_replacement = "ice_cream")
#' data
#' }
#' @export
Merge.Terms <- function(DataFrame, term, term_replacement){
  # gsub() is vectorized over its input, so the whole text column can be
  # rewritten in one call. This replaces the original row-by-row loop,
  # which made one gsub call per tweet and broke on zero-row input via
  # 1:length(...). Matching remains case-insensitive, so e.g.
  # "Ice Cream" and "ice cream" both collapse to the replacement term.
  DataFrame$text <- gsub(pattern = as.character(term),
                         replacement = as.character(term_replacement),
                         x = DataFrame$text,
                         ignore.case = TRUE)
  # Return the data frame explicitly (the original ended with a self
  # assignment, which returned the value invisibly).
  return(DataFrame)
}
#' @title Twitter Uni-Grams
#'
#' @description Determines and displays the text Uni-Grams within the Twitter data in sequence from the most used to the least used. A Uni-Gram is a single word.
#'
#' @param DataFrame DataFrame of Twitter Data.
#'
#' @importFrom dplyr count mutate filter quo
#' @importFrom stringr str_replace_all
#' @importFrom tidytext unnest_tokens
#'
#' @return A tribble.
#'
#' @examples
#' \dontrun{
#' library(SAoTD)
#' data <- raw_tweets
#' TD_Unigram <- Unigram(DataFrame = data)
#' TD_Unigram
#' }
#' @export
Unigram <- function(DataFrame){
  # Bind column names for non-standard evaluation (quiets R CMD check).
  text <- dplyr::quo(text)
  word <- dplyr::quo(word)
  # Clean the raw tweet text, tokenize to single words, drop stop words
  # and numeric tokens, then count occurrences (most frequent first).
  TD_Unigram <- DataFrame %>%
    dplyr::mutate(text = stringr::str_replace_all(text, "RT", "")) %>% # Remove retweet note
    dplyr::mutate(text = stringr::str_replace_all(text, "&", "")) %>% # Remove Accelerated Mobile Pages (AMP) note
    dplyr::mutate(text = stringr::str_replace_all(text, "https://t.co/[A-Za-z\\d]+|http://[A-Za-z\\d]+|&|<|>|RT|https", "")) %>%
    dplyr::mutate(text = stringr::str_replace_all(text, "#", "")) %>%
    dplyr::mutate(text = stringr::str_replace_all(text, "[:punct:]", "")) %>%
    dplyr::mutate(text = stringr::str_replace_all(text, "[^[:alnum:]///' ]", "")) %>% # Remove Emojis
    tidytext::unnest_tokens("word", "text") %>%
    # BUG FIX: the filter previously tested the literal string "word"
    # (quoted) against the stop-word list, so no rows were ever removed.
    # Filter on the column instead, and drop purely numeric tokens with a
    # real regex match ('[0-9]+' inside %in% was an exact-string compare).
    dplyr::filter(!word %in% tidytext::stop_words$word) %>%
    dplyr::filter(!stringr::str_detect(word, "^[0-9]+$")) %>%
    dplyr::count(word, sort = TRUE)
  return(TD_Unigram)
}
#' @title Twitter Bi-Grams
#'
#' @description Determines and displays the text Bi-Grams within the Twitter data in sequence from the most used to the least used. A Bi-Gram is a combination of two consecutive words.
#'
#' @param DataFrame DataFrame of Twitter Data.
#'
#' @importFrom dplyr count mutate filter quo
#' @importFrom stringr str_replace_all
#' @importFrom tidyr separate
#' @importFrom tidytext unnest_tokens
#'
#' @return A tribble.
#'
#' @examples
#' \dontrun{
#' library(SAoTD)
#' data <- raw_tweets
#' TD_Bigram <- Bigram(DataFrame = data)
#' TD_Bigram
#' }
#' @export
Bigram <- function(DataFrame){
  # Bind column names for non-standard evaluation (quiets R CMD check).
  text <- dplyr::quo(text)
  word1 <- dplyr::quo(word1)
  word2 <- dplyr::quo(word2)
  # Clean the raw tweet text, tokenize into two-word n-grams, split each
  # bigram into its words, drop pairs containing stop words or pure
  # numbers, then count occurrences (most frequent first).
  TD_Bigram <- DataFrame %>%
    dplyr::mutate(text = stringr::str_replace_all(text, "https://t.co/[A-Za-z\\d]+|http://[A-Za-z\\d]+|&|<|>|RT|https", "")) %>%
    dplyr::mutate(text = stringr::str_replace_all(text, "RT", "")) %>% # Remove retweet note
    dplyr::mutate(text = stringr::str_replace_all(text, "&", "")) %>% # Remove Accelerated Mobile Pages (AMP) note
    dplyr::mutate(text = stringr::str_replace_all(text, "#", "")) %>%
    dplyr::mutate(text = stringr::str_replace_all(text, "[:punct:]", "")) %>%
    dplyr::mutate(text = stringr::str_replace_all(text, "[^[:alnum:]///' ]", "")) %>% # Remove Emojis
    tidytext::unnest_tokens("bigram", "text", token = "ngrams", n = 2) %>%
    tidyr::separate("bigram", c("word1", "word2"), sep = " ") %>%
    # BUG FIX: the filters previously tested the literal strings "word1" /
    # "word2" (quoted) against the stop-word list, so nothing was ever
    # removed. Filter on the columns, and match numeric tokens with a real
    # regex ('[0-9]+' inside %in% was an exact-string compare).
    dplyr::filter(!word1 %in% tidytext::stop_words$word,
                  !stringr::str_detect(word1, "^[0-9]+$")) %>%
    dplyr::filter(!word2 %in% tidytext::stop_words$word,
                  !stringr::str_detect(word2, "^[0-9]+$")) %>%
    dplyr::count(word1, word2, sort = TRUE)
  return(TD_Bigram)
}
#' @title Twitter Tri-Grams
#'
#' @description Determines and displays the text Tri-Grams within the Twitter data in sequence from the most used to the least used. A Tri-Gram is a combination of three consecutive words.
#'
#' @param DataFrame DataFrame of Twitter Data.
#'
#' @importFrom dplyr count mutate filter quo
#' @importFrom stringr str_replace_all
#' @importFrom tidyr separate
#' @importFrom tidytext unnest_tokens
#'
#' @return A tribble.
#'
#' @examples
#' \dontrun{
#' library(SAoTD)
#' data <- raw_tweets
#' TD_Trigram <- Trigram(DataFrame = data)
#' TD_Trigram
#' }
#' @export
Trigram <- function(DataFrame) {
  # Bind column names for non-standard evaluation (quiets R CMD check).
  text <- dplyr::quo(text)
  word1 <- dplyr::quo(word1)
  word2 <- dplyr::quo(word2)
  word3 <- dplyr::quo(word3)
  # Clean the raw tweet text, tokenize into three-word n-grams, split each
  # trigram into its words, drop triples containing stop words or pure
  # numbers, then count occurrences (most frequent first).
  TD_Trigram <- DataFrame %>%
    dplyr::mutate(text = stringr::str_replace_all(text, "RT", "")) %>% # Remove retweet note
    dplyr::mutate(text = stringr::str_replace_all(text, "&", "")) %>% # Remove Accelerated Mobile Pages (AMP) note
    dplyr::mutate(text = stringr::str_replace_all(text, "https://t.co/[A-Za-z\\d]+|http://[A-Za-z\\d]+|&|<|>|RT|https", "")) %>%
    dplyr::mutate(text = stringr::str_replace_all(text, "#", "")) %>%
    dplyr::mutate(text = stringr::str_replace_all(text, "[:punct:]", "")) %>%
    dplyr::mutate(text = stringr::str_replace_all(text, "[^[:alnum:]///' ]", "")) %>% # Remove Emojis
    tidytext::unnest_tokens("trigram", "text", token = "ngrams", n=3) %>%
    tidyr::separate("trigram", c("word1", "word2", "word3"), sep = " ") %>%
    # BUG FIX: the filters previously tested the literal strings "word1" /
    # "word2" / "word3" (quoted) against the stop-word list, so nothing was
    # ever removed. Filter on the columns, and match numeric tokens with a
    # real regex ('[0-9]+' inside %in% was an exact-string compare).
    dplyr::filter(!word1 %in% tidytext::stop_words$word,
                  !stringr::str_detect(word1, "^[0-9]+$")) %>%
    dplyr::filter(!word2 %in% tidytext::stop_words$word,
                  !stringr::str_detect(word2, "^[0-9]+$")) %>%
    dplyr::filter(!word3 %in% tidytext::stop_words$word,
                  !stringr::str_detect(word3, "^[0-9]+$")) %>%
    dplyr::count(word1, word2, word3, sort = TRUE)
  return(TD_Trigram)
}
#' @title Twitter Bi-Gram Network
#'
#' @description Displays the Bi-Gram Network. Bi-Gram networks builds on computed Bi-Grams. Bi-Gram networks serve as a visualization tool that displays the relationships between the words simultaneously as opposed to a tabular display of Bi-Gram words.
#'
#' @param BiGramDataFrame DataFrame of Bi-Grams.
#' @param number The minimum desired number of Bi-Gram occurances to be displayed (number = 300, would display all Bi-Grams that have at least 300 instances.)
#' @param layout Desired layout from the `ggraph` package. Acceptable layouts: "star", "circle", "gem", "dh", "graphopt", "grid", "mds", "randomly", "fr", "kk", "drl", "lgl"
#' @param edge_color User desired edge color.
#' @param node_color User desired node color.
#' @param node_size User desired node size.
#' @param set_seed Seed for reproducible results.
#'
#' @importFrom dplyr filter quo
#' @importFrom igraph graph_from_data_frame
#' @importFrom ggraph ggraph geom_edge_link geom_node_point geom_node_text
#' @import ggplot2
#'
#' @return A ggraph plot.
#'
#' @examples
#' \dontrun{
#' library(SAoTD)
#' data <- raw_tweets
#' TD_Bigram <- Bigram(DataFrame = data)
#' TD_Bigram_Network <- Bigram.Network(BiGramDataFrame = TD_Bigram,
#' number = 300,
#' layout = "fr",
#' edge_color = "royalblue",
#' node_color = "black",
#' node_size = 3,
#' set_seed = 1234)
#'
#' TD_Bigram_Network
#' }
#' @export
Bigram.Network <- function(BiGramDataFrame, number = 300, layout = "fr", edge_color = "royalblue", node_color = "black", node_size = 3, set_seed = 1234) {
  # Bind column names for non-standard evaluation (quiets R CMD check).
  n <- dplyr::quo(n)
  name <- dplyr::quo(name)
  # Keep only bigrams with more than `number` occurrences and convert the
  # (word1, word2, n) table into a directed igraph object.
  TD_Bigram_Network <- BiGramDataFrame %>%
    dplyr::filter(n > number) %>%
    igraph::graph_from_data_frame()
  # Fix the seed so the force-directed layout is reproducible.
  set.seed(set_seed)
  # Edge width is the bigram count rescaled to [1, 10]; node labels are the
  # words themselves, repelled to avoid overlap.
  TD_Bigram_Network %>%
    ggraph::ggraph(layout = layout) +
    ggraph::geom_edge_link(ggplot2::aes(edge_alpha = 1, edge_width = scales::rescale(n, to=c(1,10))),edge_colour = edge_color, show.legend = TRUE) +
    ggraph::geom_node_point(colour = node_color, size = node_size) +
    ggraph::geom_node_text(ggplot2::aes(label = name), repel = TRUE) +
    ggplot2::ggtitle("Bi-Gram Network") +
    ggplot2::theme_void()
}
#' @title Twitter Word Correlations
#'
#' @description The word correlation displays the mutual relationship between words.
#'
#' @param DataFrameTidy DataFrame of Twitter Data that has been tidy'd.
#' @param number The number of word instances to be included.
#' @param sort Rank order the results from most to least correlated.
#'
#' @importFrom dplyr group_by filter quo
#' @importFrom widyr pairwise_cor
#'
#' @return A tribble
#'
#' @examples
#' \dontrun{
#' library(SAoTD)
#' data <- raw_tweets
#' tidy_data <- Tidy(DataFrame = data)
#' TD_Word_Corr <- Word.Corr(DataFrameTidy = tidy_data,
#' number = 500,
#' sort = TRUE)
#'
#' TD_Word_Corr
#' }
#' @export
Word.Corr <- function(DataFrameTidy, number, sort = TRUE) {
  # Bind column names for non-standard evaluation (quiets R CMD check).
  Token <- dplyr::quo(Token)
  n <- dplyr::quo(n)
  key <- dplyr::quo(key)
  # Keep only tokens that appear at least `number` times, then compute
  # pairwise correlations of token co-occurrence within tweets (`key`).
  TD_Word_Correlation <- DataFrameTidy %>%
    dplyr::group_by(Token) %>%
    dplyr::filter(n() >= number) %>%
    widyr::pairwise_cor(Token, key, sort = sort)
  # BUG FIX: the function previously ended with the assignment above, so
  # the result was returned invisibly (printing nothing at the console).
  # Return it explicitly, consistent with the other functions in this file.
  return(TD_Word_Correlation)
}
#' @title Twitter Word Correlations Plot
#'
#' @description The word correlation network displays the mutual relationship between words. The correlation network shows higher correlations with a thicker and darker edge color.
#'
#' @param WordCorr DataFrame of Word Correlations.
#' @param Correlation Minimum level of correlation to be displayed.
#' @param layout Desired layout from the `ggraph` package. Acceptable layouts: "star", "circle", "gem", "dh", "graphopt", "grid", "mds", "randomly", "fr", "kk", "drl", "lgl"
#' @param edge_color User desired edge color.
#' @param node_color User desired node color.
#' @param node_size User desired node size.
#' @param set_seed Seed for reproducible results.
#'
#' @importFrom dplyr filter quo
#' @importFrom igraph graph_from_data_frame
#' @importFrom ggraph ggraph geom_edge_link geom_node_point geom_node_text
#' @import ggplot2
#'
#' @return An igraph plot
#'
#' @examples
#' \dontrun{
#' library(SAoTD)
#' data <- raw_tweets
#' tidy_data <- Tidy(DataFrame = data)
#' TD_Word_Corr <- Word.Corr(DataFrameTidy = tidy_data,
#' number = 500,
#' sort = TRUE)
#' TD_Word_Corr_Network <- Word.Corr.Plot(WordCorr = TD_Word_Corr,
#' Correlation = 0.15,
#' layout = "fr",
#' edge_color = "royalblue",
#' node_color = "black",
#' node_size = 2,
#' set_seed = 1234)
#'
#' TD_Word_Corr_Network
#' }
#' @export
Word.Corr.Plot <- function(WordCorr, Correlation = 0.15, layout = "fr", edge_color = "royalblue", node_color = "black", node_size = 2, set_seed = 1234) {
  # Bind column names for non-standard evaluation (quiets R CMD check).
  correlation <- dplyr::quo(correlation)
  name <- dplyr::quo(name)
  # Fix the seed so the force-directed layout is reproducible.
  set.seed(set_seed)
  # Keep only pairs above the correlation threshold; stronger correlations
  # render as thicker, darker edges.
  WordCorr %>%
    dplyr::filter(correlation > Correlation) %>%
    igraph::graph_from_data_frame() %>%
    ggraph::ggraph(layout = layout) +
    ggraph::geom_edge_link(ggplot2::aes(edge_alpha = correlation, edge_width = correlation), edge_colour = edge_color, show.legend = TRUE) +
    ggraph::geom_node_point(colour = node_color, size = node_size) +
    ggraph::geom_node_text(ggplot2::aes(label = name), repel = TRUE) +
    ggplot2::ggtitle("Word Correlation Network") +
    # CONSISTENCY FIX: qualify theme_void() with ggplot2:: to match the
    # explicit namespacing used everywhere else in this file.
    ggplot2::theme_void()
}
# Topic Analysis ----------------------------------------------------------
#' @title Number Topics
#'
#' @description Determines the optimal number of Latent topics within a dataframe by tuning the Latent Dirichlet Allocation (LDA) model parameters.
#' Uses the `ldatuning` package and outputs an ldatuning plot.
#'
#' @param DataFrame DataFrame of Twitter Data.
#' @param num_cores The number of CPU cores to processes models simultaneously (2L for dual core processor).
#' @param min_clusters Lower range for the number of clusters.
#' @param max_clusters Upper range for the number of clusters.
#' @param skip Integer; The number of clusters to skip between entries.
#' @param set_seed Seed for reproducible results.
#'
#' @importFrom dplyr mutate group_by count anti_join quo
#' @importFrom stringr str_replace_all
#' @importFrom tidytext unnest_tokens cast_dtm
#' @importFrom ldatuning FindTopicsNumber
#' @importFrom scales rescale
#' @importFrom reshape2 melt
#' @import ggplot2
#'
#' @return A Tidy DataFrame.
#'
#' @examples
#' \dontrun{
#' library(SAoTD)
#' data <- raw_tweets
#' LDA_Topic_Plot <- Number.Topics(DataFrame = data,
#' num_cores = 2L,
#' min_clusters = 2,
#' max_clusters = 12,
#' skip = 2,
#' set_seed = 1234)
#'
#' LDA_Topic_Plot
#' }
#' @export
Number.Topics <- function(DataFrame, num_cores, min_clusters = 2, max_clusters = 12, skip = 2, set_seed = 1234) {
  # Bind column names for non-standard evaluation (quiets R CMD check).
  # NOTE(review): set_seed is accepted but never used below -- TODO confirm
  # whether a set.seed(set_seed) call was intended before FindTopicsNumber.
  text <- dplyr::quo(text)
  key <- dplyr::quo(key)
  word <- dplyr::quo(word)
  n <- dplyr::quo(n)
  # Clean the tweets, tokenize per tweet (key), remove stop words, and
  # build the document-term matrix the LDA tuning routines require.
  lda_prep <- DataFrame %>%
    dplyr::mutate(text = base::iconv(DataFrame$text, "latin1", "ASCII", sub="")) %>%
    dplyr::mutate(text = stringr::str_replace_all(text, "#", "")) %>% # Remove hashtag
    dplyr::mutate(text = stringr::str_replace_all(text, "[:punct:]", "")) %>% # Remove punctuation
    dplyr::mutate(text = stringr::str_replace_all(text, "RT", "")) %>% # Remove retweet note
    dplyr::mutate(text = stringr::str_replace_all(text, "&", "")) %>% # Remove Accelerated Mobile Pages (AMP) note
    dplyr::mutate(text = stringr::str_replace_all(text, "https://t.co/[A-Za-z\\d]+|http://[A-Za-z\\d]+|&|<|>|RT|https", "")) %>% # Remove links
    dplyr::group_by(key) %>%
    tidytext::unnest_tokens(word, text) %>%
    dplyr::anti_join(tidytext::stop_words) %>%
    dplyr::count(key, word, sort = TRUE) %>%
    tidytext::cast_dtm(key, word, n) # create DTM
  # Fit Gibbs-sampled LDA models across the candidate cluster counts and
  # evaluate all four tuning metrics for each.
  values <- ldatuning::FindTopicsNumber(lda_prep,
                                        topics = seq(from = min_clusters, to = max_clusters, by = skip),
                                        metrics = c("Griffiths2004", "CaoJuan2009", "Arun2010", "Deveaud2014"),
                                        method = "Gibbs",
                                        mc.cores = num_cores,
                                        verbose = TRUE)
  # Rescale each metric to [0, 1] so they share one y-axis, then reshape to
  # long form for plotting.
  columns <- base::subset(values, select = 2:ncol(values))
  values <- base::data.frame(values["topics"], base::apply(columns, 2, function(column) {scales::rescale(column, to = c(0, 1), from = range(column))}))
  values <- reshape2::melt(values, id.vars = "topics", na.rm = TRUE)
  # Griffiths2004 / Deveaud2014 are maximized; the other two are minimized.
  values$group <- values$variable %in% c("Griffiths2004", "Deveaud2014")
  values$group <- base::factor(values$group, levels = c(FALSE, TRUE), labels = c("minimize", "maximize"))
  # Build the two-facet metrics-vs-topics plot, one line per metric.
  p <- ggplot2::ggplot(values, aes_string(x = "topics", y = "value", group = "variable"))
  p <- p + geom_line()
  p <- p + geom_point(aes_string(shape = "variable"), size = 3)
  p <- p + guides(size = FALSE, shape = guide_legend(title = "metrics:"))
  p <- p + scale_x_continuous(breaks = values$topics)
  p <- p + labs(x = "number of topics", y = NULL)
  p <- p + facet_grid(group ~ .)
  p <- p + theme_bw() %+replace% theme(panel.grid.major.y = element_blank(),
                                       panel.grid.minor.y = element_blank(),
                                       panel.grid.major.x = element_line(colour = "grey70"),
                                       panel.grid.minor.x = element_blank(),
                                       legend.key = element_blank(),
                                       strip.text.y = element_text(angle = 90))
  return(p)
}
#' @title Tweet Topics
#'
#' @description Determines the Latent topics within a dataframe by using Latent Dirichlet Allocation (LDA) model parameters.
#' Uses the `ldatuning` package and outputs an ldatuning plot.
#' Prepares tweet text, creates DTM, conducts LDA, display data terms associated with each topic.
#'
#' @param DataFrame DataFrame of Twitter Data.
#' @param clusters The number of latent clusters.
#' @param method method = "Gibbs"
#' @param set_seed Seed for reproducable results.
#' @param num_terms The desired number of terms to be returned for each topic.
#'
#' @importFrom dplyr mutate group_by anti_join inner_join count select transmute quo
#' @importFrom stringr str_replace_all
#' @importFrom plyr rename
#' @importFrom tidytext cast_dtm
#' @importFrom topicmodels topics terms
#'
#' @return Returns LDA topics.
#'
#' @examples
#' \dontrun{
#' library(SAoTD)
#' data <- raw_tweets
#' LDA_data <- Tweet.Topics(DataFrame = data,
#' clusters = 8,
#' method = "Gibbs",
#' set_seed = 1234,
#' num_terms = 10)
#'
#' LDA_data
#' }
#' @export
Tweet.Topics <- function(DataFrame, clusters, method = "Gibbs", set_seed = 1234, num_terms = 10) {
  # Bind column names for non-standard evaluation (quiets R CMD check).
  text <- dplyr::quo(text)
  key <- dplyr::quo(key)
  word <- dplyr::quo(word)
  n <- dplyr::quo(n)
  # Clean the tweets, tokenize per tweet (key), remove stop words, and
  # build the document-term matrix consumed by LDA.
  lda_prep <- DataFrame %>%
    dplyr::mutate(text = base::iconv(DataFrame$text, "latin1", "ASCII", sub="")) %>%
    dplyr::mutate(text = stringr::str_replace_all(text, "#", "")) %>% # Remove hashtag
    dplyr::mutate(text = stringr::str_replace_all(text, "[:punct:]", "")) %>% # Remove punctuation
    dplyr::mutate(text = stringr::str_replace_all(text, "RT", "")) %>% # Remove retweet note
    dplyr::mutate(text = stringr::str_replace_all(text, "&", "")) %>% # Remove Accelerated Mobile Pages (AMP) note
    dplyr::mutate(text = stringr::str_replace_all(text, "https://t.co/[A-Za-z\\d]+|http://[A-Za-z\\d]+|&|<|>|RT|https", "")) %>% # Remove links
    dplyr::group_by(key) %>%
    tidytext::unnest_tokens(word, text) %>%
    dplyr::anti_join(tidytext::stop_words) %>%
    dplyr::count(key, word, sort = TRUE) %>%
    tidytext::cast_dtm(key, word, n)
  # Run LDA using Gibbs sampling with a fixed seed for reproducibility.
  ldaout <- topicmodels::LDA(lda_prep, k = clusters, method = method, control = list(seed = set_seed))
  # NOTE(review): ldaout_topics, ldaout_terms, and topicProbabilities are
  # computed but never used below -- leftovers from exploration? TODO confirm.
  ldaout_topics <- as.matrix(topicmodels::topics(ldaout))
  ldaout_terms <- as.matrix(topicmodels::terms(ldaout, num_terms))
  # probabilities associated with each topic assignment
  topicProbabilities <- as.data.frame(ldaout@gamma)
  # Most likely topic per document and the top terms per topic (printed
  # to the console as a side effect for the user's inspection).
  data.topics <- topicmodels::topics(ldaout, 1)
  data.terms <- as.data.frame(topicmodels::terms(ldaout, num_terms), stringsAsFactors = FALSE)
  print(data.terms)
  #View(data.terms)
  # Creates a dataframe mapping each document id to its most likely topic
  tweettopics.df <- as.data.frame(data.topics)
  tweettopics.df <- dplyr::transmute(tweettopics.df, LessonId = rownames(tweettopics.df), Topic = data.topics)
  tweettopics.df$ArticleNo <- as.character(tweettopics.df$LessonId)
  # Clean up and rename columns to match the previous dataframes
  tweettopics <- tweettopics.df %>%
    dplyr::select(c("ArticleNo", "Topic")) %>%
    plyr::rename(c("ArticleNo" = "key"))
  # Join the original twitter data frame with the per-tweet topic labels
  Tweet.Topics <- dplyr::inner_join(DataFrame, tweettopics, by = "key")
  return(Tweet.Topics)
}
# Sentiment Calculation ---------------------------------------------------
#' @title Score Tidy Twitter Data
#'
#' @description Function to Calculate Sentiment Scores that will account for sentiment by hashtag or topic.
#'
#' @param DataFrameTidy DataFrame of Twitter Data that has been tidy'd.
#' @param HT_Topic If using hashtag data select: "hashtag". If using topic data select: "topic"
#'
#' @importFrom dplyr mutate inner_join group_by count quo
#' @importFrom plyr rename
#' @importFrom tidyr spread
#' @importFrom lubridate as_date
#' @importFrom tidytext get_sentiments
#'
#' @return A Scored DataFrame.
#'
#' @examples
#' \dontrun{
#' library(SAoTD)
#' data <- raw_tweets
#' tidy_data <- Tidy(DataFrame = data)
#' score_data <- Scores(DataFrameTidy = tidy_data,
#' HT_Topic = "hashtag")
#' score_data
#' }
#' @export
Scores <- function(DataFrameTidy, HT_Topic) {
  # Bind column names for non-standard evaluation (quiets R CMD check).
  text <- dplyr::quo(text)
  method <- dplyr::quo(method)
  hashtag <- dplyr::quo(hashtag)
  created <- dplyr::quo(created)
  key <- dplyr::quo(key)
  Sentiment <- dplyr::quo(Sentiment)
  n <- dplyr::quo(n)
  positive <- dplyr::quo(positive)
  negative <- dplyr::quo(negative)
  TweetSentimentScore <- dplyr::quo(TweetSentimentScore)
  Topic <- dplyr::quo(Topic)
  #data("Bing")
  # Bing sentiment lexicon, renamed to match this package's column names.
  Bing <- tidytext::get_sentiments(lexicon = "bing") %>%
    plyr::rename(c("word" = "Token", "sentiment" = "Sentiment"))
  # Both branches follow the same recipe -- join tokens to the lexicon,
  # count positive/negative tokens per tweet, score = positive - negative,
  # and label the tweet neutral / positive / negative -- grouped either by
  # hashtag or by topic.
  if(HT_Topic == "hashtag") {
    TD_Hashtag_Scores <- DataFrameTidy %>%
      dplyr::inner_join(Bing, by = "Token") %>%
      dplyr::mutate(method = "Bing") %>%
      dplyr::group_by(text, method, hashtag, created, key, Sentiment) %>%
      dplyr::count(method, hashtag, created, key, Sentiment) %>%
      tidyr::spread(Sentiment, n, fill = 0) %>%
      dplyr::mutate(TweetSentimentScore = positive - negative) %>%
      dplyr::mutate(TweetSentiment = ifelse(TweetSentimentScore == 0, "neutral",
                                            ifelse(TweetSentimentScore > 0, "positive", "negative"))) %>%
      dplyr::mutate(date = lubridate::as_date(created))
    return(TD_Hashtag_Scores)
  } else {
    TD_Topic_Scores <- DataFrameTidy %>%
      dplyr::inner_join(Bing, by = "Token") %>%
      dplyr::mutate(method = "Bing") %>%
      dplyr::group_by(text, method, Topic, created, key, Sentiment) %>%
      dplyr::count(method, Topic, created, key, Sentiment) %>%
      tidyr::spread(Sentiment, n, fill = 0) %>%
      dplyr::mutate(TweetSentimentScore = positive - negative) %>%
      dplyr::mutate(TweetSentiment = ifelse(TweetSentimentScore == 0, "neutral",
                                            ifelse(TweetSentimentScore > 0, "positive", "negative"))) %>%
      dplyr::mutate(date = lubridate::as_date(created))
    return(TD_Topic_Scores)
  }
}
#' @title Twitter Positive and Negative Words
#'
#' @description Determines and displays the most positive and negative words within the twitter data.
#'
#' @param DataFrameTidy DataFrame of Twitter Data that has been tidy'd.
#' @param num_words Desired number of words to be returned.
#' @param filterword Word or words to be removed
#'
#' @importFrom dplyr mutate inner_join group_by count filter ungroup top_n quo
#' @importFrom plyr rename
#' @importFrom tidytext get_sentiments
#' @importFrom stats reorder
#' @import ggplot2
#'
#' @return A ggplot
#'
#' @examples
#' \dontrun{
#' library(SAoTD)
#' data <- raw_tweets
#' tidy_data <- Tidy(DataFrame = data)
#' posneg <- PosNeg.Words(DataFrameTidy = tidy_data,
#'                        num_words = 10)
#' posneg
#'
#' data <- raw_tweets
#' tidy_data <- Tidy(DataFrame = data)
#' posneg <- PosNeg.Words(DataFrameTidy = tidy_data,
#'                        num_words = 10,
#' filterword = "fail")
#' posneg
#'
#' data <- raw_tweets
#' tidy_data <- Tidy(DataFrame = data)
#' posneg <- PosNeg.Words(DataFrameTidy = tidy_data,
#'                        num_words = 10,
#' filterword = c("fail", "urgent"))
#' posneg
#' }
#' @export
PosNeg.Words <- function(DataFrameTidy, num_words, filterword = NULL) {
  # Bind column names used via non-standard evaluation so R CMD check does
  # not flag them as undefined global variables.
  Token <- dplyr::quo(Token)
  Sentiment <- dplyr::quo(Sentiment)
  n <- dplyr::quo(n)
  # Bing sentiment lexicon, renamed to match the columns produced by Tidy().
  Bing <- tidytext::get_sentiments(lexicon = "bing") %>%
    plyr::rename(c("word" = "Token", "sentiment" = "Sentiment"))
  # Count tokens per sentiment (after dropping any user-supplied filter
  # words), keep the top `num_words` per sentiment, and plot the counts as
  # horizontal bars faceted into positive/negative panels.
  TD_PosNeg_Words <- DataFrameTidy %>%
    dplyr::inner_join(Bing, by = "Token") %>%
    dplyr::filter(!(Token %in% filterword)) %>%  # filterword = NULL keeps all rows
    dplyr::count(Token, Sentiment) %>%
    dplyr::ungroup() %>%
    dplyr::group_by(Sentiment) %>%
    dplyr::top_n(num_words, n) %>%  # ties may yield more than num_words rows
    dplyr::ungroup() %>%
    dplyr::mutate(Token = stats::reorder(Token, n)) %>%  # order the bars by count
    ggplot2::ggplot(ggplot2::aes(Token, n, fill = Sentiment)) +
    ggplot2::geom_col(show.legend = FALSE) +
    ggplot2::facet_wrap(~Sentiment, scales = "free_y") +
    ggplot2::labs(y = "Count",
                  x = NULL) +
    ggplot2::ggtitle('Most common positive and negative words utilizing the Bing Lexicon') +
    ggplot2::coord_flip()
  return(TD_PosNeg_Words)
}
#' @title Twitter Data Minimum Scores
#'
#' @description Determines the minimum scores for either the entire dataset or the minimum scores associated with a hashtag or topic analysis.
#'
#' @param DataFrameTidyScores DataFrame of Twitter Data that has been tidy'd and scored.
#' @param HT_Topic If using hashtag data select: "hashtag". If using topic data select: "topic".
#' @param HT_Topic_Selection The hashtag or topic to be investigated. NULL will find the minimum across the entire dataframe.
#'
#' @importFrom dplyr arrange filter quo
#' @importFrom plyr desc
#' @importFrom utils head
#'
#' @return A Tidy DataFrame.
#'
#' @examples
#' \dontrun{
#' library(SAoTD)
#' data <- raw_tweets
#' tidy_data <- Tidy(DataFrame = data)
#' score_data <- Scores(DataFrameTidy = tidy_data,
#' HT_Topic = "hashtag")
#' min_scores <- Min.Scores(DataFrameTidyScores = score_data,
#' HT_Topic = "hashtag")
#'
#' data <- raw_tweets
#' tidy_data <- Tidy(DataFrame = data)
#' score_data <- Scores(DataFrameTidy = tidy_data,
#' HT_Topic = "hashtag")
#' min_scores <- Min.Scores(DataFrameTidyScores = score_data,
#' HT_Topic = "hashtag",
#' HT_Topic_Selection = "icecream")
#' }
#' @export
Min.Scores <- function(DataFrameTidyScores, HT_Topic, HT_Topic_Selection = NULL) {
  # Fail fast on a bad selector: previously any value other than "hashtag"
  # fell silently into the topic branches.
  if (!HT_Topic %in% c("hashtag", "topic")) {
    stop('HT_Topic must be either "hashtag" or "topic".', call. = FALSE)
  }
  # Bind column names used via non-standard evaluation so R CMD check does
  # not flag them as undefined global variables.
  hashtag <- dplyr::quo(hashtag)
  Topic <- dplyr::quo(Topic)
  TweetSentimentScore <- dplyr::quo(TweetSentimentScore)
  # Optionally restrict to a single hashtag/topic, then return the
  # lowest-scoring tweets.  (Replaces four near-identical branches that
  # duplicated the arrange/head pipeline.)
  scores <- DataFrameTidyScores
  if (!is.null(HT_Topic_Selection)) {
    scores <- if (HT_Topic == "hashtag") {
      dplyr::filter(scores, hashtag == HT_Topic_Selection)
    } else {
      dplyr::filter(scores, Topic == HT_Topic_Selection)
    }
  }
  # head() keeps the default six rows, matching the original behavior.
  TD_Min_Scores <- scores %>%
    dplyr::arrange(TweetSentimentScore) %>%
    utils::head()
  return(TD_Min_Scores)
}
#' @title Twitter Data Maximum Scores
#'
#' @description Determines the Maximum scores for either the entire dataset or the Maximum scores associated with a hashtag or topic analysis.
#'
#' @param DataFrameTidyScores DataFrame of Twitter Data that has been tidy'd and scored.
#' @param HT_Topic If using hashtag data select: "hashtag". If using topic data select: "topic".
#' @param HT_Topic_Selection The hashtag or topic to be investigated. NULL will find the maximum across the entire dataframe.
#'
#' @importFrom dplyr arrange filter quo
#' @importFrom plyr desc
#' @importFrom utils head
#'
#' @return A Tidy DataFrame.
#'
#' @examples
#' \dontrun{
#' library(SAoTD)
#' data <- raw_tweets
#' tidy_data <- Tidy(DataFrame = data)
#' score_data <- Scores(DataFrameTidy = tidy_data,
#' HT_Topic = "hashtag")
#' max_scores <- Max.Scores(DataFrameTidyScores = score_data,
#' HT_Topic = "hashtag")
#'
#' data <- raw_tweets
#' tidy_data <- Tidy(DataFrame = data)
#' score_data <- Scores(DataFrameTidy = tidy_data,
#' HT_Topic = "hashtag")
#' max_scores <- Max.Scores(DataFrameTidyScores = score_data,
#' HT_Topic = "hashtag",
#' HT_Topic_Selection = "icecream")
#' }
#' @export
Max.Scores <- function(DataFrameTidyScores, HT_Topic, HT_Topic_Selection = NULL) {
  # Fail fast on a bad selector: previously any value other than "hashtag"
  # fell silently into the topic branches.
  if (!HT_Topic %in% c("hashtag", "topic")) {
    stop('HT_Topic must be either "hashtag" or "topic".', call. = FALSE)
  }
  # Bind column names used via non-standard evaluation so R CMD check does
  # not flag them as undefined global variables.
  hashtag <- dplyr::quo(hashtag)
  Topic <- dplyr::quo(Topic)
  TweetSentimentScore <- dplyr::quo(TweetSentimentScore)
  # Optionally restrict to a single hashtag/topic, then return the
  # highest-scoring tweets.  (Replaces four near-identical branches that
  # duplicated the arrange/head pipeline.)
  scores <- DataFrameTidyScores
  if (!is.null(HT_Topic_Selection)) {
    scores <- if (HT_Topic == "hashtag") {
      dplyr::filter(scores, hashtag == HT_Topic_Selection)
    } else {
      dplyr::filter(scores, Topic == HT_Topic_Selection)
    }
  }
  # head() keeps the default six rows, matching the original behavior.
  TD_Max_Scores <- scores %>%
    dplyr::arrange(plyr::desc(TweetSentimentScore)) %>%
    utils::head()
  return(TD_Max_Scores)
}
# Visualization -----------------------------------------------------------
#' @title Twitter Corpus Distribution
#'
#' @description Determines the scores distribution for the entire Twitter data corpus.
#'
#' @param DataFrameTidyScores DataFrame of Twitter Data that has been tidy'd and scored.
#' @param color The user selected color to highlight the bins.
#' @param fill The interior color of the bins.
#'
#' @import ggplot2
#' @importFrom dplyr quo
#'
#' @return A ggplot.
#'
#' @examples
#' \dontrun{
#' library(SAoTD)
#' data <- raw_tweets
#' tidy_data <- Tidy(DataFrame = data)
#' score_data <- Scores(DataFrameTidy = tidy_data,
#' HT_Topic = "hashtag")
#' Corp_Dist <- Corpus.Distribution(DataFrameTidyScores = score_data,
#' color = "black",
#' fill = "white")
#' Corp_Dist
#' }
#' @export
Corpus.Distribution <- function(DataFrameTidyScores, color = "black", fill = "white") {
  # Bind the column name used via non-standard evaluation so R CMD check
  # does not flag it as an undefined global variable.
  TweetSentimentScore <- dplyr::quo(TweetSentimentScore)
  # Bar chart of per-tweet sentiment scores over the whole corpus; bar
  # outline and interior colors are user-configurable.
  dist_plot <- ggplot2::ggplot(DataFrameTidyScores,
                               ggplot2::aes(TweetSentimentScore)) +
    ggplot2::geom_bar(stat = "count", colour = color, fill = fill) +
    ggplot2::theme(legend.position = "none") +
    ggplot2::ggtitle("Sentiment Score Distribution") +
    ggplot2::xlab('Sentiment') +
    ggplot2::ylab('Count')
  return(dist_plot)
}
#' @title Twitter Hashtag or Topic Distribution
#'
#' @description Determines the scores distribution by hashtag or topic for Twitter data.
#'
#' @param DataFrameTidyScores DataFrame of Twitter Data that has been tidy'd and scored.
#' @param HT_Topic If using hashtag data select: "hashtag". If using topic data select: "topic".
#' @param color The user selected color to highlight the bins.
#' @param fill The interior color of the bins.
#'
#' @import ggplot2
#'
#' @return A facet wrap ggplot.
#'
#' @examples
#' \dontrun{
#' library(SAoTD)
#' data <- raw_tweets
#' tidy_data <- Tidy(DataFrame = data)
#' score_data <- Scores(DataFrameTidy = tidy_data,
#' HT_Topic = "hashtag")
#' Dist <- Distribution(DataFrameTidyScores = score_data,
#' HT_Topic = "hashtag",
#' color = "black",
#' fill = "white")
#' Dist
#' }
#' @export
Distribution <- function(DataFrameTidyScores, HT_Topic, color = "black", fill = "white") {
  # Bind column names used via non-standard evaluation so R CMD check does
  # not flag them as undefined global variables.
  TweetSentimentScore <- dplyr::quo(TweetSentimentScore)
  hashtag <- dplyr::quo(hashtag)
  Topic <- dplyr::quo(Topic)
  if (HT_Topic == "hashtag") {
    # BUG FIX: aes("TweetSentimentScore") mapped the literal string as the
    # x aesthetic, collapsing every facet to a single bar.  The bare column
    # name is required (compare Corpus.Distribution above).
    TD_HT_Distribution <- DataFrameTidyScores %>%
      ggplot2::ggplot(ggplot2::aes(TweetSentimentScore)) +
      ggplot2::geom_bar(stat = "count", colour = color, fill = fill) +
      ggplot2::facet_wrap(~hashtag, ncol = 2) +
      ggplot2::theme(legend.position = "none") +
      ggplot2::ggtitle("Sentiment Score Distribution Across all #Hashtags") +
      ggplot2::xlab('Sentiment') +
      ggplot2::ylab('Count')
    return(TD_HT_Distribution)
  } else {
    # Same fix for the topic facets.
    TD_Topic_Distribution <- DataFrameTidyScores %>%
      ggplot2::ggplot(ggplot2::aes(TweetSentimentScore)) +
      ggplot2::geom_bar(stat = "count", colour = color, fill = fill) +
      ggplot2::facet_wrap(~Topic, ncol = 2) +
      ggplot2::theme(legend.position = "none") +
      ggplot2::ggtitle("Sentiment Score Distribution Across all Topics") +
      ggplot2::xlab('Sentiment') +
      ggplot2::ylab('Count')
    return(TD_Topic_Distribution)
  }
}
#' @title Twitter Data Box Plot
#'
#' @description Displays the distribution scores of either hashtag or topic Twitter data.
#'
#' @param DataFrameTidyScores DataFrame of Twitter Data that has been tidy'd and scored.
#' @param HT_Topic If using hashtag data select: "hashtag". If using topic data select: "topic".
#'
#' @import ggplot2
#' @importFrom dplyr quo
#'
#' @return A ggplot box plot.
#'
#' @examples
#' \dontrun{
#' library(SAoTD)
#' data <- raw_tweets
#' tidy_data <- Tidy(DataFrame = data)
#' score_data <- Scores(DataFrameTidy = tidy_data,
#' HT_Topic = "hashtag")
#' ht_box <- BoxPlot(DataFrameTidyScores = score_data,
#' HT_Topic = "hashtag")
#' ht_box
#'
#' data <- raw_tweets
#' TD_Topics <- SAoTD::Tweet.Topics(DataFrame = data,
#' clusters = 5,
#' method = "Gibbs",
#' set_seed = 1234,
#' num_terms = 10)
#'
#' TD_Topics <- TD_Topics %>%
#' dplyr::mutate(Topic = stringr::str_replace_all(Topic, "^1$", "travel")) %>%
#' dplyr::mutate(Topic = stringr::str_replace_all(Topic, "^2$", "recreation")) %>%
#' dplyr::mutate(Topic = stringr::str_replace_all(Topic, "^3$", "hiring")) %>%
#' dplyr::mutate(Topic = stringr::str_replace_all(Topic, "^4$", "cats")) %>%
#' dplyr::mutate(Topic = stringr::str_replace_all(Topic, "^5$", "random"))
#'
#' tidy_data <- Tidy(DataFrame = TD_Topics)
#' score_data <- Scores(DataFrameTidy = tidy_data,
#' HT_Topic = "topic")
#' topic_box <- BoxPlot(DataFrameTidyScores = score_data,
#' HT_Topic = "topic")
#' topic_box
#' }
#' @export
BoxPlot <- function(DataFrameTidyScores, HT_Topic) {
  # Bind column names used via non-standard evaluation so R CMD check does
  # not flag them as undefined global variables.
  hashtag <- dplyr::quo(hashtag)
  # BUG FIX: this binding previously read dplyr::quo(hashtag), masking the
  # TweetSentimentScore column name with the wrong quosure.
  TweetSentimentScore <- dplyr::quo(TweetSentimentScore)
  Topic <- dplyr::quo(Topic)
  if (HT_Topic == "hashtag") {
    # One horizontal box per hashtag, showing the score distribution.
    TD_HT_BoxPlot <- DataFrameTidyScores %>%
      ggplot2::ggplot(ggplot2::aes(hashtag, TweetSentimentScore)) +
      ggplot2::geom_boxplot() +
      ggplot2::theme(legend.position = "none") +
      ggplot2::ggtitle("Sentiment Scores Across each #Hashtag") +
      ggplot2::xlab('#Hashtag') +
      ggplot2::ylab('Sentiment') +
      ggplot2::coord_flip()
    return(TD_HT_BoxPlot)
  } else {
    # One horizontal box per LDA topic.
    TD_Topic_BoxPlot <- DataFrameTidyScores %>%
      ggplot2::ggplot(ggplot2::aes(Topic, TweetSentimentScore)) +
      ggplot2::geom_boxplot() +
      ggplot2::theme(legend.position = "none") +
      ggplot2::ggtitle("Sentiment Scores Across each Topic") +
      ggplot2::xlab('Topic') +
      ggplot2::ylab('Sentiment') +
      ggplot2::coord_flip()
    return(TD_Topic_BoxPlot)
  }
}
#' @title Twitter Data Violin Plot
#'
#' @description Displays the distribution scores of either hashtag or topic Twitter data.
#'
#' @param DataFrameTidyScores DataFrame of Twitter Data that has been tidy'd and scored.
#' @param HT_Topic If using hashtag data select: "hashtag". If using topic data select: "topic".
#'
#' @import ggplot2
#' @importFrom dplyr quo
#' @importFrom stats median
#'
#' @return A ggplot violin plot.
#'
#' @examples
#' \dontrun{
#' library(SAoTD)
#' data <- raw_tweets
#' tidy_data <- Tidy(DataFrame = data)
#' score_data <- Scores(DataFrameTidy = tidy_data,
#' HT_Topic = "hashtag")
#' ht_violin <- ViolinPlot(DataFrameTidyScores = score_data,
#' HT_Topic = "hashtag")
#' ht_violin
#'
#' data <- raw_tweets
#' TD_Topics <- SAoTD::Tweet.Topics(DataFrame = data,
#' clusters = 5,
#' method = "Gibbs",
#' set_seed = 1234,
#' num_terms = 10)
#'
#' TD_Topics <- TD_Topics %>%
#' dplyr::mutate(Topic = stringr::str_replace_all(Topic, "^1$", "travel")) %>%
#' dplyr::mutate(Topic = stringr::str_replace_all(Topic, "^2$", "recreation")) %>%
#' dplyr::mutate(Topic = stringr::str_replace_all(Topic, "^3$", "hiring")) %>%
#' dplyr::mutate(Topic = stringr::str_replace_all(Topic, "^4$", "cats")) %>%
#' dplyr::mutate(Topic = stringr::str_replace_all(Topic, "^5$", "random"))
#'
#' tidy_data <- Tidy(DataFrame = TD_Topics)
#' score_data <- Scores(DataFrameTidy = tidy_data,
#' HT_Topic = "topic")
#' topic_violin <- ViolinPlot(DataFrameTidyScores = score_data,
#' HT_Topic = "topic")
#' topic_violin
#' }
#' @export
ViolinPlot <- function(DataFrameTidyScores, HT_Topic) {
  # Bind column names used via non-standard evaluation so R CMD check does
  # not flag them as undefined global variables.
  hashtag <- dplyr::quo(hashtag)
  TweetSentimentScore <- dplyr::quo(TweetSentimentScore)
  Topic <- dplyr::quo(Topic)
  # The two modes differ only in the grouping aesthetic and the labels;
  # choose those per mode, then add the shared violin layers once.
  if (HT_Topic == "hashtag") {
    base_plot <- ggplot2::ggplot(DataFrameTidyScores,
                                 ggplot2::aes(hashtag, TweetSentimentScore))
    plot_title <- "Sentiment Scores Across each #Hashtag"
    x_label <- '#Hashtag'
  } else {
    base_plot <- ggplot2::ggplot(DataFrameTidyScores,
                                 ggplot2::aes(Topic, TweetSentimentScore))
    plot_title <- "Sentiment Scores Across each Topic"
    x_label <- 'Topic'
  }
  # Diamond marker (shape 23) at the median of each violin.
  violin_plot <- base_plot +
    ggplot2::geom_violin(scale = "area") +
    ggplot2::stat_summary(fun.y = stats::median, geom = "point", shape = 23, size = 2) +
    ggplot2::ggtitle(plot_title) +
    ggplot2::xlab(x_label) +
    ggplot2::ylab('Sentiment') +
    ggplot2::coord_flip()
  return(violin_plot)
}
#' @title Twitter Data Timeseries Plot.
#'
#' @description Displays the Twitter data sentiment scores through time. The sentiment scores by hashtag or topic are summed per day and plotted to show the change in sentiment through time.
#'
#' @param DataFrameTidyScores DataFrame of Twitter Data that has been tidy'd and scored.
#' @param HT_Topic If using hashtag data select: "hashtag". If using topic data select: "topic".
#'
#' @importFrom dplyr summarize group_by quo
#' @import ggplot2
#'
#' @return A ggplot plot.
#'
#' @examples
#' \dontrun{
#' library(SAoTD)
#' data <- raw_tweets
#' tidy_data <- Tidy(DataFrame = data)
#' score_data <- Scores(DataFrameTidy = tidy_data,
#' HT_Topic = "hashtag")
#' ht_time <- TimeScale(DataFrameTidyScores = score_data,
#' HT_Topic = "hashtag")
#' ht_time
#'
#' data <- raw_tweets
#' TD_Topics <- SAoTD::Tweet.Topics(DataFrame = data,
#' clusters = 5,
#' method = "Gibbs",
#' set_seed = 1234,
#' num_terms = 10)
#'
#' TD_Topics <- TD_Topics %>%
#' dplyr::mutate(Topic = stringr::str_replace_all(Topic, "^1$", "travel")) %>%
#' dplyr::mutate(Topic = stringr::str_replace_all(Topic, "^2$", "recreation")) %>%
#' dplyr::mutate(Topic = stringr::str_replace_all(Topic, "^3$", "hiring")) %>%
#' dplyr::mutate(Topic = stringr::str_replace_all(Topic, "^4$", "cats")) %>%
#' dplyr::mutate(Topic = stringr::str_replace_all(Topic, "^5$", "random"))
#'
#' tidy_data <- Tidy(DataFrame = TD_Topics)
#' score_data <- Scores(DataFrameTidy = tidy_data,
#' HT_Topic = "topic")
#' topic_time <- TimeScale(DataFrameTidyScores = score_data,
#' HT_Topic = "topic")
#' topic_time
#' }
#' @export
TimeScale <- function(DataFrameTidyScores, HT_Topic) {
  # Bind column names used via non-standard evaluation so R CMD check does
  # not flag them as undefined global variables.
  hashtag <- dplyr::quo(hashtag)
  TweetSentimentScore <- dplyr::quo(TweetSentimentScore)
  DayScore <- dplyr::quo(DayScore)
  Topic <- dplyr::quo(Topic)
  if(HT_Topic == "hashtag") {
    # Sum the per-tweet sentiment scores per hashtag per day, then plot one
    # time-series panel per hashtag (free y-scales, zero reference line).
    # Note: `date` is the column added by Scores(); summarise() leaves the
    # result grouped by hashtag, which is harmless for plotting.
    TD_HT_TimeScale <- DataFrameTidyScores %>%
      dplyr::group_by(hashtag, date) %>%
      dplyr::summarise(DayScore = sum(TweetSentimentScore)) %>%
      ggplot2::ggplot(ggplot2::aes(x = factor(date), y = DayScore, colour = hashtag)) +
      ggplot2::geom_point() +
      ggplot2::geom_path(ggplot2::aes(group=1)) +
      ggplot2::geom_hline(yintercept = 0, color = "black") +
      ggplot2::facet_wrap(~hashtag, ncol = 2, scales = "free_y") +
      ggplot2::theme(legend.position = "none") +
      ggplot2::ggtitle("Sentiment Scores Across all #Hashtags") +
      ggplot2::xlab('Day') +
      ggplot2::ylab('Daily Sentiment Score') +
      # NOTE(review): element_text() is unqualified; it resolves only because
      # the roxygen header uses @import ggplot2 -- confirm before changing.
      ggplot2::theme(axis.text.x = element_text(angle = 45, hjust = 1))
    return(TD_HT_TimeScale)
  } else {
    # Same plot, grouped and faceted by LDA topic instead of hashtag.
    TD_Topic_TimeScale <- DataFrameTidyScores %>%
      dplyr::group_by(Topic, date) %>%
      dplyr::summarise(DayScore = sum(TweetSentimentScore)) %>%
      ggplot2::ggplot(ggplot2::aes(x = factor(date), y = DayScore, colour = Topic)) +
      ggplot2::geom_point() +
      ggplot2::geom_path(ggplot2::aes(group=1)) +
      ggplot2::geom_hline(yintercept = 0, color = "black") +
      ggplot2::facet_wrap(~Topic, ncol = 2, scales = "free_y") +
      ggplot2::theme(legend.position = "none") +
      ggplot2::ggtitle("Sentiment Scores Across all Topics") +
      ggplot2::xlab('Day') +
      ggplot2::ylab('Daily Sentiment Score') +
      ggplot2::theme(axis.text.x = element_text(angle = 45, hjust = 1))
    return(TD_Topic_TimeScale)
  }
}
#' @title Twitter Data Worldmap Plot.
#'
#' @description Displays the location of a tweet across the globe by hashtag or topic.
#'
#' @param DataFrame DataFrame of Twitter Data that has been tidy'd and scored.
#' @param HT_Topic If using hashtag data select: "hashtag". If using topic data select: "topic".
#'
#' @importFrom ggplot2 ggplot map_data
#' @importFrom dplyr quo
#'
#' @return A ggplot plot.
#'
#' @examples
#' \dontrun{
#' library(SAoTD)
#' data <- raw_tweets
#' tidy_data <- Tidy(DataFrame = data)
#' ht_map <- WorldMap(DataFrame = tidy_data,
#' HT_Topic = "hashtag")
#' ht_map
#'
#' data <- raw_tweets
#' TD_Topics <- SAoTD::Tweet.Topics(DataFrame = data,
#' clusters = 5,
#' method = "Gibbs",
#' set_seed = 1234,
#' num_terms = 10)
#'
#' TD_Topics <- TD_Topics %>%
#' dplyr::mutate(Topic = stringr::str_replace_all(Topic, "^1$", "travel")) %>%
#' dplyr::mutate(Topic = stringr::str_replace_all(Topic, "^2$", "recreation")) %>%
#' dplyr::mutate(Topic = stringr::str_replace_all(Topic, "^3$", "hiring")) %>%
#' dplyr::mutate(Topic = stringr::str_replace_all(Topic, "^4$", "cats")) %>%
#' dplyr::mutate(Topic = stringr::str_replace_all(Topic, "^5$", "random"))
#'
#' tidy_data <- Tidy(DataFrame = TD_Topics)
#' topic_map <- WorldMap(DataFrame = tidy_data,
#' HT_Topic = "topic")
#' topic_map
#' }
#' @export
WorldMap <- function(DataFrame, HT_Topic) {
  # Bind column names used via non-standard evaluation so R CMD check does
  # not flag them as undefined global variables.
  long <- dplyr::quo(long)
  lat <- dplyr::quo(lat)
  group <- dplyr::quo(group)
  longitude <- dplyr::quo(longitude)
  latitude <- dplyr::quo(latitude)
  hashtag <- dplyr::quo(hashtag)
  Topic <- dplyr::quo(Topic)
  if (HT_Topic == "hashtag") {
    # World basemap with one jittered point per geo-tagged tweet, colored
    # by hashtag.
    TD_HT_WorldMap <- ggplot2::map_data("world") %>%
      ggplot2::ggplot() +
      ggplot2::geom_polygon(ggplot2::aes(x = long, y = lat, group = group), colour = "black", fill = "white") +
      ggplot2::geom_jitter(data = DataFrame,
                           ggplot2::aes(x = as.numeric(longitude),
                                        y = as.numeric(latitude),
                                        colour = hashtag)) +
      ggplot2::ggtitle("World Map of Tweets") +
      ggplot2::theme(legend.position = "bottom") +
      # BUG FIX: guide_legend() was unqualified, but the roxygen header only
      # imports ggplot() and map_data() from ggplot2, so the bare name is not
      # available in the package namespace at run time.
      ggplot2::scale_fill_continuous(guide = ggplot2::guide_legend(title = NULL)) +
      ggplot2::coord_quickmap()
    return(TD_HT_WorldMap)
  } else {
    # Same map, colored by LDA topic.
    TD_Topic_WorldMap <- ggplot2::map_data("world") %>%
      ggplot2::ggplot() +
      ggplot2::geom_polygon(ggplot2::aes(x = long, y = lat, group = group), colour = "black", fill = "white") +
      ggplot2::geom_jitter(data = DataFrame,
                           ggplot2::aes(x = as.numeric(longitude),
                                        y = as.numeric(latitude),
                                        colour = Topic)) +
      ggplot2::ggtitle("World Map of Tweets") +
      ggplot2::theme(legend.position = "bottom") +
      ggplot2::scale_fill_continuous(guide = ggplot2::guide_legend(title = NULL)) +
      ggplot2::coord_quickmap()
    return(TD_Topic_WorldMap)
  }
}
| /R/Functions.R | no_license | whaleshark16/SAoTD | R | false | false | 50,941 | r |
# Acquire -----------------------------------------------------------------
#' @title Acquire Twitter Tweets
#'
#' @description Function will enable a user to access the Twitter API through the
#' [Twitter Developers Account](https://dev.twitter.com/) site.
#' Once a user has a Twitter developers account and has received their individual consumer key,
#' consumer secret key, access token, and access secret key, they can
#' acquire tweets based on a list of hashtags and a requested number of entries per hashtag.
#' @param consumer_key Twitter Application management consumer key.
#' @param consumer_secret Twitter Application management consumer secret key.
#' @param access_token Twitter Application management access token.
#' @param access_secret Twitter Application management access secret key.
#' @param HT A single hashtag or a list of hashtags the user has specified.
#' @param num_tweets Number of tweets to be acquired per each hashtag.
#' @param file_name User desired output .RData file name.
#' @param distinct Logical. If distinct = TRUE, the function removes multiple tweets that originate from the same twitter id at the exact same time.
#'
#' @importFrom twitteR setup_twitter_oauth twListToDF searchTwitter
#' @importFrom dplyr mutate distinct quo
#' @importFrom purrr map_df
#'
#' @return A DataFrame.
#'
#' @examples
#' \dontrun{
#' consumer_key <- "XXXXXXXXXXXXXXXXXXXXXXXXX"
#' consumer_secret <- "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
#' access_token <- "XXXXXXXXXXXXXXXXXX-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
#' access_secret <- "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
#'
#' hashtags <- c("#job", "#Friday", "#fail", "#icecream", "#random", "#kitten", "#airline")
#'
#' Acquire(consumer_key = consumer_key,
#' consumer_secret = consumer_secret,
#' access_token = access_token,
#' access_secret = access_secret,
#' HT = hashtags,
#' num_tweets = 10,
#' file_name = "test_tweets.RData",
#' distinct = TRUE)
#'
#' load("test_tweets.RData")
#' }
#'
#' @export
Acquire <- function(consumer_key, consumer_secret, access_token, access_secret, HT, num_tweets, file_name, distinct = TRUE) {
  # Cache the OAuth token between sessions (httr prompt suppression).
  options(httr_oauth_cache = TRUE)
  # Bind column names used via non-standard evaluation so R CMD check does
  # not flag them as undefined global variables.
  screenName <- dplyr::quo(screenName)
  created <- dplyr::quo(created)
  key <- dplyr::quo(key)
  twitteR::setup_twitter_oauth(consumer_key,
                               consumer_secret,
                               access_token,
                               access_secret)
  # One search per hashtag; each result is tagged with the hashtag name
  # (leading "#" stripped).
  twitter_data <- list()
  for (i in HT) {
    twitter_data[[i]] <- twitteR::twListToDF(twitteR::searchTwitter(i,
                                                                    n = num_tweets,
                                                                    lang = "en")) %>%
      dplyr::mutate(hashtag = substr(i, 2, nchar(i)))
  }
  # `key` uniquely identifies a tweet as screen name + timestamp.
  raw_tweets <- purrr::map_df(twitter_data, rbind) %>%
    dplyr::mutate(key = paste(screenName, created))
  # BUG FIX: this used to be dplyr::distinct(key, .keep_all = distinct), so
  # distinct = FALSE dropped every column except `key` instead of skipping
  # de-duplication.  Now the flag controls whether de-duplication happens.
  if (isTRUE(distinct)) {
    raw_tweets <- dplyr::distinct(raw_tweets, key, .keep_all = TRUE)
  }
  save(raw_tweets, file = file_name)
}
# Explore -----------------------------------------------------------------
#' @title Tidy Twitter Data
#'
#' @description Function to Tidy Twitter Data and remove all emoticons while maintaining the actual tweet.
#'
#' @param DataFrame DataFrame of Twitter Data.
#'
#' @importFrom dplyr mutate filter quo
#' @importFrom stringr str_replace_all
#' @importFrom tidytext unnest_tokens
#' @importFrom plyr rename
#'
#' @return A Tidy DataFrame.
#'
#' @examples
#' \dontrun{
#' library(SAoTD)
#' data <- raw_tweets
#' tidy_data <- Tidy(DataFrame = data)
#' tidy_data
#' }
#'
#' @export
Tidy <- function(DataFrame) {
  # Bind column names used via non-standard evaluation so R CMD check does
  # not flag them as undefined global variables.
  text <- dplyr::quo(text)
  cleantext <- dplyr::quo(cleantext)
  word <- dplyr::quo(word)
  # (The unused local regex `reg_words` was removed; it was never referenced.)
  # Strip URLs, entities, hashtag marks, retweet markers, punctuation, and
  # emoji; tokenize to one word per row; drop standard stop words; rename
  # the token column to "Token" for the downstream functions.
  TD_Tidy <- DataFrame %>%
    dplyr::mutate(cleantext = stringr::str_replace_all(text, "https://t.co/[A-Za-z\\d]+|http://[A-Za-z\\d]+|&|<|>|RT|https", "")) %>%
    dplyr::mutate(cleantext = stringr::str_replace_all(cleantext, "#", "")) %>%
    dplyr::mutate(cleantext = stringr::str_replace_all(cleantext, "http", "")) %>%
    dplyr::mutate(cleantext = stringr::str_replace_all(cleantext, "RT", "")) %>% # Remove retweet note
    dplyr::mutate(cleantext = stringr::str_replace_all(cleantext, "[:punct:]", "")) %>%
    dplyr::mutate(cleantext = stringr::str_replace_all(cleantext, "[^[:alnum:]///' ]", "")) %>% # Remove Emojis
    tidytext::unnest_tokens(output = word, input = cleantext, token = "words", drop = TRUE) %>%
    dplyr::filter(!word %in% tidytext::stop_words$word) %>%
    plyr::rename(c("word" = "Token"))
  return(TD_Tidy)
}
#' @title Merge Terms
#'
#' @description Function to merge terms within a dataframe and prevent redundancy in the analysis.
#' For example many users may refer to the same entity in multiple different ways:
#' President Trump, The U.S. President, POTUS, Trump, President Donald Trump, Donald Trump, etc.
#' While each entry is different, they all refer to the same individual. Using Merge Terms will allow all be converted into a single term.
#'
#' @param DataFrame DataFrame of Twitter Data.
#' @param term Term selected for merging.
#' @param term_replacement Desired replacement term.
#'
#' @return A Tidy DataFrame.
#'
#' @examples
#' \dontrun{
#' library(SAoTD)
#' data <- raw_tweets
#' data <- Merge.Terms(DataFrame = data,
#' term = "ice cream",
#' term_replacement = "ice_cream")
#' data
#' }
#' @export
Merge.Terms <- function(DataFrame, term, term_replacement){
  # gsub() is vectorised over the whole text column, so the previous
  # row-by-row loop is unnecessary.  This also fixes the 1:length(...) bug
  # that iterated over c(1, 0) when the frame had zero rows, and makes the
  # return value explicit (the old last-line assignment returned invisibly).
  # `term` is treated as a case-insensitive regular expression, exactly as
  # before.
  DataFrame$text <- gsub(pattern = as.character(term),
                         replacement = as.character(term_replacement),
                         x = DataFrame$text,
                         ignore.case = TRUE)
  return(DataFrame)
}
#' @title Twitter Uni-Grams
#'
#' @description Determines and displays the text Uni-Grams within the Twitter data in sequence from the most used to the least used. A Uni-Gram is a single word.
#'
#' @param DataFrame DataFrame of Twitter Data.
#'
#' @importFrom dplyr count mutate filter quo
#' @importFrom stringr str_replace_all
#' @importFrom tidytext unnest_tokens
#'
#' @return A tribble.
#'
#' @examples
#' \dontrun{
#' library(SAoTD)
#' data <- raw_tweets
#' TD_Unigram <- Unigram(DataFrame = data)
#' TD_Unigram
#' }
#' @export
Unigram <- function(DataFrame){
  # Bind column names used via non-standard evaluation so R CMD check does
  # not flag them as undefined global variables.
  text <- dplyr::quo(text)
  word <- dplyr::quo(word)
  TD_Unigram <- DataFrame %>%
    dplyr::mutate(text = stringr::str_replace_all(text, "RT", "")) %>% # Remove retweet note
    dplyr::mutate(text = stringr::str_replace_all(text, "&", "")) %>% # Remove Accelerated Mobile Pages (AMP) note
    dplyr::mutate(text = stringr::str_replace_all(text, "https://t.co/[A-Za-z\\d]+|http://[A-Za-z\\d]+|&|<|>|RT|https", "")) %>%
    dplyr::mutate(text = stringr::str_replace_all(text, "#", "")) %>%
    dplyr::mutate(text = stringr::str_replace_all(text, "[:punct:]", "")) %>%
    dplyr::mutate(text = stringr::str_replace_all(text, "[^[:alnum:]///' ]", "")) %>% # Remove Emojis
    tidytext::unnest_tokens("word", "text") %>%
    # BUG FIX: the filter previously tested the literal string "word"
    # against the stop-word list -- a per-call constant -- so stop words were
    # never removed.  The bare column name is required for the NSE filter.
    # NOTE(review): '[0-9]+' is compared as a literal string here, not a
    # regex; tokens consisting of digits are NOT removed -- confirm intent.
    dplyr::filter(!word %in% c(tidytext::stop_words$word, '[0-9]+')) %>%
    dplyr::count(word, sort = TRUE)
  return(TD_Unigram)
}
#' @title Twitter Bi-Grams
#'
#' @description Determines and displays the text Bi-Grams within the Twitter data in sequence from the most used to the least used. A Bi-Gram is a combination of two consecutive words.
#'
#' @param DataFrame DataFrame of Twitter Data.
#'
#' @importFrom dplyr count mutate filter quo
#' @importFrom stringr str_replace_all
#' @importFrom tidyr separate
#' @importFrom tidytext unnest_tokens
#'
#' @return A tribble.
#'
#' @examples
#' \dontrun{
#' library(SAoTD)
#' data <- raw_tweets
#' TD_Bigram <- Bigram(DataFrame = data)
#' TD_Bigram
#' }
#' @export
Bigram <- function(DataFrame){
  # Bind column names used via non-standard evaluation so R CMD check does
  # not flag them as undefined global variables.
  text <- dplyr::quo(text)
  word1 <- dplyr::quo(word1)
  word2 <- dplyr::quo(word2)
  TD_Bigram <- DataFrame %>%
    dplyr::mutate(text = stringr::str_replace_all(text, "https://t.co/[A-Za-z\\d]+|http://[A-Za-z\\d]+|&|<|>|RT|https", "")) %>%
    dplyr::mutate(text = stringr::str_replace_all(text, "RT", "")) %>% # Remove retweet note
    dplyr::mutate(text = stringr::str_replace_all(text, "&", "")) %>% # Remove Accelerated Mobile Pages (AMP) note
    dplyr::mutate(text = stringr::str_replace_all(text, "#", "")) %>%
    dplyr::mutate(text = stringr::str_replace_all(text, "[:punct:]", "")) %>%
    dplyr::mutate(text = stringr::str_replace_all(text, "[^[:alnum:]///' ]", "")) %>% # Remove Emojis
    tidytext::unnest_tokens("bigram", "text", token = "ngrams", n = 2) %>%
    tidyr::separate("bigram", c("word1", "word2"), sep = " ") %>%
    # BUG FIX: the filters previously tested the literal strings "word1" /
    # "word2" against the stop-word list (constants), so stop words were
    # never removed.  Bare column names are required for the NSE filters.
    # NOTE(review): '[0-9]+' is compared as a literal string, not a regex.
    dplyr::filter(!word1 %in% c(tidytext::stop_words$word, '[0-9]+')) %>%
    dplyr::filter(!word2 %in% c(tidytext::stop_words$word, '[0-9]+')) %>%
    dplyr::count(word1, word2, sort = TRUE)
  return(TD_Bigram)
}
#' @title Twitter Tri-Grams
#'
#' @description Determines and displays the text Tri-Grams within the Twitter data in sequence from the most used to the least used. A Tri-Gram is a combination of three consecutive words.
#'
#' @param DataFrame DataFrame of Twitter Data.
#'
#' @importFrom dplyr count mutate filter quo
#' @importFrom stringr str_replace_all
#' @importFrom tidyr separate
#' @importFrom tidytext unnest_tokens
#'
#' @return A tribble.
#'
#' @examples
#' \dontrun{
#' library(SAoTD)
#' data <- raw_tweets
#' TD_Trigram <- Trigram(DataFrame = data)
#' TD_Trigram
#' }
#' @export
Trigram <- function(DataFrame) {
  # Bind column names used via non-standard evaluation so R CMD check does
  # not flag them as undefined global variables.
  text <- dplyr::quo(text)
  word1 <- dplyr::quo(word1)
  word2 <- dplyr::quo(word2)
  word3 <- dplyr::quo(word3)
  TD_Trigram <- DataFrame %>%
    dplyr::mutate(text = stringr::str_replace_all(text, "RT", "")) %>% # Remove retweet note
    dplyr::mutate(text = stringr::str_replace_all(text, "&", "")) %>% # Remove Accelerated Mobile Pages (AMP) note
    dplyr::mutate(text = stringr::str_replace_all(text, "https://t.co/[A-Za-z\\d]+|http://[A-Za-z\\d]+|&|<|>|RT|https", "")) %>%
    dplyr::mutate(text = stringr::str_replace_all(text, "#", "")) %>%
    dplyr::mutate(text = stringr::str_replace_all(text, "[:punct:]", "")) %>%
    dplyr::mutate(text = stringr::str_replace_all(text, "[^[:alnum:]///' ]", "")) %>% # Remove Emojis
    tidytext::unnest_tokens("trigram", "text", token = "ngrams", n=3) %>%
    tidyr::separate("trigram", c("word1", "word2", "word3"), sep = " ") %>%
    # BUG FIX: the filters previously tested the literal strings "word1" /
    # "word2" / "word3" against the stop-word list (constants), so stop
    # words were never removed.  Bare column names are required here.
    # NOTE(review): '[0-9]+' is compared as a literal string, not a regex.
    dplyr::filter(!word1 %in% c(tidytext::stop_words$word, '[0-9]+')) %>%
    dplyr::filter(!word2 %in% c(tidytext::stop_words$word, '[0-9]+')) %>%
    dplyr::filter(!word3 %in% c(tidytext::stop_words$word, '[0-9]+')) %>%
    dplyr::count(word1, word2, word3, sort = TRUE)
  return(TD_Trigram)
}
#' @title Twitter Bi-Gram Network
#'
#' @description Displays the Bi-Gram Network. Bi-Gram networks builds on computed Bi-Grams. Bi-Gram networks serve as a visualization tool that displays the relationships between the words simultaneously as opposed to a tabular display of Bi-Gram words.
#'
#' @param BiGramDataFrame DataFrame of Bi-Grams.
#' @param number The minimum desired number of Bi-Gram occurrences to be displayed (number = 300, would display all Bi-Grams that have at least 300 instances.)
#' @param layout Desired layout from the `ggraph` package. Acceptable layouts: "star", "circle", "gem", "dh", "graphopt", "grid", "mds", "randomly", "fr", "kk", "drl", "lgl"
#' @param edge_color User desired edge color.
#' @param node_color User desired node color.
#' @param node_size User desired node size.
#' @param set_seed Seed for reproducible results.
#'
#' @importFrom dplyr filter quo
#' @importFrom igraph graph_from_data_frame
#' @importFrom ggraph ggraph geom_edge_link geom_node_point geom_node_text
#' @import ggplot2
#'
#' @return A ggraph plot.
#'
#' @examples
#' \dontrun{
#' library(SAoTD)
#' data <- raw_tweets
#' TD_Bigram <- Bigram(DataFrame = data)
#' TD_Bigram_Network <- Bigram.Network(BiGramDataFrame = TD_Bigram,
#' number = 300,
#' layout = "fr",
#' edge_color = "royalblue",
#' node_color = "black",
#' node_size = 3,
#' set_seed = 1234)
#'
#' TD_Bigram_Network
#' }
#' @export
Bigram.Network <- function(BiGramDataFrame, number = 300, layout = "fr", edge_color = "royalblue", node_color = "black", node_size = 3, set_seed = 1234) {
  # Dummy quosure bindings to silence R CMD check notes; resolved via the data mask.
  n <- dplyr::quo(n)
  name <- dplyr::quo(name)
  # Keep only bi-grams observed more than `number` times and build an igraph object.
  bigram_graph <- BiGramDataFrame %>%
    dplyr::filter(n > number) %>%
    igraph::graph_from_data_frame()
  # Fixed seed so the force-directed layout is reproducible between calls.
  set.seed(set_seed)
  network_plot <- ggraph::ggraph(bigram_graph, layout = layout) +
    ggraph::geom_edge_link(ggplot2::aes(edge_alpha = 1, edge_width = scales::rescale(n, to = c(1, 10))), edge_colour = edge_color, show.legend = TRUE) +
    ggraph::geom_node_point(colour = node_color, size = node_size) +
    ggraph::geom_node_text(ggplot2::aes(label = name), repel = TRUE) +
    ggplot2::ggtitle("Bi-Gram Network") +
    ggplot2::theme_void()
  return(network_plot)
}
#' @title Twitter Word Correlations
#'
#' @description The word correlation displays the mutual relationship between words.
#'
#' @param DataFrameTidy DataFrame of Twitter Data that has been tidy'd.
#' @param number The number of word instances to be included.
#' @param sort Rank order the results from most to least correlated.
#'
#' @importFrom dplyr group_by filter quo
#' @importFrom widyr pairwise_cor
#'
#' @return A tibble
#'
#' @examples
#' \dontrun{
#' library(SAoTD)
#' data <- raw_tweets
#' tidy_data <- Tidy(DataFrame = data)
#' TD_Word_Corr <- Word.Corr(DataFrameTidy = tidy_data,
#' number = 500,
#' sort = TRUE)
#'
#' TD_Word_Corr
#' }
#' @export
Word.Corr <- function(DataFrameTidy, number, sort = TRUE) {
  # Dummy quosure bindings to silence R CMD check notes; resolved via the data mask.
  Token <- dplyr::quo(Token)
  n <- dplyr::quo(n)
  key <- dplyr::quo(key)
  TD_Word_Correlation <- DataFrameTidy %>%
    dplyr::group_by(Token) %>%
    dplyr::filter(n() >= number) %>% # keep tokens occurring at least `number` times
    widyr::pairwise_cor(Token, key, sort = sort)
  # BUG FIX: the original ended on an assignment, so the result was returned
  # invisibly; return explicitly, consistent with the other functions in this file.
  return(TD_Word_Correlation)
}
#' @title Twitter Word Correlations Plot
#'
#' @description The word correlation network displays the mutual relationship between words. The correlation network shows higher correlations with a thicker and darker edge color.
#'
#' @param WordCorr DataFrame of Word Correlations.
#' @param Correlation Minimum level of correlation to be displayed.
#' @param layout Desired layout from the `ggraph` package. Acceptable layouts: "star", "circle", "gem", "dh", "graphopt", "grid", "mds", "randomly", "fr", "kk", "drl", "lgl"
#' @param edge_color User desired edge color.
#' @param node_color User desired node color.
#' @param node_size User desired node size.
#' @param set_seed Seed for reproducible results.
#'
#' @importFrom dplyr filter quo
#' @importFrom igraph graph_from_data_frame
#' @importFrom ggraph ggraph geom_edge_link geom_node_point geom_node_text
#' @import ggplot2
#'
#' @return An igraph plot
#'
#' @examples
#' \dontrun{
#' library(SAoTD)
#' data <- raw_tweets
#' tidy_data <- Tidy(DataFrame = data)
#' TD_Word_Corr <- Word.Corr(DataFrameTidy = tidy_data,
#' number = 500,
#' sort = TRUE)
#' TD_Word_Corr_Network <- Word.Corr.Plot(WordCorr = TD_Word_Corr,
#' Correlation = 0.15,
#' layout = "fr",
#' edge_color = "royalblue",
#' node_color = "black",
#' node_size = 2,
#' set_seed = 1234)
#'
#' TD_Word_Corr_Network
#' }
#' @export
Word.Corr.Plot <- function(WordCorr, Correlation = 0.15, layout = "fr", edge_color = "royalblue", node_color = "black", node_size = 2, set_seed = 1234) {
  # Dummy quosure bindings to silence R CMD check notes; resolved via the data mask.
  correlation <- dplyr::quo(correlation)
  name <- dplyr::quo(name)
  # Fixed seed so the force-directed layout is reproducible between calls.
  set.seed(set_seed)
  WordCorr %>%
    dplyr::filter(correlation > Correlation) %>%
    igraph::graph_from_data_frame() %>%
    ggraph::ggraph(layout = layout) +
    ggraph::geom_edge_link(ggplot2::aes(edge_alpha = correlation, edge_width = correlation), edge_colour = edge_color, show.legend = TRUE) +
    ggraph::geom_node_point(colour = node_color, size = node_size) +
    ggraph::geom_node_text(ggplot2::aes(label = name), repel = TRUE) +
    ggplot2::ggtitle("Word Correlation Network") +
    # CONSISTENCY FIX: namespace-qualify theme_void() like every other ggplot2
    # call in this file instead of relying on ggplot2 being attached.
    ggplot2::theme_void()
}
# Topic Analysis ----------------------------------------------------------
#' @title Number Topics
#'
#' @description Determines the optimal number of Latent topics within a dataframe by tuning the Latent Dirichlet Allocation (LDA) model parameters.
#' Uses the `ldatuning` package and outputs an ldatuning plot.
#'
#' @param DataFrame DataFrame of Twitter Data.
#' @param num_cores The number of CPU cores to processes models simultaneously (2L for dual core processor).
#' @param min_clusters Lower range for the number of clusters.
#' @param max_clusters Upper range for the number of clusters.
#' @param skip Integer; The number of clusters to skip between entries.
#' @param set_seed Seed for reproducible results.
#'
#' @importFrom dplyr mutate group_by count anti_join quo
#' @importFrom stringr str_replace_all
#' @importFrom tidytext unnest_tokens cast_dtm
#' @importFrom ldatuning FindTopicsNumber
#' @importFrom scales rescale
#' @importFrom reshape2 melt
#' @import ggplot2
#'
#' @return A ggplot of the rescaled ldatuning metrics, used to select the number of topics.
#'
#' @examples
#' \dontrun{
#' library(SAoTD)
#' data <- raw_tweets
#' LDA_Topic_Plot <- Number.Topics(DataFrame = data,
#' num_cores = 2L,
#' min_clusters = 2,
#' max_clusters = 12,
#' skip = 2,
#' set_seed = 1234)
#'
#' LDA_Topic_Plot
#' }
#' @export
Number.Topics <- function(DataFrame, num_cores, min_clusters = 2, max_clusters = 12, skip = 2, set_seed = 1234) {
  # Dummy quosure bindings to silence R CMD check "no visible binding" notes;
  # inside the dplyr verbs below these names resolve to columns via the data mask.
  # NOTE(review): `set_seed` is accepted but never used in this function (no
  # set.seed() call and no seed passed to FindTopicsNumber) -- confirm intent.
  text <- dplyr::quo(text)
  key <- dplyr::quo(key)
  word <- dplyr::quo(word)
  n <- dplyr::quo(n)
  # Clean the tweet text, tokenize per tweet `key`, drop stop words, and cast
  # the word counts into a document-term matrix for LDA tuning.
  lda_prep <- DataFrame %>%
    dplyr::mutate(text = base::iconv(DataFrame$text, "latin1", "ASCII", sub="")) %>%
    dplyr::mutate(text = stringr::str_replace_all(text, "#", "")) %>% # Remove hashtag
    dplyr::mutate(text = stringr::str_replace_all(text, "[:punct:]", "")) %>% # Remove punctuation
    dplyr::mutate(text = stringr::str_replace_all(text, "RT", "")) %>% # Remove retweet note
    dplyr::mutate(text = stringr::str_replace_all(text, "&", "")) %>% # Remove Accelerated Mobile Pages (AMP) note
    dplyr::mutate(text = stringr::str_replace_all(text, "https://t.co/[A-Za-z\\d]+|http://[A-Za-z\\d]+|&|<|>|RT|https", "")) %>% # Remove links
    dplyr::group_by(key) %>%
    tidytext::unnest_tokens(word, text) %>%
    dplyr::anti_join(tidytext::stop_words) %>%
    dplyr::count(key, word, sort = TRUE) %>%
    tidytext::cast_dtm(key, word, n) # create DTM
  # Compute the four ldatuning metrics over the candidate topic counts.
  values <- ldatuning::FindTopicsNumber(lda_prep,
                                        topics = seq(from = min_clusters, to = max_clusters, by = skip),
                                        metrics = c("Griffiths2004", "CaoJuan2009", "Arun2010", "Deveaud2014"),
                                        method = "Gibbs",
                                        mc.cores = num_cores,
                                        verbose = TRUE)
  # Plot: rescale each metric to [0, 1] so they share one axis, then melt to
  # long format for ggplot faceting.
  columns <- base::subset(values, select = 2:ncol(values))
  values <- base::data.frame(values["topics"], base::apply(columns, 2, function(column) {scales::rescale(column, to = c(0, 1), from = range(column))}))
  values <- reshape2::melt(values, id.vars = "topics", na.rm = TRUE)
  # Griffiths2004/Deveaud2014 are maximized at the optimum; the others minimized.
  values$group <- values$variable %in% c("Griffiths2004", "Deveaud2014")
  values$group <- base::factor(values$group, levels = c(FALSE, TRUE), labels = c("minimize", "maximize"))
  p <- ggplot2::ggplot(values, aes_string(x = "topics", y = "value", group = "variable"))
  p <- p + geom_line()
  p <- p + geom_point(aes_string(shape = "variable"), size = 3)
  p <- p + guides(size = FALSE, shape = guide_legend(title = "metrics:"))
  p <- p + scale_x_continuous(breaks = values$topics)
  p <- p + labs(x = "number of topics", y = NULL)
  # One facet row per optimization direction (minimize vs maximize).
  p <- p + facet_grid(group ~ .)
  p <- p + theme_bw() %+replace% theme(panel.grid.major.y = element_blank(),
                                       panel.grid.minor.y = element_blank(),
                                       panel.grid.major.x = element_line(colour = "grey70"),
                                       panel.grid.minor.x = element_blank(),
                                       legend.key = element_blank(),
                                       strip.text.y = element_text(angle = 90))
  return(p)
}
#' @title Tweet Topics
#'
#' @description Determines the Latent topics within a dataframe by using Latent Dirichlet Allocation (LDA) model parameters.
#' Uses the `ldatuning` package and outputs an ldatuning plot.
#' Prepares tweet text, creates DTM, conducts LDA, display data terms associated with each topic.
#'
#' @param DataFrame DataFrame of Twitter Data.
#' @param clusters The number of latent clusters.
#' @param method method = "Gibbs"
#' @param set_seed Seed for reproducible results.
#' @param num_terms The desired number of terms to be returned for each topic.
#'
#' @importFrom dplyr mutate group_by anti_join inner_join count select transmute quo
#' @importFrom stringr str_replace_all
#' @importFrom plyr rename
#' @importFrom tidytext cast_dtm
#' @importFrom topicmodels topics terms
#'
#' @return The input DataFrame with an appended Topic column giving the most likely LDA topic per tweet.
#'
#' @examples
#' \dontrun{
#' library(SAoTD)
#' data <- raw_tweets
#' LDA_data <- Tweet.Topics(DataFrame = data,
#' clusters = 8,
#' method = "Gibbs",
#' set_seed = 1234,
#' num_terms = 10)
#'
#' LDA_data
#' }
#' @export
Tweet.Topics <- function(DataFrame, clusters, method = "Gibbs", set_seed = 1234, num_terms = 10) {
  # Dummy quosure bindings to silence R CMD check "no visible binding" notes;
  # inside the dplyr verbs below these names resolve to columns via the data mask.
  text <- dplyr::quo(text)
  key <- dplyr::quo(key)
  word <- dplyr::quo(word)
  n <- dplyr::quo(n)
  # Clean the tweet text, tokenize per tweet `key`, drop stop words, and cast
  # the word counts into a document-term matrix for LDA.
  lda_prep <- DataFrame %>%
    dplyr::mutate(text = base::iconv(DataFrame$text, "latin1", "ASCII", sub="")) %>%
    dplyr::mutate(text = stringr::str_replace_all(text, "#", "")) %>% # Remove hashtag
    dplyr::mutate(text = stringr::str_replace_all(text, "[:punct:]", "")) %>% # Remove punctuation
    dplyr::mutate(text = stringr::str_replace_all(text, "RT", "")) %>% # Remove retweet note
    dplyr::mutate(text = stringr::str_replace_all(text, "&", "")) %>% # Remove Accelerated Mobile Pages (AMP) note
    dplyr::mutate(text = stringr::str_replace_all(text, "https://t.co/[A-Za-z\\d]+|http://[A-Za-z\\d]+|&|<|>|RT|https", "")) %>% # Remove links
    dplyr::group_by(key) %>%
    tidytext::unnest_tokens(word, text) %>%
    dplyr::anti_join(tidytext::stop_words) %>%
    dplyr::count(key, word, sort = TRUE) %>%
    tidytext::cast_dtm(key, word, n)
  # Fit the LDA model (Gibbs sampling by default) with a fixed seed.
  ldaout <- topicmodels::LDA(lda_prep, k = clusters, method = method, control = list(seed = set_seed))
  # CLEANUP: removed unused locals (ldaout_topics, ldaout_terms,
  # topicProbabilities) that duplicated the computations below.
  # Most likely topic per document, and the top `num_terms` terms per topic.
  data.topics <- topicmodels::topics(ldaout, 1)
  data.terms <- as.data.frame(topicmodels::terms(ldaout, num_terms), stringsAsFactors = FALSE)
  print(data.terms) # display the per-topic terms for the analyst
  # Creates a dataframe to store the Lesson Number and the most likely topic
  tweettopics.df <- as.data.frame(data.topics)
  tweettopics.df <- dplyr::transmute(tweettopics.df, LessonId = rownames(tweettopics.df), Topic = data.topics)
  tweettopics.df$ArticleNo <- as.character(tweettopics.df$LessonId)
  # Clean up and rename columns to match previous dataframes
  tweettopics <- tweettopics.df %>%
    dplyr::select(c("ArticleNo", "Topic")) %>%
    plyr::rename(c("ArticleNo" = "key"))
  # Join original twitter data frame with tweet topics
  Tweet.Topics <- dplyr::inner_join(DataFrame, tweettopics, by = "key")
  return(Tweet.Topics)
}
# Sentiment Calculation ---------------------------------------------------
#' @title Score Tidy Twitter Data
#'
#' @description Function to Calculate Sentiment Scores that will account for sentiment by hashtag or topic.
#'
#' @param DataFrameTidy DataFrame of Twitter Data that has been tidy'd.
#' @param HT_Topic If using hashtag data select: "hashtag". If using topic data select: "topic"
#'
#' @importFrom dplyr mutate inner_join group_by count quo
#' @importFrom plyr rename
#' @importFrom tidyr spread
#' @importFrom lubridate as_date
#' @importFrom tidytext get_sentiments
#'
#' @return A Scored DataFrame.
#'
#' @examples
#' \dontrun{
#' library(SAoTD)
#' data <- raw_tweets
#' tidy_data <- Tidy(DataFrame = data)
#' score_data <- Scores(DataFrameTidy = tidy_data,
#' HT_Topic = "hashtag")
#' score_data
#' }
#' @export
Scores <- function(DataFrameTidy, HT_Topic) {
  # Dummy quosure bindings to silence R CMD check "no visible binding" notes;
  # inside the dplyr verbs below these names resolve to columns via the data mask.
  text <- dplyr::quo(text)
  method <- dplyr::quo(method)
  hashtag <- dplyr::quo(hashtag)
  created <- dplyr::quo(created)
  key <- dplyr::quo(key)
  Sentiment <- dplyr::quo(Sentiment)
  n <- dplyr::quo(n)
  positive <- dplyr::quo(positive)
  negative <- dplyr::quo(negative)
  TweetSentimentScore <- dplyr::quo(TweetSentimentScore)
  Topic <- dplyr::quo(Topic)
  #data("Bing")
  # Bing lexicon with columns renamed to match the tidy'd tweet data
  # (Token/Sentiment) so it can be joined directly below.
  Bing <- tidytext::get_sentiments(lexicon = "bing") %>%
    plyr::rename(c("word" = "Token", "sentiment" = "Sentiment"))
  # The two branches are identical apart from grouping on `hashtag` vs `Topic`:
  # join the lexicon, count positive/negative tokens per tweet, spread to wide
  # form, and score each tweet as (positive - negative).
  if(HT_Topic == "hashtag") {
    TD_Hashtag_Scores <- DataFrameTidy %>%
      dplyr::inner_join(Bing, by = "Token") %>%
      dplyr::mutate(method = "Bing") %>%
      dplyr::group_by(text, method, hashtag, created, key, Sentiment) %>%
      dplyr::count(method, hashtag, created, key, Sentiment) %>%
      tidyr::spread(Sentiment, n, fill = 0) %>% # wide: one positive/negative column pair per tweet
      dplyr::mutate(TweetSentimentScore = positive - negative) %>%
      # Categorical label derived from the numeric score (0 -> neutral).
      dplyr::mutate(TweetSentiment = ifelse(TweetSentimentScore == 0, "neutral",
                                            ifelse(TweetSentimentScore > 0, "positive", "negative"))) %>%
      dplyr::mutate(date = lubridate::as_date(created))
    return(TD_Hashtag_Scores)
  } else {
    TD_Topic_Scores <- DataFrameTidy %>%
      dplyr::inner_join(Bing, by = "Token") %>%
      dplyr::mutate(method = "Bing") %>%
      dplyr::group_by(text, method, Topic, created, key, Sentiment) %>%
      dplyr::count(method, Topic, created, key, Sentiment) %>%
      tidyr::spread(Sentiment, n, fill = 0) %>% # wide: one positive/negative column pair per tweet
      dplyr::mutate(TweetSentimentScore = positive - negative) %>%
      # Categorical label derived from the numeric score (0 -> neutral).
      dplyr::mutate(TweetSentiment = ifelse(TweetSentimentScore == 0, "neutral",
                                            ifelse(TweetSentimentScore > 0, "positive", "negative"))) %>%
      dplyr::mutate(date = lubridate::as_date(created))
    return(TD_Topic_Scores)
  }
}
#' @title Twitter Positive and Negative Words
#'
#' @description Determines and displays the most positive and negative words within the twitter data.
#'
#' @param DataFrameTidy DataFrame of Twitter Data that has been tidy'd.
#' @param num_words Desired number of words to be returned.
#' @param filterword Word or words to be removed
#'
#' @importFrom dplyr mutate inner_join group_by count filter ungroup top_n quo
#' @importFrom plyr rename
#' @importFrom tidytext get_sentiments
#' @importFrom stats reorder
#' @import ggplot2
#'
#' @return A ggplot
#'
#' @examples
#' \dontrun{
#' library(SAoTD)
#' data <- raw_tweets
#' tidy_data <- Tidy(DataFrame = data)
#' posneg <- PosNeg.Words(DataFrameTidy = tidy_data,
#'                        num_words = 10)
#' posneg
#'
#' data <- raw_tweets
#' tidy_data <- Tidy(DataFrame = data)
#' posneg <- PosNeg.Words(DataFrameTidy = tidy_data,
#'                        num_words = 10,
#' filterword = "fail")
#' posneg
#'
#' data <- raw_tweets
#' tidy_data <- Tidy(DataFrame = data)
#' posneg <- PosNeg.Words(DataFrameTidy = tidy_data,
#'                        num_words = 10,
#' filterword = c("fail", "urgent"))
#' posneg
#' }
#' @export
PosNeg.Words <- function(DataFrameTidy, num_words, filterword = NULL) {
  # Dummy quosure bindings to silence R CMD check notes; resolved via the data mask.
  Token <- dplyr::quo(Token)
  Sentiment <- dplyr::quo(Sentiment)
  n <- dplyr::quo(n)
  # Bing lexicon with columns renamed to match the tidy'd tweet data.
  bing_lexicon <- tidytext::get_sentiments(lexicon = "bing") %>%
    plyr::rename(c("word" = "Token", "sentiment" = "Sentiment"))
  # Count sentiment-bearing tokens, dropping any user-supplied filter words.
  word_counts <- DataFrameTidy %>%
    dplyr::inner_join(bing_lexicon, by = "Token") %>%
    dplyr::filter(!(Token %in% filterword)) %>%
    dplyr::count(Token, Sentiment) %>%
    dplyr::ungroup()
  # Keep the top `num_words` per sentiment and order tokens by count for plotting.
  top_words <- word_counts %>%
    dplyr::group_by(Sentiment) %>%
    dplyr::top_n(num_words, n) %>%
    dplyr::ungroup() %>%
    dplyr::mutate(Token = stats::reorder(Token, n))
  TD_PosNeg_Words <- ggplot2::ggplot(top_words, ggplot2::aes(Token, n, fill = Sentiment)) +
    ggplot2::geom_col(show.legend = FALSE) +
    ggplot2::facet_wrap(~Sentiment, scales = "free_y") +
    ggplot2::labs(y = "Count",
                  x = NULL) +
    ggplot2::ggtitle('Most common positive and negative words utilizing the Bing Lexicon') +
    ggplot2::coord_flip()
  return(TD_PosNeg_Words)
}
#' @title Twitter Data Minimum Scores
#'
#' @description Determines the minimum scores for either the entire dataset or the minimum scores associated with a hashtag or topic analysis.
#'
#' @param DataFrameTidyScores DataFrame of Twitter Data that has been tidy'd and scored.
#' @param HT_Topic If using hashtag data select: "hashtag". If using topic data select: "topic".
#' @param HT_Topic_Selection The hashtag or topic to be investigated. NULL will find the minimum across the entire dataframe.
#'
#' @importFrom dplyr arrange filter quo
#' @importFrom plyr desc
#' @importFrom utils head
#'
#' @return A Tidy DataFrame.
#'
#' @examples
#' \dontrun{
#' library(SAoTD)
#' data <- raw_tweets
#' tidy_data <- Tidy(DataFrame = data)
#' score_data <- Scores(DataFrameTidy = tidy_data,
#' HT_Topic = "hashtag")
#' min_scores <- Min.Scores(DataFrameTidyScores = score_data,
#' HT_Topic = "hashtag")
#'
#' data <- raw_tweets
#' tidy_data <- Tidy(DataFrame = data)
#' score_data <- Scores(DataFrameTidy = tidy_data,
#' HT_Topic = "hashtag")
#' min_scores <- Min.Scores(DataFrameTidyScores = score_data,
#' HT_Topic = "hashtag",
#' HT_Topic_Selection = "icecream")
#' }
#' @export
Min.Scores <- function(DataFrameTidyScores, HT_Topic, HT_Topic_Selection = NULL) {
  # Dummy quosure bindings to silence R CMD check notes; resolved via the data mask.
  hashtag <- dplyr::quo(hashtag)
  Topic <- dplyr::quo(Topic)
  TweetSentimentScore <- dplyr::quo(TweetSentimentScore)
  # REFACTOR: the original had four near-identical branches (differing only in
  # the optional filter) and used vectorized `&` on scalar `if` conditions;
  # consolidated into one pipeline with an optional filter step.
  scores <- DataFrameTidyScores
  if (!is.null(HT_Topic_Selection)) {
    if (HT_Topic == "hashtag") {
      scores <- dplyr::filter(scores, hashtag == HT_Topic_Selection)
    } else {
      scores <- dplyr::filter(scores, Topic == HT_Topic_Selection)
    }
  }
  # Ascending sort puts the most negative tweets first; head() keeps the top 6,
  # matching the original behavior in every branch.
  TD_Min_Scores <- scores %>%
    dplyr::arrange((TweetSentimentScore)) %>%
    utils::head()
  return(TD_Min_Scores)
}
#' @title Twitter Data Maximum Scores
#'
#' @description Determines the Maximum scores for either the entire dataset or the Maximum scores associated with a hashtag or topic analysis.
#'
#' @param DataFrameTidyScores DataFrame of Twitter Data that has been tidy'd and scored.
#' @param HT_Topic If using hashtag data select: "hashtag". If using topic data select: "topic".
#' @param HT_Topic_Selection The hashtag or topic to be investigated. NULL will find the maximum across the entire dataframe.
#'
#' @importFrom dplyr arrange filter quo
#' @importFrom plyr desc
#' @importFrom utils head
#'
#' @return A Tidy DataFrame.
#'
#' @examples
#' \dontrun{
#' library(SAoTD)
#' data <- raw_tweets
#' tidy_data <- Tidy(DataFrame = data)
#' score_data <- Scores(DataFrameTidy = tidy_data,
#' HT_Topic = "hashtag")
#' max_scores <- Max.Scores(DataFrameTidyScores = score_data,
#'                          HT_Topic = "hashtag")
#'
#' data <- raw_tweets
#' tidy_data <- Tidy(DataFrame = data)
#' score_data <- Scores(DataFrameTidy = tidy_data,
#' HT_Topic = "hashtag")
#' max_scores <- Max.Scores(DataFrameTidyScores = score_data,
#'                          HT_Topic = "hashtag",
#'                          HT_Topic_Selection = "icecream")
#' }
#' @export
Max.Scores <- function(DataFrameTidyScores, HT_Topic, HT_Topic_Selection = NULL) {
  # Dummy quosure bindings to silence R CMD check notes; resolved via the data mask.
  hashtag <- dplyr::quo(hashtag)
  Topic <- dplyr::quo(Topic)
  TweetSentimentScore <- dplyr::quo(TweetSentimentScore)
  # REFACTOR: the original had four near-identical branches (differing only in
  # the optional filter) and used vectorized `&` on scalar `if` conditions;
  # consolidated into one pipeline with an optional filter step.
  scores <- DataFrameTidyScores
  if (!is.null(HT_Topic_Selection)) {
    if (HT_Topic == "hashtag") {
      scores <- dplyr::filter(scores, hashtag == HT_Topic_Selection)
    } else {
      scores <- dplyr::filter(scores, Topic == HT_Topic_Selection)
    }
  }
  # Descending sort puts the most positive tweets first; head() keeps the top 6,
  # matching the original behavior in every branch.
  TD_Max_Scores <- scores %>%
    dplyr::arrange(plyr::desc(TweetSentimentScore)) %>%
    utils::head()
  return(TD_Max_Scores)
}
# Visualization -----------------------------------------------------------
#' @title Twitter Corpus Distribution
#'
#' @description Determines the scores distribution for the entire Twitter data corpus.
#'
#' @param DataFrameTidyScores DataFrame of Twitter Data that has been tidy'd and scored.
#' @param color The user selected color to highlight the bins.
#' @param fill The interior color of the bins.
#'
#' @import ggplot2
#' @importFrom dplyr quo
#'
#' @return A ggplot.
#'
#' @examples
#' \dontrun{
#' library(SAoTD)
#' data <- raw_tweets
#' tidy_data <- Tidy(DataFrame = data)
#' score_data <- Scores(DataFrameTidy = tidy_data,
#' HT_Topic = "hashtag")
#' Corp_Dist <- Corpus.Distribution(DataFrameTidyScores = score_data,
#' color = "black",
#' fill = "white")
#' Corp_Dist
#' }
#' @export
Corpus.Distribution <- function(DataFrameTidyScores, color = "black", fill = "white") {
  # Dummy quosure binding to silence R CMD check notes; resolved via the data mask.
  TweetSentimentScore <- dplyr::quo(TweetSentimentScore)
  # Bar chart of tweet-level sentiment scores across the whole corpus.
  corpus_plot <- ggplot2::ggplot(DataFrameTidyScores, ggplot2::aes(TweetSentimentScore)) +
    ggplot2::geom_bar(stat = "count", colour = color, fill = fill) +
    ggplot2::theme(legend.position = "none") +
    ggplot2::ggtitle("Sentiment Score Distribution") +
    ggplot2::xlab('Sentiment') +
    ggplot2::ylab('Count')
  return(corpus_plot)
}
#' @title Twitter Hashtag or Topic Distribution
#'
#' @description Determines the scores distribution by hashtag or topic for Twitter data.
#'
#' @param DataFrameTidyScores DataFrame of Twitter Data that has been tidy'd and scored.
#' @param HT_Topic If using hashtag data select: "hashtag". If using topic data select: "topic".
#' @param color The user selected color to highlight the bins.
#' @param fill The interior color of the bins.
#'
#' @import ggplot2
#'
#' @return A facet wrap ggplot.
#'
#' @examples
#' \dontrun{
#' library(SAoTD)
#' data <- raw_tweets
#' tidy_data <- Tidy(DataFrame = data)
#' score_data <- Scores(DataFrameTidy = tidy_data,
#' HT_Topic = "hashtag")
#' Dist <- Distribution(DataFrameTidyScores = score_data,
#' HT_Topic = "hashtag",
#' color = "black",
#' fill = "white")
#' Dist
#' }
#' @export
Distribution <- function(DataFrameTidyScores, HT_Topic, color = "black", fill = "white") {
  # Dummy quosure binding to silence R CMD check notes; resolved via the data mask.
  TweetSentimentScore <- dplyr::quo(TweetSentimentScore)
  # BUG FIX: the original mapped the literal string "TweetSentimentScore" in
  # aes(), which plots one bar for the constant string rather than the score
  # distribution; map the column itself (as Corpus.Distribution does).
  if (HT_Topic == "hashtag") {
    TD_HT_Distribution <- DataFrameTidyScores %>%
      ggplot2::ggplot(ggplot2::aes(TweetSentimentScore)) +
      ggplot2::geom_bar(stat = "count", colour = color, fill = fill) +
      ggplot2::facet_wrap(~hashtag, ncol = 2) +
      ggplot2::theme(legend.position = "none") +
      ggplot2::ggtitle("Sentiment Score Distribution Across all #Hashtags") +
      ggplot2::xlab('Sentiment') +
      ggplot2::ylab('Count')
    return(TD_HT_Distribution)
  } else {
    TD_Topic_Distribution <- DataFrameTidyScores %>%
      ggplot2::ggplot(ggplot2::aes(TweetSentimentScore)) +
      ggplot2::geom_bar(stat = "count", colour = color, fill = fill) +
      ggplot2::facet_wrap(~Topic, ncol = 2) +
      ggplot2::theme(legend.position = "none") +
      ggplot2::ggtitle("Sentiment Score Distribution Across all Topics") +
      ggplot2::xlab('Sentiment') +
      ggplot2::ylab('Count')
    return(TD_Topic_Distribution)
  }
}
#' @title Twitter Data Box Plot
#'
#' @description Displays the distribution scores of either hashtag or topic Twitter data.
#'
#' @param DataFrameTidyScores DataFrame of Twitter Data that has been tidy'd and scored.
#' @param HT_Topic If using hashtag data select: "hashtag". If using topic data select: "topic".
#'
#' @import ggplot2
#' @importFrom dplyr quo
#'
#' @return A ggplot box plot.
#'
#' @examples
#' \dontrun{
#' library(SAoTD)
#' data <- raw_tweets
#' tidy_data <- Tidy(DataFrame = data)
#' score_data <- Scores(DataFrameTidy = tidy_data,
#' HT_Topic = "hashtag")
#' ht_box <- BoxPlot(DataFrameTidyScores = score_data,
#' HT_Topic = "hashtag")
#' ht_box
#'
#' data <- raw_tweets
#' TD_Topics <- SAoTD::Tweet.Topics(DataFrame = data,
#' clusters = 5,
#' method = "Gibbs",
#' set_seed = 1234,
#' num_terms = 10)
#'
#' TD_Topics <- TD_Topics %>%
#' dplyr::mutate(Topic = stringr::str_replace_all(Topic, "^1$", "travel")) %>%
#' dplyr::mutate(Topic = stringr::str_replace_all(Topic, "^2$", "recreation")) %>%
#' dplyr::mutate(Topic = stringr::str_replace_all(Topic, "^3$", "hiring")) %>%
#' dplyr::mutate(Topic = stringr::str_replace_all(Topic, "^4$", "cats")) %>%
#' dplyr::mutate(Topic = stringr::str_replace_all(Topic, "^5$", "random"))
#'
#' tidy_data <- Tidy(DataFrame = TD_Topics)
#' score_data <- Scores(DataFrameTidy = tidy_data,
#' HT_Topic = "topic")
#' topic_box <- BoxPlot(DataFrameTidyScores = score_data,
#' HT_Topic = "topic")
#' topic_box
#' }
#' @export
BoxPlot <- function(DataFrameTidyScores, HT_Topic) {
  # Dummy quosure bindings to silence R CMD check notes; resolved via the data mask.
  hashtag <- dplyr::quo(hashtag)
  # BUG FIX: this binding was a copy-paste of quo(hashtag) in the original;
  # bind the correct name (harmless at runtime due to data masking, but wrong
  # for its R CMD check purpose and misleading to readers).
  TweetSentimentScore <- dplyr::quo(TweetSentimentScore)
  Topic <- dplyr::quo(Topic)
  if (HT_Topic == "hashtag") {
    TD_HT_BoxPlot <- DataFrameTidyScores %>%
      ggplot2::ggplot(ggplot2::aes(hashtag, TweetSentimentScore)) +
      ggplot2::geom_boxplot() +
      ggplot2::theme(legend.position = "none") +
      ggplot2::ggtitle("Sentiment Scores Across each #Hashtag") +
      ggplot2::xlab('#Hashtag') +
      ggplot2::ylab('Sentiment') +
      ggplot2::coord_flip()
    return(TD_HT_BoxPlot)
  } else {
    TD_Topic_BoxPlot <- DataFrameTidyScores %>%
      ggplot2::ggplot(ggplot2::aes(Topic, TweetSentimentScore)) +
      ggplot2::geom_boxplot() +
      ggplot2::theme(legend.position = "none") +
      ggplot2::ggtitle("Sentiment Scores Across each Topic") +
      ggplot2::xlab('Topic') +
      ggplot2::ylab('Sentiment') +
      ggplot2::coord_flip()
    return(TD_Topic_BoxPlot)
  }
}
#' @title Twitter Data Violin Plot
#'
#' @description Displays the distribution scores of either hashtag or topic Twitter data.
#'
#' @param DataFrameTidyScores DataFrame of Twitter Data that has been tidy'd and scored.
#' @param HT_Topic If using hashtag data select: "hashtag". If using topic data select: "topic".
#'
#' @import ggplot2
#' @importFrom dplyr quo
#' @importFrom stats median
#'
#' @return A ggplot violin plot.
#'
#' @examples
#' \dontrun{
#' library(SAoTD)
#' data <- raw_tweets
#' tidy_data <- Tidy(DataFrame = data)
#' score_data <- Scores(DataFrameTidy = tidy_data,
#' HT_Topic = "hashtag")
#' ht_violin <- ViolinPlot(DataFrameTidyScores = score_data,
#' HT_Topic = "hashtag")
#' ht_violin
#'
#' data <- raw_tweets
#' TD_Topics <- SAoTD::Tweet.Topics(DataFrame = data,
#' clusters = 5,
#' method = "Gibbs",
#' set_seed = 1234,
#' num_terms = 10)
#'
#' TD_Topics <- TD_Topics %>%
#' dplyr::mutate(Topic = stringr::str_replace_all(Topic, "^1$", "travel")) %>%
#' dplyr::mutate(Topic = stringr::str_replace_all(Topic, "^2$", "recreation")) %>%
#' dplyr::mutate(Topic = stringr::str_replace_all(Topic, "^3$", "hiring")) %>%
#' dplyr::mutate(Topic = stringr::str_replace_all(Topic, "^4$", "cats")) %>%
#' dplyr::mutate(Topic = stringr::str_replace_all(Topic, "^5$", "random"))
#'
#' tidy_data <- Tidy(DataFrame = TD_Topics)
#' score_data <- Scores(DataFrameTidy = tidy_data,
#' HT_Topic = "topic")
#' topic_violin <- ViolinPlot(DataFrameTidyScores = score_data,
#' HT_Topic = "topic")
#' topic_violin
#' }
#' @export
ViolinPlot <- function(DataFrameTidyScores, HT_Topic) {
  # Dummy quosure bindings to silence R CMD check notes; resolved via the data mask.
  hashtag <- dplyr::quo(hashtag)
  TweetSentimentScore <- dplyr::quo(TweetSentimentScore)
  Topic <- dplyr::quo(Topic)
  # The two branches differ only in the x-axis column and the labels.
  # fun.y is retained (not the newer `fun`) to match the ggplot2 versions this
  # file targets; the median of each group is marked with a diamond.
  if (HT_Topic == "hashtag") {
    violin_plot <- ggplot2::ggplot(DataFrameTidyScores, ggplot2::aes(hashtag, TweetSentimentScore)) +
      ggplot2::geom_violin(scale = "area") +
      ggplot2::stat_summary(fun.y = stats::median, geom = "point", shape = 23, size = 2) +
      ggplot2::ggtitle("Sentiment Scores Across each #Hashtag") +
      ggplot2::xlab('#Hashtag') +
      ggplot2::ylab('Sentiment') +
      ggplot2::coord_flip()
  } else {
    violin_plot <- ggplot2::ggplot(DataFrameTidyScores, ggplot2::aes(Topic, TweetSentimentScore)) +
      ggplot2::geom_violin(scale = "area") +
      ggplot2::stat_summary(fun.y = stats::median, geom = "point", shape = 23, size = 2) +
      ggplot2::ggtitle("Sentiment Scores Across each Topic") +
      ggplot2::xlab('Topic') +
      ggplot2::ylab('Sentiment') +
      ggplot2::coord_flip()
  }
  return(violin_plot)
}
#' @title Twitter Data Timeseries Plot.
#'
#' @description Displays the Twitter data sentiment scores through time. The sentiment scores by hashtag or topic are summed per day and plotted to show the change in sentiment through time.
#'
#' @param DataFrameTidyScores DataFrame of Twitter Data that has been tidy'd and scored.
#' @param HT_Topic If using hashtag data select: "hashtag". If using topic data select: "topic".
#'
#' @importFrom dplyr summarize group_by quo
#' @import ggplot2
#'
#' @return A ggplot plot.
#'
#' @examples
#' \dontrun{
#' library(SAoTD)
#' data <- raw_tweets
#' tidy_data <- Tidy(DataFrame = data)
#' score_data <- Scores(DataFrameTidy = tidy_data,
#' HT_Topic = "hashtag")
#' ht_time <- TimeScale(DataFrameTidyScores = score_data,
#' HT_Topic = "hashtag")
#' ht_time
#'
#' data <- raw_tweets
#' TD_Topics <- SAoTD::Tweet.Topics(DataFrame = data,
#' clusters = 5,
#' method = "Gibbs",
#' set_seed = 1234,
#' num_terms = 10)
#'
#' TD_Topics <- TD_Topics %>%
#' dplyr::mutate(Topic = stringr::str_replace_all(Topic, "^1$", "travel")) %>%
#' dplyr::mutate(Topic = stringr::str_replace_all(Topic, "^2$", "recreation")) %>%
#' dplyr::mutate(Topic = stringr::str_replace_all(Topic, "^3$", "hiring")) %>%
#' dplyr::mutate(Topic = stringr::str_replace_all(Topic, "^4$", "cats")) %>%
#' dplyr::mutate(Topic = stringr::str_replace_all(Topic, "^5$", "random"))
#'
#' tidy_data <- Tidy(DataFrame = TD_Topics)
#' score_data <- Scores(DataFrameTidy = tidy_data,
#' HT_Topic = "topic")
#' topic_time <- TimeScale(DataFrameTidyScores = score_data,
#' HT_Topic = "topic")
#' topic_time
#' }
#' @export
TimeScale <- function(DataFrameTidyScores, HT_Topic) {
  # Dummy quosure bindings to silence R CMD check "no visible binding" notes;
  # inside the dplyr/ggplot2 calls below these names resolve via the data mask.
  hashtag <- dplyr::quo(hashtag)
  TweetSentimentScore <- dplyr::quo(TweetSentimentScore)
  DayScore <- dplyr::quo(DayScore)
  Topic <- dplyr::quo(Topic)
  # Both branches sum the per-tweet sentiment scores by day, then plot one
  # faceted time series per hashtag/topic; they differ only in the grouping
  # column and labels.  `date` is the column added by Scores().
  if(HT_Topic == "hashtag") {
    TD_HT_TimeScale <- DataFrameTidyScores %>%
      dplyr::group_by(hashtag, date) %>%
      dplyr::summarise(DayScore = sum(TweetSentimentScore)) %>%
      # factor(date) gives one discrete tick per day; geom_path(group=1)
      # connects the daily points within each facet.
      ggplot2::ggplot(ggplot2::aes(x = factor(date), y = DayScore, colour = hashtag)) +
      ggplot2::geom_point() +
      ggplot2::geom_path(ggplot2::aes(group=1)) +
      ggplot2::geom_hline(yintercept = 0, color = "black") + # neutral-sentiment reference line
      ggplot2::facet_wrap(~hashtag, ncol = 2, scales = "free_y") +
      ggplot2::theme(legend.position = "none") +
      ggplot2::ggtitle("Sentiment Scores Across all #Hashtags") +
      ggplot2::xlab('Day') +
      ggplot2::ylab('Daily Sentiment Score') +
      ggplot2::theme(axis.text.x = element_text(angle = 45, hjust = 1))
    return(TD_HT_TimeScale)
  } else {
    TD_Topic_TimeScale <- DataFrameTidyScores %>%
      dplyr::group_by(Topic, date) %>%
      dplyr::summarise(DayScore = sum(TweetSentimentScore)) %>%
      ggplot2::ggplot(ggplot2::aes(x = factor(date), y = DayScore, colour = Topic)) +
      ggplot2::geom_point() +
      ggplot2::geom_path(ggplot2::aes(group=1)) +
      ggplot2::geom_hline(yintercept = 0, color = "black") + # neutral-sentiment reference line
      ggplot2::facet_wrap(~Topic, ncol = 2, scales = "free_y") +
      ggplot2::theme(legend.position = "none") +
      ggplot2::ggtitle("Sentiment Scores Across all Topics") +
      ggplot2::xlab('Day') +
      ggplot2::ylab('Daily Sentiment Score') +
      ggplot2::theme(axis.text.x = element_text(angle = 45, hjust = 1))
    return(TD_Topic_TimeScale)
  }
}
#' @title Twitter Data Worldmap Plot.
#'
#' @description Displays the location of a tweet across the globe by hashtag or topic.
#'
#' @param DataFrame DataFrame of Twitter Data that has been tidy'd and scored.
#' @param HT_Topic If using hashtag data select: "hashtag". If using topic data select: "topic".
#'
#' @importFrom ggplot2 ggplot map_data
#' @importFrom dplyr quo
#'
#' @return A ggplot plot.
#'
#' @examples
#' \dontrun{
#' library(SAoTD)
#' data <- raw_tweets
#' tidy_data <- Tidy(DataFrame = data)
#' ht_map <- WorldMap(DataFrame = tidy_data,
#' HT_Topic = "hashtag")
#' ht_map
#'
#' data <- raw_tweets
#' TD_Topics <- SAoTD::Tweet.Topics(DataFrame = data,
#' clusters = 5,
#' method = "Gibbs",
#' set_seed = 1234,
#' num_terms = 10)
#'
#' TD_Topics <- TD_Topics %>%
#' dplyr::mutate(Topic = stringr::str_replace_all(Topic, "^1$", "travel")) %>%
#' dplyr::mutate(Topic = stringr::str_replace_all(Topic, "^2$", "recreation")) %>%
#' dplyr::mutate(Topic = stringr::str_replace_all(Topic, "^3$", "hiring")) %>%
#' dplyr::mutate(Topic = stringr::str_replace_all(Topic, "^4$", "cats")) %>%
#' dplyr::mutate(Topic = stringr::str_replace_all(Topic, "^5$", "random"))
#'
#' tidy_data <- Tidy(DataFrame = TD_Topics)
#' topic_map <- WorldMap(DataFrame = tidy_data,
#' HT_Topic = "topic")
#' topic_map
#' }
#' @export
WorldMap <- function(DataFrame, HT_Topic) {
  # Quosure assignments exist only to silence R CMD check "no visible binding"
  # notes for the NSE column names used inside aes().
  long <- dplyr::quo(long)
  lat <- dplyr::quo(lat)
  group <- dplyr::quo(group)
  longitude <- dplyr::quo(longitude)
  latitude <- dplyr::quo(latitude)
  hashtag <- dplyr::quo(hashtag)
  Topic <- dplyr::quo(Topic)
  if (HT_Topic == "hashtag") {
    # World basemap with one jittered point per tweet, coloured by hashtag.
    TD_HT_WorldMap <- ggplot2::map_data("world") %>%
      ggplot2::ggplot() +
      ggplot2::geom_polygon(ggplot2::aes(x = long, y = lat, group = group), colour = "black", fill = "white") +
      ggplot2::geom_jitter(data = DataFrame,
                           ggplot2::aes(x = as.numeric(longitude),
                                        y = as.numeric(latitude),
                                        colour = hashtag)) +
      ggplot2::ggtitle("World Map of Tweets") +
      ggplot2::theme(legend.position = "bottom") +
      # FIX: guide_legend() was not namespace-qualified and is not listed in
      # this function's @importFrom (only ggplot and map_data are), so the
      # bare call errors when ggplot2 is not attached.
      # NOTE(review): no fill aesthetic is mapped here, so this scale looks
      # like a no-op; kept for backward compatibility -- confirm intent.
      ggplot2::scale_fill_continuous(guide = ggplot2::guide_legend(title = NULL)) +
      ggplot2::coord_quickmap()
    return(TD_HT_WorldMap)
  } else {
    # Same map, coloured by topic instead of hashtag.
    TD_Topic_WorldMap <- ggplot2::map_data("world") %>%
      ggplot2::ggplot() +
      ggplot2::geom_polygon(ggplot2::aes(x = long, y = lat, group = group), colour = "black", fill = "white") +
      ggplot2::geom_jitter(data = DataFrame,
                           ggplot2::aes(x = as.numeric(longitude),
                                        y = as.numeric(latitude),
                                        colour = Topic)) +
      ggplot2::ggtitle("World Map of Tweets") +
      ggplot2::theme(legend.position = "bottom") +
      ggplot2::scale_fill_continuous(guide = ggplot2::guide_legend(title = NULL)) +
      ggplot2::coord_quickmap()
    return(TD_Topic_WorldMap)
  }
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{ip2long}
\alias{ip2long}
\title{Character (dotted-decimal) IPv4 Address Conversion to long integer}
\usage{
ip2long(ip)
}
\arguments{
\item{ip}{input character vector of IPv4 addresses (dotted-decimal)}
}
\value{
vector of equivalent long integer IP addresses
}
\description{
Convert IP addresses in character (dotted-decimal) notation to long integers
}
\examples{
ip2long("24.0.5.11")
ip2long(c("24.0.5.11", "211.3.77.96"))
}
| /man/ip2long.Rd | no_license | rOpenSec/ipv4heatmap | R | false | false | 535 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{ip2long}
\alias{ip2long}
\title{Character (dotted-decimal) IPv4 Address Conversion to long integer}
\usage{
ip2long(ip)
}
\arguments{
\item{ip}{input character vector of IPv4 addresses (dotted-decimal)}
}
\value{
vector of equivalent long integer IP addresses
}
\description{
Convert IP addresses in character (dotted-decimal) notation to long integers
}
\examples{
ip2long("24.0.5.11")
ip2long(c("24.0.5.11", "211.3.77.96"))
}
|
\name{xscat}
\alias{xscat}
\title{Concatenate sequences contained in XString, XStringSet
and/or XStringViews objects}
\description{
This function mimics the semantic of \code{paste(..., sep="")}
but accepts \link{XString}, \link{XStringSet} or \link{XStringViews}
arguments and returns an \link{XString} or \link{XStringSet} object.
}
\usage{
xscat(...)
}
\arguments{
\item{\dots}{
One or more character vectors (with no NAs), \link{XString},
\link{XStringSet} or \link{XStringViews} objects.
}
}
\value{
An \link{XString} object if all the arguments are either \link{XString}
objects or character strings.
An \link{XStringSet} object otherwise.
}
\author{H. Pagès}
\seealso{
\link{XString-class},
\link{XStringSet-class},
\link{XStringViews-class},
\code{\link[base]{paste}}
}
\examples{
## Return a BString object:
xscat(BString("abc"), BString("EF"))
xscat(BString("abc"), "EF")
xscat("abc", "EF")
## Return a BStringSet object:
xscat(BStringSet("abc"), "EF")
## Return a DNAStringSet object:
xscat(c("t", "a"), DNAString("N"))
## Arguments are recycled to the length of the longest argument:
res1a <- xscat("x", LETTERS, c("3", "44", "555"))
res1b <- paste0("x", LETTERS, c("3", "44", "555"))
stopifnot(identical(as.character(res1a), as.character(res1b)))
## Concatenating big XStringSet objects:
library(drosophila2probe)
probes <- DNAStringSet(drosophila2probe)
mm <- complement(narrow(probes, start=13, end=13))
left <- narrow(probes, end=12)
right <- narrow(probes, start=14)
xscat(left, mm, right)
## Collapsing an XStringSet (or XStringViews) object with a small
## number of elements:
probes1000 <- as.list(probes[1:1000])
y1 <- do.call(xscat, probes1000)
y2 <- do.call(c, probes1000) # slightly faster than the above
y1 == y2 # TRUE
## Note that this method won't be efficient when the number of
## elements to collapse is big (> 10000) so we need to provide a
## collapse() (or xscollapse()) function in Biostrings that will be
## efficient at doing this. Please request this on the Bioconductor
## mailing list (http://bioconductor.org/help/mailing-list/) if you
## need it.
}
\keyword{methods}
\keyword{manip}
| /man/xscat.Rd | no_license | Bioconductor/Biostrings | R | false | false | 2,179 | rd | \name{xscat}
\alias{xscat}
\title{Concatenate sequences contained in XString, XStringSet
and/or XStringViews objects}
\description{
This function mimics the semantic of \code{paste(..., sep="")}
but accepts \link{XString}, \link{XStringSet} or \link{XStringViews}
arguments and returns an \link{XString} or \link{XStringSet} object.
}
\usage{
xscat(...)
}
\arguments{
\item{\dots}{
One or more character vectors (with no NAs), \link{XString},
\link{XStringSet} or \link{XStringViews} objects.
}
}
\value{
An \link{XString} object if all the arguments are either \link{XString}
objects or character strings.
An \link{XStringSet} object otherwise.
}
\author{H. Pagès}
\seealso{
\link{XString-class},
\link{XStringSet-class},
\link{XStringViews-class},
\code{\link[base]{paste}}
}
\examples{
## Return a BString object:
xscat(BString("abc"), BString("EF"))
xscat(BString("abc"), "EF")
xscat("abc", "EF")
## Return a BStringSet object:
xscat(BStringSet("abc"), "EF")
## Return a DNAStringSet object:
xscat(c("t", "a"), DNAString("N"))
## Arguments are recycled to the length of the longest argument:
res1a <- xscat("x", LETTERS, c("3", "44", "555"))
res1b <- paste0("x", LETTERS, c("3", "44", "555"))
stopifnot(identical(as.character(res1a), as.character(res1b)))
## Concatenating big XStringSet objects:
library(drosophila2probe)
probes <- DNAStringSet(drosophila2probe)
mm <- complement(narrow(probes, start=13, end=13))
left <- narrow(probes, end=12)
right <- narrow(probes, start=14)
xscat(left, mm, right)
## Collapsing an XStringSet (or XStringViews) object with a small
## number of elements:
probes1000 <- as.list(probes[1:1000])
y1 <- do.call(xscat, probes1000)
y2 <- do.call(c, probes1000) # slightly faster than the above
y1 == y2 # TRUE
## Note that this method won't be efficient when the number of
## elements to collapse is big (> 10000) so we need to provide a
## collapse() (or xscollapse()) function in Biostrings that will be
## efficient at doing this. Please request this on the Bioconductor
## mailing list (http://bioconductor.org/help/mailing-list/) if you
## need it.
}
\keyword{methods}
\keyword{manip}
|
\name{anRpackage-package}
\alias{anRpackage-package}
\alias{anRpackage}
\docType{package}
\title{
\packageTitle{anRpackage}
}
\description{
\packageDescription{anRpackage}
}
\details{
The DESCRIPTION file:
\packageDESCRIPTION{anRpackage}
\packageIndices{anRpackage}
~~ An overview of how to use the package, including the most ~~
~~ important functions ~~
}
\author{
\packageAuthor{anRpackage}
Maintainer: \packageMaintainer{anRpackage}
}
\references{
~~ Literature or other references for background information ~~
}
~~ Optionally other standard keywords, one per line, from ~~
~~ file KEYWORDS in the R documentation directory ~~
\keyword{ package }
\seealso{
~~ Optional links to other man pages, e.g. ~~
~~ \code{\link[<pkg>:<pkg>-package]{<pkg>}} ~~
}
\examples{
~~ simple examples of the most important functions ~~
}
| /anRpackage/man/anRpackage-package.Rd | no_license | Phoebekan/created.r.package | R | false | false | 826 | rd | \name{anRpackage-package}
\alias{anRpackage-package}
\alias{anRpackage}
\docType{package}
\title{
\packageTitle{anRpackage}
}
\description{
\packageDescription{anRpackage}
}
\details{
The DESCRIPTION file:
\packageDESCRIPTION{anRpackage}
\packageIndices{anRpackage}
~~ An overview of how to use the package, including the most ~~
~~ important functions ~~
}
\author{
\packageAuthor{anRpackage}
Maintainer: \packageMaintainer{anRpackage}
}
\references{
~~ Literature or other references for background information ~~
}
~~ Optionally other standard keywords, one per line, from ~~
~~ file KEYWORDS in the R documentation directory ~~
\keyword{ package }
\seealso{
~~ Optional links to other man pages, e.g. ~~
~~ \code{\link[<pkg>:<pkg>-package]{<pkg>}} ~~
}
\examples{
~~ simple examples of the most important functions ~~
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helper_functions.R
\name{square_it_mat}
\alias{square_it_mat}
\title{Expands unit-squares on points.}
\usage{
square_it_mat(X)
}
\arguments{
\item{X}{A data.frame with two columns, named x and y respectively.}
}
\value{
Takes a set of grid points and returns the vertices of the unit
square that has each point as its center.
}
\description{
Expands unit-squares on points.
}
\examples{
X <- data.frame(x = c(1, 1, 1),
y = c(1, 2, 3))
square_it_mat(X)
}
| /man/square_it_mat.Rd | permissive | EmilHvitfeldt/ggtetris | R | false | true | 549 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helper_functions.R
\name{square_it_mat}
\alias{square_it_mat}
\title{Expands unit-squares on points.}
\usage{
square_it_mat(X)
}
\arguments{
\item{X}{A data.frame with two columns, named x and y respectively.}
}
\value{
Takes a set of grid points and returns the vertices of the unit
square that has each point as its center.
}
\description{
Expands unit-squares on points.
}
\examples{
X <- data.frame(x = c(1, 1, 1),
y = c(1, 2, 3))
square_it_mat(X)
}
|
# One-off setup script for the keras R package.
# devtools::install_github("rstudio/keras")
library(keras)

# Install the TensorFlow backend.
# NOTE(review): the first CPU-only install is presumably superseded by the
# GPU build installed on the next line -- confirm both calls are intended.
install_tensorflow()
# FIX: use TRUE, not the reassignable shorthand T.
install_tensorflow(gpu = TRUE)
| /keras.R | no_license | mikeyangpro/Projects | R | false | false | 109 | r | # devtools::install_github("rstudio/keras")
# One-off setup script for the keras R package.
library(keras)

# Install the TensorFlow backend.
# NOTE(review): the first CPU-only install is presumably superseded by the
# GPU build installed on the next line -- confirm both calls are intended.
install_tensorflow()
# FIX: use TRUE, not the reassignable shorthand T.
install_tensorflow(gpu = TRUE)
|
\name{read.simmap}
\alias{read.simmap}
\title{Read SIMMAP style trees from file}
\usage{
read.simmap(file="", text, format="nexus", rev.order=TRUE, version=1)
}
\arguments{
\item{file}{name of text file with one or multiple SIMMAP v1.0 or v1.5 style trees containing the mapped history of a discrete character.}
\item{text}{character string containing the tree. If \code{version=1.5} this argument is ignored. (This format tree can only be read from file in the present version.)}
\item{format}{format of the trees: either \code{"phylip"} or \code{"nexus"} - the latter is the default output from SIMMAP. If \code{version=1.5} this argument is ignored.}
\item{rev.order}{a logical value indicating whether the states and times along each branch is given (from root to tip) in right-to-left order (if TRUE) or in left-to-right order. If \code{version=1.5} this argument is ignored.}
\item{version}{version of SIMMAP for input tree. If the tree(s) was/were simulated in SIMMAP v1.0 or written to file by \code{\link{make.simmap}} then \code{version=1.0}; if the tree(s) was/were simulated using SIMMAP v1.5 then \code{version=1.5}.}
}
\description{
This reads one or multiple SIMMAP style trees from file.
}
\details{
This function now accepts trees in both SIMMAP v1.0 and SIMMAP v1.5 format. In addition, it can read a more flexible format than is produced by SIMMAP (for instance, multi-character mapped states and more than 7 mapped states).
The function uses some modified code from \code{\link{read.nexus}} from the \pkg{ape} package to read the NEXUS block created by SIMMAP. Also creates the attribute \code{"map.order"} which indicates whether the stochastic map was read in from left to right or right to left. This attribute is used by default by \code{\link{write.simmap}} to write the tree in the same order.
}
\value{
An object of class \code{"simmap"} (or list of such objects with class \code{"multiSimmap"}), consisting of a modified object of class \code{"phylo"} with at least the following additional elements:
\item{maps}{a list of named vectors containing the times spent in each state on each branch, in the order in which they occur.}
\item{mapped.edge}{a matrix containing the total time spent in each state along each edge of the tree.}
}
\references{
Bollback, J. P. (2006) Stochastic character mapping of discrete traits on phylogenies. \emph{BMC Bioinformatics}, \bold{7}, 88.
Paradis, E., J. Claude, and K. Strimmer (2004) APE: Analyses of phylogenetics and evolution in R language. \emph{Bioinformatics}, \bold{20}, 289-290.
Revell, L. J. (2012) phytools: An R package for phylogenetic comparative biology (and other things). \emph{Methods Ecol. Evol.}, \bold{3}, 217-223.
}
\author{Liam Revell \email{liam.revell@umb.edu}}
\seealso{
\code{\link{brownie.lite}}, \code{\link{evol.vcv}}, \code{\link{read.tree}}, \code{\link{read.nexus}}
}
\keyword{phylogenetics}
\keyword{comparative method}
\keyword{input/output}
\keyword{discrete character}
| /man/read.simmap.Rd | no_license | cran/phytools | R | false | false | 3,033 | rd | \name{read.simmap}
\alias{read.simmap}
\title{Read SIMMAP style trees from file}
\usage{
read.simmap(file="", text, format="nexus", rev.order=TRUE, version=1)
}
\arguments{
\item{file}{name of text file with one or multiple SIMMAP v1.0 or v1.5 style trees containing the mapped history of a discrete character.}
\item{text}{character string containing the tree. If \code{version=1.5} this argument is ignored. (This format tree can only be read from file in the present version.)}
\item{format}{format of the trees: either \code{"phylip"} or \code{"nexus"} - the latter is the default output from SIMMAP. If \code{version=1.5} this argument is ignored.}
\item{rev.order}{a logical value indicating whether the states and times along each branch is given (from root to tip) in right-to-left order (if TRUE) or in left-to-right order. If \code{version=1.5} this argument is ignored.}
\item{version}{version of SIMMAP for input tree. If the tree(s) was/were simulated in SIMMAP v1.0 or written to file by \code{\link{make.simmap}} then \code{version=1.0}; if the tree(s) was/were simulated using SIMMAP v1.5 then \code{version=1.5}.}
}
\description{
This reads one or multiple SIMMAP style trees from file.
}
\details{
This function now accepts trees in both SIMMAP v1.0 and SIMMAP v1.5 format. In addition, it can read a more flexible format than is produced by SIMMAP (for instance, multi-character mapped states and more than 7 mapped states).
The function uses some modified code from \code{\link{read.nexus}} from the \pkg{ape} package to read the NEXUS block created by SIMMAP. Also creates the attribute \code{"map.order"} which indicates whether the stochastic map was read in from left to right or right to left. This attribute is used by default by \code{\link{write.simmap}} to write the tree in the same order.
}
\value{
An object of class \code{"simmap"} (or list of such objects with class \code{"multiSimmap"}), consisting of a modified object of class \code{"phylo"} with at least the following additional elements:
\item{maps}{a list of named vectors containing the times spent in each state on each branch, in the order in which they occur.}
\item{mapped.edge}{a matrix containing the total time spent in each state along each edge of the tree.}
}
\references{
Bollback, J. P. (2006) Stochastic character mapping of discrete traits on phylogenies. \emph{BMC Bioinformatics}, \bold{7}, 88.
Paradis, E., J. Claude, and K. Strimmer (2004) APE: Analyses of phylogenetics and evolution in R language. \emph{Bioinformatics}, \bold{20}, 289-290.
Revell, L. J. (2012) phytools: An R package for phylogenetic comparative biology (and other things). \emph{Methods Ecol. Evol.}, \bold{3}, 217-223.
}
\author{Liam Revell \email{liam.revell@umb.edu}}
\seealso{
\code{\link{brownie.lite}}, \code{\link{evol.vcv}}, \code{\link{read.tree}}, \code{\link{read.nexus}}
}
\keyword{phylogenetics}
\keyword{comparative method}
\keyword{input/output}
\keyword{discrete character}
|
#' Estabilish a connection to Servolab Database
#'
#' The function connects to sybase using a JDBC driver. It takes in the host IP address
#' @param servolabIPaddress the ipAddress of the server
#' @param username database username
#' @param password database password
#' @import RJDBC
#' @return returns a jdbc connection to Servolab
#' @usage ServolabGetConnection(servolabIPaddress,username,password)
#' @examples
#' host <-"172.11.11.11"
#' us <-"servo"
#' pass <- "pwd"
#' con <- ServolabGetConnection(host,us,pass)
#' @export
ServolabGetConnection <-
  function(servolabIPaddress, username, password) {
    # JDBC() serves two purposes: it initializes the Java VM and loads the
    # jTDS JDBC driver, and it returns a DBI driver proxy object that
    # dbConnect() uses to open the actual connection.
    drv <-
      JDBC(
        driverClass = "net.sourceforge.jtds.jdbc.Driver",
        # The jTDS jar ships inside the installed ServolabR package under java/.
        # file.path() replaces the manual "/" concatenation.
        classPath = file.path(
          path.package("ServolabR", quiet = FALSE),
          "java", "jtds-1.3.1.jar"
        ),
        identifier.quote = "'"
      )
    # Open the connection to the LAB database on the Sybase server (port 5000).
    # The dead "conn <- NULL" initialization that was immediately overwritten
    # has been removed.
    conn <-
      dbConnect(
        drv,
        paste0("jdbc:jtds:sybase://", servolabIPaddress, ":5000/LAB"),
        username,
        password
      )
    return(conn)
  }
| /ServolabR/R/servolabConnection.R | no_license | joebrew/hotspots | R | false | false | 1,382 | r | #' Estabilish a connection to Servolab Database
#'
#' The function connects to sybase using a JDBC driver. It takes in the host IP address
#' @param servolabIPaddress the ipAddress of the server
#' @param username database username
#' @param password database password
#' @import RJDBC
#' @return returns a jdbc connection to Servolab
#' @usage ServolabGetConnection(servolabIPaddress,username,password)
#' @examples
#' host <-"172.11.11.11"
#' us <-"servo"
#' pass <- "pwd"
#' con <- ServolabGetConnection(host,us,pass)
#' @export
ServolabGetConnection <-
  function(servolabIPaddress, username, password) {
    # JDBC() serves two purposes: it initializes the Java VM and loads the
    # jTDS JDBC driver, and it returns a DBI driver proxy object that
    # dbConnect() uses to open the actual connection.
    drv <-
      JDBC(
        driverClass = "net.sourceforge.jtds.jdbc.Driver",
        # The jTDS jar ships inside the installed ServolabR package under java/.
        # file.path() replaces the manual "/" concatenation.
        classPath = file.path(
          path.package("ServolabR", quiet = FALSE),
          "java", "jtds-1.3.1.jar"
        ),
        identifier.quote = "'"
      )
    # Open the connection to the LAB database on the Sybase server (port 5000).
    # The dead "conn <- NULL" initialization that was immediately overwritten
    # has been removed.
    conn <-
      dbConnect(
        drv,
        paste0("jdbc:jtds:sybase://", servolabIPaddress, ":5000/LAB"),
        username,
        password
      )
    return(conn)
  }
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/OnlyTransferredPatients.R
\name{OnlyTransferredPatients}
\alias{OnlyTransferredPatients}
\title{Only transferred patients}
\usage{
OnlyTransferredPatients(study.sample, transfer.variable.name = "tran",
transfer.value = "Yes", remove.missing = TRUE)
}
\arguments{
\item{study.sample}{Data frame. The study sample. No default.}
\item{transfer.variable.name}{Character vector of length 1. The name of the
transfer variable. Defaults to "tran".}
\item{transfer.value}{Character or numeric vector of length 1. The value of
the transfer variable that indicates that a patient was
transferred. Defaults to "Yes".}
\item{remove.missing}{Logical vector of length 1. If TRUE all observations
with missing transfer, as detected by is.na, are removed from the
sample. Defaults to TRUE.}
}
\description{
Keeps only patients who were transferred from another health facility in the
sample.
}
| /man/OnlyTransferredPatients.Rd | permissive | martingerdin/bengaltiger | R | false | true | 962 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/OnlyTransferredPatients.R
\name{OnlyTransferredPatients}
\alias{OnlyTransferredPatients}
\title{Only transferred patients}
\usage{
OnlyTransferredPatients(study.sample, transfer.variable.name = "tran",
transfer.value = "Yes", remove.missing = TRUE)
}
\arguments{
\item{study.sample}{Data frame. The study sample. No default.}
\item{transfer.variable.name}{Character vector of length 1. The name of the
transfer variable. Defaults to "tran".}
\item{transfer.value}{Character or numeric vector of length 1. The value of
the transfer variable that indicates that a patient was
transferred. Defaults to "Yes".}
\item{remove.missing}{Logical vector of length 1. If TRUE all observations
with missing transfer, as detected by is.na, are removed from the
sample. Defaults to TRUE.}
}
\description{
Keeps only patients who were transferred from another health facility in the
sample.
}
|
#' Mice Weight Data for Paired-Samples Mean Test
#'
#' @description Contains the weight of 10 mice before and after a treatment.
#'
#'   A paired-samples t-test can be performed on these data to test whether
#'   the treatment changed the mean weight.
#' @name mice2
#' @docType data
#' @usage data("mice2")
#' @format A data frame with 10 rows and 3 columns
#' @examples
#' data(mice2)
#' head(as.data.frame(mice2))
NULL
| /R/mice2.R | no_license | snijesh/datarium | R | false | false | 371 | r | #' Mice Weight Data for Paired-Samples Mean Test
#'
#'@description contains the weight of 10 mice before and after the treatment.
#'
#' A paired-samples t-test can be performed to answer to this question.
#'@name mice2
#'@docType data
#'@usage data("mice2")
#'@format A data frame with 10 rows and 3 columns
#' @examples
#' data(mice2)
#' head(as.data.frame(mice2))
NULL
|
test_that("KernelRidge", {
  # Check kernel ridge with linear kernel == ridge regression: the linear
  # kernel corresponds to an identity feature map, so both models should give
  # exactly the same predictions on the same data.
  # Make toy dataset: noisy sine curve shifted to a mean of ~10.
  n <- 200
  n_test <- 100
  X_train <- matrix(runif(n, -2,4), nrow = n, ncol = 1)
  y_train <- as.vector(sin(X_train)) + rnorm(n, sd = 0.3) + 10
  X_test <- matrix(seq(from = -2, to = 4, length.out = n_test), nrow = n_test, ncol = 1)
  kr <- KernelRidge$new("linear", lambda = 0.1)
  kr$fit(X_train = X_train, y_train)
  y_hat_linear <- kr$predict(X_test)
  # Ridge regression in closed form: w = (X'X + lambda*I)^-1 X'y, with an
  # explicit intercept column of ones prepended to X.
  X_train <- cbind(1, X_train)
  X_test <- cbind(1, X_test)
  XXt_lamda_I <- crossprod(X_train) + diag(0.1, nrow = ncol(X_train))
  Xyt <- crossprod(X_train, y_train)
  w <- solve(XXt_lamda_I, Xyt)
  result <- c(X_test %*% w)
  expect_equal(result, y_hat_linear)
  # Rough tests that other kernel functions work as expected.
  # my_mat is column-major: rows are (1, 3) and (2, 4).
  my_mat <- matrix(c(1, 2, 3, 4), nrow=2)
  # Expected values are consistent with a row-wise polynomial kernel
  # (x . y + 2)^2: (1*1 + 3*3 + 2)^2 = 144 and (1*2 + 3*4 + 2)^2 = 256.
  kr <- KernelRidge$new("polynomial", lambda = 1, 2, 2)
  similarity <- kr$kernel_function(my_mat[1,], my_mat)
  expect_equal(similarity, c(144, 256))
  # An RBF kernel's similarity of a point with itself is exactly 1.
  kr <- KernelRidge$new("rbf", lambda = 1, 1)
  similarity <- kr$kernel_function(my_mat[1,], my_mat)
  expect_equal(similarity[1], 1)
})
test_that("PoissonGAM", {
  # Build a weekly count data set per community area and FBI crime code.
  df <- test_data %>% convert_dates(exclude = "hour", filter_week = TRUE) %>%
    get_count_data(time_period = "week", region = "community_area",
                   crime_type = "fbi_code")
  X_train <- df[, -5]
  y_train <- df[, 5]
  # Fit through the package's PoissonGAM wrapper, including the spatial
  # neighbourhood structure (include_nb = TRUE).
  pg <- PoissonGAM$new(time_period = "week", region = "community_area",
                       crime_type = "fbi_code", include_nb = TRUE)
  pg$fit(X_train, y_train, n_threads = 7)
  pg$predict(quiet = TRUE)
  # Fit the same model directly with mgcv for comparison: cyclic spline over
  # week, fixed effect for crime code, Markov random field over areas.
  data("community_bounds")
  nb_list <- spdep::poly2nb(community_bounds, row.names = community_bounds$community)
  names(nb_list) <- attr(nb_list, "region.id")
  # FIX: gam.control() was the only mgcv call without the mgcv:: prefix;
  # the bare call fails when mgcv is not attached in the test session.
  gam_fit <- mgcv::gam(n ~ s(as.numeric(week), bs = "cc") + fbi_code +
                         s(community_area, bs = "mrf", xt = list(nb = nb_list)),
                       data = df, family = "poisson",
                       control = mgcv::gam.control(nthreads = 7))
  gam_predict <- predict(gam_fit, type = "response")
  # Expect the fitted model summaries (except formula, which will never be
  # equal) to match ...
  expect_equal(summary(gam_fit)[-12], summary(pg$gam_fitted)[-12])
  # ... and the predicted values to be identical.
  expect_equal(pg$predictions, gam_predict)
})
| /chigcrim/tests/testthat/test-regression.R | no_license | shannon-wms/chicago-crime | R | false | false | 2,447 | r | test_that("KernelRidge", {
# Check kernel ridge with linear kernel == ridge regression
# Make toy dataset
n <- 200
n_test <- 100
X_train <- matrix(runif(n, -2,4), nrow = n, ncol = 1)
y_train <- as.vector(sin(X_train)) + rnorm(n, sd = 0.3) + 10
X_test <- matrix(seq(from = -2, to = 4, length.out = n_test), nrow = n_test, ncol = 1)
kr <- KernelRidge$new("linear", lambda = 0.1)
kr$fit(X_train = X_train, y_train)
y_hat_linear <- kr$predict(X_test)
# Ridge regression
X_train <- cbind(1, X_train)
X_test <- cbind(1, X_test)
XXt_lamda_I <- crossprod(X_train) + diag(0.1, nrow = ncol(X_train))
Xyt <- crossprod(X_train, y_train)
w <- solve(XXt_lamda_I, Xyt)
result <- c(X_test %*% w)
expect_equal(result, y_hat_linear)
# Rough tests that other kernel functions work as expected
my_mat <- matrix(c(1, 2, 3, 4), nrow=2)
kr <- KernelRidge$new("polynomial", lambda = 1, 2, 2)
similarity <- kr$kernel_function(my_mat[1,], my_mat)
expect_equal(similarity, c(144, 256))
kr <- KernelRidge$new("rbf", lambda = 1, 1)
similarity <- kr$kernel_function(my_mat[1,], my_mat)
expect_equal(similarity[1], 1)
})
test_that("PoissonGAM", {
  # Build a weekly count data set per community area and FBI crime code.
  df <- test_data %>% convert_dates(exclude = "hour", filter_week = TRUE) %>%
    get_count_data(time_period = "week", region = "community_area",
                   crime_type = "fbi_code")
  X_train <- df[, -5]
  y_train <- df[, 5]
  # Fit through the package's PoissonGAM wrapper, including the spatial
  # neighbourhood structure (include_nb = TRUE).
  pg <- PoissonGAM$new(time_period = "week", region = "community_area",
                       crime_type = "fbi_code", include_nb = TRUE)
  pg$fit(X_train, y_train, n_threads = 7)
  pg$predict(quiet = TRUE)
  # Fit the same model directly with mgcv for comparison: cyclic spline over
  # week, fixed effect for crime code, Markov random field over areas.
  data("community_bounds")
  nb_list <- spdep::poly2nb(community_bounds, row.names = community_bounds$community)
  names(nb_list) <- attr(nb_list, "region.id")
  # FIX: gam.control() was the only mgcv call without the mgcv:: prefix;
  # the bare call fails when mgcv is not attached in the test session.
  gam_fit <- mgcv::gam(n ~ s(as.numeric(week), bs = "cc") + fbi_code +
                         s(community_area, bs = "mrf", xt = list(nb = nb_list)),
                       data = df, family = "poisson",
                       control = mgcv::gam.control(nthreads = 7))
  gam_predict <- predict(gam_fit, type = "response")
  # Expect the fitted model summaries (except formula, which will never be
  # equal) to match ...
  expect_equal(summary(gam_fit)[-12], summary(pg$gam_fitted)[-12])
  # ... and the predicted values to be identical.
  expect_equal(pg$predictions, gam_predict)
})
|
#' Baixar todas as publicações do dje São Paulo.
#'
#' @param processo número do processo com ou sem pontos e hífen.
#' @param diretorio default para o diretório atual.
#'
#' @details Esta função não é vetorizada.
#' @return publicações em html
#' @export
#'
baixar_publicacoes_dje <- function(processo, diretorio = ".") {
  ## Normalise the lawsuit number to the CNJ format: strip non-digits,
  ## left-pad to 20 digits, then re-insert the standard punctuation.
  processo <- stringr::str_remove_all(processo, "\\D+") %>%
    stringr::str_pad(width = 20, "left", "0") %>%
    abjutils::build_id()
  ## Number of months between January 1st of the lawsuit's filing year
  ## (the 4 digits after position 11 of the formatted number) and today.
  inicio <- stringr::str_extract(processo, "(?<=.{11})\\d{4}") %>%
    paste0(., "-01-01") %>%
    lubridate::ymd() %>%
    lubridate::interval(lubridate::today()) %>%
    lubridate::time_length("month")
  ## First day of every month from the start up to the month before the
  ## current one.
  ## NOTE(review): assumes inicio >= 1; for a lawsuit filed in the current
  ## January, 1:inicio could yield c(1, 0) -- TODO confirm.
  inicios <- lubridate::floor_date(lubridate::today(), "month") - months(1:inicio)
  ## Last day of each of those months.
  finais <- lubridate::ceiling_date(inicios, "months") - 1
  ## Convert to the dd/mm/yyyy format expected by the DJE search form.
  inicios <- format(inicios, "%d/%m/%Y")
  ## idem
  finais <- format(finais, "%d/%m/%Y")
  ## Digits-only copy of the lawsuit number, used to name the output files
  ## further below.
  p <- stringr::str_remove_all(processo, "\\D+")
  ## Wrap the lawsuit number in quotes so the free-text search matches it
  ## as an exact phrase.
  processo <- deparse(processo)
  ## Issue one POST request per month.
  purrr::walk2(inicios, finais, ~ {
    body <- list(
      dadosConsulta.dtInicio = .x,
      dadosConsulta.dtFim = .y,
      dadosConsulta.cdCaderno = "-11",
      dadosConsulta.pesquisaLivre = processo,
      pagina = ""
    )
    ## Build the file name from the month's first day, last day and the
    ## lawsuit number.
    i <- stringr::str_replace_all(.x, "/", "_")
    f <- stringr::str_replace_all(.y, "/", "_")
    arquivo <- sprintf("%s/%s_%s_%s.html", diretorio, i, f, p)
    content <-
      httr::POST(
        "http://www.dje.tjsp.jus.br/cdje/consultaAvancada.do",
        encode = "form",
        body = body
      ) %>%
      httr::content()
    ## Save only the pages in which publications were actually found.
    if (xml2::xml_find_first(content, "boolean(//tr[@class='ementaClass'])")) {
      xml2::write_html(content, arquivo)
    }
  })
}
| /R/baixar_publicacoes_dje.R | permissive | jjesusfilho/tjsp | R | false | false | 2,455 | r | #' Baixar todas as publicações do dje São Paulo.
#'
#' @param processo número do processo com ou sem pontos e hífen.
#' @param diretorio default para o diretório atual.
#'
#' @details Esta função não é vetorizada.
#' @return publicações em html
#' @export
#'
baixar_publicacoes_dje <- function(processo, diretorio = ".") {
  ## Normalise the lawsuit number to the CNJ format: strip non-digits,
  ## left-pad to 20 digits, then re-insert the standard punctuation.
  processo <- stringr::str_remove_all(processo, "\\D+") %>%
    stringr::str_pad(width = 20, "left", "0") %>%
    abjutils::build_id()
  ## Number of months between January 1st of the lawsuit's filing year
  ## (the 4 digits after position 11 of the formatted number) and today.
  inicio <- stringr::str_extract(processo, "(?<=.{11})\\d{4}") %>%
    paste0(., "-01-01") %>%
    lubridate::ymd() %>%
    lubridate::interval(lubridate::today()) %>%
    lubridate::time_length("month")
  ## First day of every month from the start up to the month before the
  ## current one.
  ## NOTE(review): assumes inicio >= 1; for a lawsuit filed in the current
  ## January, 1:inicio could yield c(1, 0) -- TODO confirm.
  inicios <- lubridate::floor_date(lubridate::today(), "month") - months(1:inicio)
  ## Last day of each of those months.
  finais <- lubridate::ceiling_date(inicios, "months") - 1
  ## Convert to the dd/mm/yyyy format expected by the DJE search form.
  inicios <- format(inicios, "%d/%m/%Y")
  ## idem
  finais <- format(finais, "%d/%m/%Y")
  ## Digits-only copy of the lawsuit number, used to name the output files
  ## further below.
  p <- stringr::str_remove_all(processo, "\\D+")
  ## Wrap the lawsuit number in quotes so the free-text search matches it
  ## as an exact phrase.
  processo <- deparse(processo)
  ## Issue one POST request per month.
  purrr::walk2(inicios, finais, ~ {
    body <- list(
      dadosConsulta.dtInicio = .x,
      dadosConsulta.dtFim = .y,
      dadosConsulta.cdCaderno = "-11",
      dadosConsulta.pesquisaLivre = processo,
      pagina = ""
    )
    ## Build the file name from the month's first day, last day and the
    ## lawsuit number.
    i <- stringr::str_replace_all(.x, "/", "_")
    f <- stringr::str_replace_all(.y, "/", "_")
    arquivo <- sprintf("%s/%s_%s_%s.html", diretorio, i, f, p)
    content <-
      httr::POST(
        "http://www.dje.tjsp.jus.br/cdje/consultaAvancada.do",
        encode = "form",
        body = body
      ) %>%
      httr::content()
    ## Save only the pages in which publications were actually found.
    if (xml2::xml_find_first(content, "boolean(//tr[@class='ementaClass'])")) {
      xml2::write_html(content, arquivo)
    }
  })
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.